diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3556e1e..8d5a07b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,10 +10,10 @@ jobs:
steps:
- uses: actions/checkout@v2
- - name: Set up Go 1.14
+ - name: Set up Go
uses: actions/setup-go@v1
with:
- go-version: 1.14
+ go-version: 1.17
- name: Build
run: script/cibuild
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
new file mode 100644
index 0000000..abce58a
--- /dev/null
+++ b/.github/workflows/codeql.yml
@@ -0,0 +1,25 @@
+name: "CodeQL analysis"
+
+on:
+ push:
+ pull_request:
+ schedule:
+ - cron: '0 0 * * 0'
+
+jobs:
+ codeql:
+
+ strategy:
+ fail-fast: false
+
+ runs-on: ubuntu-latest # windows-latest and ubuntu-latest are supported. macos-latest is not supported at this time.
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v2
+
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v1
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v1
diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
new file mode 100644
index 0000000..07fa01d
--- /dev/null
+++ b/.github/workflows/golangci-lint.yml
@@ -0,0 +1,23 @@
+name: golangci-lint
+on:
+ push:
+ branches:
+ - master
+ pull_request:
+permissions:
+ contents: read
+ # Optional: allow read access to pull request. Use with `only-new-issues` option.
+ # pull-requests: read
+jobs:
+ golangci:
+ name: lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/setup-go@v3
+ with:
+ go-version: 1.17
+ - uses: actions/checkout@v3
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v3
+ with:
+ version: v1.46.2
diff --git a/.github/workflows/replica-tests.yml b/.github/workflows/replica-tests.yml
index 31e2052..e28c2bc 100644
--- a/.github/workflows/replica-tests.yml
+++ b/.github/workflows/replica-tests.yml
@@ -6,14 +6,19 @@ jobs:
build:
runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ version: [mysql-5.7.25,mysql-8.0.16]
steps:
- uses: actions/checkout@v2
- - name: Set up Go 1.14
+ - name: Set up Go
uses: actions/setup-go@v1
with:
- go-version: 1.14
+ go-version: 1.17
- name: migration tests
+ env:
+ TEST_MYSQL_VERSION: ${{ matrix.version }}
run: script/cibuild-gh-ost-replica-tests
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000..4621e5c
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,24 @@
+run:
+ timeout: 5m
+linters:
+ disable:
+ - errcheck
+ enable:
+ - contextcheck
+ - durationcheck
+ - errname
+ - execinquery
+ - gofmt
+ - ifshort
+ - misspell
+ - nilerr
+ - noctx
+ - nolintlint
+ - nosprintfhostport
+ - prealloc
+ - rowserrcheck
+ - sqlclosecheck
+ - unconvert
+ - unused
+ - wastedassign
+ - whitespace
diff --git a/Dockerfile.packaging b/Dockerfile.packaging
index 9c5cd29..be321fa 100644
--- a/Dockerfile.packaging
+++ b/Dockerfile.packaging
@@ -1,6 +1,4 @@
-#
-
-FROM golang:1.14.7
+FROM golang:1.17
RUN apt-get update
RUN apt-get install -y ruby ruby-dev rubygems build-essential
diff --git a/Dockerfile.test b/Dockerfile.test
index 8f56be3..e915576 100644
--- a/Dockerfile.test
+++ b/Dockerfile.test
@@ -1,4 +1,4 @@
-FROM golang:1.14.7
+FROM golang:1.17
LABEL maintainer="github@github.com"
RUN apt-get update
diff --git a/README.md b/README.md
index d4a17e9..e21344d 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# gh-ost
-[![build status](https://travis-ci.org/github/gh-ost.svg)](https://travis-ci.org/github/gh-ost) [![downloads](https://img.shields.io/github/downloads/github/gh-ost/total.svg)](https://github.com/github/gh-ost/releases) [![release](https://img.shields.io/github/release/github/gh-ost.svg)](https://github.com/github/gh-ost/releases)
+[![ci](https://github.com/github/gh-ost/actions/workflows/ci.yml/badge.svg)](https://github.com/github/gh-ost/actions/workflows/ci.yml) [![replica-tests](https://github.com/github/gh-ost/actions/workflows/replica-tests.yml/badge.svg)](https://github.com/github/gh-ost/actions/workflows/replica-tests.yml) [![downloads](https://img.shields.io/github/downloads/github/gh-ost/total.svg)](https://github.com/github/gh-ost/releases) [![release](https://img.shields.io/github/release/github/gh-ost.svg)](https://github.com/github/gh-ost/releases)
#### GitHub's online schema migration for MySQL
@@ -65,6 +65,7 @@ Also see:
- [the fine print](doc/the-fine-print.md)
- [Community questions](https://github.com/github/gh-ost/issues?q=label%3Aquestion)
- [Using `gh-ost` on AWS RDS](doc/rds.md)
+- [Using `gh-ost` on Azure Database for MySQL](doc/azure.md)
## What's in a name?
@@ -94,7 +95,7 @@ Please see [Coding gh-ost](doc/coding-ghost.md) for a guide to getting started d
[Download latest release here](https://github.com/github/gh-ost/releases/latest)
-`gh-ost` is a Go project; it is built with Go `1.14` and above. To build on your own, use either:
+`gh-ost` is a Go project; it is built with Go `1.15` and above. To build on your own, use either:
- [script/build](https://github.com/github/gh-ost/blob/master/script/build) - this is the same build script used by CI hence the authoritative; artifact is `./bin/gh-ost` binary.
- [build.sh](https://github.com/github/gh-ost/blob/master/build.sh) for building `tar.gz` artifacts in `/tmp/gh-ost`
diff --git a/RELEASE_VERSION b/RELEASE_VERSION
index 9084fa2..45a1b3f 100644
--- a/RELEASE_VERSION
+++ b/RELEASE_VERSION
@@ -1 +1 @@
-1.1.0
+1.1.2
diff --git a/build.sh b/build.sh
index b5d4659..b758915 100755
--- a/build.sh
+++ b/build.sh
@@ -18,30 +18,32 @@ function build {
GOOS=$3
GOARCH=$4
- if ! go version | egrep -q 'go(1\.1[456])' ; then
- echo "go version must be 1.14 or above"
+ if ! go version | egrep -q 'go1\.(1[5-9]|[2-9][0-9]{1})' ; then
+ echo "go version must be 1.15 or above"
exit 1
fi
- echo "Building ${osname} binary"
+ echo "Building ${osname}-${GOARCH} binary"
export GOOS
export GOARCH
go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
if [ $? -ne 0 ]; then
- echo "Build failed for ${osname}"
+ echo "Build failed for ${osname} ${GOARCH}."
exit 1
fi
- (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${timestamp}.tar.gz $target)
+ (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${GOARCH}-${timestamp}.tar.gz $target)
- if [ "$GOOS" == "linux" ] ; then
+ # build RPM and deb for Linux, x86-64 only
+ if [ "$GOOS" == "linux" ] && [ "$GOARCH" == "amd64" ] ; then
echo "Creating Distro full packages"
builddir=$(setuptree)
cp $buildpath/$target $builddir/gh-ost/usr/bin
cd $buildpath
- fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach ' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm .
- fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach ' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
+ fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm --rpm-rpmbuild-define "_build_id_links none" --rpm-os linux .
+ fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
+ cd -
fi
}
@@ -62,10 +64,15 @@ main() {
mkdir -p ${buildpath}
rm -rf ${buildpath:?}/*
build GNU/Linux linux linux amd64
- # build macOS osx darwin amd64
+ build GNU/Linux linux linux arm64
+ build macOS osx darwin amd64
+ build macOS osx darwin arm64
echo "Binaries found in:"
find $buildpath/gh-ost* -type f -maxdepth 1
+
+ echo "Checksums:"
+ (cd $buildpath && shasum -a256 gh-ost* 2>/dev/null)
}
main "$@"
diff --git a/doc/azure.md b/doc/azure.md
new file mode 100644
index 0000000..f544f37
--- /dev/null
+++ b/doc/azure.md
@@ -0,0 +1,26 @@
+`gh-ost` has been updated to work with Azure Database for MySQL; however, since GitHub does not use it, this documentation is community driven, so if you find a bug please [open an issue][new_issue]!
+
+# Azure Database for MySQL
+
+## Limitations
+
+- `gh-ost` runs should be set up to use [`--assume-rbr`][assume_rbr_docs] and use `binlog_row_image=FULL`.
+- Azure Database for MySQL does not use the same user name suffix for master and replica, so the master host, user, and password must be specified explicitly.
+
+## Steps
+1. Change the replica server's `binlog_row_image` from `MINIMAL` to `FULL`. See the [guide](https://docs.microsoft.com/en-us/azure/mysql/howto-server-parameters) in the Azure documentation.
+2. Always run `gh-ost` with these 5 additional parameters:
+```{bash}
+gh-ost \
+--azure \
+--assume-master-host=master-server-dns-name \
+--master-user="master-user-name" \
+--master-password="master-password" \
+--assume-rbr \
+[-- other parameters you need]
+```
+
+
+[new_issue]: https://github.com/github/gh-ost/issues/new
+[assume_rbr_docs]: https://github.com/github/gh-ost/blob/master/doc/command-line-flags.md#assume-rbr
+[migrate_test_on_replica_docs]: https://github.com/github/gh-ost/blob/master/doc/cheatsheet.md#c-migratetest-on-replica
\ No newline at end of file
diff --git a/doc/coding-ghost.md b/doc/coding-ghost.md
index ee26f0c..24425e3 100644
--- a/doc/coding-ghost.md
+++ b/doc/coding-ghost.md
@@ -5,7 +5,7 @@
Getting started with gh-ost development is simple!
- First obtain the repository with `git clone` or `go get`.
-- From inside of the repository run `script/cibuild`
+- From inside of the repository run `script/cibuild`.
- This will bootstrap the environment if needed, format the code, build the code, and then run the unit test.
## CI build workflow
@@ -14,6 +14,12 @@ Getting started with gh-ost development is simple!
If additional steps are needed, please add them into this workflow so that the workflow remains simple.
+## `golang-ci` linter
+
+To enforce best practices, Pull Requests are automatically linted by [`golang-ci`](https://golangci-lint.run/). The linter config is located at [`.golangci.yml`](https://github.com/github/gh-ost/blob/master/.golangci.yml) and the `golangci-lint` GitHub Action is located at [`.github/workflows/golangci-lint.yml`](https://github.com/github/gh-ost/blob/master/.github/workflows/golangci-lint.yml).
+
+To run the `golang-ci` linters locally _(recommended before push)_, use `script/lint`.
+
## Notes:
Currently, `script/ensure-go-installed` will install `go` for Mac OS X and Linux. We welcome PR's to add other platforms.
diff --git a/doc/command-line-flags.md b/doc/command-line-flags.md
index 81161db..1d3c6f4 100644
--- a/doc/command-line-flags.md
+++ b/doc/command-line-flags.md
@@ -6,6 +6,14 @@ A more in-depth discussion of various `gh-ost` command line flags: implementatio
Add this flag when executing on Aliyun RDS.
+### allow-zero-in-date
+
+Allows the user to make schema changes that include a zero date or zero in date (e.g. adding a `datetime default '0000-00-00 00:00:00'` column), even if global `sql_mode` on MySQL has `NO_ZERO_IN_DATE,NO_ZERO_DATE`.
+
+### azure
+
+Add this flag when executing on Azure Database for MySQL.
+
### allow-master-master
See [`--assume-master-host`](#assume-master-host).
@@ -18,7 +26,7 @@ If, for some reason, you do not wish `gh-ost` to connect to a replica, you may c
### approve-renamed-columns
-When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise the new structure may also look like some column was dropped and another was added.
+When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise, the new structure may also look like some column was dropped and another was added.
`gh-ost` will print out what it thinks the _rename_ implied, but will not issue the migration unless you provide with `--approve-renamed-columns`.
@@ -28,7 +36,7 @@ If you think `gh-ost` is mistaken and that there's actually no _rename_ involved
`gh-ost` infers the identity of the master server by crawling up the replication topology. You may explicitly tell `gh-ost` the identity of the master host via `--assume-master-host=the.master.com`. This is useful in:
-- _master-master_ topologies (together with [`--allow-master-master`](#allow-master-master)), where `gh-ost` can arbitrarily pick one of the co-masters and you prefer that it picks a specific one
+- _master-master_ topologies (together with [`--allow-master-master`](#allow-master-master)), where `gh-ost` can arbitrarily pick one of the co-masters, and you prefer that it picks a specific one
- _tungsten replicator_ topologies (together with [`--tungsten`](#tungsten)), where `gh-ost` is unable to crawl and detect the master
### assume-rbr
@@ -57,7 +65,13 @@ Comma delimited status-name=threshold, same format as [`--max-load`](#max-load).
`--critical-load` defines a threshold that, when met, `gh-ost` panics and bails out. The default behavior is to bail out immediately when meeting this threshold.
-This may sometimes lead to migrations bailing out on a very short spike, that, while in itself is impacting production and is worth investigating, isn't reason enough to kill a 10 hour migration.
+This may sometimes lead to migrations bailing out on a very short spike, that, while in itself is impacting production and is worth investigating, isn't reason enough to kill a 10-hour migration.
+
+### critical-load-hibernate-seconds
+
+When `--critical-load-hibernate-seconds` is non-zero (e.g. `--critical-load-hibernate-seconds=300`), `critical-load` does not panic and bail out; instead, `gh-ost` goes into hibernation for the specified duration. It will not read/write anything from/to any server during this time. Execution then continues upon waking from hibernation.
+
+If `critical-load` is met again, `gh-ost` will repeat this cycle, and never panic and bail out.
### critical-load-interval-millis
@@ -94,7 +108,7 @@ Noteworthy is that setting `--dml-batch-size` to higher value _does not_ mean `g
### exact-rowcount
-A `gh-ost` execution need to copy whatever rows you have in your existing table onto the ghost table. This can, and often be, a large number. Exactly what that number is?
+A `gh-ost` execution needs to copy whatever rows you have in your existing table onto the ghost table. This can, and often will be, a large number. Exactly what that number is?
`gh-ost` initially estimates the number of rows in your table by issuing an `explain select * from your_table`. This will use statistics on your table and return with a rough estimate. How rough? It might go as low as half or as high as double the actual number of rows in your table. This is the same method as used in [`pt-online-schema-change`](https://www.percona.com/doc/percona-toolkit/2.2/pt-online-schema-change.html).
`gh-ost` also supports the `--exact-rowcount` flag. When this flag is given, two things happen:
@@ -131,6 +145,10 @@ Add this flag when executing on a 1st generation Google Cloud Platform (GCP).
Default 100. See [`subsecond-lag`](subsecond-lag.md) for details.
+### hooks-status-interval
+
+Defaults to 60 seconds. Configures how often the `gh-ost-on-status` hook is called, see [`hooks`](hooks.md) for full details on how to use hooks.
+
### initially-drop-ghost-table
`gh-ost` maintains two tables while migrating: the _ghost_ table (which is synced from your original table and finally replaces it) and a changelog table, which is used internally for bookkeeping. By default, it panics and aborts if it sees those tables upon startup. Provide `--initially-drop-ghost-table` and `--initially-drop-old-table` to let `gh-ost` know it's OK to drop them beforehand.
@@ -177,6 +195,9 @@ Optionally involve the process ID, for example: `--replica-server-id=$((10000000
It's on you to choose a number that does not collide with another `gh-ost` or another running replica.
See also: [`concurrent-migrations`](cheatsheet.md#concurrent-migrations) on the cheatsheet.
+### serve-socket-file
+
+Defines the Unix socket file to serve on. Defaults to an auto-determined file whose path is advertised upon startup.
### skip-foreign-key-checks
By default `gh-ost` verifies no foreign keys exist on the migrated table. On servers with large number of tables this check can take a long time. If you're absolutely certain no foreign keys exist (table does not reference other table nor is referenced by other tables) and wish to save the check time, provide with `--skip-foreign-key-checks`.
@@ -223,7 +244,15 @@ Provide a command delimited list of replicas; `gh-ost` will throttle when any of
### throttle-http
-Provide a HTTP endpoint; `gh-ost` will issue `HEAD` requests on given URL and throttle whenever response status code is not `200`. The URL can be queried and updated dynamically via [interactive commands](interactive-commands.md). Empty URL disables the HTTP check.
+Provide an HTTP endpoint; `gh-ost` will issue `HEAD` requests on given URL and throttle whenever response status code is not `200`. The URL can be queried and updated dynamically via [interactive commands](interactive-commands.md). Empty URL disables the HTTP check.
+
+### throttle-http-interval-millis
+
+Defaults to 100. Configures the HTTP throttle check interval in milliseconds.
+
+### throttle-http-timeout-millis
+
+Defaults to 1000 (1 second). Configures the HTTP throttler check timeout in milliseconds.
### timestamp-old-table
diff --git a/doc/hooks.md b/doc/hooks.md
index 4c49c85..c1fe594 100644
--- a/doc/hooks.md
+++ b/doc/hooks.md
@@ -66,7 +66,9 @@ The following variables are available on all hooks:
- `GH_OST_ESTIMATED_ROWS` - estimated total rows in table
- `GH_OST_COPIED_ROWS` - number of rows copied by `gh-ost`
- `GH_OST_INSPECTED_LAG` - lag in seconds (floating point) of inspected server
+- `GH_OST_HEARTBEAT_LAG` - lag in seconds (floating point) of heartbeat
- `GH_OST_PROGRESS` - progress pct ([0..100], floating point) of migration
+- `GH_OST_ETA_SECONDS` - estimated duration until migration finishes in seconds
- `GH_OST_MIGRATED_HOST`
- `GH_OST_INSPECTED_HOST`
- `GH_OST_EXECUTING_HOST`
diff --git a/doc/interactive-commands.md b/doc/interactive-commands.md
index 591aa49..7ad44f1 100644
--- a/doc/interactive-commands.md
+++ b/doc/interactive-commands.md
@@ -18,6 +18,8 @@ Both interfaces may serve at the same time. Both respond to simple text command,
- `status`: returns a detailed status summary of migration progress and configuration
- `sup`: returns a brief status summary of migration progress
- `coordinates`: returns recent (though not exactly up to date) binary log coordinates of the inspected server
+- `applier`: returns the hostname of the applier
+- `inspector`: returns the hostname of the inspector
- `chunk-size=`: modify the `chunk-size`; applies on next running copy-iteration
- `dml-batch-size=`: modify the `dml-batch-size`; applies on next applying of binary log events
- `max-lag-millis=`: modify the maximum replication lag threshold (milliseconds, minimum value is `100`, i.e. `0.1` second)
diff --git a/doc/requirements-and-limitations.md b/doc/requirements-and-limitations.md
index f618af6..88642dc 100644
--- a/doc/requirements-and-limitations.md
+++ b/doc/requirements-and-limitations.md
@@ -2,6 +2,8 @@
### Requirements
+- `gh-ost` currently requires MySQL versions 5.7 and greater.
+
- You will need to have one server serving Row Based Replication (RBR) format binary logs. Right now `FULL` row image is supported. `MINIMAL` to be supported in the near future. `gh-ost` prefers to work with replicas. You may [still have your master configured with Statement Based Replication](migrating-with-sbr.md) (SBR).
- If you are using a replica, the table must have an identical schema between the master and replica.
@@ -18,6 +20,8 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
- Switching your `binlog_format` to `ROW`, in the case where it is _not_ `ROW` and you explicitly specified `--switch-to-rbr`
- If your replication is already in RBR (`binlog_format=ROW`) you can specify `--assume-rbr` to avoid the `STOP SLAVE/START SLAVE` operations, hence no need for `SUPER`.
+- `gh-ost` uses the `REPEATABLE_READ` transaction isolation level for all MySQL connections, regardless of the server default.
+
- Running `--test-on-replica`: before the cut-over phase, `gh-ost` stops replication so that you can compare the two tables and satisfy that the migration is sound.
### Limitations
@@ -41,6 +45,7 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
- Amazon RDS works, but has its own [limitations](rds.md).
- Google Cloud SQL works, `--gcp` flag required.
- Aliyun RDS works, `--aliyun-rds` flag required.
+- Azure Database for MySQL works, `--azure` flag required; see the [detailed documentation](azure.md) about it.
- Multisource is not supported when migrating via replica. It _should_ work (but never tested) when connecting directly to master (`--allow-on-master`)
diff --git a/doc/throttle.md b/doc/throttle.md
index 2ebc2ee..8f06b2a 100644
--- a/doc/throttle.md
+++ b/doc/throttle.md
@@ -38,7 +38,7 @@ Note that you may dynamically change both `--max-lag-millis` and the `throttle-c
`--max-load='Threads_running=100,Threads_connected=500'`
- Metrics must be valid, numeric [status variables](http://dev.mysql.com/doc/refman/5.6/en/server-status-variables.html)
+ Metrics must be valid, numeric [status variables](https://dev.mysql.com/doc/refman/5.7/en/server-status-variables.html)
#### Throttle query
@@ -97,7 +97,7 @@ Copy: 0/2915 0.0%; Applied: 0; Backlog: 0/100; Elapsed: 42s(copy), 42s(total); s
Throttling time is limited by the availability of the binary logs. When throttling begins, `gh-ost` suspends reading the binary logs, and expects to resume reading from same binary log where it paused.
-Your availability of binary logs is typically determined by the [expire_logs_days](https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_expire_logs_days) variable. If you have `expire_logs_days = 10` (or check `select @@global.expire_logs_days`), then you should be able to throttle for up to `10` days.
+Your availability of binary logs is typically determined by the [expire_logs_days](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_expire_logs_days) variable. If you have `expire_logs_days = 10` (or check `select @@global.expire_logs_days`), then you should be able to throttle for up to `10` days.
Having said that, throttling for so long is far fetching, in that the `gh-ost` process itself must be kept alive during that time; and the amount of binary logs to process once it resumes will potentially take days to replay.
diff --git a/doc/triggerless-design.md b/doc/triggerless-design.md
index 510a301..10a4203 100644
--- a/doc/triggerless-design.md
+++ b/doc/triggerless-design.md
@@ -112,7 +112,7 @@ It is also interesting to observe that `gh-ost` is the only application writing
When `gh-ost` pauses (throttles), it issues no writes on the ghost table. Because there are no triggers, write workload is decoupled from the `gh-ost` write workload. And because we're using an asynchronous approach, the algorithm already handles a time difference between a master write time and the ghost apply time. A difference of a few microseconds is no different from a difference of minutes or hours.
-When `gh-ost` [throttles](throttle.md), either by replication lag, `max-load` setting or and explicit [interactive user command](interactive-commands.md), the master is back to normal. It sees no more writes on the ghost table.
+When `gh-ost` [throttles](throttle.md), either by replication lag, `max-load` setting or an explicit [interactive user command](interactive-commands.md), the master is back to normal. It sees no more writes on the ghost table.
An exception is the ongoing heartbeat writes onto the changelog table, which we consider to be negligible.
#### Testability
diff --git a/doc/why-triggerless.md b/doc/why-triggerless.md
index 2ea8c81..1e7d97a 100644
--- a/doc/why-triggerless.md
+++ b/doc/why-triggerless.md
@@ -7,7 +7,7 @@ Existing MySQL schema migration tools:
- [LHM](https://github.com/soundcloud/lhm)
- [oak-online-alter-table](https://github.com/shlomi-noach/openarkkit)
-are all using [triggers](http://dev.mysql.com/doc/refman/5.6/en/triggers.html) to propagate live changes on your table onto a ghost/shadow table that is slowly being synchronized. The tools not all work the same: while most use a synchronous approach (all changes applied on the ghost table), the Facebook tool uses an asynchronous approach (changes are appended to a changelog table, later reviewed and applied on ghost table).
+are all using [triggers](https://dev.mysql.com/doc/refman/5.7/en/triggers.html) to propagate live changes on your table onto a ghost/shadow table that is slowly being synchronized. The tools not all work the same: while most use a synchronous approach (all changes applied on the ghost table), the Facebook tool uses an asynchronous approach (changes are appended to a changelog table, later reviewed and applied on ghost table).
Use of triggers simplifies a lot of the flow in doing a live table migration, but also poses some limitations or difficulties. Here are reasons why we choose to [design a triggerless solution](triggerless-design.md) to schema migrations.
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..885d97e
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,27 @@
+module github.com/github/gh-ost
+
+go 1.17
+
+require (
+ github.com/go-ini/ini v1.62.0
+ github.com/go-mysql-org/go-mysql v1.3.0
+ github.com/go-sql-driver/mysql v1.6.0
+ github.com/openark/golib v0.0.0-20210531070646-355f37940af8
+ github.com/satori/go.uuid v1.2.0
+ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
+ golang.org/x/net v0.0.0-20210224082022-3d97a244fca7
+ golang.org/x/text v0.3.6
+)
+
+require (
+ github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 // indirect
+ github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
+ github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect
+ github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 // indirect
+ github.com/smartystreets/goconvey v1.6.4 // indirect
+ go.uber.org/atomic v1.7.0 // indirect
+ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
+ golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
+ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+ gopkg.in/ini.v1 v1.62.0 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..6b44084
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,136 @@
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
+github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
+github.com/cznic/parser v0.0.0-20160622100904-31edd927e5b1/go.mod h1:2B43mz36vGZNZEwkWi8ayRSSUXLfjL8OkbzwW4NcPMM=
+github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
+github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
+github.com/cznic/y v0.0.0-20170802143616-045f81c6662a/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/go-ini/ini v1.62.0 h1:7VJT/ZXjzqSrvtraFp4ONq80hTcRQth1c9ZnQ3uNQvU=
+github.com/go-ini/ini v1.62.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-mysql-org/go-mysql v1.3.0 h1:lpNqkwdPzIrYSZGdqt8HIgAXZaK6VxBNfr8f7Z4FgGg=
+github.com/go-mysql-org/go-mysql v1.3.0/go.mod h1:3lFZKf7l95Qo70+3XB2WpiSf9wu2s3na3geLMaIIrqQ=
+github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/openark/golib v0.0.0-20210531070646-355f37940af8 h1:9ciIHNuyFqRWi9NpMNw9sVLB6z1ItpP5ZhTY9Q1xVu4=
+github.com/openark/golib v0.0.0-20210531070646-355f37940af8/go.mod h1:1jj8x1eDVZxgc/Z4VyamX4qTbAdHPUQA6NeVtCd8Sl8=
+github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=
+github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
+github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
+github.com/pingcap/errors v0.11.5-0.20201029093017-5a7df2af2ac7/go.mod h1:G7x87le1poQzLB/TqvTJI2ILrSgobnq4Ut7luOwvfvI=
+github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 h1:LllgC9eGfqzkfubMgjKIDyZYaa609nNWAyNZtpy2B3M=
+github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3/go.mod h1:G7x87le1poQzLB/TqvTJI2ILrSgobnq4Ut7luOwvfvI=
+github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
+github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
+github.com/pingcap/parser v0.0.0-20210415081931-48e7f467fd74/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
+github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=
+github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
+github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 h1:oI+RNwuC9jF2g2lP0u0cVEEZrc/AYBCuFdvwrLWM/6Q=
+github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07/go.mod h1:yFdBgwXP24JziuRl2NMUahT7nGLNOKi1SIiFxMttVD4=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
+go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM=
+golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
+golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20201125231158-b5590deeca9b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
+gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
diff --git a/go/base/context.go b/go/base/context.go
index cee66ef..f3fe712 100644
--- a/go/base/context.go
+++ b/go/base/context.go
@@ -1,5 +1,5 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -15,14 +15,13 @@ import (
"sync/atomic"
"time"
- "github.com/satori/go.uuid"
+ uuid "github.com/satori/go.uuid"
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
- "github.com/outbrain/golib/log"
+ "github.com/openark/golib/log"
- "gopkg.in/gcfg.v1"
- gcfgscanner "gopkg.in/gcfg.v1/scanner"
+ "github.com/go-ini/ini"
)
// RowsEstimateMethod is the type of row number estimation
@@ -52,6 +51,7 @@ const (
const (
HTTPStatusOK = 200
MaxEventsBatchSize = 1000
+ ETAUnknown = math.MinInt64
)
var (
@@ -82,6 +82,8 @@ type MigrationContext struct {
AlterStatement string
AlterStatementOptions string // anything following the 'ALTER TABLE [schema.]table' from AlterStatement
+ countMutex sync.Mutex
+ countTableRowsCancelFunc func()
CountTableRows bool
ConcurrentCountTableRows bool
AllowedRunningOnMaster bool
@@ -90,6 +92,7 @@ type MigrationContext struct {
AssumeRBR bool
SkipForeignKeyChecks bool
SkipStrictMode bool
+ AllowZeroInDate bool
NullableUniqueKeyAllowed bool
ApproveRenamedColumns bool
SkipRenamedColumns bool
@@ -97,6 +100,7 @@ type MigrationContext struct {
DiscardForeignKeys bool
AliyunRDS bool
GoogleCloudPlatform bool
+ AzureMySQL bool
config ContextConfig
configMutex *sync.Mutex
@@ -139,6 +143,7 @@ type MigrationContext struct {
HooksHintMessage string
HooksHintOwner string
HooksHintToken string
+ HooksStatusIntervalSec int64
DropServeSocket bool
ServeSocketFile string
@@ -177,9 +182,14 @@ type MigrationContext struct {
RenameTablesEndTime time.Time
pointOfInterestTime time.Time
pointOfInterestTimeMutex *sync.Mutex
+ lastHeartbeatOnChangelogTime time.Time
+ lastHeartbeatOnChangelogMutex *sync.Mutex
CurrentLag int64
currentProgress uint64
+ etaNanoseonds int64
+ ThrottleHTTPIntervalMillis int64
ThrottleHTTPStatusCode int64
+ ThrottleHTTPTimeoutMillis int64
controlReplicasLagResult mysql.ReplicationLagResult
TotalRowsCopied int64
TotalDMLEventsApplied int64
@@ -203,6 +213,7 @@ type MigrationContext struct {
OriginalTableColumns *sql.ColumnList
OriginalTableVirtualColumns *sql.ColumnList
OriginalTableUniqueKeys [](*sql.UniqueKey)
+ OriginalTableAutoIncrement uint64
GhostTableColumns *sql.ColumnList
GhostTableVirtualColumns *sql.ColumnList
GhostTableUniqueKeys [](*sql.UniqueKey)
@@ -263,6 +274,7 @@ func NewMigrationContext() *MigrationContext {
MaxLagMillisecondsThrottleThreshold: 1500,
CutOverLockTimeoutSeconds: 3,
DMLBatchSize: 10,
+ etaNanoseonds: ETAUnknown,
maxLoad: NewLoadMap(),
criticalLoad: NewLoadMap(),
throttleMutex: &sync.Mutex{},
@@ -270,6 +282,7 @@ func NewMigrationContext() *MigrationContext {
throttleControlReplicaKeys: mysql.NewInstanceKeyMap(),
configMutex: &sync.Mutex{},
pointOfInterestTimeMutex: &sync.Mutex{},
+ lastHeartbeatOnChangelogMutex: &sync.Mutex{},
ColumnRenameMap: make(map[string]string),
PanicAbort: make(chan error),
Log: NewDefaultLogger(),
@@ -418,6 +431,36 @@ func (this *MigrationContext) IsTransactionalTable() bool {
return false
}
+// SetCountTableRowsCancelFunc sets the cancel function for the CountTableRows query context
+func (this *MigrationContext) SetCountTableRowsCancelFunc(f func()) {
+ this.countMutex.Lock()
+ defer this.countMutex.Unlock()
+
+ this.countTableRowsCancelFunc = f
+}
+
+// IsCountingTableRows returns true if the migration has a table count query running
+func (this *MigrationContext) IsCountingTableRows() bool {
+ this.countMutex.Lock()
+ defer this.countMutex.Unlock()
+
+ return this.countTableRowsCancelFunc != nil
+}
+
+// CancelTableRowsCount cancels the CountTableRows query context. It is safe to
+// call this function even when IsCountingTableRows is false.
+func (this *MigrationContext) CancelTableRowsCount() {
+ this.countMutex.Lock()
+ defer this.countMutex.Unlock()
+
+ if this.countTableRowsCancelFunc == nil {
+ return
+ }
+
+ this.countTableRowsCancelFunc()
+ this.countTableRowsCancelFunc = nil
+}
+
// ElapsedTime returns time since very beginning of the process
func (this *MigrationContext) ElapsedTime() time.Duration {
return time.Since(this.StartTime)
@@ -453,6 +496,10 @@ func (this *MigrationContext) MarkRowCopyEndTime() {
this.RowCopyEndTime = time.Now()
}
+func (this *MigrationContext) TimeSinceLastHeartbeatOnChangelog() time.Duration {
+ return time.Since(this.GetLastHeartbeatOnChangelogTime())
+}
+
func (this *MigrationContext) GetCurrentLagDuration() time.Duration {
return time.Duration(atomic.LoadInt64(&this.CurrentLag))
}
@@ -465,6 +512,22 @@ func (this *MigrationContext) SetProgressPct(progressPct float64) {
atomic.StoreUint64(&this.currentProgress, math.Float64bits(progressPct))
}
+func (this *MigrationContext) GetETADuration() time.Duration {
+ return time.Duration(atomic.LoadInt64(&this.etaNanoseonds))
+}
+
+func (this *MigrationContext) SetETADuration(etaDuration time.Duration) {
+ atomic.StoreInt64(&this.etaNanoseonds, etaDuration.Nanoseconds())
+}
+
+func (this *MigrationContext) GetETASeconds() int64 {
+ nano := atomic.LoadInt64(&this.etaNanoseonds)
+ if nano < 0 {
+ return ETAUnknown
+ }
+ return nano / int64(time.Second)
+}
+
// math.Float64bits([f=0..100])
// GetTotalRowsCopied returns the accurate number of rows being copied (affected)
@@ -492,6 +555,20 @@ func (this *MigrationContext) TimeSincePointOfInterest() time.Duration {
return time.Since(this.pointOfInterestTime)
}
+func (this *MigrationContext) SetLastHeartbeatOnChangelogTime(t time.Time) {
+ this.lastHeartbeatOnChangelogMutex.Lock()
+ defer this.lastHeartbeatOnChangelogMutex.Unlock()
+
+ this.lastHeartbeatOnChangelogTime = t
+}
+
+func (this *MigrationContext) GetLastHeartbeatOnChangelogTime() time.Time {
+ this.lastHeartbeatOnChangelogMutex.Lock()
+ defer this.lastHeartbeatOnChangelogMutex.Unlock()
+
+ return this.lastHeartbeatOnChangelogTime
+}
+
func (this *MigrationContext) SetHeartbeatIntervalMilliseconds(heartbeatIntervalMilliseconds int64) {
if heartbeatIntervalMilliseconds < 100 {
heartbeatIntervalMilliseconds = 100
@@ -510,8 +587,8 @@ func (this *MigrationContext) SetMaxLagMillisecondsThrottleThreshold(maxLagMilli
}
func (this *MigrationContext) SetChunkSize(chunkSize int64) {
- if chunkSize < 100 {
- chunkSize = 100
+ if chunkSize < 10 {
+ chunkSize = 10
}
if chunkSize > 100000 {
chunkSize = 100000
@@ -765,10 +842,39 @@ func (this *MigrationContext) ReadConfigFile() error {
if this.ConfigFile == "" {
return nil
}
- gcfg.RelaxedParserMode = true
- gcfgscanner.RelaxedScannerMode = true
- if err := gcfg.ReadFileInto(&this.config, this.ConfigFile); err != nil {
- return fmt.Errorf("Error reading config file %s. Details: %s", this.ConfigFile, err.Error())
+ cfg, err := ini.Load(this.ConfigFile)
+ if err != nil {
+ return err
+ }
+
+ if cfg.Section("client").HasKey("user") {
+ this.config.Client.User = cfg.Section("client").Key("user").String()
+ }
+
+ if cfg.Section("client").HasKey("password") {
+ this.config.Client.Password = cfg.Section("client").Key("password").String()
+ }
+
+ if cfg.Section("osc").HasKey("chunk_size") {
+ this.config.Osc.Chunk_Size, err = cfg.Section("osc").Key("chunk_size").Int64()
+ if err != nil {
+ return fmt.Errorf("Unable to read osc chunk size: %s", err.Error())
+ }
+ }
+
+ if cfg.Section("osc").HasKey("max_load") {
+ this.config.Osc.Max_Load = cfg.Section("osc").Key("max_load").String()
+ }
+
+ if cfg.Section("osc").HasKey("replication_lag_query") {
+ this.config.Osc.Replication_Lag_Query = cfg.Section("osc").Key("replication_lag_query").String()
+ }
+
+ if cfg.Section("osc").HasKey("max_lag_millis") {
+ this.config.Osc.Max_Lag_Millis, err = cfg.Section("osc").Key("max_lag_millis").Int64()
+ if err != nil {
+ return fmt.Errorf("Unable to read max lag millis: %s", err.Error())
+ }
}
// We accept user & password in the form "${SOME_ENV_VARIABLE}" in which case we pull
diff --git a/go/base/context_test.go b/go/base/context_test.go
index 8a9c6a5..de208ba 100644
--- a/go/base/context_test.go
+++ b/go/base/context_test.go
@@ -1,16 +1,18 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package base
import (
+ "io/ioutil"
+ "os"
"testing"
"time"
- "github.com/outbrain/golib/log"
- test "github.com/outbrain/golib/tests"
+ "github.com/openark/golib/log"
+ test "github.com/openark/golib/tests"
)
func init() {
@@ -56,3 +58,65 @@ func TestGetTableNames(t *testing.T) {
test.S(t).ExpectEquals(context.GetChangelogTableName(), "_tmp_ghc")
}
}
+
+func TestReadConfigFile(t *testing.T) {
+ {
+ context := NewMigrationContext()
+ context.ConfigFile = "/does/not/exist"
+ if err := context.ReadConfigFile(); err == nil {
+ t.Fatal("Expected .ReadConfigFile() to return an error, got nil")
+ }
+ }
+ {
+ f, err := ioutil.TempFile("", t.Name())
+ if err != nil {
+ t.Fatalf("Failed to create tmp file: %v", err)
+ }
+ defer os.Remove(f.Name())
+
+ f.Write([]byte("[client]"))
+ context := NewMigrationContext()
+ context.ConfigFile = f.Name()
+ if err := context.ReadConfigFile(); err != nil {
+ t.Fatalf(".ReadConfigFile() failed: %v", err)
+ }
+ }
+ {
+ f, err := ioutil.TempFile("", t.Name())
+ if err != nil {
+ t.Fatalf("Failed to create tmp file: %v", err)
+ }
+ defer os.Remove(f.Name())
+
+ f.Write([]byte("[client]\nuser=test\npassword=123456"))
+ context := NewMigrationContext()
+ context.ConfigFile = f.Name()
+ if err := context.ReadConfigFile(); err != nil {
+ t.Fatalf(".ReadConfigFile() failed: %v", err)
+ }
+
+ if context.config.Client.User != "test" {
+ t.Fatalf("Expected client user %q, got %q", "test", context.config.Client.User)
+ } else if context.config.Client.Password != "123456" {
+ t.Fatalf("Expected client password %q, got %q", "123456", context.config.Client.Password)
+ }
+ }
+ {
+ f, err := ioutil.TempFile("", t.Name())
+ if err != nil {
+ t.Fatalf("Failed to create tmp file: %v", err)
+ }
+ defer os.Remove(f.Name())
+
+ f.Write([]byte("[osc]\nmax_load=10"))
+ context := NewMigrationContext()
+ context.ConfigFile = f.Name()
+ if err := context.ReadConfigFile(); err != nil {
+ t.Fatalf(".ReadConfigFile() failed: %v", err)
+ }
+
+ if context.config.Osc.Max_Load != "10" {
+ t.Fatalf("Expected osc 'max_load' %q, got %q", "10", context.config.Osc.Max_Load)
+ }
+ }
+}
diff --git a/go/base/default_logger.go b/go/base/default_logger.go
index be6b1f2..59563ff 100644
--- a/go/base/default_logger.go
+++ b/go/base/default_logger.go
@@ -1,7 +1,12 @@
+/*
+ Copyright 2022 GitHub Inc.
+ See https://github.com/github/gh-ost/blob/master/LICENSE
+*/
+
package base
import (
- "github.com/outbrain/golib/log"
+ "github.com/openark/golib/log"
)
type simpleLogger struct{}
@@ -12,22 +17,18 @@ func NewDefaultLogger() *simpleLogger {
func (*simpleLogger) Debug(args ...interface{}) {
log.Debug(args[0].(string), args[1:])
- return
}
func (*simpleLogger) Debugf(format string, args ...interface{}) {
log.Debugf(format, args...)
- return
}
func (*simpleLogger) Info(args ...interface{}) {
log.Info(args[0].(string), args[1:])
- return
}
func (*simpleLogger) Infof(format string, args ...interface{}) {
log.Infof(format, args...)
- return
}
func (*simpleLogger) Warning(args ...interface{}) error {
@@ -64,10 +65,8 @@ func (*simpleLogger) Fatale(err error) error {
func (*simpleLogger) SetLevel(level log.LogLevel) {
log.SetLevel(level)
- return
}
func (*simpleLogger) SetPrintStackTrace(printStackTraceFlag bool) {
log.SetPrintStackTrace(printStackTraceFlag)
- return
}
diff --git a/go/base/load_map_test.go b/go/base/load_map_test.go
index 3a58e78..fe5b2fa 100644
--- a/go/base/load_map_test.go
+++ b/go/base/load_map_test.go
@@ -8,8 +8,8 @@ package base
import (
"testing"
- "github.com/outbrain/golib/log"
- test "github.com/outbrain/golib/tests"
+ "github.com/openark/golib/log"
+ test "github.com/openark/golib/tests"
)
func init() {
diff --git a/go/base/utils.go b/go/base/utils.go
index 1476a21..e3950f2 100644
--- a/go/base/utils.go
+++ b/go/base/utils.go
@@ -1,5 +1,5 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -13,6 +13,7 @@ import (
"time"
gosql "database/sql"
+
"github.com/github/gh-ost/go/mysql"
)
@@ -24,9 +25,7 @@ func PrettifyDurationOutput(d time.Duration) string {
if d < time.Second {
return "0s"
}
- result := fmt.Sprintf("%s", d)
- result = prettifyDurationRegexp.ReplaceAllString(result, "")
- return result
+ return prettifyDurationRegexp.ReplaceAllString(d.String(), "")
}
func FileExists(fileName string) bool {
@@ -62,7 +61,7 @@ func StringContainsAll(s string, substrings ...string) bool {
return nonEmptyStringsFound
}
-func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext *MigrationContext) (string, error) {
+func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext *MigrationContext, name string) (string, error) {
versionQuery := `select @@global.version`
var port, extraPort int
var version string
@@ -70,12 +69,13 @@ func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig,
return "", err
}
extraPortQuery := `select @@global.extra_port`
- if err := db.QueryRow(extraPortQuery).Scan(&extraPort); err != nil {
+ if err := db.QueryRow(extraPortQuery).Scan(&extraPort); err != nil { // nolint:staticcheck
// swallow this error. not all servers support extra_port
}
// AliyunRDS set users port to "NULL", replace it by gh-ost param
// GCP set users port to "NULL", replace it by gh-ost param
- if migrationContext.AliyunRDS || migrationContext.GoogleCloudPlatform {
+ // Azure MySQL set users port to a different value by design, replace it by gh-ost param
+ if migrationContext.AliyunRDS || migrationContext.GoogleCloudPlatform || migrationContext.AzureMySQL {
port = connectionConfig.Key.Port
} else {
portQuery := `select @@global.port`
@@ -85,7 +85,7 @@ func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig,
}
if connectionConfig.Key.Port == port || (extraPort > 0 && connectionConfig.Key.Port == extraPort) {
- migrationContext.Log.Infof("connection validated on %+v", connectionConfig.Key)
+ migrationContext.Log.Infof("%s connection validated on %+v", name, connectionConfig.Key)
return version, nil
} else if extraPort == 0 {
return "", fmt.Errorf("Unexpected database port reported: %+v", port)
diff --git a/go/base/utils_test.go b/go/base/utils_test.go
index d11cc21..da98aec 100644
--- a/go/base/utils_test.go
+++ b/go/base/utils_test.go
@@ -8,8 +8,8 @@ package base
import (
"testing"
- "github.com/outbrain/golib/log"
- test "github.com/outbrain/golib/tests"
+ "github.com/openark/golib/log"
+ test "github.com/openark/golib/tests"
)
func init() {
diff --git a/go/binlog/gomysql_reader.go b/go/binlog/gomysql_reader.go
index bc80cb5..6b7d06c 100644
--- a/go/binlog/gomysql_reader.go
+++ b/go/binlog/gomysql_reader.go
@@ -1,5 +1,5 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -13,8 +13,8 @@ import (
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
- gomysql "github.com/siddontang/go-mysql/mysql"
- "github.com/siddontang/go-mysql/replication"
+ gomysql "github.com/go-mysql-org/go-mysql/mysql"
+ "github.com/go-mysql-org/go-mysql/replication"
"golang.org/x/net/context"
)
@@ -28,31 +28,24 @@ type GoMySQLReader struct {
LastAppliedRowsEventHint mysql.BinlogCoordinates
}
-func NewGoMySQLReader(migrationContext *base.MigrationContext) (binlogReader *GoMySQLReader, err error) {
- binlogReader = &GoMySQLReader{
+func NewGoMySQLReader(migrationContext *base.MigrationContext) *GoMySQLReader {
+ connectionConfig := migrationContext.InspectorConnectionConfig
+ return &GoMySQLReader{
migrationContext: migrationContext,
- connectionConfig: migrationContext.InspectorConnectionConfig,
+ connectionConfig: connectionConfig,
currentCoordinates: mysql.BinlogCoordinates{},
currentCoordinatesMutex: &sync.Mutex{},
- binlogSyncer: nil,
- binlogStreamer: nil,
+ binlogSyncer: replication.NewBinlogSyncer(replication.BinlogSyncerConfig{
+ ServerID: uint32(migrationContext.ReplicaServerId),
+ Flavor: gomysql.MySQLFlavor,
+ Host: connectionConfig.Key.Hostname,
+ Port: uint16(connectionConfig.Key.Port),
+ User: connectionConfig.User,
+ Password: connectionConfig.Password,
+ TLSConfig: connectionConfig.TLSConfig(),
+ UseDecimal: true,
+ }),
}
-
- serverId := uint32(migrationContext.ReplicaServerId)
-
- binlogSyncerConfig := replication.BinlogSyncerConfig{
- ServerID: serverId,
- Flavor: "mysql",
- Host: binlogReader.connectionConfig.Key.Hostname,
- Port: uint16(binlogReader.connectionConfig.Key.Port),
- User: binlogReader.connectionConfig.User,
- Password: binlogReader.connectionConfig.Password,
- TLSConfig: binlogReader.connectionConfig.TLSConfig(),
- UseDecimal: true,
- }
- binlogReader.binlogSyncer = replication.NewBinlogSyncer(binlogSyncerConfig)
-
- return binlogReader, err
}
// ConnectBinlogStreamer
@@ -64,7 +57,10 @@ func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordin
this.currentCoordinates = coordinates
this.migrationContext.Log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates)
// Start sync with specified binlog file and position
- this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{this.currentCoordinates.LogFile, uint32(this.currentCoordinates.LogPos)})
+ this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{
+ Name: this.currentCoordinates.LogFile,
+ Pos: uint32(this.currentCoordinates.LogPos),
+ })
return err
}
@@ -142,15 +138,17 @@ func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesCha
defer this.currentCoordinatesMutex.Unlock()
this.currentCoordinates.LogPos = int64(ev.Header.LogPos)
}()
- if rotateEvent, ok := ev.Event.(*replication.RotateEvent); ok {
+
+ switch binlogEvent := ev.Event.(type) {
+ case *replication.RotateEvent:
func() {
this.currentCoordinatesMutex.Lock()
defer this.currentCoordinatesMutex.Unlock()
- this.currentCoordinates.LogFile = string(rotateEvent.NextLogName)
+ this.currentCoordinates.LogFile = string(binlogEvent.NextLogName)
}()
- this.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), rotateEvent.NextLogName)
- } else if rowsEvent, ok := ev.Event.(*replication.RowsEvent); ok {
- if err := this.handleRowsEvent(ev, rowsEvent, entriesChannel); err != nil {
+ this.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), binlogEvent.NextLogName)
+ case *replication.RowsEvent:
+ if err := this.handleRowsEvent(ev, binlogEvent, entriesChannel); err != nil {
return err
}
}
diff --git a/go/cmd/gh-ost/main.go b/go/cmd/gh-ost/main.go
index f194508..b99e70b 100644
--- a/go/cmd/gh-ost/main.go
+++ b/go/cmd/gh-ost/main.go
@@ -1,5 +1,5 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -8,6 +8,7 @@ package main
import (
"flag"
"fmt"
+ "net/url"
"os"
"os/signal"
"syscall"
@@ -16,9 +17,9 @@ import (
"github.com/github/gh-ost/go/logic"
"github.com/github/gh-ost/go/sql"
_ "github.com/go-sql-driver/mysql"
- "github.com/outbrain/golib/log"
+ "github.com/openark/golib/log"
- "golang.org/x/crypto/ssh/terminal"
+ "golang.org/x/term"
)
var AppVersion string
@@ -77,8 +78,10 @@ func main() {
flag.BoolVar(&migrationContext.DiscardForeignKeys, "discard-foreign-keys", false, "DANGER! This flag will migrate a table that has foreign keys and will NOT create foreign keys on the ghost table, thus your altered table will have NO foreign keys. This is useful for intentional dropping of foreign keys")
flag.BoolVar(&migrationContext.SkipForeignKeyChecks, "skip-foreign-key-checks", false, "set to 'true' when you know for certain there are no foreign keys on your table, and wish to skip the time it takes for gh-ost to verify that")
flag.BoolVar(&migrationContext.SkipStrictMode, "skip-strict-mode", false, "explicitly tell gh-ost binlog applier not to enforce strict sql mode")
+ flag.BoolVar(&migrationContext.AllowZeroInDate, "allow-zero-in-date", false, "explicitly tell gh-ost binlog applier to ignore NO_ZERO_IN_DATE,NO_ZERO_DATE in sql_mode")
flag.BoolVar(&migrationContext.AliyunRDS, "aliyun-rds", false, "set to 'true' when you execute on Aliyun RDS.")
flag.BoolVar(&migrationContext.GoogleCloudPlatform, "gcp", false, "set to 'true' when you execute on a 1st generation Google Cloud Platform (GCP).")
+ flag.BoolVar(&migrationContext.AzureMySQL, "azure", false, "set to 'true' when you execute on Azure Database on MySQL.")
executeFlag := flag.Bool("execute", false, "actually execute the alter & migrate the table. Default is noop: do some tests and exit")
flag.BoolVar(&migrationContext.TestOnReplica, "test-on-replica", false, "Have the migration run on a replica, not on the master. At the end of migration replication is stopped, and tables are swapped and immediately swap-revert. Replication remains stopped and you can compare the two tables for building trust")
@@ -97,7 +100,7 @@ func main() {
flag.BoolVar(&migrationContext.AssumeRBR, "assume-rbr", false, "set to 'true' when you know for certain your server uses 'ROW' binlog_format. gh-ost is unable to tell, event after reading binlog_format, whether the replication process does indeed use 'ROW', and restarts replication to be certain RBR setting is applied. Such operation requires SUPER privileges which you might not have. Setting this flag avoids restarting replication and you can proceed to use gh-ost without SUPER privileges")
flag.BoolVar(&migrationContext.CutOverExponentialBackoff, "cut-over-exponential-backoff", false, "Wait exponentially longer intervals between failed cut-over attempts. Wait intervals obey a maximum configurable with 'exponential-backoff-max-interval').")
exponentialBackoffMaxInterval := flag.Int64("exponential-backoff-max-interval", 64, "Maximum number of seconds to wait between attempts when performing various operations with exponential backoff.")
- chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 100-100,000)")
+ chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 10-100,000)")
dmlBatchSize := flag.Int64("dml-batch-size", 10, "batch size for DML events to apply in a single transaction (range 1-100)")
defaultRetries := flag.Int64("default-retries", 60, "Default number of retries for various operations before panicking")
cutOverLockTimeoutSeconds := flag.Int64("cut-over-lock-timeout-seconds", 3, "Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout)")
@@ -108,6 +111,8 @@ func main() {
throttleControlReplicas := flag.String("throttle-control-replicas", "", "List of replicas on which to check for lag; comma delimited. Example: myhost1.com:3306,myhost2.com,myhost3.com:3307")
throttleQuery := flag.String("throttle-query", "", "when given, issued (every second) to check if operation should throttle. Expecting to return zero for no-throttle, >0 for throttle. Query is issued on the migrated server. Make sure this query is lightweight")
throttleHTTP := flag.String("throttle-http", "", "when given, gh-ost checks given URL via HEAD request; any response code other than 200 (OK) causes throttling; make sure it has low latency response")
+ flag.Int64Var(&migrationContext.ThrottleHTTPIntervalMillis, "throttle-http-interval-millis", 100, "Number of milliseconds to wait before triggering another HTTP throttle check")
+ flag.Int64Var(&migrationContext.ThrottleHTTPTimeoutMillis, "throttle-http-timeout-millis", 1000, "Number of milliseconds to use as an HTTP throttle check timeout")
ignoreHTTPErrors := flag.Bool("ignore-http-errors", false, "ignore HTTP connection errors during throttle check")
heartbeatIntervalMillis := flag.Int64("heartbeat-interval-millis", 100, "how frequently would gh-ost inject a heartbeat value")
flag.StringVar(&migrationContext.ThrottleFlagFile, "throttle-flag-file", "", "operation pauses when this file exists; hint: use a file that is specific to the table being altered")
@@ -123,13 +128,14 @@ func main() {
flag.StringVar(&migrationContext.HooksHintMessage, "hooks-hint", "", "arbitrary message to be injected to hooks via GH_OST_HOOKS_HINT, for your convenience")
flag.StringVar(&migrationContext.HooksHintOwner, "hooks-hint-owner", "", "arbitrary name of owner to be injected to hooks via GH_OST_HOOKS_HINT_OWNER, for your convenience")
flag.StringVar(&migrationContext.HooksHintToken, "hooks-hint-token", "", "arbitrary token to be injected to hooks via GH_OST_HOOKS_HINT_TOKEN, for your convenience")
+ flag.Int64Var(&migrationContext.HooksStatusIntervalSec, "hooks-status-interval", 60, "how many seconds to wait between calling onStatus hook")
flag.UintVar(&migrationContext.ReplicaServerId, "replica-server-id", 99999, "server id used by gh-ost process. Default: 99999")
maxLoad := flag.String("max-load", "", "Comma delimited status-name=threshold. e.g: 'Threads_running=100,Threads_connected=500'. When status exceeds threshold, app throttles writes")
criticalLoad := flag.String("critical-load", "", "Comma delimited status-name=threshold, same format as --max-load. When status exceeds threshold, app panics and quits")
flag.Int64Var(&migrationContext.CriticalLoadIntervalMilliseconds, "critical-load-interval-millis", 0, "When 0, migration immediately bails out upon meeting critical-load. When non-zero, a second check is done after given interval, and migration only bails out if 2nd check still meets critical load")
- flag.Int64Var(&migrationContext.CriticalLoadHibernateSeconds, "critical-load-hibernate-seconds", 0, "When nonzero, critical-load does not panic and bail out; instead, gh-ost goes into hibernate for the specified duration. It will not read/write anything to from/to any server")
+ flag.Int64Var(&migrationContext.CriticalLoadHibernateSeconds, "critical-load-hibernate-seconds", 0, "When non-zero, critical-load does not panic and bail out; instead, gh-ost goes into hibernation for the specified duration. It will not read/write anything from/to any server")
quiet := flag.Bool("quiet", false, "quiet")
verbose := flag.Bool("verbose", false, "verbose")
debug := flag.Bool("debug", false, "debug mode (very verbose)")
@@ -175,7 +181,7 @@ func main() {
}
if migrationContext.AlterStatement == "" {
- log.Fatalf("--alter must be provided and statement must not be empty")
+ log.Fatal("--alter must be provided and statement must not be empty")
}
parser := sql.NewParserFromAlterStatement(migrationContext.AlterStatement)
migrationContext.AlterStatementOptions = parser.GetAlterStatementOptions()
@@ -184,55 +190,60 @@ func main() {
if parser.HasExplicitSchema() {
migrationContext.DatabaseName = parser.GetExplicitSchema()
} else {
- log.Fatalf("--database must be provided and database name must not be empty, or --alter must specify database name")
+ log.Fatal("--database must be provided and database name must not be empty, or --alter must specify database name")
}
}
+
+ if err := flag.Set("database", url.QueryEscape(migrationContext.DatabaseName)); err != nil {
+ migrationContext.Log.Fatale(err)
+ }
+
if migrationContext.OriginalTableName == "" {
if parser.HasExplicitTable() {
migrationContext.OriginalTableName = parser.GetExplicitTable()
} else {
- log.Fatalf("--table must be provided and table name must not be empty, or --alter must specify table name")
+ log.Fatal("--table must be provided and table name must not be empty, or --alter must specify table name")
}
}
migrationContext.Noop = !(*executeFlag)
if migrationContext.AllowedRunningOnMaster && migrationContext.TestOnReplica {
- migrationContext.Log.Fatalf("--allow-on-master and --test-on-replica are mutually exclusive")
+ migrationContext.Log.Fatal("--allow-on-master and --test-on-replica are mutually exclusive")
}
if migrationContext.AllowedRunningOnMaster && migrationContext.MigrateOnReplica {
- migrationContext.Log.Fatalf("--allow-on-master and --migrate-on-replica are mutually exclusive")
+ migrationContext.Log.Fatal("--allow-on-master and --migrate-on-replica are mutually exclusive")
}
if migrationContext.MigrateOnReplica && migrationContext.TestOnReplica {
- migrationContext.Log.Fatalf("--migrate-on-replica and --test-on-replica are mutually exclusive")
+ migrationContext.Log.Fatal("--migrate-on-replica and --test-on-replica are mutually exclusive")
}
if migrationContext.SwitchToRowBinlogFormat && migrationContext.AssumeRBR {
- migrationContext.Log.Fatalf("--switch-to-rbr and --assume-rbr are mutually exclusive")
+ migrationContext.Log.Fatal("--switch-to-rbr and --assume-rbr are mutually exclusive")
}
if migrationContext.TestOnReplicaSkipReplicaStop {
if !migrationContext.TestOnReplica {
- migrationContext.Log.Fatalf("--test-on-replica-skip-replica-stop requires --test-on-replica to be enabled")
+ migrationContext.Log.Fatal("--test-on-replica-skip-replica-stop requires --test-on-replica to be enabled")
}
migrationContext.Log.Warning("--test-on-replica-skip-replica-stop enabled. We will not stop replication before cut-over. Ensure you have a plugin that does this.")
}
if migrationContext.CliMasterUser != "" && migrationContext.AssumeMasterHostname == "" {
- migrationContext.Log.Fatalf("--master-user requires --assume-master-host")
+ migrationContext.Log.Fatal("--master-user requires --assume-master-host")
}
if migrationContext.CliMasterPassword != "" && migrationContext.AssumeMasterHostname == "" {
- migrationContext.Log.Fatalf("--master-password requires --assume-master-host")
+ migrationContext.Log.Fatal("--master-password requires --assume-master-host")
}
if migrationContext.TLSCACertificate != "" && !migrationContext.UseTLS {
- migrationContext.Log.Fatalf("--ssl-ca requires --ssl")
+ migrationContext.Log.Fatal("--ssl-ca requires --ssl")
}
if migrationContext.TLSCertificate != "" && !migrationContext.UseTLS {
- migrationContext.Log.Fatalf("--ssl-cert requires --ssl")
+ migrationContext.Log.Fatal("--ssl-cert requires --ssl")
}
if migrationContext.TLSKey != "" && !migrationContext.UseTLS {
- migrationContext.Log.Fatalf("--ssl-key requires --ssl")
+ migrationContext.Log.Fatal("--ssl-key requires --ssl")
}
if migrationContext.TLSAllowInsecure && !migrationContext.UseTLS {
- migrationContext.Log.Fatalf("--ssl-allow-insecure requires --ssl")
+ migrationContext.Log.Fatal("--ssl-allow-insecure requires --ssl")
}
if *replicationLagQuery != "" {
- migrationContext.Log.Warningf("--replication-lag-query is deprecated")
+ migrationContext.Log.Warning("--replication-lag-query is deprecated")
}
switch *cutOver {
@@ -260,7 +271,7 @@ func main() {
}
if *askPass {
fmt.Println("Password:")
- bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
+ bytePassword, err := term.ReadPassword(syscall.Stdin)
if err != nil {
migrationContext.Log.Fatale(err)
}
@@ -289,11 +300,10 @@ func main() {
log.Infof("starting gh-ost %+v", AppVersion)
acceptSignals(migrationContext)
- migrator := logic.NewMigrator(migrationContext)
- err := migrator.Migrate()
- if err != nil {
+ migrator := logic.NewMigrator(migrationContext, AppVersion)
+ if err := migrator.Migrate(); err != nil {
migrator.ExecOnFailureHook()
migrationContext.Log.Fatale(err)
}
- fmt.Fprintf(os.Stdout, "# Done\n")
+ fmt.Fprintln(os.Stdout, "# Done")
}
diff --git a/go/logic/applier.go b/go/logic/applier.go
index 8aa1c9c..d81f075 100644
--- a/go/logic/applier.go
+++ b/go/logic/applier.go
@@ -1,5 +1,5 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -8,6 +8,7 @@ package logic
import (
gosql "database/sql"
"fmt"
+ "sync"
"sync/atomic"
"time"
@@ -16,12 +17,13 @@ import (
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
- "github.com/outbrain/golib/sqlutils"
- "sync"
+ "github.com/openark/golib/log"
+ "github.com/openark/golib/sqlutils"
)
const (
- atomicCutOverMagicHint = "ghost-cut-over-sentry"
+ GhostChangelogTableComment = "gh-ost changelog"
+ atomicCutOverMagicHint = "ghost-cut-over-sentry"
)
type dmlBuildResult struct {
@@ -57,6 +59,7 @@ type Applier struct {
singletonDB *gosql.DB
migrationContext *base.MigrationContext
finishedMigrating int64
+ name string
}
func NewApplier(migrationContext *base.MigrationContext) *Applier {
@@ -64,11 +67,11 @@ func NewApplier(migrationContext *base.MigrationContext) *Applier {
connectionConfig: migrationContext.ApplierConnectionConfig,
migrationContext: migrationContext,
finishedMigrating: 0,
+ name: "applier",
}
}
func (this *Applier) InitDBConnections() (err error) {
-
applierUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName)
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, applierUri); err != nil {
return err
@@ -78,18 +81,18 @@ func (this *Applier) InitDBConnections() (err error) {
return err
}
this.singletonDB.SetMaxOpenConns(1)
- version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
+ version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name)
if err != nil {
return err
}
- if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig, this.migrationContext); err != nil {
+ if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig, this.migrationContext, this.name); err != nil {
return err
}
this.migrationContext.ApplierMySQLVersion = version
if err := this.validateAndReadTimeZone(); err != nil {
return err
}
- if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
+ if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform && !this.migrationContext.AzureMySQL {
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
@@ -114,6 +117,24 @@ func (this *Applier) validateAndReadTimeZone() error {
return nil
}
+// generateSqlModeQuery returns a `sql_mode = ...` query, to be wrapped with a `set session` or `set global`,
+// based on gh-ost configuration:
+// - User may skip strict mode
+// - User may allow zero dates or zero in dates
+func (this *Applier) generateSqlModeQuery() string {
+ sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
+ if !this.migrationContext.SkipStrictMode {
+ sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
+ }
+ sqlModeQuery := fmt.Sprintf("CONCAT(@@session.sql_mode, ',%s')", sqlModeAddendum)
+ if this.migrationContext.AllowZeroInDate {
+ sqlModeQuery = fmt.Sprintf("REPLACE(REPLACE(%s, 'NO_ZERO_IN_DATE', ''), 'NO_ZERO_DATE', '')", sqlModeQuery)
+ }
+ sqlModeQuery = fmt.Sprintf("sql_mode = %s", sqlModeQuery)
+
+ return sqlModeQuery
+}
+
// readTableColumns reads table columns on applier
func (this *Applier) readTableColumns() (err error) {
this.migrationContext.Log.Infof("Examining table structure on applier")
@@ -179,11 +200,33 @@ func (this *Applier) CreateGhostTable() error {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
)
- if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
- return err
- }
- this.migrationContext.Log.Infof("Ghost table created")
- return nil
+
+ err := func() error {
+ tx, err := this.db.Begin()
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
+ sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
+
+ if _, err := tx.Exec(sessionQuery); err != nil {
+ return err
+ }
+ if _, err := tx.Exec(query); err != nil {
+ return err
+ }
+ this.migrationContext.Log.Infof("Ghost table created")
+ if err := tx.Commit(); err != nil {
+ // Neither SET SESSION nor ALTER are really transactional, so strictly speaking
+ // there's no need to commit; but let's do this the legit way anyway.
+ return err
+ }
+ return nil
+ }()
+
+ return err
}
// AlterGhost applies `alter` statement on ghost table
@@ -198,10 +241,51 @@ func (this *Applier) AlterGhost() error {
sql.EscapeName(this.migrationContext.GetGhostTableName()),
)
this.migrationContext.Log.Debugf("ALTER statement: %s", query)
+
+ err := func() error {
+ tx, err := this.db.Begin()
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
+ sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
+
+ if _, err := tx.Exec(sessionQuery); err != nil {
+ return err
+ }
+ if _, err := tx.Exec(query); err != nil {
+ return err
+ }
+ this.migrationContext.Log.Infof("Ghost table altered")
+ if err := tx.Commit(); err != nil {
+ // Neither SET SESSION nor ALTER are really transactional, so strictly speaking
+ // there's no need to commit; but let's do this the legit way anyway.
+ return err
+ }
+ return nil
+ }()
+
+ return err
+}
+
+// AlterGhostAutoIncrement applies `alter` statement on ghost table to set its AUTO_INCREMENT value
+func (this *Applier) AlterGhostAutoIncrement() error {
+ query := fmt.Sprintf(`alter /* gh-ost */ table %s.%s AUTO_INCREMENT=%d`,
+ sql.EscapeName(this.migrationContext.DatabaseName),
+ sql.EscapeName(this.migrationContext.GetGhostTableName()),
+ this.migrationContext.OriginalTableAutoIncrement,
+ )
+ this.migrationContext.Log.Infof("Altering ghost table AUTO_INCREMENT value %s.%s",
+ sql.EscapeName(this.migrationContext.DatabaseName),
+ sql.EscapeName(this.migrationContext.GetGhostTableName()),
+ )
+ this.migrationContext.Log.Debugf("AUTO_INCREMENT ALTER statement: %s", query)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
- this.migrationContext.Log.Infof("Ghost table altered")
+ this.migrationContext.Log.Infof("Ghost table AUTO_INCREMENT altered")
return nil
}
@@ -211,16 +295,16 @@ func (this *Applier) CreateChangelogTable() error {
return err
}
query := fmt.Sprintf(`create /* gh-ost */ table %s.%s (
- id bigint auto_increment,
+ id bigint unsigned auto_increment,
last_update timestamp not null DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
hint varchar(64) charset ascii not null,
value varchar(4096) charset ascii not null,
primary key(id),
unique key hint_uidx(hint)
- ) auto_increment=256
- `,
+ ) auto_increment=256 comment='%s'`,
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetChangelogTableName()),
+ GhostChangelogTableComment,
)
this.migrationContext.Log.Infof("Creating changelog table %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
@@ -322,8 +406,9 @@ func (this *Applier) InitiateHeartbeat() {
}
injectHeartbeat()
- heartbeatTick := time.Tick(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
- for range heartbeatTick {
+ ticker := time.NewTicker(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
+ defer ticker.Stop()
+ for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@@ -360,10 +445,13 @@ func (this *Applier) ReadMigrationMinValues(uniqueKey *sql.UniqueKey) error {
if err != nil {
return err
}
+
rows, err := this.db.Query(query)
if err != nil {
return err
}
+ defer rows.Close()
+
for rows.Next() {
this.migrationContext.MigrationRangeMinValues = sql.NewColumnValues(uniqueKey.Len())
if err = rows.Scan(this.migrationContext.MigrationRangeMinValues.ValuesPointers...); err != nil {
@@ -372,8 +460,7 @@ func (this *Applier) ReadMigrationMinValues(uniqueKey *sql.UniqueKey) error {
}
this.migrationContext.Log.Infof("Migration min values: [%s]", this.migrationContext.MigrationRangeMinValues)
- err = rows.Err()
- return err
+ return rows.Err()
}
// ReadMigrationMaxValues returns the maximum values to be iterated on rowcopy
@@ -383,10 +470,13 @@ func (this *Applier) ReadMigrationMaxValues(uniqueKey *sql.UniqueKey) error {
if err != nil {
return err
}
+
rows, err := this.db.Query(query)
if err != nil {
return err
}
+ defer rows.Close()
+
for rows.Next() {
this.migrationContext.MigrationRangeMaxValues = sql.NewColumnValues(uniqueKey.Len())
if err = rows.Scan(this.migrationContext.MigrationRangeMaxValues.ValuesPointers...); err != nil {
@@ -395,12 +485,31 @@ func (this *Applier) ReadMigrationMaxValues(uniqueKey *sql.UniqueKey) error {
}
this.migrationContext.Log.Infof("Migration max values: [%s]", this.migrationContext.MigrationRangeMaxValues)
- err = rows.Err()
- return err
+ return rows.Err()
}
-// ReadMigrationRangeValues reads min/max values that will be used for rowcopy
+// ReadMigrationRangeValues reads min/max values that will be used for rowcopy.
+// Before reading min/max, write a changelog state into the ghc table to avoid losing data in MySQL's two-phase commit.
+/*
+Detailed description of the lost data in mysql two-phase commit issue by @Fanduzi:
+ When using semi-sync and setting rpl_semi_sync_master_wait_point=AFTER_SYNC,
+ if an INSERT statement is being committed but blocks due to an unmet ack count,
+ the data inserted by the transaction is not visible to ReadMigrationRangeValues,
+ so the copy of the existing data in the table does not include the new row inserted by the transaction.
+ However, the binlog event for the transaction is already written to the binlog,
+ so the addDMLEventsListener only captures the binlog event after the transaction,
+ and thus the transaction's binlog event is not captured, resulting in data loss.
+
+ If we write a changelog into the ghc table before ReadMigrationRangeValues and the transaction commit blocks
+ because the ack is not met, then the changelog write will also block, so ReadMigrationRangeValues
+ will not run. When the changelog writes successfully, ReadMigrationRangeValues will read the
+ newly inserted data, thus avoiding data loss due to the above problem.
+*/
func (this *Applier) ReadMigrationRangeValues() error {
+ if _, err := this.WriteChangelogState(string(ReadMigrationRangeValues)); err != nil {
+ return err
+ }
+
if err := this.ReadMigrationMinValues(this.migrationContext.UniqueKey); err != nil {
return err
}
@@ -437,10 +546,13 @@ func (this *Applier) CalculateNextIterationRangeEndValues() (hasFurtherRange boo
if err != nil {
return hasFurtherRange, err
}
+
rows, err := this.db.Query(query, explodedArgs...)
if err != nil {
return hasFurtherRange, err
}
+ defer rows.Close()
+
iterationRangeMaxValues := sql.NewColumnValues(this.migrationContext.UniqueKey.Len())
for rows.Next() {
if err = rows.Scan(iterationRangeMaxValues.ValuesPointers...); err != nil {
@@ -489,12 +601,9 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected
return nil, err
}
defer tx.Rollback()
+
sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
- sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
- if !this.migrationContext.SkipStrictMode {
- sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
- }
- sessionQuery = fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)
+ sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
if _, err := tx.Exec(sessionQuery); err != nil {
return nil, err
@@ -992,7 +1101,6 @@ func (this *Applier) buildDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) (result
// ApplyDMLEventQueries applies multiple DML queries onto the _ghost_ table
func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) error {
-
var totalDelta int64
err := func() error {
@@ -1007,12 +1115,7 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
}
sessionQuery := "SET SESSION time_zone = '+00:00'"
-
- sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
- if !this.migrationContext.SkipStrictMode {
- sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
- }
- sessionQuery = fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)
+ sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
if _, err := tx.Exec(sessionQuery); err != nil {
return rollback(err)
@@ -1022,11 +1125,20 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
if buildResult.err != nil {
return rollback(buildResult.err)
}
- if _, err := tx.Exec(buildResult.query, buildResult.args...); err != nil {
+ result, err := tx.Exec(buildResult.query, buildResult.args...)
+ if err != nil {
err = fmt.Errorf("%s; query=%s; args=%+v", err.Error(), buildResult.query, buildResult.args)
return rollback(err)
}
- totalDelta += buildResult.rowsDelta
+
+ rowsAffected, err := result.RowsAffected()
+ if err != nil {
+ log.Warningf("error getting rows affected from DML event query: %s. i'm going to assume that the DML affected a single row, but this may result in inaccurate statistics", err)
+ rowsAffected = 1
+ }
+ // each DML is either a single insert (delta +1), update (delta +0) or delete (delta -1).
+ // multiplying by the rows actually affected (either 0 or 1) will give an accurate row delta for this DML event
+ totalDelta += buildResult.rowsDelta * rowsAffected
}
}
if err := tx.Commit(); err != nil {
diff --git a/go/logic/hooks.go b/go/logic/hooks.go
index fa5011e..0ff296d 100644
--- a/go/logic/hooks.go
+++ b/go/logic/hooks.go
@@ -1,6 +1,5 @@
/*
-/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -14,7 +13,7 @@ import (
"sync/atomic"
"github.com/github/gh-ost/go/base"
- "github.com/outbrain/golib/log"
+ "github.com/openark/golib/log"
)
const (
@@ -64,15 +63,15 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [
env = append(env, fmt.Sprintf("GH_OST_INSPECTED_HOST=%s", this.migrationContext.GetInspectorHostname()))
env = append(env, fmt.Sprintf("GH_OST_EXECUTING_HOST=%s", this.migrationContext.Hostname))
env = append(env, fmt.Sprintf("GH_OST_INSPECTED_LAG=%f", this.migrationContext.GetCurrentLagDuration().Seconds()))
+ env = append(env, fmt.Sprintf("GH_OST_HEARTBEAT_LAG=%f", this.migrationContext.TimeSinceLastHeartbeatOnChangelog().Seconds()))
env = append(env, fmt.Sprintf("GH_OST_PROGRESS=%f", this.migrationContext.GetProgressPct()))
+ env = append(env, fmt.Sprintf("GH_OST_ETA_SECONDS=%d", this.migrationContext.GetETASeconds()))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT=%s", this.migrationContext.HooksHintMessage))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_OWNER=%s", this.migrationContext.HooksHintOwner))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_TOKEN=%s", this.migrationContext.HooksHintToken))
env = append(env, fmt.Sprintf("GH_OST_DRY_RUN=%t", this.migrationContext.Noop))
- for _, variable := range extraVariables {
- env = append(env, variable)
- }
+ env = append(env, extraVariables...)
return env
}
diff --git a/go/logic/inspect.go b/go/logic/inspect.go
index bc10830..a562102 100644
--- a/go/logic/inspect.go
+++ b/go/logic/inspect.go
@@ -1,11 +1,12 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package logic
import (
+ "context"
gosql "database/sql"
"fmt"
"reflect"
@@ -17,7 +18,7 @@ import (
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
- "github.com/outbrain/golib/sqlutils"
+ "github.com/openark/golib/sqlutils"
)
const startSlavePostWaitMilliseconds = 500 * time.Millisecond
@@ -29,12 +30,14 @@ type Inspector struct {
db *gosql.DB
informationSchemaDb *gosql.DB
migrationContext *base.MigrationContext
+ name string
}
func NewInspector(migrationContext *base.MigrationContext) *Inspector {
return &Inspector{
connectionConfig: migrationContext.InspectorConnectionConfig,
migrationContext: migrationContext,
+ name: "inspector",
}
}
@@ -52,7 +55,7 @@ func (this *Inspector) InitDBConnections() (err error) {
if err := this.validateConnection(); err != nil {
return err
}
- if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
+ if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform && !this.migrationContext.AzureMySQL {
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
@@ -109,6 +112,10 @@ func (this *Inspector) InspectOriginalTable() (err error) {
if err != nil {
return err
}
+ this.migrationContext.OriginalTableAutoIncrement, err = this.getAutoIncrementValue(this.migrationContext.OriginalTableName)
+ if err != nil {
+ return err
+ }
return nil
}
@@ -181,9 +188,17 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
if column.Name == mappedColumn.Name && column.Type == sql.DateTimeColumnType && mappedColumn.Type == sql.TimestampColumnType {
this.migrationContext.MappedSharedColumns.SetConvertDatetimeToTimestamp(column.Name, this.migrationContext.ApplierTimeZone)
}
+ if column.Name == mappedColumn.Name && column.Type == sql.EnumColumnType && mappedColumn.Charset != "" {
+ this.migrationContext.MappedSharedColumns.SetEnumToTextConversion(column.Name)
+ this.migrationContext.MappedSharedColumns.SetEnumValues(column.Name, column.EnumValues)
+ }
}
for _, column := range this.migrationContext.UniqueKey.Columns.Columns() {
+ if this.migrationContext.GhostTableVirtualColumns.GetColumn(column.Name) != nil {
+ // this is a virtual column
+ continue
+ }
if this.migrationContext.MappedSharedColumns.HasTimezoneConversion(column.Name) {
return fmt.Errorf("No support at this time for converting a column from DATETIME to TIMESTAMP that is also part of the chosen unique key. Column: %s, key: %s", column.Name, this.migrationContext.UniqueKey.Name)
}
@@ -198,7 +213,7 @@ func (this *Inspector) validateConnection() error {
return fmt.Errorf("MySQL replication length limited to 32 characters. See https://dev.mysql.com/doc/refman/5.7/en/assigning-passwords.html")
}
- version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
+ version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name)
this.migrationContext.InspectorMySQLVersion = version
return err
}
@@ -269,7 +284,7 @@ func (this *Inspector) validateGrants() error {
// It is entirely possible, for example, that the replication is using 'STATEMENT'
// binlog format even as the variable says 'ROW'
func (this *Inspector) restartReplication() error {
- this.migrationContext.Log.Infof("Restarting replication on %s:%d to make sure binlog settings apply to replication thread", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
+ this.migrationContext.Log.Infof("Restarting replication on %s to make sure binlog settings apply to replication thread", this.connectionConfig.Key.String())
masterKey, _ := mysql.GetMasterKeyFromSlaveStatus(this.connectionConfig)
if masterKey == nil {
@@ -328,13 +343,13 @@ func (this *Inspector) validateBinlogs() error {
return err
}
if !hasBinaryLogs {
- return fmt.Errorf("%s:%d must have binary logs enabled", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
+ return fmt.Errorf("%s must have binary logs enabled", this.connectionConfig.Key.String())
}
if this.migrationContext.RequiresBinlogFormatChange() {
if !this.migrationContext.SwitchToRowBinlogFormat {
- return fmt.Errorf("You must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s:%d doesn't have replicas", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
+ return fmt.Errorf("You must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s doesn't have replicas", this.connectionConfig.Key.String())
}
- query := fmt.Sprintf(`show /* gh-ost */ slave hosts`)
+ query := `show /* gh-ost */ slave hosts`
countReplicas := 0
err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error {
countReplicas++
@@ -344,21 +359,20 @@ func (this *Inspector) validateBinlogs() error {
return err
}
if countReplicas > 0 {
- return fmt.Errorf("%s:%d has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat)
+ return fmt.Errorf("%s has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat)
}
- this.migrationContext.Log.Infof("%s:%d has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat)
+ this.migrationContext.Log.Infof("%s has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat)
}
query = `select @@global.binlog_row_image`
if err := this.db.QueryRow(query).Scan(&this.migrationContext.OriginalBinlogRowImage); err != nil {
- // Only as of 5.6. We wish to support 5.5 as well
- this.migrationContext.OriginalBinlogRowImage = "FULL"
+ return err
}
this.migrationContext.OriginalBinlogRowImage = strings.ToUpper(this.migrationContext.OriginalBinlogRowImage)
if this.migrationContext.OriginalBinlogRowImage != "FULL" {
- return fmt.Errorf("%s:%d has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogRowImage)
+ return fmt.Errorf("%s has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogRowImage)
}
- this.migrationContext.Log.Infof("binary logs validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
+ this.migrationContext.Log.Infof("binary logs validated on %s", this.connectionConfig.Key.String())
return nil
}
@@ -371,25 +385,25 @@ func (this *Inspector) validateLogSlaveUpdates() error {
}
if logSlaveUpdates {
- this.migrationContext.Log.Infof("log_slave_updates validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
+ this.migrationContext.Log.Infof("log_slave_updates validated on %s", this.connectionConfig.Key.String())
return nil
}
if this.migrationContext.IsTungsten {
- this.migrationContext.Log.Warningf("log_slave_updates not found on %s:%d, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
+ this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.String())
return nil
}
if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica {
- return fmt.Errorf("%s:%d must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
+ return fmt.Errorf("%s must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.String())
}
if this.migrationContext.InspectorIsAlsoApplier() {
- this.migrationContext.Log.Warningf("log_slave_updates not found on %s:%d, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
+ this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.String())
return nil
}
- return fmt.Errorf("%s:%d must have log_slave_updates enabled for executing migration", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
+ return fmt.Errorf("%s must have log_slave_updates enabled for executing migration", this.connectionConfig.Key.String())
}
// validateTable makes sure the table we need to operate on actually exists
@@ -520,17 +534,39 @@ func (this *Inspector) estimateTableRowsViaExplain() error {
}
// CountTableRows counts exact number of rows on the original table
-func (this *Inspector) CountTableRows() error {
+func (this *Inspector) CountTableRows(ctx context.Context) error {
atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 1)
defer atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 0)
this.migrationContext.Log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while")
- query := fmt.Sprintf(`select /* gh-ost */ count(*) as rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
- var rowsEstimate int64
- if err := this.db.QueryRow(query).Scan(&rowsEstimate); err != nil {
+ conn, err := this.db.Conn(ctx)
+ if err != nil {
return err
}
+ defer conn.Close()
+
+ var connectionID string
+ if err := conn.QueryRowContext(ctx, `SELECT /* gh-ost */ CONNECTION_ID()`).Scan(&connectionID); err != nil {
+ return err
+ }
+
+ query := fmt.Sprintf(`select /* gh-ost */ count(*) as count_rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
+ var rowsEstimate int64
+ if err := conn.QueryRowContext(ctx, query).Scan(&rowsEstimate); err != nil {
+ switch err {
+ case context.Canceled, context.DeadlineExceeded:
+ this.migrationContext.Log.Infof("exact row count cancelled (%s), likely because I'm about to cut over. I'm going to kill that query.", ctx.Err())
+ return mysql.Kill(this.db, connectionID)
+ default:
+ return err
+ }
+ }
+
+ // row count query finished. nil out the cancel func, so the main migration thread
+ // doesn't bother calling it after row copy is done.
+ this.migrationContext.SetCountTableRowsCancelFunc(nil)
+
atomic.StoreInt64(&this.migrationContext.RowsEstimate, rowsEstimate)
this.migrationContext.UsedRowsEstimateMethod = base.CountRowsEstimate
@@ -553,6 +589,7 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
columnName := m.GetString("COLUMN_NAME")
columnType := m.GetString("COLUMN_TYPE")
+ columnOctetLength := m.GetUint("CHARACTER_OCTET_LENGTH")
for _, columnsList := range columnsLists {
column := columnsList.GetColumn(columnName)
if column == nil {
@@ -579,6 +616,11 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
}
if strings.HasPrefix(columnType, "enum") {
column.Type = sql.EnumColumnType
+ column.EnumValues = sql.ParseEnumValues(m.GetString("COLUMN_TYPE"))
+ }
+ if strings.HasPrefix(columnType, "binary") {
+ column.Type = sql.BinaryColumnType
+ column.BinaryOctetLength = columnOctetLength
}
if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
column.Charset = charset
@@ -589,6 +631,24 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
return err
}
+// getAutoIncrementValue gets the original table's AUTO_INCREMENT value, if it exists (0 if it does not)
+func (this *Inspector) getAutoIncrementValue(tableName string) (autoIncrement uint64, err error) {
+ query := `
+ SELECT
+ AUTO_INCREMENT
+ FROM INFORMATION_SCHEMA.TABLES
+ WHERE
+ TABLES.TABLE_SCHEMA = ?
+ AND TABLES.TABLE_NAME = ?
+ AND AUTO_INCREMENT IS NOT NULL
+ `
+ err = sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
+ autoIncrement = m.GetUint64("AUTO_INCREMENT")
+ return nil
+ }, this.migrationContext.DatabaseName, tableName)
+ return autoIncrement, err
+}
+
// getCandidateUniqueKeys investigates a table and returns the list of unique keys
// candidate for chunking
func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*sql.UniqueKey), err error) {
@@ -767,5 +827,4 @@ func (this *Inspector) getReplicationLag() (replicationLag time.Duration, err er
func (this *Inspector) Teardown() {
this.db.Close()
this.informationSchemaDb.Close()
- return
}
diff --git a/go/logic/migrator.go b/go/logic/migrator.go
index 291a490..8dc9910 100644
--- a/go/logic/migrator.go
+++ b/go/logic/migrator.go
@@ -1,11 +1,12 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package logic
import (
+ "context"
"fmt"
"io"
"math"
@@ -24,8 +25,10 @@ import (
type ChangelogState string
const (
+ AllEventsUpToLockProcessed ChangelogState = "AllEventsUpToLockProcessed"
GhostTableMigrated ChangelogState = "GhostTableMigrated"
- AllEventsUpToLockProcessed = "AllEventsUpToLockProcessed"
+ Migrated ChangelogState = "Migrated"
+ ReadMigrationRangeValues ChangelogState = "ReadMigrationRangeValues"
)
func ReadChangelogState(s string) ChangelogState {
@@ -61,6 +64,7 @@ const (
// Migrator is the main schema migration flow manager.
type Migrator struct {
+ appVersion string
parser *sql.AlterTableParser
inspector *Inspector
applier *Applier
@@ -86,8 +90,9 @@ type Migrator struct {
finishedMigrating int64
}
-func NewMigrator(context *base.MigrationContext) *Migrator {
+func NewMigrator(context *base.MigrationContext, appVersion string) *Migrator {
migrator := &Migrator{
+ appVersion: appVersion,
migrationContext: context,
parser: sql.NewAlterTableParser(),
ghostTableMigrated: make(chan bool),
@@ -176,16 +181,6 @@ func (this *Migrator) retryOperationWithExponentialBackoff(operation func() erro
return err
}
-// executeAndThrottleOnError executes a given function. If it errors, it
-// throttles.
-func (this *Migrator) executeAndThrottleOnError(operation func() error) (err error) {
- if err := operation(); err != nil {
- this.throttler.throttle(nil)
- return err
- }
- return nil
-}
-
// consumeRowCopyComplete blocks on the rowCopyComplete channel once, and then
// consumes and drops any further incoming events that may be left hanging.
func (this *Migrator) consumeRowCopyComplete() {
@@ -207,16 +202,26 @@ func (this *Migrator) canStopStreaming() bool {
return atomic.LoadInt64(&this.migrationContext.CutOverCompleteFlag) != 0
}
-// onChangelogStateEvent is called when a binlog event operation on the changelog table is intercepted.
-func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (err error) {
+// onChangelogEvent is called when a binlog event operation on the changelog table is intercepted.
+func (this *Migrator) onChangelogEvent(dmlEvent *binlog.BinlogDMLEvent) (err error) {
// Hey, I created the changelog table, I know the type of columns it has!
- if hint := dmlEvent.NewColumnValues.StringColumn(2); hint != "state" {
+ switch hint := dmlEvent.NewColumnValues.StringColumn(2); hint {
+ case "state":
+ return this.onChangelogStateEvent(dmlEvent)
+ case "heartbeat":
+ return this.onChangelogHeartbeatEvent(dmlEvent)
+ default:
return nil
}
+}
+
+func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (err error) {
changelogStateString := dmlEvent.NewColumnValues.StringColumn(3)
changelogState := ReadChangelogState(changelogStateString)
this.migrationContext.Log.Infof("Intercepted changelog state %s", changelogState)
switch changelogState {
+ case Migrated, ReadMigrationRangeValues:
+ // no-op event
case GhostTableMigrated:
{
this.ghostTableMigrated <- true
@@ -245,6 +250,18 @@ func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (er
return nil
}
+func (this *Migrator) onChangelogHeartbeatEvent(dmlEvent *binlog.BinlogDMLEvent) (err error) {
+ changelogHeartbeatString := dmlEvent.NewColumnValues.StringColumn(3)
+
+ heartbeatTime, err := time.Parse(time.RFC3339Nano, changelogHeartbeatString)
+ if err != nil {
+ return this.migrationContext.Log.Errore(err)
+ } else {
+ this.migrationContext.SetLastHeartbeatOnChangelogTime(heartbeatTime)
+ return nil
+ }
+}
+
// listenOnPanicAbort aborts on abort request
func (this *Migrator) listenOnPanicAbort() {
err := <-this.migrationContext.PanicAbort
@@ -280,8 +297,8 @@ func (this *Migrator) countTableRows() (err error) {
return nil
}
- countRowsFunc := func() error {
- if err := this.inspector.CountTableRows(); err != nil {
+ countRowsFunc := func(ctx context.Context) error {
+ if err := this.inspector.CountTableRows(ctx); err != nil {
return err
}
if err := this.hooksExecutor.onRowCountComplete(); err != nil {
@@ -291,12 +308,17 @@ func (this *Migrator) countTableRows() (err error) {
}
if this.migrationContext.ConcurrentCountTableRows {
+ // store a cancel func so we can stop this query before a cut over
+ rowCountContext, rowCountCancel := context.WithCancel(context.Background())
+ this.migrationContext.SetCountTableRowsCancelFunc(rowCountCancel)
+
this.migrationContext.Log.Infof("As instructed, counting rows in the background; meanwhile I will use an estimated count, and will update it later on")
- go countRowsFunc()
+ go countRowsFunc(rowCountContext)
+
// and we ignore errors, because this turns to be a background job
return nil
}
- return countRowsFunc()
+ return countRowsFunc(context.Background())
}
func (this *Migrator) createFlagFiles() (err error) {
@@ -400,6 +422,10 @@ func (this *Migrator) Migrate() (err error) {
}
this.printStatus(ForcePrintStatusRule)
+ if this.migrationContext.IsCountingTableRows() {
+ this.migrationContext.Log.Info("stopping query for exact row count, because that can accidentally lock out the cut over")
+ this.migrationContext.CancelTableRowsCount()
+ }
if err := this.hooksExecutor.onBeforeCutOver(); err != nil {
return err
}
@@ -476,6 +502,13 @@ func (this *Migrator) cutOver() (err error) {
this.migrationContext.Log.Debugf("checking for cut-over postpone")
this.sleepWhileTrue(
func() (bool, error) {
+ heartbeatLag := this.migrationContext.TimeSinceLastHeartbeatOnChangelog()
+ maxLagMillisecondsThrottle := time.Duration(atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold)) * time.Millisecond
+ cutOverLockTimeout := time.Duration(this.migrationContext.CutOverLockTimeoutSeconds) * time.Second
+ if heartbeatLag > maxLagMillisecondsThrottle || heartbeatLag > cutOverLockTimeout {
+ this.migrationContext.Log.Debugf("current HeartbeatLag (%.2fs) is too high, it needs to be less than both --max-lag-millis (%.2fs) and --cut-over-lock-timeout-seconds (%.2fs) to continue", heartbeatLag.Seconds(), maxLagMillisecondsThrottle.Seconds(), cutOverLockTimeout.Seconds())
+ return true, nil
+ }
if this.migrationContext.PostponeCutOverFlagFile == "" {
return false, nil
}
@@ -517,19 +550,19 @@ func (this *Migrator) cutOver() (err error) {
}
}
}
- if this.migrationContext.CutOverType == base.CutOverAtomic {
+
+ switch this.migrationContext.CutOverType {
+ case base.CutOverAtomic:
// Atomic solution: we use low timeout and multiple attempts. But for
// each failed attempt, we throttle until replication lag is back to normal
- err := this.atomicCutOver()
- this.handleCutOverResult(err)
- return err
+ err = this.atomicCutOver()
+ case base.CutOverTwoStep:
+ err = this.cutOverTwoStep()
+ default:
+ return this.migrationContext.Log.Fatalf("Unknown cut-over type: %d; should never get here!", this.migrationContext.CutOverType)
}
- if this.migrationContext.CutOverType == base.CutOverTwoStep {
- err := this.cutOverTwoStep()
- this.handleCutOverResult(err)
- return err
- }
- return this.migrationContext.Log.Fatalf("Unknown cut-over type: %d; should never get here!", this.migrationContext.CutOverType)
+ this.handleCutOverResult(err)
+ return err
}
// Inject the "AllEventsUpToLockProcessed" state hint, wait for it to appear in the binary logs,
@@ -777,17 +810,16 @@ func (this *Migrator) initiateInspector() (err error) {
}
// initiateStatus sets and activates the printStatus() ticker
-func (this *Migrator) initiateStatus() error {
+func (this *Migrator) initiateStatus() {
this.printStatus(ForcePrintStatusAndHintRule)
- statusTick := time.Tick(1 * time.Second)
- for range statusTick {
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+ for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
- return nil
+ return
}
go this.printStatus(HeuristicPrintStatusRule)
}
-
- return nil
}
// printMigrationStatusHint prints a detailed configuration dump, that is useful
@@ -796,57 +828,57 @@ func (this *Migrator) initiateStatus() error {
// migration, and as response to the "status" interactive command.
func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) {
w := io.MultiWriter(writers...)
- fmt.Fprintln(w, fmt.Sprintf("# Migrating %s.%s; Ghost table is %s.%s",
+ fmt.Fprintf(w, "# Migrating %s.%s; Ghost table is %s.%s\n",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
- ))
- fmt.Fprintln(w, fmt.Sprintf("# Migrating %+v; inspecting %+v; executing on %+v",
+ )
+ fmt.Fprintf(w, "# Migrating %+v; inspecting %+v; executing on %+v\n",
*this.applier.connectionConfig.ImpliedKey,
*this.inspector.connectionConfig.ImpliedKey,
this.migrationContext.Hostname,
- ))
- fmt.Fprintln(w, fmt.Sprintf("# Migration started at %+v",
+ )
+ fmt.Fprintf(w, "# Migration started at %+v\n",
this.migrationContext.StartTime.Format(time.RubyDate),
- ))
+ )
maxLoad := this.migrationContext.GetMaxLoad()
criticalLoad := this.migrationContext.GetCriticalLoad()
- fmt.Fprintln(w, fmt.Sprintf("# chunk-size: %+v; max-lag-millis: %+vms; dml-batch-size: %+v; max-load: %s; critical-load: %s; nice-ratio: %f",
+ fmt.Fprintf(w, "# chunk-size: %+v; max-lag-millis: %+vms; dml-batch-size: %+v; max-load: %s; critical-load: %s; nice-ratio: %f\n",
atomic.LoadInt64(&this.migrationContext.ChunkSize),
atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold),
atomic.LoadInt64(&this.migrationContext.DMLBatchSize),
maxLoad.String(),
criticalLoad.String(),
this.migrationContext.GetNiceRatio(),
- ))
+ )
if this.migrationContext.ThrottleFlagFile != "" {
setIndicator := ""
if base.FileExists(this.migrationContext.ThrottleFlagFile) {
setIndicator = "[set]"
}
- fmt.Fprintln(w, fmt.Sprintf("# throttle-flag-file: %+v %+v",
+ fmt.Fprintf(w, "# throttle-flag-file: %+v %+v\n",
this.migrationContext.ThrottleFlagFile, setIndicator,
- ))
+ )
}
if this.migrationContext.ThrottleAdditionalFlagFile != "" {
setIndicator := ""
if base.FileExists(this.migrationContext.ThrottleAdditionalFlagFile) {
setIndicator = "[set]"
}
- fmt.Fprintln(w, fmt.Sprintf("# throttle-additional-flag-file: %+v %+v",
+ fmt.Fprintf(w, "# throttle-additional-flag-file: %+v %+v\n",
this.migrationContext.ThrottleAdditionalFlagFile, setIndicator,
- ))
+ )
}
if throttleQuery := this.migrationContext.GetThrottleQuery(); throttleQuery != "" {
- fmt.Fprintln(w, fmt.Sprintf("# throttle-query: %+v",
+ fmt.Fprintf(w, "# throttle-query: %+v\n",
throttleQuery,
- ))
+ )
}
if throttleControlReplicaKeys := this.migrationContext.GetThrottleControlReplicaKeys(); throttleControlReplicaKeys.Len() > 0 {
- fmt.Fprintln(w, fmt.Sprintf("# throttle-control-replicas count: %+v",
+ fmt.Fprintf(w, "# throttle-control-replicas count: %+v\n",
throttleControlReplicaKeys.Len(),
- ))
+ )
}
if this.migrationContext.PostponeCutOverFlagFile != "" {
@@ -854,20 +886,20 @@ func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) {
if base.FileExists(this.migrationContext.PostponeCutOverFlagFile) {
setIndicator = "[set]"
}
- fmt.Fprintln(w, fmt.Sprintf("# postpone-cut-over-flag-file: %+v %+v",
+ fmt.Fprintf(w, "# postpone-cut-over-flag-file: %+v %+v\n",
this.migrationContext.PostponeCutOverFlagFile, setIndicator,
- ))
+ )
}
if this.migrationContext.PanicFlagFile != "" {
- fmt.Fprintln(w, fmt.Sprintf("# panic-flag-file: %+v",
+ fmt.Fprintf(w, "# panic-flag-file: %+v\n",
this.migrationContext.PanicFlagFile,
- ))
+ )
}
- fmt.Fprintln(w, fmt.Sprintf("# Serving on unix socket: %+v",
+ fmt.Fprintf(w, "# Serving on unix socket: %+v\n",
this.migrationContext.ServeSocketFile,
- ))
+ )
if this.migrationContext.ServeTCPPort != 0 {
- fmt.Fprintln(w, fmt.Sprintf("# Serving on TCP port: %+v", this.migrationContext.ServeTCPPort))
+ fmt.Fprintf(w, "# Serving on TCP port: %+v\n", this.migrationContext.ServeTCPPort)
}
}
@@ -912,20 +944,29 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
}
var etaSeconds float64 = math.MaxFloat64
- eta := "N/A"
+ var etaDuration = time.Duration(base.ETAUnknown)
if progressPct >= 100.0 {
- eta = "due"
+ etaDuration = 0
} else if progressPct >= 0.1 {
elapsedRowCopySeconds := this.migrationContext.ElapsedRowCopyTime().Seconds()
totalExpectedSeconds := elapsedRowCopySeconds * float64(rowsEstimate) / float64(totalRowsCopied)
etaSeconds = totalExpectedSeconds - elapsedRowCopySeconds
if etaSeconds >= 0 {
- etaDuration := time.Duration(etaSeconds) * time.Second
- eta = base.PrettifyDurationOutput(etaDuration)
+ etaDuration = time.Duration(etaSeconds) * time.Second
} else {
- eta = "due"
+ etaDuration = 0
}
}
+ this.migrationContext.SetETADuration(etaDuration)
+ var eta string
+ switch etaDuration {
+ case 0:
+ eta = "due"
+ case time.Duration(base.ETAUnknown):
+ eta = "N/A"
+ default:
+ eta = base.PrettifyDurationOutput(etaDuration)
+ }
state := "migrating"
if atomic.LoadInt64(&this.migrationContext.CountingRowsFlag) > 0 && !this.migrationContext.ConcurrentCountTableRows {
@@ -937,7 +978,7 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
state = fmt.Sprintf("throttled, %s", throttleReason)
}
- shouldPrintStatus := false
+ var shouldPrintStatus bool
if rule == HeuristicPrintStatusRule {
if elapsedSeconds <= 60 {
shouldPrintStatus = true
@@ -962,13 +1003,14 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
currentBinlogCoordinates := *this.eventsStreamer.GetCurrentBinlogCoordinates()
- status := fmt.Sprintf("Copy: %d/%d %.1f%%; Applied: %d; Backlog: %d/%d; Time: %+v(total), %+v(copy); streamer: %+v; Lag: %.2fs, State: %s; ETA: %s",
+ status := fmt.Sprintf("Copy: %d/%d %.1f%%; Applied: %d; Backlog: %d/%d; Time: %+v(total), %+v(copy); streamer: %+v; Lag: %.2fs, HeartbeatLag: %.2fs, State: %s; ETA: %s",
totalRowsCopied, rowsEstimate, progressPct,
atomic.LoadInt64(&this.migrationContext.TotalDMLEventsApplied),
len(this.applyEventsQueue), cap(this.applyEventsQueue),
base.PrettifyDurationOutput(elapsedTime), base.PrettifyDurationOutput(this.migrationContext.ElapsedRowCopyTime()),
currentBinlogCoordinates,
this.migrationContext.GetCurrentLagDuration().Seconds(),
+ this.migrationContext.TimeSinceLastHeartbeatOnChangelog().Seconds(),
state,
eta,
)
@@ -979,7 +1021,8 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
w := io.MultiWriter(writers...)
fmt.Fprintln(w, status)
- if elapsedSeconds%60 == 0 {
+ hooksStatusIntervalSec := this.migrationContext.HooksStatusIntervalSec
+ if hooksStatusIntervalSec > 0 && elapsedSeconds%hooksStatusIntervalSec == 0 {
this.hooksExecutor.onStatus(status)
}
}
@@ -995,7 +1038,7 @@ func (this *Migrator) initiateStreaming() error {
this.migrationContext.DatabaseName,
this.migrationContext.GetChangelogTableName(),
func(dmlEvent *binlog.BinlogDMLEvent) error {
- return this.onChangelogStateEvent(dmlEvent)
+ return this.onChangelogEvent(dmlEvent)
},
)
@@ -1009,8 +1052,9 @@ func (this *Migrator) initiateStreaming() error {
}()
go func() {
- ticker := time.Tick(1 * time.Second)
- for range ticker {
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+ for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@@ -1037,7 +1081,7 @@ func (this *Migrator) addDMLEventsListener() error {
// initiateThrottler kicks in the throttling collection and the throttling checks.
func (this *Migrator) initiateThrottler() error {
- this.throttler = NewThrottler(this.migrationContext, this.applier, this.inspector)
+ this.throttler = NewThrottler(this.migrationContext, this.applier, this.inspector, this.appVersion)
go this.throttler.initiateThrottlerCollection(this.firstThrottlingCollected)
this.migrationContext.Log.Infof("Waiting for first throttle metrics to be collected")
@@ -1072,6 +1116,14 @@ func (this *Migrator) initiateApplier() error {
return err
}
+ if this.migrationContext.OriginalTableAutoIncrement > 0 && !this.parser.IsAutoIncrementDefined() {
+ // Original table has AUTO_INCREMENT value and the -alter statement does not indicate any override,
+ // so we should copy AUTO_INCREMENT value onto our ghost table.
+ if err := this.applier.AlterGhostAutoIncrement(); err != nil {
+ this.migrationContext.Log.Errorf("Unable to ALTER ghost table AUTO_INCREMENT value, see further error details. Bailing out")
+ return err
+ }
+ }
this.applier.WriteChangelogState(string(GhostTableMigrated))
go this.applier.InitiateHeartbeat()
return nil
@@ -1150,7 +1202,6 @@ func (this *Migrator) iterateChunks() error {
// Enqueue copy operation; to be executed by executeWriteFuncs()
this.copyRowsQueue <- copyRowsFunc
}
- return nil
}
func (this *Migrator) onApplyEventStruct(eventStruct *applyEventStruct) error {
@@ -1241,7 +1292,7 @@ func (this *Migrator) executeWriteFuncs() error {
if niceRatio := this.migrationContext.GetNiceRatio(); niceRatio > 0 {
copyRowsDuration := time.Since(copyRowsStartTime)
sleepTimeNanosecondFloat64 := niceRatio * float64(copyRowsDuration.Nanoseconds())
- sleepTime := time.Duration(time.Duration(int64(sleepTimeNanosecondFloat64)) * time.Nanosecond)
+ sleepTime := time.Duration(int64(sleepTimeNanosecondFloat64)) * time.Nanosecond
time.Sleep(sleepTime)
}
}
@@ -1256,13 +1307,17 @@ func (this *Migrator) executeWriteFuncs() error {
}
}
}
- return nil
}
// finalCleanup takes actions at very end of migration, dropping tables etc.
func (this *Migrator) finalCleanup() error {
atomic.StoreInt64(&this.migrationContext.CleanupImminentFlag, 1)
+ this.migrationContext.Log.Infof("Writing changelog state: %+v", Migrated)
+ if _, err := this.applier.WriteChangelogState(string(Migrated)); err != nil {
+ return err
+ }
+
if this.migrationContext.Noop {
if createTableStatement, err := this.inspector.showCreateTable(this.migrationContext.GetGhostTableName()); err == nil {
this.migrationContext.Log.Infof("New table structure follows")
diff --git a/go/logic/server.go b/go/logic/server.go
index 1606884..4b1b870 100644
--- a/go/logic/server.go
+++ b/go/logic/server.go
@@ -1,5 +1,5 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -122,8 +122,6 @@ func (this *Server) onServerCommand(command string, writer *bufio.Writer) (err e
// applyServerCommand parses and executes commands by user
func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (printStatusRule PrintStatusRule, err error) {
- printStatusRule = NoPrintStatusRule
-
tokens := strings.SplitN(command, "=", 2)
command = strings.TrimSpace(tokens[0])
arg := ""
@@ -134,7 +132,7 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
}
}
argIsQuestion := (arg == "?")
- throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged\n"
+ throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged"
if err := this.hooksExecutor.onInteractiveCommand(command); err != nil {
return NoPrintStatusRule, err
@@ -146,7 +144,9 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
fmt.Fprint(writer, `available commands:
status # Print a detailed status message
sup # Print a short status message
-coordinates # Print the currently inspected coordinates
+coordinates # Print the currently inspected coordinates
+applier # Print the hostname of the applier
+inspector # Print the hostname of the inspector
chunk-size= # Set a new chunk-size
dml-batch-size= # Set a new dml-batch-size
nice-ratio= # Set a new nice-ratio, immediate sleep after each row-copy operation, float (examples: 0 is aggressive, 0.7 adds 70% runtime, 1.0 doubles runtime, 2.0 triples runtime, ...)
@@ -177,6 +177,22 @@ help # This message
}
return NoPrintStatusRule, fmt.Errorf("coordinates are read-only")
}
+ case "applier":
+ if this.migrationContext.ApplierConnectionConfig != nil && this.migrationContext.ApplierConnectionConfig.ImpliedKey != nil {
+ fmt.Fprintf(writer, "Host: %s, Version: %s\n",
+ this.migrationContext.ApplierConnectionConfig.ImpliedKey.String(),
+ this.migrationContext.ApplierMySQLVersion,
+ )
+ }
+ return NoPrintStatusRule, nil
+ case "inspector":
+ if this.migrationContext.InspectorConnectionConfig != nil && this.migrationContext.InspectorConnectionConfig.ImpliedKey != nil {
+ fmt.Fprintf(writer, "Host: %s, Version: %s\n",
+ this.migrationContext.InspectorConnectionConfig.ImpliedKey.String(),
+ this.migrationContext.InspectorMySQLVersion,
+ )
+ }
+ return NoPrintStatusRule, nil
case "chunk-size":
{
if argIsQuestion {
@@ -264,7 +280,7 @@ help # This message
return NoPrintStatusRule, nil
}
this.migrationContext.SetThrottleQuery(arg)
- fmt.Fprintf(writer, throttleHint)
+ fmt.Fprintln(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "throttle-http":
@@ -274,7 +290,7 @@ help # This message
return NoPrintStatusRule, nil
}
this.migrationContext.SetThrottleHTTP(arg)
- fmt.Fprintf(writer, throttleHint)
+ fmt.Fprintln(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "throttle-control-replicas":
@@ -297,7 +313,7 @@ help # This message
return NoPrintStatusRule, err
}
atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 1)
- fmt.Fprintf(writer, throttleHint)
+ fmt.Fprintln(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "no-throttle", "unthrottle", "resume", "continue":
diff --git a/go/logic/streamer.go b/go/logic/streamer.go
index 5f11fd0..dc40ca3 100644
--- a/go/logic/streamer.go
+++ b/go/logic/streamer.go
@@ -1,5 +1,5 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -16,7 +16,7 @@ import (
"github.com/github/gh-ost/go/binlog"
"github.com/github/gh-ost/go/mysql"
- "github.com/outbrain/golib/sqlutils"
+ "github.com/openark/golib/sqlutils"
)
type BinlogEventListener struct {
@@ -42,6 +42,7 @@ type EventsStreamer struct {
listenersMutex *sync.Mutex
eventsChannel chan *binlog.BinlogEntry
binlogReader *binlog.GoMySQLReader
+ name string
}
func NewEventsStreamer(migrationContext *base.MigrationContext) *EventsStreamer {
@@ -51,13 +52,13 @@ func NewEventsStreamer(migrationContext *base.MigrationContext) *EventsStreamer
listeners: [](*BinlogEventListener){},
listenersMutex: &sync.Mutex{},
eventsChannel: make(chan *binlog.BinlogEntry, EventsChannelBufferSize),
+ name: "streamer",
}
}
// AddListener registers a new listener for binlog events, on a per-table basis
func (this *EventsStreamer) AddListener(
async bool, databaseName string, tableName string, onDmlEvent func(event *binlog.BinlogDMLEvent) error) (err error) {
-
this.listenersMutex.Lock()
defer this.listenersMutex.Unlock()
@@ -85,10 +86,10 @@ func (this *EventsStreamer) notifyListeners(binlogEvent *binlog.BinlogDMLEvent)
for _, listener := range this.listeners {
listener := listener
- if strings.ToLower(listener.databaseName) != strings.ToLower(binlogEvent.DatabaseName) {
+ if !strings.EqualFold(listener.databaseName, binlogEvent.DatabaseName) {
continue
}
- if strings.ToLower(listener.tableName) != strings.ToLower(binlogEvent.TableName) {
+ if !strings.EqualFold(listener.tableName, binlogEvent.TableName) {
continue
}
if listener.async {
@@ -106,7 +107,7 @@ func (this *EventsStreamer) InitDBConnections() (err error) {
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, EventsStreamerUri); err != nil {
return err
}
- if _, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext); err != nil {
+ if _, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name); err != nil {
return err
}
if err := this.readCurrentBinlogCoordinates(); err != nil {
@@ -121,10 +122,7 @@ func (this *EventsStreamer) InitDBConnections() (err error) {
// initBinlogReader creates and connects the reader: we hook up to a MySQL server as a replica
func (this *EventsStreamer) initBinlogReader(binlogCoordinates *mysql.BinlogCoordinates) error {
- goMySQLReader, err := binlog.NewGoMySQLReader(this.migrationContext)
- if err != nil {
- return err
- }
+ goMySQLReader := binlog.NewGoMySQLReader(this.migrationContext)
if err := goMySQLReader.ConnectBinlogStreamer(*binlogCoordinates); err != nil {
return err
}
@@ -218,5 +216,4 @@ func (this *EventsStreamer) Close() (err error) {
func (this *EventsStreamer) Teardown() {
this.db.Close()
- return
}
diff --git a/go/logic/throttler.go b/go/logic/throttler.go
index d234ea6..1e7bc97 100644
--- a/go/logic/throttler.go
+++ b/go/logic/throttler.go
@@ -1,11 +1,12 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package logic
import (
+ "context"
"fmt"
"net/http"
"strings"
@@ -42,16 +43,22 @@ const frenoMagicHint = "freno"
// Throttler collects metrics related to throttling and makes informed decision
// whether throttling should take place.
type Throttler struct {
+ appVersion string
migrationContext *base.MigrationContext
applier *Applier
+ httpClient *http.Client
+ httpClientTimeout time.Duration
inspector *Inspector
finishedMigrating int64
}
-func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector) *Throttler {
+func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector, appVersion string) *Throttler {
return &Throttler{
+ appVersion: appVersion,
migrationContext: migrationContext,
applier: applier,
+ httpClient: &http.Client{},
+ httpClientTimeout: time.Duration(migrationContext.ThrottleHTTPTimeoutMillis) * time.Millisecond,
inspector: inspector,
finishedMigrating: 0,
}
@@ -161,8 +168,9 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
collectFunc()
firstThrottlingCollected <- true
- ticker := time.Tick(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
- for range ticker {
+ ticker := time.NewTicker(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
+ defer ticker.Stop()
+ for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@@ -172,7 +180,6 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
// collectControlReplicasLag polls all the control replicas to get maximum lag value
func (this *Throttler) collectControlReplicasLag() {
-
if atomic.LoadInt64(&this.migrationContext.HibernateUntil) > 0 {
return
}
@@ -188,9 +195,12 @@ func (this *Throttler) collectControlReplicasLag() {
dbUri := connectionConfig.GetDBUri("information_schema")
var heartbeatValue string
- if db, _, err := mysql.GetDB(this.migrationContext.Uuid, dbUri); err != nil {
+ db, _, err := mysql.GetDB(this.migrationContext.Uuid, dbUri)
+ if err != nil {
return lag, err
- } else if err = db.QueryRow(replicationLagQuery).Scan(&heartbeatValue); err != nil {
+ }
+
+ if err := db.QueryRow(replicationLagQuery).Scan(&heartbeatValue); err != nil {
return lag, err
}
@@ -234,12 +244,14 @@ func (this *Throttler) collectControlReplicasLag() {
}
this.migrationContext.SetControlReplicasLagResult(readControlReplicasLag())
}
- aggressiveTicker := time.Tick(100 * time.Millisecond)
+
relaxedFactor := 10
counter := 0
shouldReadLagAggressively := false
- for range aggressiveTicker {
+ ticker := time.NewTicker(100 * time.Millisecond)
+ defer ticker.Stop()
+ for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@@ -282,7 +294,17 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<-
if url == "" {
return true, nil
}
- resp, err := http.Head(url)
+
+ ctx, cancel := context.WithTimeout(context.Background(), this.httpClientTimeout)
+ defer cancel()
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
+ if err != nil {
+ return false, err
+ }
+ req.Header.Set("User-Agent", fmt.Sprintf("gh-ost/%s", this.appVersion))
+
+ resp, err := this.httpClient.Do(req)
if err != nil {
return false, err
}
@@ -300,8 +322,10 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<-
firstThrottlingCollected <- true
- ticker := time.Tick(100 * time.Millisecond)
- for range ticker {
+ collectInterval := time.Duration(this.migrationContext.ThrottleHTTPIntervalMillis) * time.Millisecond
+ ticker := time.NewTicker(collectInterval)
+ defer ticker.Stop()
+ for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@@ -420,8 +444,9 @@ func (this *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan
this.collectGeneralThrottleMetrics()
firstThrottlingCollected <- true
- throttlerMetricsTick := time.Tick(1 * time.Second)
- for range throttlerMetricsTick {
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+ for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@@ -432,9 +457,7 @@ func (this *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan
}
// initiateThrottlerChecks initiates the throttle ticker and sets the basic behavior of throttling.
-func (this *Throttler) initiateThrottlerChecks() error {
- throttlerTick := time.Tick(100 * time.Millisecond)
-
+func (this *Throttler) initiateThrottlerChecks() {
throttlerFunction := func() {
alreadyThrottling, currentReason, _ := this.migrationContext.IsThrottled()
shouldThrottle, throttleReason, throttleReasonHint := this.shouldThrottle()
@@ -451,14 +474,15 @@ func (this *Throttler) initiateThrottlerChecks() error {
this.migrationContext.SetThrottled(shouldThrottle, throttleReason, throttleReasonHint)
}
throttlerFunction()
- for range throttlerTick {
+
+ ticker := time.NewTicker(100 * time.Millisecond)
+ defer ticker.Stop()
+ for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
- return nil
+ return
}
throttlerFunction()
}
-
- return nil
}
// throttle sees if throttling needs take place, and if so, continuously sleeps (blocks)
diff --git a/go/mysql/binlog.go b/go/mysql/binlog.go
index 50279ce..ad5e56f 100644
--- a/go/mysql/binlog.go
+++ b/go/mysql/binlog.go
@@ -1,36 +1,21 @@
/*
Copyright 2015 Shlomi Noach, courtesy Booking.com
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package mysql
import (
- "errors"
"fmt"
- "regexp"
"strconv"
"strings"
)
-var detachPattern *regexp.Regexp
-
-func init() {
- detachPattern, _ = regexp.Compile(`//([^/:]+):([\d]+)`) // e.g. `//binlog.01234:567890`
-}
-
-type BinlogType int
-
-const (
- BinaryLog BinlogType = iota
- RelayLog
-)
-
// BinlogCoordinates described binary log coordinates in the form of log file & log position.
type BinlogCoordinates struct {
LogFile string
LogPos int64
- Type BinlogType
}
// ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306
@@ -62,7 +47,7 @@ func (this *BinlogCoordinates) Equals(other *BinlogCoordinates) bool {
if other == nil {
return false
}
- return this.LogFile == other.LogFile && this.LogPos == other.LogPos && this.Type == other.Type
+ return this.LogFile == other.LogFile && this.LogPos == other.LogPos
}
// IsEmpty returns true if the log file is empty, unnamed
@@ -87,76 +72,5 @@ func (this *BinlogCoordinates) SmallerThanOrEquals(other *BinlogCoordinates) boo
if this.SmallerThan(other) {
return true
}
- return this.LogFile == other.LogFile && this.LogPos == other.LogPos // No Type comparison
-}
-
-// FileSmallerThan returns true if this coordinate's file is strictly smaller than the other's.
-func (this *BinlogCoordinates) FileSmallerThan(other *BinlogCoordinates) bool {
- return this.LogFile < other.LogFile
-}
-
-// FileNumberDistance returns the numeric distance between this coordinate's file number and the other's.
-// Effectively it means "how many rotates/FLUSHes would make these coordinates's file reach the other's"
-func (this *BinlogCoordinates) FileNumberDistance(other *BinlogCoordinates) int {
- thisNumber, _ := this.FileNumber()
- otherNumber, _ := other.FileNumber()
- return otherNumber - thisNumber
-}
-
-// FileNumber returns the numeric value of the file, and the length in characters representing the number in the filename.
-// Example: FileNumber() of mysqld.log.000789 is (789, 6)
-func (this *BinlogCoordinates) FileNumber() (int, int) {
- tokens := strings.Split(this.LogFile, ".")
- numPart := tokens[len(tokens)-1]
- numLen := len(numPart)
- fileNum, err := strconv.Atoi(numPart)
- if err != nil {
- return 0, 0
- }
- return fileNum, numLen
-}
-
-// PreviousFileCoordinatesBy guesses the filename of the previous binlog/relaylog, by given offset (number of files back)
-func (this *BinlogCoordinates) PreviousFileCoordinatesBy(offset int) (BinlogCoordinates, error) {
- result := BinlogCoordinates{LogPos: 0, Type: this.Type}
-
- fileNum, numLen := this.FileNumber()
- if fileNum == 0 {
- return result, errors.New("Log file number is zero, cannot detect previous file")
- }
- newNumStr := fmt.Sprintf("%d", (fileNum - offset))
- newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr
-
- tokens := strings.Split(this.LogFile, ".")
- tokens[len(tokens)-1] = newNumStr
- result.LogFile = strings.Join(tokens, ".")
- return result, nil
-}
-
-// PreviousFileCoordinates guesses the filename of the previous binlog/relaylog
-func (this *BinlogCoordinates) PreviousFileCoordinates() (BinlogCoordinates, error) {
- return this.PreviousFileCoordinatesBy(1)
-}
-
-// PreviousFileCoordinates guesses the filename of the previous binlog/relaylog
-func (this *BinlogCoordinates) NextFileCoordinates() (BinlogCoordinates, error) {
- result := BinlogCoordinates{LogPos: 0, Type: this.Type}
-
- fileNum, numLen := this.FileNumber()
- newNumStr := fmt.Sprintf("%d", (fileNum + 1))
- newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr
-
- tokens := strings.Split(this.LogFile, ".")
- tokens[len(tokens)-1] = newNumStr
- result.LogFile = strings.Join(tokens, ".")
- return result, nil
-}
-
-// FileSmallerThan returns true if this coordinate's file is strictly smaller than the other's.
-func (this *BinlogCoordinates) DetachedCoordinates() (isDetached bool, detachedLogFile string, detachedLogPos string) {
- detachedCoordinatesSubmatch := detachPattern.FindStringSubmatch(this.LogFile)
- if len(detachedCoordinatesSubmatch) == 0 {
- return false, "", ""
- }
- return true, detachedCoordinatesSubmatch[1], detachedCoordinatesSubmatch[2]
+ return this.LogFile == other.LogFile && this.LogPos == other.LogPos
}
diff --git a/go/mysql/binlog_test.go b/go/mysql/binlog_test.go
index b878c58..1bb7c05 100644
--- a/go/mysql/binlog_test.go
+++ b/go/mysql/binlog_test.go
@@ -8,8 +8,8 @@ package mysql
import (
"testing"
- "github.com/outbrain/golib/log"
- test "github.com/outbrain/golib/tests"
+ "github.com/openark/golib/log"
+ test "github.com/openark/golib/tests"
)
func init() {
@@ -37,57 +37,6 @@ func TestBinlogCoordinates(t *testing.T) {
test.S(t).ExpectTrue(c1.SmallerThanOrEquals(&c3))
}
-func TestBinlogNext(t *testing.T) {
- c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
- cres, err := c1.NextFileCoordinates()
-
- test.S(t).ExpectNil(err)
- test.S(t).ExpectEquals(c1.Type, cres.Type)
- test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00018")
-
- c2 := BinlogCoordinates{LogFile: "mysql-bin.00099", LogPos: 104}
- cres, err = c2.NextFileCoordinates()
-
- test.S(t).ExpectNil(err)
- test.S(t).ExpectEquals(c1.Type, cres.Type)
- test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00100")
-
- c3 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00099", LogPos: 104}
- cres, err = c3.NextFileCoordinates()
-
- test.S(t).ExpectNil(err)
- test.S(t).ExpectEquals(c1.Type, cres.Type)
- test.S(t).ExpectEquals(cres.LogFile, "mysql.00.prod.com.00100")
-}
-
-func TestBinlogPrevious(t *testing.T) {
- c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
- cres, err := c1.PreviousFileCoordinates()
-
- test.S(t).ExpectNil(err)
- test.S(t).ExpectEquals(c1.Type, cres.Type)
- test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00016")
-
- c2 := BinlogCoordinates{LogFile: "mysql-bin.00100", LogPos: 104}
- cres, err = c2.PreviousFileCoordinates()
-
- test.S(t).ExpectNil(err)
- test.S(t).ExpectEquals(c1.Type, cres.Type)
- test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00099")
-
- c3 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00100", LogPos: 104}
- cres, err = c3.PreviousFileCoordinates()
-
- test.S(t).ExpectNil(err)
- test.S(t).ExpectEquals(c1.Type, cres.Type)
- test.S(t).ExpectEquals(cres.LogFile, "mysql.00.prod.com.00099")
-
- c4 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00000", LogPos: 104}
- _, err = c4.PreviousFileCoordinates()
-
- test.S(t).ExpectNotNil(err)
-}
-
func TestBinlogCoordinatesAsKey(t *testing.T) {
m := make(map[BinlogCoordinates]bool)
@@ -103,20 +52,3 @@ func TestBinlogCoordinatesAsKey(t *testing.T) {
test.S(t).ExpectEquals(len(m), 3)
}
-
-func TestBinlogFileNumber(t *testing.T) {
- c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
- c2 := BinlogCoordinates{LogFile: "mysql-bin.00022", LogPos: 104}
-
- test.S(t).ExpectEquals(c1.FileNumberDistance(&c1), 0)
- test.S(t).ExpectEquals(c1.FileNumberDistance(&c2), 5)
- test.S(t).ExpectEquals(c2.FileNumberDistance(&c1), -5)
-}
-
-func TestBinlogFileNumberDistance(t *testing.T) {
- c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
- fileNum, numLen := c1.FileNumber()
-
- test.S(t).ExpectEquals(fileNum, 17)
- test.S(t).ExpectEquals(numLen, 5)
-}
diff --git a/go/mysql/connection.go b/go/mysql/connection.go
index 6855ee0..6a5c890 100644
--- a/go/mysql/connection.go
+++ b/go/mysql/connection.go
@@ -1,5 +1,5 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -12,12 +12,14 @@ import (
"fmt"
"io/ioutil"
"net"
+ "strings"
"github.com/go-sql-driver/mysql"
)
const (
- TLS_CONFIG_KEY = "ghost"
+ transactionIsolation = "REPEATABLE-READ"
+ TLS_CONFIG_KEY = "ghost"
)
// ConnectionConfig is the minimal configuration required to connect to a MySQL server
@@ -92,6 +94,7 @@ func (this *ConnectionConfig) UseTLS(caCertificatePath, clientCertificate, clien
}
this.tlsConfig = &tls.Config{
+ ServerName: this.Key.Hostname,
Certificates: certs,
RootCAs: rootCertPool,
InsecureSkipVerify: allowInsecure,
@@ -111,12 +114,23 @@ func (this *ConnectionConfig) GetDBUri(databaseName string) string {
// Wrap IPv6 literals in square brackets
hostname = fmt.Sprintf("[%s]", hostname)
}
- interpolateParams := true
+
// go-mysql-driver defaults to false if tls param is not provided; explicitly setting here to
// simplify construction of the DSN below.
tlsOption := "false"
if this.tlsConfig != nil {
tlsOption = TLS_CONFIG_KEY
}
- return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?timeout=%fs&readTimeout=%fs&writeTimeout=%fs&interpolateParams=%t&autocommit=true&charset=utf8mb4,utf8,latin1&tls=%s", this.User, this.Password, hostname, this.Key.Port, databaseName, this.Timeout, this.Timeout, this.Timeout, interpolateParams, tlsOption)
+ connectionParams := []string{
+ "autocommit=true",
+ "charset=utf8mb4,utf8,latin1",
+ "interpolateParams=true",
+ fmt.Sprintf("tls=%s", tlsOption),
+ fmt.Sprintf("transaction_isolation=%q", transactionIsolation),
+ fmt.Sprintf("timeout=%fs", this.Timeout),
+ fmt.Sprintf("readTimeout=%fs", this.Timeout),
+ fmt.Sprintf("writeTimeout=%fs", this.Timeout),
+ }
+
+ return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?%s", this.User, this.Password, hostname, this.Key.Port, databaseName, strings.Join(connectionParams, "&"))
}
diff --git a/go/mysql/connection_test.go b/go/mysql/connection_test.go
index 2befbc9..390774c 100644
--- a/go/mysql/connection_test.go
+++ b/go/mysql/connection_test.go
@@ -1,5 +1,5 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -9,8 +9,8 @@ import (
"crypto/tls"
"testing"
- "github.com/outbrain/golib/log"
- test "github.com/outbrain/golib/tests"
+ "github.com/openark/golib/log"
+ test "github.com/openark/golib/tests"
)
func init() {
@@ -67,9 +67,10 @@ func TestGetDBUri(t *testing.T) {
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
+ c.Timeout = 1.2345
uri := c.GetDBUri("test")
- test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?timeout=0.000000s&readTimeout=0.000000s&writeTimeout=0.000000s&interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1&tls=false")
+ test.S(t).ExpectEquals(uri, `gromit:penguin@tcp(myhost:3306)/test?autocommit=true&charset=utf8mb4,utf8,latin1&interpolateParams=true&tls=false&transaction_isolation="REPEATABLE-READ"&timeout=1.234500s&readTimeout=1.234500s&writeTimeout=1.234500s`)
}
func TestGetDBUriWithTLSSetup(t *testing.T) {
@@ -77,8 +78,9 @@ func TestGetDBUriWithTLSSetup(t *testing.T) {
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
+ c.Timeout = 1.2345
c.tlsConfig = &tls.Config{}
uri := c.GetDBUri("test")
- test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?timeout=0.000000s&readTimeout=0.000000s&writeTimeout=0.000000s&interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1&tls=ghost")
+ test.S(t).ExpectEquals(uri, `gromit:penguin@tcp(myhost:3306)/test?autocommit=true&charset=utf8mb4,utf8,latin1&interpolateParams=true&tls=ghost&transaction_isolation="REPEATABLE-READ"&timeout=1.234500s&readTimeout=1.234500s&writeTimeout=1.234500s`)
}
diff --git a/go/mysql/instance_key.go b/go/mysql/instance_key.go
index eb108d8..3d2bff1 100644
--- a/go/mysql/instance_key.go
+++ b/go/mysql/instance_key.go
@@ -1,5 +1,6 @@
/*
Copyright 2015 Shlomi Noach, courtesy Booking.com
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -12,15 +13,16 @@ import (
"strings"
)
-const (
- DefaultInstancePort = 3306
-)
+const DefaultInstancePort = 3306
var (
ipv4HostPortRegexp = regexp.MustCompile("^([^:]+):([0-9]+)$")
ipv4HostRegexp = regexp.MustCompile("^([^:]+)$")
- ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") // e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308
- ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$") // e.g. 2001:db8:1f70::999:de8:7648:6e8
+
+ // e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308
+ ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") //nolint:gosimple
+ // e.g. 2001:db8:1f70::999:de8:7648:6e8
+ ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$")
)
// InstanceKey is an instance indicator, identified by hostname and port
@@ -33,8 +35,7 @@ const detachHint = "//"
// ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306
func NewRawInstanceKey(hostPort string) (*InstanceKey, error) {
- hostname := ""
- port := ""
+ var hostname, port string
if submatch := ipv4HostPortRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
hostname = submatch[1]
port = submatch[2]
diff --git a/go/mysql/instance_key_test.go b/go/mysql/instance_key_test.go
index 778a5b3..fa6f45a 100644
--- a/go/mysql/instance_key_test.go
+++ b/go/mysql/instance_key_test.go
@@ -8,8 +8,8 @@ package mysql
import (
"testing"
- "github.com/outbrain/golib/log"
- test "github.com/outbrain/golib/tests"
+ "github.com/openark/golib/log"
+ test "github.com/openark/golib/tests"
)
func init() {
diff --git a/go/mysql/utils.go b/go/mysql/utils.go
index 17bb5fc..c69a3f2 100644
--- a/go/mysql/utils.go
+++ b/go/mysql/utils.go
@@ -14,12 +14,15 @@ import (
"github.com/github/gh-ost/go/sql"
- "github.com/outbrain/golib/log"
- "github.com/outbrain/golib/sqlutils"
+ "github.com/openark/golib/log"
+ "github.com/openark/golib/sqlutils"
)
-const MaxTableNameLength = 64
-const MaxReplicationPasswordLength = 32
+const (
+ MaxTableNameLength = 64
+ MaxReplicationPasswordLength = 32
+ MaxDBPoolConnections = 3
+)
type ReplicationLagResult struct {
Key InstanceKey
@@ -39,23 +42,22 @@ func (this *ReplicationLagResult) HasLag() bool {
var knownDBs map[string]*gosql.DB = make(map[string]*gosql.DB)
var knownDBsMutex = &sync.Mutex{}
-func GetDB(migrationUuid string, mysql_uri string) (*gosql.DB, bool, error) {
+func GetDB(migrationUuid string, mysql_uri string) (db *gosql.DB, exists bool, err error) {
cacheKey := migrationUuid + ":" + mysql_uri
knownDBsMutex.Lock()
- defer func() {
- knownDBsMutex.Unlock()
- }()
+ defer knownDBsMutex.Unlock()
- var exists bool
- if _, exists = knownDBs[cacheKey]; !exists {
- if db, err := gosql.Open("mysql", mysql_uri); err == nil {
- knownDBs[cacheKey] = db
- } else {
- return db, exists, err
+ if db, exists = knownDBs[cacheKey]; !exists {
+ db, err = gosql.Open("mysql", mysql_uri)
+ if err != nil {
+ return nil, false, err
}
+ db.SetMaxOpenConns(MaxDBPoolConnections)
+ db.SetMaxIdleConns(MaxDBPoolConnections)
+ knownDBs[cacheKey] = db
}
- return knownDBs[cacheKey], exists, nil
+ return db, exists, nil
}
// GetReplicationLagFromSlaveStatus returns replication lag for a given db; via SHOW SLAVE STATUS
@@ -203,3 +205,9 @@ func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnL
}
return sql.NewColumnList(columnNames), sql.NewColumnList(virtualColumnNames), nil
}
+
+// Kill executes a KILL QUERY by connection id
+func Kill(db *gosql.DB, connectionID string) error {
+	_, err := db.Exec(`KILL QUERY ?`, connectionID)
+ return err
+}
diff --git a/go/os/process.go b/go/os/process.go
deleted file mode 100644
index 0d4d242..0000000
--- a/go/os/process.go
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- Copyright 2014 Outbrain Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package os
-
-import (
- "github.com/outbrain/golib/log"
- "io/ioutil"
- "os"
- "os/exec"
-)
-
-func execCmd(commandText string, arguments ...string) (*exec.Cmd, string, error) {
- commandBytes := []byte(commandText)
- tmpFile, err := ioutil.TempFile("", "gh-ost-process-cmd-")
- if err != nil {
- return nil, "", log.Errore(err)
- }
- ioutil.WriteFile(tmpFile.Name(), commandBytes, 0644)
- log.Debugf("execCmd: %s", commandText)
- shellArguments := append([]string{}, tmpFile.Name())
- shellArguments = append(shellArguments, arguments...)
- log.Debugf("%+v", shellArguments)
- return exec.Command("bash", shellArguments...), tmpFile.Name(), nil
-}
-
-// CommandRun executes a command
-func CommandRun(commandText string, arguments ...string) error {
- cmd, tmpFileName, err := execCmd(commandText, arguments...)
- defer os.Remove(tmpFileName)
- if err != nil {
- return log.Errore(err)
- }
- err = cmd.Run()
- return log.Errore(err)
-}
-
-// RunCommandWithOutput executes a command and return output bytes
-func RunCommandWithOutput(commandText string) ([]byte, error) {
- cmd, tmpFileName, err := execCmd(commandText)
- defer os.Remove(tmpFileName)
- if err != nil {
- return nil, log.Errore(err)
- }
-
- outputBytes, err := cmd.Output()
- if err != nil {
- return nil, log.Errore(err)
- }
-
- return outputBytes, nil
-}
diff --git a/go/sql/builder.go b/go/sql/builder.go
index 4b019bc..025d4a8 100644
--- a/go/sql/builder.go
+++ b/go/sql/builder.go
@@ -1,5 +1,5 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -33,11 +33,13 @@ func EscapeName(name string) string {
}
func buildColumnsPreparedValues(columns *ColumnList) []string {
- values := make([]string, columns.Len(), columns.Len())
+ values := make([]string, columns.Len())
for i, column := range columns.Columns() {
var token string
if column.timezoneConversion != nil {
token = fmt.Sprintf("convert_tz(?, '%s', '%s')", column.timezoneConversion.ToTimezone, "+00:00")
+ } else if column.enumToTextConversion {
+ token = fmt.Sprintf("ELT(?, %s)", column.EnumValues)
} else if column.Type == JSONColumnType {
token = "convert(? using utf8mb4)"
} else {
@@ -49,7 +51,7 @@ func buildColumnsPreparedValues(columns *ColumnList) []string {
}
func buildPreparedValues(length int) []string {
- values := make([]string, length, length)
+ values := make([]string, length)
for i := 0; i < length; i++ {
values[i] = "?"
}
@@ -57,7 +59,7 @@ func buildPreparedValues(length int) []string {
}
func duplicateNames(names []string) []string {
- duplicate := make([]string, len(names), len(names))
+ duplicate := make([]string, len(names))
copy(duplicate, names)
return duplicate
}
@@ -108,6 +110,8 @@ func BuildSetPreparedClause(columns *ColumnList) (result string, err error) {
var setToken string
if column.timezoneConversion != nil {
setToken = fmt.Sprintf("%s=convert_tz(?, '%s', '%s')", EscapeName(column.Name), column.timezoneConversion.ToTimezone, "+00:00")
+ } else if column.enumToTextConversion {
+ setToken = fmt.Sprintf("%s=ELT(?, %s)", EscapeName(column.Name), column.EnumValues)
} else if column.Type == JSONColumnType {
setToken = fmt.Sprintf("%s=convert(? using utf8mb4)", EscapeName(column.Name))
} else {
@@ -163,7 +167,7 @@ func BuildRangeComparison(columns []string, values []string, args []interface{},
if includeEquals {
comparison, err := BuildEqualsComparison(columns, values)
if err != nil {
- return "", explodedArgs, nil
+ return "", explodedArgs, err
}
comparisons = append(comparisons, comparison)
explodedArgs = append(explodedArgs, args...)
@@ -257,8 +261,8 @@ func BuildUniqueKeyRangeEndPreparedQueryViaOffset(databaseName, tableName string
explodedArgs = append(explodedArgs, rangeExplodedArgs...)
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
- uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
- uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
+ uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames))
+ uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames))
for i, column := range uniqueKeyColumns.Columns() {
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
if column.Type == EnumColumnType {
@@ -312,8 +316,8 @@ func BuildUniqueKeyRangeEndPreparedQueryViaTemptable(databaseName, tableName str
explodedArgs = append(explodedArgs, rangeExplodedArgs...)
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
- uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
- uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
+ uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames))
+ uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames))
for i, column := range uniqueKeyColumns.Columns() {
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
if column.Type == EnumColumnType {
@@ -364,7 +368,7 @@ func buildUniqueKeyMinMaxValuesPreparedQuery(databaseName, tableName string, uni
tableName = EscapeName(tableName)
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
- uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
+ uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames))
for i, column := range uniqueKeyColumns.Columns() {
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
if column.Type == EnumColumnType {
@@ -396,7 +400,7 @@ func BuildDMLDeleteQuery(databaseName, tableName string, tableColumns, uniqueKey
}
for _, column := range uniqueKeyColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
- arg := column.convertArg(args[tableOrdinal])
+ arg := column.convertArg(args[tableOrdinal], true)
uniqueKeyArgs = append(uniqueKeyArgs, arg)
}
databaseName = EscapeName(databaseName)
@@ -433,7 +437,7 @@ func BuildDMLInsertQuery(databaseName, tableName string, tableColumns, sharedCol
for _, column := range sharedColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
- arg := column.convertArg(args[tableOrdinal])
+ arg := column.convertArg(args[tableOrdinal], false)
sharedArgs = append(sharedArgs, arg)
}
@@ -481,13 +485,13 @@ func BuildDMLUpdateQuery(databaseName, tableName string, tableColumns, sharedCol
for _, column := range sharedColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
- arg := column.convertArg(valueArgs[tableOrdinal])
+ arg := column.convertArg(valueArgs[tableOrdinal], false)
sharedArgs = append(sharedArgs, arg)
}
for _, column := range uniqueKeyColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
- arg := column.convertArg(whereArgs[tableOrdinal])
+ arg := column.convertArg(whereArgs[tableOrdinal], true)
uniqueKeyArgs = append(uniqueKeyArgs, arg)
}
@@ -497,6 +501,9 @@ func BuildDMLUpdateQuery(databaseName, tableName string, tableColumns, sharedCol
}
equalsComparison, err := BuildEqualsPreparedComparison(uniqueKeyColumns.Names())
+ if err != nil {
+ return "", sharedArgs, uniqueKeyArgs, err
+ }
result = fmt.Sprintf(`
update /* gh-ost %s.%s */
%s.%s
diff --git a/go/sql/builder_test.go b/go/sql/builder_test.go
index a178c4c..2998242 100644
--- a/go/sql/builder_test.go
+++ b/go/sql/builder_test.go
@@ -12,8 +12,8 @@ import (
"regexp"
"strings"
- "github.com/outbrain/golib/log"
- test "github.com/outbrain/golib/tests"
+ "github.com/openark/golib/log"
+ test "github.com/openark/golib/tests"
)
var (
diff --git a/go/sql/parser.go b/go/sql/parser.go
index ebb8b38..a72af33 100644
--- a/go/sql/parser.go
+++ b/go/sql/parser.go
@@ -1,5 +1,5 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -16,6 +16,7 @@ var (
renameColumnRegexp = regexp.MustCompile(`(?i)\bchange\s+(column\s+|)([\S]+)\s+([\S]+)\s+`)
dropColumnRegexp = regexp.MustCompile(`(?i)\bdrop\s+(column\s+|)([\S]+)$`)
renameTableRegexp = regexp.MustCompile(`(?i)\brename\s+(to|as)\s+`)
+ autoIncrementRegexp = regexp.MustCompile(`(?i)\bauto_increment[\s]*=[\s]*([0-9]+)`)
alterTableExplicitSchemaTableRegexps = []*regexp.Regexp{
// ALTER TABLE `scm`.`tbl` something
regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `[.]` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
@@ -32,12 +33,14 @@ var (
// ALTER TABLE tbl something
regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)\s+(.*$)`),
}
+ enumValuesRegexp = regexp.MustCompile("^enum[(](.*)[)]$")
)
type AlterTableParser struct {
- columnRenameMap map[string]string
- droppedColumns map[string]bool
- isRenameTable bool
+ columnRenameMap map[string]string
+ droppedColumns map[string]bool
+ isRenameTable bool
+ isAutoIncrementDefined bool
alterStatementOptions string
alterTokens []string
@@ -122,11 +125,16 @@ func (this *AlterTableParser) parseAlterToken(alterToken string) (err error) {
this.isRenameTable = true
}
}
+ {
+ // auto_increment
+ if autoIncrementRegexp.MatchString(alterToken) {
+ this.isAutoIncrementDefined = true
+ }
+ }
return nil
}
func (this *AlterTableParser) ParseAlterStatement(alterStatement string) (err error) {
-
this.alterStatementOptions = alterStatement
for _, alterTableRegexp := range alterTableExplicitSchemaTableRegexps {
if submatch := alterTableRegexp.FindStringSubmatch(this.alterStatementOptions); len(submatch) > 0 {
@@ -173,6 +181,11 @@ func (this *AlterTableParser) DroppedColumnsMap() map[string]bool {
func (this *AlterTableParser) IsRenameTable() bool {
return this.isRenameTable
}
+
+func (this *AlterTableParser) IsAutoIncrementDefined() bool {
+ return this.isAutoIncrementDefined
+}
+
func (this *AlterTableParser) GetExplicitSchema() string {
return this.explicitSchema
}
@@ -192,3 +205,10 @@ func (this *AlterTableParser) HasExplicitTable() bool {
func (this *AlterTableParser) GetAlterStatementOptions() string {
return this.alterStatementOptions
}
+
+func ParseEnumValues(enumColumnType string) string {
+ if submatch := enumValuesRegexp.FindStringSubmatch(enumColumnType); len(submatch) > 0 {
+ return submatch[1]
+ }
+ return enumColumnType
+}
diff --git a/go/sql/parser_test.go b/go/sql/parser_test.go
index 79faa63..79a9b7b 100644
--- a/go/sql/parser_test.go
+++ b/go/sql/parser_test.go
@@ -1,5 +1,5 @@
/*
- Copyright 2016 GitHub Inc.
+ Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -9,8 +9,8 @@ import (
"reflect"
"testing"
- "github.com/outbrain/golib/log"
- test "github.com/outbrain/golib/tests"
+ "github.com/openark/golib/log"
+ test "github.com/openark/golib/tests"
)
func init() {
@@ -24,6 +24,7 @@ func TestParseAlterStatement(t *testing.T) {
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
+ test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
}
func TestParseAlterStatementTrivialRename(t *testing.T) {
@@ -33,10 +34,30 @@ func TestParseAlterStatementTrivialRename(t *testing.T) {
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
+ test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
test.S(t).ExpectEquals(len(parser.columnRenameMap), 1)
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
}
+func TestParseAlterStatementWithAutoIncrement(t *testing.T) {
+ statements := []string{
+ "auto_increment=7",
+ "auto_increment = 7",
+ "AUTO_INCREMENT = 71",
+ "add column t int, change ts ts timestamp, auto_increment=7 engine=innodb",
+ "add column t int, change ts ts timestamp, auto_increment =7 engine=innodb",
+ "add column t int, change ts ts timestamp, AUTO_INCREMENT = 7 engine=innodb",
+ "add column t int, change ts ts timestamp, engine=innodb auto_increment=73425",
+ }
+ for _, statement := range statements {
+ parser := NewAlterTableParser()
+ err := parser.ParseAlterStatement(statement)
+ test.S(t).ExpectNil(err)
+ test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
+ test.S(t).ExpectTrue(parser.IsAutoIncrementDefined())
+ }
+}
+
func TestParseAlterStatementTrivialRenames(t *testing.T) {
statement := "add column t int, change ts ts timestamp, CHANGE f `f` float, engine=innodb"
parser := NewAlterTableParser()
@@ -44,6 +65,7 @@ func TestParseAlterStatementTrivialRenames(t *testing.T) {
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
+ test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
test.S(t).ExpectEquals(len(parser.columnRenameMap), 2)
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
test.S(t).ExpectEquals(parser.columnRenameMap["f"], "f")
@@ -64,6 +86,7 @@ func TestParseAlterStatementNonTrivial(t *testing.T) {
parser := NewAlterTableParser()
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
+ test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
renames := parser.GetNonTrivialRenames()
test.S(t).ExpectEquals(len(renames), 2)
@@ -126,7 +149,6 @@ func TestSanitizeQuotesFromAlterStatement(t *testing.T) {
}
func TestParseAlterStatementDroppedColumns(t *testing.T) {
-
{
parser := NewAlterTableParser()
statement := "drop column b"
@@ -166,7 +188,6 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) {
}
func TestParseAlterStatementRenameTable(t *testing.T) {
-
{
parser := NewAlterTableParser()
statement := "drop column b"
@@ -206,7 +227,6 @@ func TestParseAlterStatementRenameTable(t *testing.T) {
}
func TestParseAlterStatementExplicitTable(t *testing.T) {
-
{
parser := NewAlterTableParser()
statement := "drop column b"
@@ -298,3 +318,21 @@ func TestParseAlterStatementExplicitTable(t *testing.T) {
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b", "add index idx(i)"}))
}
}
+
+func TestParseEnumValues(t *testing.T) {
+ {
+ s := "enum('red','green','blue','orange')"
+ values := ParseEnumValues(s)
+ test.S(t).ExpectEquals(values, "'red','green','blue','orange'")
+ }
+ {
+ s := "('red','green','blue','orange')"
+ values := ParseEnumValues(s)
+ test.S(t).ExpectEquals(values, "('red','green','blue','orange')")
+ }
+ {
+ s := "zzz"
+ values := ParseEnumValues(s)
+ test.S(t).ExpectEquals(values, "zzz")
+ }
+}
diff --git a/go/sql/types.go b/go/sql/types.go
index ef83819..3c4ce5e 100644
--- a/go/sql/types.go
+++ b/go/sql/types.go
@@ -6,6 +6,7 @@
package sql
import (
+ "bytes"
"fmt"
"reflect"
"strconv"
@@ -22,6 +23,7 @@ const (
MediumIntColumnType
JSONColumnType
FloatColumnType
+ BinaryColumnType
)
const maxMediumintUnsigned int32 = 16777215
@@ -31,19 +33,37 @@ type TimezoneConversion struct {
}
type Column struct {
- Name string
- IsUnsigned bool
- Charset string
- Type ColumnType
- timezoneConversion *TimezoneConversion
+ Name string
+ IsUnsigned bool
+ Charset string
+ Type ColumnType
+ EnumValues string
+ timezoneConversion *TimezoneConversion
+ enumToTextConversion bool
+ // add Octet length for binary type, fix bytes with suffix "00" get clipped in mysql binlog.
+ // https://github.com/github/gh-ost/issues/909
+ BinaryOctetLength uint
}
-func (this *Column) convertArg(arg interface{}) interface{} {
+func (this *Column) convertArg(arg interface{}, isUniqueKeyColumn bool) interface{} {
if s, ok := arg.(string); ok {
// string, charset conversion
if encoding, ok := charsetEncodingMap[this.Charset]; ok {
arg, _ = encoding.NewDecoder().String(s)
}
+
+ if this.Type == BinaryColumnType && isUniqueKeyColumn {
+ arg2Bytes := []byte(arg.(string))
+ size := len(arg2Bytes)
+ if uint(size) < this.BinaryOctetLength {
+ buf := bytes.NewBuffer(arg2Bytes)
+ for i := uint(0); i < (this.BinaryOctetLength - uint(size)); i++ {
+ buf.Write([]byte{0})
+ }
+ arg = buf.String()
+ }
+ }
+
return arg
}
@@ -179,6 +199,18 @@ func (this *ColumnList) HasTimezoneConversion(columnName string) bool {
return this.GetColumn(columnName).timezoneConversion != nil
}
+func (this *ColumnList) SetEnumToTextConversion(columnName string) {
+ this.GetColumn(columnName).enumToTextConversion = true
+}
+
+func (this *ColumnList) IsEnumToTextConversion(columnName string) bool {
+ return this.GetColumn(columnName).enumToTextConversion
+}
+
+func (this *ColumnList) SetEnumValues(columnName string, enumValues string) {
+ this.GetColumn(columnName).EnumValues = enumValues
+}
+
func (this *ColumnList) String() string {
return strings.Join(this.Names(), ",")
}
diff --git a/go/sql/types_test.go b/go/sql/types_test.go
index 0fd062d..58b06ca 100644
--- a/go/sql/types_test.go
+++ b/go/sql/types_test.go
@@ -10,8 +10,8 @@ import (
"reflect"
- "github.com/outbrain/golib/log"
- test "github.com/outbrain/golib/tests"
+ "github.com/openark/golib/log"
+ test "github.com/openark/golib/tests"
)
func init() {
diff --git a/localtests/autoinc-copy-deletes-user-defined/create.sql b/localtests/autoinc-copy-deletes-user-defined/create.sql
new file mode 100644
index 0000000..2058b0b
--- /dev/null
+++ b/localtests/autoinc-copy-deletes-user-defined/create.sql
@@ -0,0 +1,17 @@
+drop event if exists gh_ost_test;
+
+drop table if exists gh_ost_test;
+create table gh_ost_test (
+ id int auto_increment,
+ i int not null,
+ primary key(id)
+) auto_increment=1;
+
+insert into gh_ost_test values (NULL, 11);
+insert into gh_ost_test values (NULL, 13);
+insert into gh_ost_test values (NULL, 17);
+insert into gh_ost_test values (NULL, 23);
+insert into gh_ost_test values (NULL, 29);
+insert into gh_ost_test values (NULL, 31);
+insert into gh_ost_test values (NULL, 37);
+delete from gh_ost_test where id>=5;
diff --git a/localtests/autoinc-copy-deletes-user-defined/expect_table_structure b/localtests/autoinc-copy-deletes-user-defined/expect_table_structure
new file mode 100644
index 0000000..5e180af
--- /dev/null
+++ b/localtests/autoinc-copy-deletes-user-defined/expect_table_structure
@@ -0,0 +1 @@
+AUTO_INCREMENT=7
diff --git a/localtests/autoinc-copy-deletes-user-defined/extra_args b/localtests/autoinc-copy-deletes-user-defined/extra_args
new file mode 100644
index 0000000..cce91e1
--- /dev/null
+++ b/localtests/autoinc-copy-deletes-user-defined/extra_args
@@ -0,0 +1 @@
+--alter='AUTO_INCREMENT=7'
diff --git a/localtests/autoinc-copy-deletes/create.sql b/localtests/autoinc-copy-deletes/create.sql
new file mode 100644
index 0000000..2058b0b
--- /dev/null
+++ b/localtests/autoinc-copy-deletes/create.sql
@@ -0,0 +1,17 @@
+drop event if exists gh_ost_test;
+
+drop table if exists gh_ost_test;
+create table gh_ost_test (
+ id int auto_increment,
+ i int not null,
+ primary key(id)
+) auto_increment=1;
+
+insert into gh_ost_test values (NULL, 11);
+insert into gh_ost_test values (NULL, 13);
+insert into gh_ost_test values (NULL, 17);
+insert into gh_ost_test values (NULL, 23);
+insert into gh_ost_test values (NULL, 29);
+insert into gh_ost_test values (NULL, 31);
+insert into gh_ost_test values (NULL, 37);
+delete from gh_ost_test where id>=5;
diff --git a/localtests/autoinc-copy-deletes/expect_table_structure b/localtests/autoinc-copy-deletes/expect_table_structure
new file mode 100644
index 0000000..5a755ff
--- /dev/null
+++ b/localtests/autoinc-copy-deletes/expect_table_structure
@@ -0,0 +1 @@
+AUTO_INCREMENT=8
diff --git a/localtests/autoinc-copy-simple/create.sql b/localtests/autoinc-copy-simple/create.sql
new file mode 100644
index 0000000..677f08e
--- /dev/null
+++ b/localtests/autoinc-copy-simple/create.sql
@@ -0,0 +1,13 @@
+drop event if exists gh_ost_test;
+
+drop table if exists gh_ost_test;
+create table gh_ost_test (
+ id int auto_increment,
+ i int not null,
+ primary key(id)
+) auto_increment=1;
+
+insert into gh_ost_test values (NULL, 11);
+insert into gh_ost_test values (NULL, 13);
+insert into gh_ost_test values (NULL, 17);
+insert into gh_ost_test values (NULL, 23);
diff --git a/localtests/autoinc-copy-simple/expect_table_structure b/localtests/autoinc-copy-simple/expect_table_structure
new file mode 100644
index 0000000..3ed5902
--- /dev/null
+++ b/localtests/autoinc-copy-simple/expect_table_structure
@@ -0,0 +1 @@
+AUTO_INCREMENT=5
diff --git a/localtests/compound-pk-ts/create.sql b/localtests/compound-pk-ts/create.sql
new file mode 100644
index 0000000..1bab87a
--- /dev/null
+++ b/localtests/compound-pk-ts/create.sql
@@ -0,0 +1,40 @@
+drop table if exists gh_ost_test;
+create table gh_ost_test (
+ id int auto_increment,
+ i int not null,
+ ts0 timestamp(6) default current_timestamp(6),
+ updated tinyint unsigned default 0,
+ primary key(id, ts0)
+) auto_increment=1;
+
+drop event if exists gh_ost_test;
+delimiter ;;
+create event gh_ost_test
+ on schedule every 1 second
+ starts current_timestamp
+ ends current_timestamp + interval 60 second
+ on completion not preserve
+ enable
+ do
+begin
+ insert into gh_ost_test values (null, 11, sysdate(6), 0);
+ update gh_ost_test set updated = 1 where i = 11 order by id desc limit 1;
+
+ insert into gh_ost_test values (null, 13, sysdate(6), 0);
+ update gh_ost_test set updated = 1 where i = 13 order by id desc limit 1;
+
+ insert into gh_ost_test values (null, 17, sysdate(6), 0);
+ update gh_ost_test set updated = 1 where i = 17 order by id desc limit 1;
+
+ insert into gh_ost_test values (null, 19, sysdate(6), 0);
+ update gh_ost_test set updated = 1 where i = 19 order by id desc limit 1;
+
+ insert into gh_ost_test values (null, 23, sysdate(6), 0);
+ update gh_ost_test set updated = 1 where i = 23 order by id desc limit 1;
+
+ insert into gh_ost_test values (null, 29, sysdate(6), 0);
+ insert into gh_ost_test values (null, 31, sysdate(6), 0);
+ insert into gh_ost_test values (null, 37, sysdate(6), 0);
+ insert into gh_ost_test values (null, 41, sysdate(6), 0);
+ delete from gh_ost_test where i = 31 order by id desc limit 1;
+end ;;
diff --git a/localtests/compound-pk/create.sql b/localtests/compound-pk/create.sql
new file mode 100644
index 0000000..cf838d0
--- /dev/null
+++ b/localtests/compound-pk/create.sql
@@ -0,0 +1,40 @@
+drop table if exists gh_ost_test;
+create table gh_ost_test (
+ id int auto_increment,
+ i int not null,
+ v varchar(128),
+ updated tinyint unsigned default 0,
+ primary key(id, v)
+) auto_increment=1;
+
+drop event if exists gh_ost_test;
+delimiter ;;
+create event gh_ost_test
+ on schedule every 1 second
+ starts current_timestamp
+ ends current_timestamp + interval 60 second
+ on completion not preserve
+ enable
+ do
+begin
+ insert into gh_ost_test values (null, 11, 'eleven', 0);
+ update gh_ost_test set updated = 1 where i = 11 order by id desc limit 1;
+
+ insert into gh_ost_test values (null, 13, 'thirteen', 0);
+ update gh_ost_test set updated = 1 where i = 13 order by id desc limit 1;
+
+ insert into gh_ost_test values (null, 17, 'seventeen', 0);
+ update gh_ost_test set updated = 1 where i = 17 order by id desc limit 1;
+
+ insert into gh_ost_test values (null, 19, 'nineteen', 0);
+ update gh_ost_test set updated = 1 where i = 19 order by id desc limit 1;
+
+ insert into gh_ost_test values (null, 23, 'twenty three', 0);
+ update gh_ost_test set updated = 1 where i = 23 order by id desc limit 1;
+
+ insert into gh_ost_test values (null, 29, 'twenty nine', 0);
+ insert into gh_ost_test values (null, 31, 'thirty one', 0);
+ insert into gh_ost_test values (null, 37, 'thirty seven', 0);
+ insert into gh_ost_test values (null, 41, 'forty one', 0);
+ delete from gh_ost_test where i = 31 order by id desc limit 1;
+end ;;
diff --git a/localtests/datetime-submillis-zeroleading/ignore_versions b/localtests/datetime-submillis-zeroleading/ignore_versions
deleted file mode 100644
index 7acd3f0..0000000
--- a/localtests/datetime-submillis-zeroleading/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5)
diff --git a/localtests/datetime-submillis/ignore_versions b/localtests/datetime-submillis/ignore_versions
deleted file mode 100644
index 7acd3f0..0000000
--- a/localtests/datetime-submillis/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5)
diff --git a/localtests/datetime-to-timestamp-pk-fail/ignore_versions b/localtests/datetime-to-timestamp-pk-fail/ignore_versions
deleted file mode 100644
index 7acd3f0..0000000
--- a/localtests/datetime-to-timestamp-pk-fail/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5)
diff --git a/localtests/datetime-with-zero/create.sql b/localtests/datetime-with-zero/create.sql
new file mode 100644
index 0000000..526d1e6
--- /dev/null
+++ b/localtests/datetime-with-zero/create.sql
@@ -0,0 +1,20 @@
+drop table if exists gh_ost_test;
+create table gh_ost_test (
+ id int unsigned auto_increment,
+ i int not null,
+ dt datetime,
+ primary key(id)
+) auto_increment=1;
+
+drop event if exists gh_ost_test;
+delimiter ;;
+create event gh_ost_test
+ on schedule every 1 second
+ starts current_timestamp
+ ends current_timestamp + interval 60 second
+ on completion not preserve
+ enable
+ do
+begin
+ insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
+end ;;
diff --git a/localtests/datetime-with-zero/extra_args b/localtests/datetime-with-zero/extra_args
new file mode 100644
index 0000000..0d60fb4
--- /dev/null
+++ b/localtests/datetime-with-zero/extra_args
@@ -0,0 +1 @@
+--allow-zero-in-date --alter="change column dt dt datetime not null default '1970-00-00 00:00:00'"
diff --git a/localtests/datetime/ignore_versions b/localtests/datetime/ignore_versions
deleted file mode 100644
index 7acd3f0..0000000
--- a/localtests/datetime/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5)
diff --git a/localtests/enum-to-varchar/create.sql b/localtests/enum-to-varchar/create.sql
new file mode 100644
index 0000000..0dbab17
--- /dev/null
+++ b/localtests/enum-to-varchar/create.sql
@@ -0,0 +1,26 @@
+drop table if exists gh_ost_test;
+create table gh_ost_test (
+ id int auto_increment,
+ i int not null,
+ e enum('red', 'green', 'blue', 'orange') null default null collate 'utf8_bin',
+ primary key(id)
+) auto_increment=1;
+
+insert into gh_ost_test values (null, 7, 'red');
+
+drop event if exists gh_ost_test;
+delimiter ;;
+create event gh_ost_test
+ on schedule every 1 second
+ starts current_timestamp
+ ends current_timestamp + interval 60 second
+ on completion not preserve
+ enable
+ do
+begin
+ insert into gh_ost_test values (null, 11, 'red');
+ insert into gh_ost_test values (null, 13, 'green');
+ insert into gh_ost_test values (null, 17, 'blue');
+ set @last_insert_id := last_insert_id();
+ update gh_ost_test set e='orange' where id = @last_insert_id;
+end ;;
diff --git a/localtests/enum-to-varchar/extra_args b/localtests/enum-to-varchar/extra_args
new file mode 100644
index 0000000..68524e4
--- /dev/null
+++ b/localtests/enum-to-varchar/extra_args
@@ -0,0 +1 @@
+--alter="change e e varchar(32) not null default ''"
diff --git a/localtests/existing-datetime-with-zero/create.sql b/localtests/existing-datetime-with-zero/create.sql
new file mode 100644
index 0000000..5320d2c
--- /dev/null
+++ b/localtests/existing-datetime-with-zero/create.sql
@@ -0,0 +1,21 @@
+set session sql_mode='';
+drop table if exists gh_ost_test;
+create table gh_ost_test (
+ id int unsigned auto_increment,
+ i int not null,
+ dt datetime not null default '1970-00-00 00:00:00',
+ primary key(id)
+) auto_increment=1;
+
+drop event if exists gh_ost_test;
+delimiter ;;
+create event gh_ost_test
+ on schedule every 1 second
+ starts current_timestamp
+ ends current_timestamp + interval 60 second
+ on completion not preserve
+ enable
+ do
+begin
+ insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
+end ;;
diff --git a/localtests/existing-datetime-with-zero/extra_args b/localtests/existing-datetime-with-zero/extra_args
new file mode 100644
index 0000000..eb0e2ff
--- /dev/null
+++ b/localtests/existing-datetime-with-zero/extra_args
@@ -0,0 +1 @@
+--allow-zero-in-date --alter="engine=innodb"
diff --git a/localtests/fail-datetime-with-zero/create.sql b/localtests/fail-datetime-with-zero/create.sql
new file mode 100644
index 0000000..526d1e6
--- /dev/null
+++ b/localtests/fail-datetime-with-zero/create.sql
@@ -0,0 +1,20 @@
+drop table if exists gh_ost_test;
+create table gh_ost_test (
+ id int unsigned auto_increment,
+ i int not null,
+ dt datetime,
+ primary key(id)
+) auto_increment=1;
+
+drop event if exists gh_ost_test;
+delimiter ;;
+create event gh_ost_test
+ on schedule every 1 second
+ starts current_timestamp
+ ends current_timestamp + interval 60 second
+ on completion not preserve
+ enable
+ do
+begin
+ insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
+end ;;
diff --git a/localtests/fail-datetime-with-zero/expect_failure b/localtests/fail-datetime-with-zero/expect_failure
new file mode 100644
index 0000000..79356a1
--- /dev/null
+++ b/localtests/fail-datetime-with-zero/expect_failure
@@ -0,0 +1 @@
+Invalid default value for 'dt'
diff --git a/localtests/fail-datetime-with-zero/extra_args b/localtests/fail-datetime-with-zero/extra_args
new file mode 100644
index 0000000..9b72ac2
--- /dev/null
+++ b/localtests/fail-datetime-with-zero/extra_args
@@ -0,0 +1 @@
+--alter="change column dt dt datetime not null default '1970-00-00 00:00:00'"
diff --git a/localtests/fail-existing-datetime-with-zero/create.sql b/localtests/fail-existing-datetime-with-zero/create.sql
new file mode 100644
index 0000000..5320d2c
--- /dev/null
+++ b/localtests/fail-existing-datetime-with-zero/create.sql
@@ -0,0 +1,21 @@
+set session sql_mode='';
+drop table if exists gh_ost_test;
+create table gh_ost_test (
+ id int unsigned auto_increment,
+ i int not null,
+ dt datetime not null default '1970-00-00 00:00:00',
+ primary key(id)
+) auto_increment=1;
+
+drop event if exists gh_ost_test;
+delimiter ;;
+create event gh_ost_test
+ on schedule every 1 second
+ starts current_timestamp
+ ends current_timestamp + interval 60 second
+ on completion not preserve
+ enable
+ do
+begin
+ insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
+end ;;
diff --git a/localtests/fail-existing-datetime-with-zero/expect_failure b/localtests/fail-existing-datetime-with-zero/expect_failure
new file mode 100644
index 0000000..79356a1
--- /dev/null
+++ b/localtests/fail-existing-datetime-with-zero/expect_failure
@@ -0,0 +1 @@
+Invalid default value for 'dt'
diff --git a/localtests/fail-existing-datetime-with-zero/extra_args b/localtests/fail-existing-datetime-with-zero/extra_args
new file mode 100644
index 0000000..31bc479
--- /dev/null
+++ b/localtests/fail-existing-datetime-with-zero/extra_args
@@ -0,0 +1 @@
+--alter="engine=innodb"
diff --git a/localtests/generated-columns-add57/create.sql b/localtests/generated-columns-add/create.sql
similarity index 100%
rename from localtests/generated-columns-add57/create.sql
rename to localtests/generated-columns-add/create.sql
diff --git a/localtests/generated-columns-add57/extra_args b/localtests/generated-columns-add/extra_args
similarity index 100%
rename from localtests/generated-columns-add57/extra_args
rename to localtests/generated-columns-add/extra_args
diff --git a/localtests/generated-columns-add57/ghost_columns b/localtests/generated-columns-add/ghost_columns
similarity index 100%
rename from localtests/generated-columns-add57/ghost_columns
rename to localtests/generated-columns-add/ghost_columns
diff --git a/localtests/generated-columns-add57/order_by b/localtests/generated-columns-add/order_by
similarity index 100%
rename from localtests/generated-columns-add57/order_by
rename to localtests/generated-columns-add/order_by
diff --git a/localtests/generated-columns-add57/orig_columns b/localtests/generated-columns-add/orig_columns
similarity index 100%
rename from localtests/generated-columns-add57/orig_columns
rename to localtests/generated-columns-add/orig_columns
diff --git a/localtests/generated-columns-add57/ignore_versions b/localtests/generated-columns-add57/ignore_versions
deleted file mode 100644
index b6de5f8..0000000
--- a/localtests/generated-columns-add57/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5|5.6)
diff --git a/localtests/generated-columns-rename57/create.sql b/localtests/generated-columns-rename/create.sql
similarity index 100%
rename from localtests/generated-columns-rename57/create.sql
rename to localtests/generated-columns-rename/create.sql
diff --git a/localtests/generated-columns-rename57/extra_args b/localtests/generated-columns-rename/extra_args
similarity index 100%
rename from localtests/generated-columns-rename57/extra_args
rename to localtests/generated-columns-rename/extra_args
diff --git a/localtests/generated-columns-rename57/ignore_versions b/localtests/generated-columns-rename57/ignore_versions
deleted file mode 100644
index b6de5f8..0000000
--- a/localtests/generated-columns-rename57/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5|5.6)
diff --git a/localtests/generated-columns-unique/create.sql b/localtests/generated-columns-unique/create.sql
new file mode 100644
index 0000000..7a63dd9
--- /dev/null
+++ b/localtests/generated-columns-unique/create.sql
@@ -0,0 +1,30 @@
+drop table if exists gh_ost_test;
+create table gh_ost_test (
+ id int auto_increment,
+ `idb` varchar(36) CHARACTER SET utf8mb4 GENERATED ALWAYS AS (json_unquote(json_extract(`jsonobj`,_utf8mb4'$._id'))) STORED NOT NULL,
+ `jsonobj` json NOT NULL,
+ PRIMARY KEY (`id`,`idb`)
+) auto_increment=1;
+
+insert into gh_ost_test (id, jsonobj) values (null, '{"_id":2}');
+insert into gh_ost_test (id, jsonobj) values (null, '{"_id":3}');
+
+drop event if exists gh_ost_test;
+delimiter ;;
+create event gh_ost_test
+ on schedule every 1 second
+ starts current_timestamp
+ ends current_timestamp + interval 60 second
+ on completion not preserve
+ enable
+ do
+begin
+ insert into gh_ost_test (id, jsonobj) values (null, '{"_id":5}');
+ insert into gh_ost_test (id, jsonobj) values (null, '{"_id":7}');
+ insert into gh_ost_test (id, jsonobj) values (null, '{"_id":11}');
+ insert into gh_ost_test (id, jsonobj) values (null, '{"_id":13}');
+ insert into gh_ost_test (id, jsonobj) values (null, '{"_id":17}');
+ insert into gh_ost_test (id, jsonobj) values (null, '{"_id":19}');
+ insert into gh_ost_test (id, jsonobj) values (null, '{"_id":23}');
+ insert into gh_ost_test (id, jsonobj) values (null, '{"_id":27}');
+end ;;
diff --git a/localtests/generated-columns57/create.sql b/localtests/generated-columns/create.sql
similarity index 100%
rename from localtests/generated-columns57/create.sql
rename to localtests/generated-columns/create.sql
diff --git a/localtests/generated-columns57/ignore_versions b/localtests/generated-columns57/ignore_versions
deleted file mode 100644
index b6de5f8..0000000
--- a/localtests/generated-columns57/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5|5.6)
diff --git a/localtests/geometry57/create.sql b/localtests/geometry/create.sql
similarity index 100%
rename from localtests/geometry57/create.sql
rename to localtests/geometry/create.sql
diff --git a/localtests/geometry57/ignore_versions b/localtests/geometry57/ignore_versions
deleted file mode 100644
index b6de5f8..0000000
--- a/localtests/geometry57/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5|5.6)
diff --git a/localtests/json57dml/create.sql b/localtests/json-dml/create.sql
similarity index 100%
rename from localtests/json57dml/create.sql
rename to localtests/json-dml/create.sql
diff --git a/localtests/json57/create.sql b/localtests/json/create.sql
similarity index 100%
rename from localtests/json57/create.sql
rename to localtests/json/create.sql
diff --git a/localtests/json57/ignore_versions b/localtests/json57/ignore_versions
deleted file mode 100644
index b6de5f8..0000000
--- a/localtests/json57/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5|5.6)
diff --git a/localtests/json57dml/ignore_versions b/localtests/json57dml/ignore_versions
deleted file mode 100644
index b6de5f8..0000000
--- a/localtests/json57dml/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5|5.6)
diff --git a/localtests/spatial57/create.sql b/localtests/spatial/create.sql
similarity index 100%
rename from localtests/spatial57/create.sql
rename to localtests/spatial/create.sql
diff --git a/localtests/spatial57/ignore_versions b/localtests/spatial57/ignore_versions
deleted file mode 100644
index b6de5f8..0000000
--- a/localtests/spatial57/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5|5.6)
diff --git a/localtests/swap-pk-uk/ignore_versions b/localtests/swap-pk-uk/ignore_versions
deleted file mode 100644
index 7acd3f0..0000000
--- a/localtests/swap-pk-uk/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5)
diff --git a/localtests/swap-uk-uk/ignore_versions b/localtests/swap-uk-uk/ignore_versions
deleted file mode 100644
index 7acd3f0..0000000
--- a/localtests/swap-uk-uk/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5)
diff --git a/localtests/test.sh b/localtests/test.sh
index d4b3f17..f66c813 100755
--- a/localtests/test.sh
+++ b/localtests/test.sh
@@ -12,6 +12,7 @@ test_logfile=/tmp/gh-ost-test.log
default_ghost_binary=/tmp/gh-ost-test
ghost_binary=""
exec_command_file=/tmp/gh-ost-test.bash
+ghost_structure_output_file=/tmp/gh-ost-test.ghost.structure.sql
orig_content_output_file=/tmp/gh-ost-test.orig.content.csv
ghost_content_output_file=/tmp/gh-ost-test.ghost.content.csv
throttle_flag_file=/tmp/gh-ost-test.ghost.throttle.flag
@@ -204,6 +205,18 @@ test_single() {
return 1
fi
+ gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "show create table _gh_ost_test_gho\G" -ss > $ghost_structure_output_file
+
+ if [ -f $tests_path/$test_name/expect_table_structure ] ; then
+ expected_table_structure="$(cat $tests_path/$test_name/expect_table_structure)"
+ if ! grep -q "$expected_table_structure" $ghost_structure_output_file ; then
+ echo
+ echo "ERROR $test_name: table structure was expected to include ${expected_table_structure} but did not. cat $ghost_structure_output_file:"
+ cat $ghost_structure_output_file
+ return 1
+ fi
+ fi
+
echo_dot
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${orig_columns} from gh_ost_test ${order_by}" -ss > $orig_content_output_file
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${ghost_columns} from _gh_ost_test_gho ${order_by}" -ss > $ghost_content_output_file
@@ -211,6 +224,8 @@ test_single() {
ghost_checksum=$(cat $ghost_content_output_file | md5sum)
if [ "$orig_checksum" != "$ghost_checksum" ] ; then
+ gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${orig_columns} from gh_ost_test" -ss > $orig_content_output_file
+ gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${ghost_columns} from _gh_ost_test_gho" -ss > $ghost_content_output_file
echo "ERROR $test_name: checksum mismatch"
echo "---"
diff $orig_content_output_file $ghost_content_output_file
@@ -229,7 +244,9 @@ build_binary() {
echo "Using binary: $ghost_binary"
return 0
fi
+
go build -o $ghost_binary go/cmd/gh-ost/main.go
+
if [ $? -ne 0 ] ; then
echo "Build failure"
exit 1
diff --git a/localtests/timestamp-to-datetime/ignore_versions b/localtests/timestamp-to-datetime/ignore_versions
deleted file mode 100644
index 7acd3f0..0000000
--- a/localtests/timestamp-to-datetime/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5)
diff --git a/localtests/timestamp/ignore_versions b/localtests/timestamp/ignore_versions
deleted file mode 100644
index 7acd3f0..0000000
--- a/localtests/timestamp/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5)
diff --git a/localtests/tz-datetime-ts/ignore_versions b/localtests/tz-datetime-ts/ignore_versions
deleted file mode 100644
index 7acd3f0..0000000
--- a/localtests/tz-datetime-ts/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5)
diff --git a/localtests/tz/ignore_versions b/localtests/tz/ignore_versions
deleted file mode 100644
index 7acd3f0..0000000
--- a/localtests/tz/ignore_versions
+++ /dev/null
@@ -1 +0,0 @@
-(5.5)
diff --git a/script/build-deploy-tarball b/script/build-deploy-tarball
index 95da838..dc28b43 100755
--- a/script/build-deploy-tarball
+++ b/script/build-deploy-tarball
@@ -30,8 +30,6 @@ cp ${tarball}.gz "$BUILD_ARTIFACT_DIR"/gh-ost/
### HACK HACK HACK HACK ###
# blame @carlosmn, @mattr and @timvaillancourt-
-# Allow builds on buster to also be used for stretch + jessie
+# Allow builds on buster to also be used for stretch
stretch_tarball_name=$(echo $(basename "${tarball}") | sed s/-buster-/-stretch-/)
-jessie_tarball_name=$(echo $(basename "${stretch_tarball_name}") | sed s/-stretch-/-jessie-/)
cp ${tarball}.gz "$BUILD_ARTIFACT_DIR/gh-ost/${stretch_tarball_name}.gz"
-cp ${tarball}.gz "$BUILD_ARTIFACT_DIR/gh-ost/${jessie_tarball_name}.gz"
diff --git a/script/cibuild-gh-ost-replica-tests b/script/cibuild-gh-ost-replica-tests
index 3de9e05..c4dbfd2 100755
--- a/script/cibuild-gh-ost-replica-tests
+++ b/script/cibuild-gh-ost-replica-tests
@@ -4,19 +4,25 @@ set -e
whoami
-# Clone gh-ost-ci-env
-# Only clone if not already running locally at latest commit
-remote_commit=$(git ls-remote https://github.com/github/gh-ost-ci-env.git HEAD | cut -f1)
-local_commit="unknown"
-[ -d "gh-ost-ci-env" ] && local_commit=$(cd gh-ost-ci-env && git log --format="%H" -n 1)
+fetch_ci_env() {
+ # Clone gh-ost-ci-env
+ # Only clone if not already running locally at latest commit
+ remote_commit=$(git ls-remote https://github.com/github/gh-ost-ci-env.git HEAD | cut -f1)
+ local_commit="unknown"
+ [ -d "gh-ost-ci-env" ] && local_commit=$(cd gh-ost-ci-env && git log --format="%H" -n 1)
-echo "remote commit is: $remote_commit"
-echo "local commit is: $local_commit"
+ echo "remote commit is: $remote_commit"
+ echo "local commit is: $local_commit"
-if [ "$remote_commit" != "$local_commit" ] ; then
- rm -rf ./gh-ost-ci-env
- git clone https://github.com/github/gh-ost-ci-env.git
-fi
+ if [ "$remote_commit" != "$local_commit" ] ; then
+ rm -rf ./gh-ost-ci-env
+ git clone https://github.com/github/gh-ost-ci-env.git
+ fi
+}
+
+test_dbdeployer() {
+ gh-ost-ci-env/bin/linux/dbdeployer --version
+}
test_mysql_version() {
local mysql_version
@@ -30,17 +36,18 @@ test_mysql_version() {
mkdir -p sandbox/binary
rm -rf sandbox/binary/*
- gh-ost-ci-env/bin/linux/dbdeployer unpack gh-ost-ci-env/mysql-tarballs/"$mysql_version".tar.gz --unpack-version="$mysql_version" --sandbox-binary ${PWD}/sandbox/binary
+ gh-ost-ci-env/bin/linux/dbdeployer unpack gh-ost-ci-env/mysql-tarballs/"$mysql_version".tar.xz --sandbox-binary ${PWD}/sandbox/binary
mkdir -p sandboxes
rm -rf sandboxes/*
- if echo "$mysql_version" | egrep "5[.]5[.]" ; then
+ local mysql_version_num=${mysql_version#*-}
+ if echo "$mysql_version_num" | egrep "5[.]5[.]" ; then
gtid=""
else
gtid="--gtid"
fi
- gh-ost-ci-env/bin/linux/dbdeployer deploy replication "$mysql_version" --nodes 2 --sandbox-binary ${PWD}/sandbox/binary --sandbox-home ${PWD}/sandboxes ${gtid} --my-cnf-options log_slave_updates --my-cnf-options log_bin --my-cnf-options binlog_format=ROW --sandbox-directory rsandbox
+ gh-ost-ci-env/bin/linux/dbdeployer deploy replication "$mysql_version_num" --nodes 2 --sandbox-binary ${PWD}/sandbox/binary --sandbox-home ${PWD}/sandboxes ${gtid} --my-cnf-options log_slave_updates --my-cnf-options log_bin --my-cnf-options binlog_format=ROW --sandbox-directory rsandbox
sed '/sandboxes/d' -i gh-ost-ci-env/bin/gh-ost-test-mysql-master
echo 'sandboxes/rsandbox/m "$@"' >> gh-ost-ci-env/bin/gh-ost-test-mysql-master
@@ -59,12 +66,26 @@ test_mysql_version() {
find sandboxes -name "stop_all" | bash
}
-echo "Building..."
-. script/build
-# Test all versions:
-find gh-ost-ci-env/mysql-tarballs/ -name "*.tar.gz" | while read f ; do basename $f ".tar.gz" ; done | sort -r | while read mysql_version ; do
- echo "found MySQL version: $mysql_version"
-done
-find gh-ost-ci-env/mysql-tarballs/ -name "*.tar.gz" | while read f ; do basename $f ".tar.gz" ; done | sort -r | while read mysql_version ; do
- test_mysql_version "$mysql_version"
-done
+main() {
+ fetch_ci_env
+ test_dbdeployer
+
+ echo "Building..."
+ . script/build
+
+ # TEST_MYSQL_VERSION is set by the replica-tests CI job
+ if [ -z "$TEST_MYSQL_VERSION" ]; then
+ # Test all versions:
+ find gh-ost-ci-env/mysql-tarballs/ -name "*.tar.xz" | while read f ; do basename $f ".tar.xz" ; done | sort -r | while read mysql_version ; do
+ echo "found MySQL version: $mysql_version"
+ done
+ find gh-ost-ci-env/mysql-tarballs/ -name "*.tar.xz" | while read f ; do basename $f ".tar.xz" ; done | sort -r | while read mysql_version ; do
+ test_mysql_version "$mysql_version"
+ done
+ else
+ echo "found MySQL version: $TEST_MYSQL_VERSION"
+ test_mysql_version "$TEST_MYSQL_VERSION"
+ fi
+}
+
+main
diff --git a/script/ensure-go-installed b/script/ensure-go-installed
index baa5bd7..3d33ae4 100755
--- a/script/ensure-go-installed
+++ b/script/ensure-go-installed
@@ -1,13 +1,13 @@
#!/bin/bash
-PREFERRED_GO_VERSION=go1.14.7
-SUPPORTED_GO_VERSIONS='go1.1[456]'
+PREFERRED_GO_VERSION=go1.17.11
+SUPPORTED_GO_VERSIONS='go1.1[567]'
GO_PKG_DARWIN=${PREFERRED_GO_VERSION}.darwin-amd64.pkg
-GO_PKG_DARWIN_SHA=0f215de06019a054a3da46a0722989986c956d719c7a0a8fc38a5f3c216d6f6b
+GO_PKG_DARWIN_SHA=4f924c534230de8f0e1c7369f611c0310efd21fc2d9438b13bc2703af9dda25a
GO_PKG_LINUX=${PREFERRED_GO_VERSION}.linux-amd64.tar.gz
-GO_PKG_LINUX_SHA=4a7fa60f323ee1416a4b1425aefc37ea359e9d64df19c326a58953a97ad41ea5
+GO_PKG_LINUX_SHA=d69a4fe2694f795d8e525c72b497ededc209cb7185f4c3b62d7a98dd6227b3fe
export ROOTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
cd $ROOTDIR
diff --git a/script/ensure-golangci-lint-installed b/script/ensure-golangci-lint-installed
new file mode 100755
index 0000000..e4f49f5
--- /dev/null
+++ b/script/ensure-golangci-lint-installed
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# See https://github.com/golangci/golangci-lint/releases
+GOLANGCI_RELEASE=v1.46.2
+GOLANGCI_INSTALL_SCRIPT=https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh
+
+if [ -z "$GOPATH" ]; then
+ echo "GOPATH must be set"
+ exit 1
+fi
+
+if [ ! -x "$GOPATH/bin/golangci-lint" ]; then
+ echo "Installing golangci-lint $GOLANGCI_RELEASE using script: $GOLANGCI_INSTALL_SCRIPT"
+ curl -sSfL $GOLANGCI_INSTALL_SCRIPT | sh -s -- -b $(go env GOPATH)/bin $GOLANGCI_RELEASE
+fi
+
+$GOPATH/bin/golangci-lint --version
diff --git a/script/lint b/script/lint
new file mode 100755
index 0000000..e29aa8b
--- /dev/null
+++ b/script/lint
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -e
+
+. script/ensure-go-installed
+. script/ensure-golangci-lint-installed
+
+if [ -x "$GOPATH/bin/golangci-lint" ]; then
+ echo "Running golangci-lint run"
+ $GOPATH/bin/golangci-lint run --config=.golangci.yml
+ echo "Done, exit code: $?"
+else
+ echo "ERROR: cannot find golangci-lint at $GOPATH/bin"
+ exit 1
+fi
diff --git a/test.sh b/test.sh
index ff514fa..72215a6 100755
--- a/test.sh
+++ b/test.sh
@@ -5,7 +5,9 @@ retval=0
for testsuite in base mysql sql
do
pushd go/${testsuite} > /dev/null;
+
go test $*;
+
[ $? -ne 0 ] && retval=1
popd > /dev/null;
done
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
deleted file mode 100644
index 0cd3800..0000000
--- a/vendor/github.com/BurntSushi/toml/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-TAGS
-tags
-.*.swp
-tomlcheck/tomlcheck
-toml.test
diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml
deleted file mode 100644
index 43caf6d..0000000
--- a/vendor/github.com/BurntSushi/toml/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-go:
- - 1.1
- - 1.2
- - tip
-install:
- - go install ./...
- - go get github.com/BurntSushi/toml-test
-script:
- - export PATH="$PATH:$HOME/gopath/bin"
- - make test
-
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
deleted file mode 100644
index 21e0938..0000000
--- a/vendor/github.com/BurntSushi/toml/COMPATIBLE
+++ /dev/null
@@ -1,3 +0,0 @@
-Compatible with TOML version
-[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
-
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING
deleted file mode 100644
index 5a8e332..0000000
--- a/vendor/github.com/BurntSushi/toml/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile
deleted file mode 100644
index 3600848..0000000
--- a/vendor/github.com/BurntSushi/toml/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-install:
- go install ./...
-
-test: install
- go test -v
- toml-test toml-test-decoder
- toml-test -encoder toml-test-encoder
-
-fmt:
- gofmt -w *.go */*.go
- colcheck *.go */*.go
-
-tags:
- find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
-
-push:
- git push origin master
- git push github master
-
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
deleted file mode 100644
index 380bb36..0000000
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ /dev/null
@@ -1,220 +0,0 @@
-## TOML parser and encoder for Go with reflection
-
-TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
-reflection interface similar to Go's standard library `json` and `xml`
-packages. This package also supports the `encoding.TextUnmarshaler` and
-`encoding.TextMarshaler` interfaces so that you can define custom data
-representations. (There is an example of this below.)
-
-Spec: https://github.com/mojombo/toml
-
-Compatible with TOML version
-[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
-
-Documentation: http://godoc.org/github.com/BurntSushi/toml
-
-Installation:
-
-```bash
-go get github.com/BurntSushi/toml
-```
-
-Try the toml validator:
-
-```bash
-go get github.com/BurntSushi/toml/cmd/tomlv
-tomlv some-toml-file.toml
-```
-
-[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml)
-
-
-### Testing
-
-This package passes all tests in
-[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
-and the encoder.
-
-### Examples
-
-This package works similarly to how the Go standard library handles `XML`
-and `JSON`. Namely, data is loaded into Go values via reflection.
-
-For the simplest example, consider some TOML file as just a list of keys
-and values:
-
-```toml
-Age = 25
-Cats = [ "Cauchy", "Plato" ]
-Pi = 3.14
-Perfection = [ 6, 28, 496, 8128 ]
-DOB = 1987-07-05T05:45:00Z
-```
-
-Which could be defined in Go as:
-
-```go
-type Config struct {
- Age int
- Cats []string
- Pi float64
- Perfection []int
- DOB time.Time // requires `import time`
-}
-```
-
-And then decoded with:
-
-```go
-var conf Config
-if _, err := toml.Decode(tomlData, &conf); err != nil {
- // handle error
-}
-```
-
-You can also use struct tags if your struct field name doesn't map to a TOML
-key value directly:
-
-```toml
-some_key_NAME = "wat"
-```
-
-```go
-type TOML struct {
- ObscureKey string `toml:"some_key_NAME"`
-}
-```
-
-### Using the `encoding.TextUnmarshaler` interface
-
-Here's an example that automatically parses duration strings into
-`time.Duration` values:
-
-```toml
-[[song]]
-name = "Thunder Road"
-duration = "4m49s"
-
-[[song]]
-name = "Stairway to Heaven"
-duration = "8m03s"
-```
-
-Which can be decoded with:
-
-```go
-type song struct {
- Name string
- Duration duration
-}
-type songs struct {
- Song []song
-}
-var favorites songs
-if _, err := Decode(blob, &favorites); err != nil {
- log.Fatal(err)
-}
-
-for _, s := range favorites.Song {
- fmt.Printf("%s (%s)\n", s.Name, s.Duration)
-}
-```
-
-And you'll also need a `duration` type that satisfies the
-`encoding.TextUnmarshaler` interface:
-
-```go
-type duration struct {
- time.Duration
-}
-
-func (d *duration) UnmarshalText(text []byte) error {
- var err error
- d.Duration, err = time.ParseDuration(string(text))
- return err
-}
-```
-
-### More complex usage
-
-Here's an example of how to load the example from the official spec page:
-
-```toml
-# This is a TOML document. Boom.
-
-title = "TOML Example"
-
-[owner]
-name = "Tom Preston-Werner"
-organization = "GitHub"
-bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
-dob = 1979-05-27T07:32:00Z # First class dates? Why not?
-
-[database]
-server = "192.168.1.1"
-ports = [ 8001, 8001, 8002 ]
-connection_max = 5000
-enabled = true
-
-[servers]
-
- # You can indent as you please. Tabs or spaces. TOML don't care.
- [servers.alpha]
- ip = "10.0.0.1"
- dc = "eqdc10"
-
- [servers.beta]
- ip = "10.0.0.2"
- dc = "eqdc10"
-
-[clients]
-data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
-
-# Line breaks are OK when inside arrays
-hosts = [
- "alpha",
- "omega"
-]
-```
-
-And the corresponding Go types are:
-
-```go
-type tomlConfig struct {
- Title string
- Owner ownerInfo
- DB database `toml:"database"`
- Servers map[string]server
- Clients clients
-}
-
-type ownerInfo struct {
- Name string
- Org string `toml:"organization"`
- Bio string
- DOB time.Time
-}
-
-type database struct {
- Server string
- Ports []int
- ConnMax int `toml:"connection_max"`
- Enabled bool
-}
-
-type server struct {
- IP string
- DC string
-}
-
-type clients struct {
- Data [][]interface{}
- Hosts []string
-}
-```
-
-Note that a case insensitive match will be tried if an exact match can't be
-found.
-
-A working example of the above can be found in `_examples/example.{go,toml}`.
-
diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
deleted file mode 100644
index 5a8e332..0000000
--- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md
deleted file mode 100644
index 24421eb..0000000
--- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Implements the TOML test suite interface
-
-This is an implementation of the interface expected by
-[toml-test](https://github.com/BurntSushi/toml-test) for my
-[toml parser written in Go](https://github.com/BurntSushi/toml).
-In particular, it maps TOML data on `stdin` to a JSON format on `stdout`.
-
-
-Compatible with TOML version
-[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
-
-Compatible with `toml-test` version
-[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
-
diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go
deleted file mode 100644
index 14e7557..0000000
--- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Command toml-test-decoder satisfies the toml-test interface for testing
-// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout.
-package main
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "log"
- "os"
- "path"
- "time"
-
- "github.com/BurntSushi/toml"
-)
-
-func init() {
- log.SetFlags(0)
-
- flag.Usage = usage
- flag.Parse()
-}
-
-func usage() {
- log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0]))
- flag.PrintDefaults()
-
- os.Exit(1)
-}
-
-func main() {
- if flag.NArg() != 0 {
- flag.Usage()
- }
-
- var tmp interface{}
- if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil {
- log.Fatalf("Error decoding TOML: %s", err)
- }
-
- typedTmp := translate(tmp)
- if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil {
- log.Fatalf("Error encoding JSON: %s", err)
- }
-}
-
-func translate(tomlData interface{}) interface{} {
- switch orig := tomlData.(type) {
- case map[string]interface{}:
- typed := make(map[string]interface{}, len(orig))
- for k, v := range orig {
- typed[k] = translate(v)
- }
- return typed
- case []map[string]interface{}:
- typed := make([]map[string]interface{}, len(orig))
- for i, v := range orig {
- typed[i] = translate(v).(map[string]interface{})
- }
- return typed
- case []interface{}:
- typed := make([]interface{}, len(orig))
- for i, v := range orig {
- typed[i] = translate(v)
- }
-
- // We don't really need to tag arrays, but let's be future proof.
- // (If TOML ever supports tuples, we'll need this.)
- return tag("array", typed)
- case time.Time:
- return tag("datetime", orig.Format("2006-01-02T15:04:05Z"))
- case bool:
- return tag("bool", fmt.Sprintf("%v", orig))
- case int64:
- return tag("integer", fmt.Sprintf("%d", orig))
- case float64:
- return tag("float", fmt.Sprintf("%v", orig))
- case string:
- return tag("string", orig)
- }
-
- panic(fmt.Sprintf("Unknown type: %T", tomlData))
-}
-
-func tag(typeName string, data interface{}) map[string]interface{} {
- return map[string]interface{}{
- "type": typeName,
- "value": data,
- }
-}
diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
deleted file mode 100644
index 5a8e332..0000000
--- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md
deleted file mode 100644
index 45a603f..0000000
--- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Implements the TOML test suite interface for TOML encoders
-
-This is an implementation of the interface expected by
-[toml-test](https://github.com/BurntSushi/toml-test) for the
-[TOML encoder](https://github.com/BurntSushi/toml).
-In particular, it maps JSON data on `stdin` to a TOML format on `stdout`.
-
-
-Compatible with TOML version
-[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
-
-Compatible with `toml-test` version
-[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
-
diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go
deleted file mode 100644
index 092cc68..0000000
--- a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Command toml-test-encoder satisfies the toml-test interface for testing
-// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout.
-package main
-
-import (
- "encoding/json"
- "flag"
- "log"
- "os"
- "path"
- "strconv"
- "time"
-
- "github.com/BurntSushi/toml"
-)
-
-func init() {
- log.SetFlags(0)
-
- flag.Usage = usage
- flag.Parse()
-}
-
-func usage() {
- log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0]))
- flag.PrintDefaults()
-
- os.Exit(1)
-}
-
-func main() {
- if flag.NArg() != 0 {
- flag.Usage()
- }
-
- var tmp interface{}
- if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil {
- log.Fatalf("Error decoding JSON: %s", err)
- }
-
- tomlData := translate(tmp)
- if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil {
- log.Fatalf("Error encoding TOML: %s", err)
- }
-}
-
-func translate(typedJson interface{}) interface{} {
- switch v := typedJson.(type) {
- case map[string]interface{}:
- if len(v) == 2 && in("type", v) && in("value", v) {
- return untag(v)
- }
- m := make(map[string]interface{}, len(v))
- for k, v2 := range v {
- m[k] = translate(v2)
- }
- return m
- case []interface{}:
- tabArray := make([]map[string]interface{}, len(v))
- for i := range v {
- if m, ok := translate(v[i]).(map[string]interface{}); ok {
- tabArray[i] = m
- } else {
- log.Fatalf("JSON arrays may only contain objects. This " +
- "corresponds to only tables being allowed in " +
- "TOML table arrays.")
- }
- }
- return tabArray
- }
- log.Fatalf("Unrecognized JSON format '%T'.", typedJson)
- panic("unreachable")
-}
-
-func untag(typed map[string]interface{}) interface{} {
- t := typed["type"].(string)
- v := typed["value"]
- switch t {
- case "string":
- return v.(string)
- case "integer":
- v := v.(string)
- n, err := strconv.Atoi(v)
- if err != nil {
- log.Fatalf("Could not parse '%s' as integer: %s", v, err)
- }
- return n
- case "float":
- v := v.(string)
- f, err := strconv.ParseFloat(v, 64)
- if err != nil {
- log.Fatalf("Could not parse '%s' as float64: %s", v, err)
- }
- return f
- case "datetime":
- v := v.(string)
- t, err := time.Parse("2006-01-02T15:04:05Z", v)
- if err != nil {
- log.Fatalf("Could not parse '%s' as a datetime: %s", v, err)
- }
- return t
- case "bool":
- v := v.(string)
- switch v {
- case "true":
- return true
- case "false":
- return false
- }
- log.Fatalf("Could not parse '%s' as a boolean.", v)
- case "array":
- v := v.([]interface{})
- array := make([]interface{}, len(v))
- for i := range v {
- if m, ok := v[i].(map[string]interface{}); ok {
- array[i] = untag(m)
- } else {
- log.Fatalf("Arrays may only contain other arrays or "+
- "primitive values, but found a '%T'.", m)
- }
- }
- return array
- }
- log.Fatalf("Unrecognized tag type '%s'.", t)
- panic("unreachable")
-}
-
-func in(key string, m map[string]interface{}) bool {
- _, ok := m[key]
- return ok
-}
diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING
deleted file mode 100644
index 5a8e332..0000000
--- a/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md b/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md
deleted file mode 100644
index 5df0dc3..0000000
--- a/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# TOML Validator
-
-If Go is installed, it's simple to try it out:
-
-```bash
-go get github.com/BurntSushi/toml/cmd/tomlv
-tomlv some-toml-file.toml
-```
-
-You can see the types of every key in a TOML file with:
-
-```bash
-tomlv -types some-toml-file.toml
-```
-
-At the moment, only one error message is reported at a time. Error messages
-include line numbers. No output means that the files given are valid TOML, or
-there is a bug in `tomlv`.
-
-Compatible with TOML version
-[v0.1.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.1.0.md)
-
diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go b/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go
deleted file mode 100644
index c7d689a..0000000
--- a/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Command tomlv validates TOML documents and prints each key's type.
-package main
-
-import (
- "flag"
- "fmt"
- "log"
- "os"
- "path"
- "strings"
- "text/tabwriter"
-
- "github.com/BurntSushi/toml"
-)
-
-var (
- flagTypes = false
-)
-
-func init() {
- log.SetFlags(0)
-
- flag.BoolVar(&flagTypes, "types", flagTypes,
- "When set, the types of every defined key will be shown.")
-
- flag.Usage = usage
- flag.Parse()
-}
-
-func usage() {
- log.Printf("Usage: %s toml-file [ toml-file ... ]\n",
- path.Base(os.Args[0]))
- flag.PrintDefaults()
-
- os.Exit(1)
-}
-
-func main() {
- if flag.NArg() < 1 {
- flag.Usage()
- }
- for _, f := range flag.Args() {
- var tmp interface{}
- md, err := toml.DecodeFile(f, &tmp)
- if err != nil {
- log.Fatalf("Error in '%s': %s", f, err)
- }
- if flagTypes {
- printTypes(md)
- }
- }
-}
-
-func printTypes(md toml.MetaData) {
- tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
- for _, key := range md.Keys() {
- fmt.Fprintf(tabw, "%s%s\t%s\n",
- strings.Repeat(" ", len(key)-1), key, md.Type(key...))
- }
- tabw.Flush()
-}
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
deleted file mode 100644
index b6d75d0..0000000
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ /dev/null
@@ -1,472 +0,0 @@
-package toml
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "reflect"
- "strings"
- "time"
-)
-
-var e = fmt.Errorf
-
-// Primitive is a TOML value that hasn't been decoded into a Go value.
-// When using the various `Decode*` functions, the type `Primitive` may
-// be given to any value, and its decoding will be delayed.
-//
-// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
-//
-// The underlying representation of a `Primitive` value is subject to change.
-// Do not rely on it.
-//
-// N.B. Primitive values are still parsed, so using them will only avoid
-// the overhead of reflection. They can be useful when you don't know the
-// exact type of TOML data until run time.
-type Primitive struct {
- undecoded interface{}
- context Key
-}
-
-// DEPRECATED!
-//
-// Use MetaData.PrimitiveDecode instead.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
- md := MetaData{decoded: make(map[string]bool)}
- return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// PrimitiveDecode is just like the other `Decode*` functions, except it
-// decodes a TOML value that has already been parsed. Valid primitive values
-// can *only* be obtained from values filled by the decoder functions,
-// including this method. (i.e., `v` may contain more `Primitive`
-// values.)
-//
-// Meta data for primitive values is included in the meta data returned by
-// the `Decode*` functions with one exception: keys returned by the Undecoded
-// method will only reflect keys that were decoded. Namely, any keys hidden
-// behind a Primitive will be considered undecoded. Executing this method will
-// update the undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
- md.context = primValue.context
- defer func() { md.context = nil }()
- return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// Decode will decode the contents of `data` in TOML format into a pointer
-// `v`.
-//
-// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
-// used interchangeably.)
-//
-// TOML arrays of tables correspond to either a slice of structs or a slice
-// of maps.
-//
-// TOML datetimes correspond to Go `time.Time` values.
-//
-// All other TOML types (float, string, int, bool and array) correspond
-// to the obvious Go types.
-//
-// An exception to the above rules is if a type implements the
-// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
-// (floats, strings, integers, booleans and datetimes) will be converted to
-// a byte string and given to the value's UnmarshalText method. See the
-// Unmarshaler example for a demonstration with time duration strings.
-//
-// Key mapping
-//
-// TOML keys can map to either keys in a Go map or field names in a Go
-// struct. The special `toml` struct tag may be used to map TOML keys to
-// struct fields that don't match the key name exactly. (See the example.)
-// A case insensitive match to struct names will be tried if an exact match
-// can't be found.
-//
-// The mapping between TOML values and Go values is loose. That is, there
-// may exist TOML values that cannot be placed into your representation, and
-// there may be parts of your representation that do not correspond to
-// TOML values. This loose mapping can be made stricter by using the IsDefined
-// and/or Undecoded methods on the MetaData returned.
-//
-// This decoder will not handle cyclic types. If a cyclic type is passed,
-// `Decode` will not terminate.
-func Decode(data string, v interface{}) (MetaData, error) {
- p, err := parse(data)
- if err != nil {
- return MetaData{}, err
- }
- md := MetaData{
- p.mapping, p.types, p.ordered,
- make(map[string]bool, len(p.ordered)), nil,
- }
- return md, md.unify(p.mapping, rvalue(v))
-}
-
-// DecodeFile is just like Decode, except it will automatically read the
-// contents of the file at `fpath` and decode it for you.
-func DecodeFile(fpath string, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadFile(fpath)
- if err != nil {
- return MetaData{}, err
- }
- return Decode(string(bs), v)
-}
-
-// DecodeReader is just like Decode, except it will consume all bytes
-// from the reader and decode it for you.
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadAll(r)
- if err != nil {
- return MetaData{}, err
- }
- return Decode(string(bs), v)
-}
-
-// unify performs a sort of type unification based on the structure of `rv`,
-// which is the client representation.
-//
-// Any type mismatch produces an error. Finding a type that we don't know
-// how to handle produces an unsupported type error.
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
- // Special case. Look for a `Primitive` value.
- if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
- // Save the undecoded data and the key context into the primitive
- // value.
- context := make(Key, len(md.context))
- copy(context, md.context)
- rv.Set(reflect.ValueOf(Primitive{
- undecoded: data,
- context: context,
- }))
- return nil
- }
-
- // Special case. Handle time.Time values specifically.
- // TODO: Remove this code when we decide to drop support for Go 1.1.
- // This isn't necessary in Go 1.2 because time.Time satisfies the encoding
- // interfaces.
- if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
- return md.unifyDatetime(data, rv)
- }
-
- // Special case. Look for a value satisfying the TextUnmarshaler interface.
- if v, ok := rv.Interface().(TextUnmarshaler); ok {
- return md.unifyText(data, v)
- }
- // BUG(burntsushi)
- // The behavior here is incorrect whenever a Go type satisfies the
- // encoding.TextUnmarshaler interface but also corresponds to a TOML
- // hash or array. In particular, the unmarshaler should only be applied
- // to primitive TOML values. But at this point, it will be applied to
- // all kinds of values and produce an incorrect error whenever those values
- // are hashes or arrays (including arrays of tables).
-
- k := rv.Kind()
-
- // laziness
- if k >= reflect.Int && k <= reflect.Uint64 {
- return md.unifyInt(data, rv)
- }
- switch k {
- case reflect.Ptr:
- elem := reflect.New(rv.Type().Elem())
- err := md.unify(data, reflect.Indirect(elem))
- if err != nil {
- return err
- }
- rv.Set(elem)
- return nil
- case reflect.Struct:
- return md.unifyStruct(data, rv)
- case reflect.Map:
- return md.unifyMap(data, rv)
- case reflect.Array:
- return md.unifyArray(data, rv)
- case reflect.Slice:
- return md.unifySlice(data, rv)
- case reflect.String:
- return md.unifyString(data, rv)
- case reflect.Bool:
- return md.unifyBool(data, rv)
- case reflect.Interface:
- // we only support empty interfaces.
- if rv.NumMethod() > 0 {
- return e("Unsupported type '%s'.", rv.Kind())
- }
- return md.unifyAnything(data, rv)
- case reflect.Float32:
- fallthrough
- case reflect.Float64:
- return md.unifyFloat64(data, rv)
- }
- return e("Unsupported type '%s'.", rv.Kind())
-}
-
-func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
- if !ok {
- return mismatch(rv, "map", mapping)
- }
-
- for key, datum := range tmap {
- var f *field
- fields := cachedTypeFields(rv.Type())
- for i := range fields {
- ff := &fields[i]
- if ff.name == key {
- f = ff
- break
- }
- if f == nil && strings.EqualFold(ff.name, key) {
- f = ff
- }
- }
- if f != nil {
- subv := rv
- for _, i := range f.index {
- subv = indirect(subv.Field(i))
- }
- if isUnifiable(subv) {
- md.decoded[md.context.add(key).String()] = true
- md.context = append(md.context, key)
- if err := md.unify(datum, subv); err != nil {
- return e("Type mismatch for '%s.%s': %s",
- rv.Type().String(), f.name, err)
- }
- md.context = md.context[0 : len(md.context)-1]
- } else if f.name != "" {
- // Bad user! No soup for you!
- return e("Field '%s.%s' is unexported, and therefore cannot "+
- "be loaded with reflection.", rv.Type().String(), f.name)
- }
- }
- }
- return nil
-}
-
-func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
- if !ok {
- return badtype("map", mapping)
- }
- if rv.IsNil() {
- rv.Set(reflect.MakeMap(rv.Type()))
- }
- for k, v := range tmap {
- md.decoded[md.context.add(k).String()] = true
- md.context = append(md.context, k)
-
- rvkey := indirect(reflect.New(rv.Type().Key()))
- rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
- if err := md.unify(v, rvval); err != nil {
- return err
- }
- md.context = md.context[0 : len(md.context)-1]
-
- rvkey.SetString(k)
- rv.SetMapIndex(rvkey, rvval)
- }
- return nil
-}
-
-func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
- datav := reflect.ValueOf(data)
- if datav.Kind() != reflect.Slice {
- return badtype("slice", data)
- }
- sliceLen := datav.Len()
- if sliceLen != rv.Len() {
- return e("expected array length %d; got TOML array of length %d",
- rv.Len(), sliceLen)
- }
- return md.unifySliceArray(datav, rv)
-}
-
-func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
- datav := reflect.ValueOf(data)
- if datav.Kind() != reflect.Slice {
- return badtype("slice", data)
- }
- sliceLen := datav.Len()
- if rv.IsNil() {
- rv.Set(reflect.MakeSlice(rv.Type(), sliceLen, sliceLen))
- }
- return md.unifySliceArray(datav, rv)
-}
-
-func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
- sliceLen := data.Len()
- for i := 0; i < sliceLen; i++ {
- v := data.Index(i).Interface()
- sliceval := indirect(rv.Index(i))
- if err := md.unify(v, sliceval); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
- if _, ok := data.(time.Time); ok {
- rv.Set(reflect.ValueOf(data))
- return nil
- }
- return badtype("time.Time", data)
-}
-
-func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
- if s, ok := data.(string); ok {
- rv.SetString(s)
- return nil
- }
- return badtype("string", data)
-}
-
-func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
- if num, ok := data.(float64); ok {
- switch rv.Kind() {
- case reflect.Float32:
- fallthrough
- case reflect.Float64:
- rv.SetFloat(num)
- default:
- panic("bug")
- }
- return nil
- }
- return badtype("float", data)
-}
-
-func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
- if num, ok := data.(int64); ok {
- if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
- switch rv.Kind() {
- case reflect.Int, reflect.Int64:
- // No bounds checking necessary.
- case reflect.Int8:
- if num < math.MinInt8 || num > math.MaxInt8 {
- return e("Value '%d' is out of range for int8.", num)
- }
- case reflect.Int16:
- if num < math.MinInt16 || num > math.MaxInt16 {
- return e("Value '%d' is out of range for int16.", num)
- }
- case reflect.Int32:
- if num < math.MinInt32 || num > math.MaxInt32 {
- return e("Value '%d' is out of range for int32.", num)
- }
- }
- rv.SetInt(num)
- } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
- unum := uint64(num)
- switch rv.Kind() {
- case reflect.Uint, reflect.Uint64:
- // No bounds checking necessary.
- case reflect.Uint8:
- if num < 0 || unum > math.MaxUint8 {
- return e("Value '%d' is out of range for uint8.", num)
- }
- case reflect.Uint16:
- if num < 0 || unum > math.MaxUint16 {
- return e("Value '%d' is out of range for uint16.", num)
- }
- case reflect.Uint32:
- if num < 0 || unum > math.MaxUint32 {
- return e("Value '%d' is out of range for uint32.", num)
- }
- }
- rv.SetUint(unum)
- } else {
- panic("unreachable")
- }
- return nil
- }
- return badtype("integer", data)
-}
-
-func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
- if b, ok := data.(bool); ok {
- rv.SetBool(b)
- return nil
- }
- return badtype("boolean", data)
-}
-
-func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
- rv.Set(reflect.ValueOf(data))
- return nil
-}
-
-func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
- var s string
- switch sdata := data.(type) {
- case TextMarshaler:
- text, err := sdata.MarshalText()
- if err != nil {
- return err
- }
- s = string(text)
- case fmt.Stringer:
- s = sdata.String()
- case string:
- s = sdata
- case bool:
- s = fmt.Sprintf("%v", sdata)
- case int64:
- s = fmt.Sprintf("%d", sdata)
- case float64:
- s = fmt.Sprintf("%f", sdata)
- default:
- return badtype("primitive (string-like)", data)
- }
- if err := v.UnmarshalText([]byte(s)); err != nil {
- return err
- }
- return nil
-}
-
-// rvalue returns a reflect.Value of `v`. All pointers are resolved.
-func rvalue(v interface{}) reflect.Value {
- return indirect(reflect.ValueOf(v))
-}
-
-// indirect returns the value pointed to by a pointer.
-// Pointers are followed until the value is not a pointer.
-// New values are allocated for each nil pointer.
-//
-// An exception to this rule is if the value satisfies an interface of
-// interest to us (like encoding.TextUnmarshaler).
-func indirect(v reflect.Value) reflect.Value {
- if v.Kind() != reflect.Ptr {
- if v.CanAddr() {
- pv := v.Addr()
- if _, ok := pv.Interface().(TextUnmarshaler); ok {
- return pv
- }
- }
- return v
- }
- if v.IsNil() {
- v.Set(reflect.New(v.Type().Elem()))
- }
- return indirect(reflect.Indirect(v))
-}
-
-func isUnifiable(rv reflect.Value) bool {
- if rv.CanSet() {
- return true
- }
- if _, ok := rv.Interface().(TextUnmarshaler); ok {
- return true
- }
- return false
-}
-
-func badtype(expected string, data interface{}) error {
- return e("Expected %s but found '%T'.", expected, data)
-}
-
-func mismatch(user reflect.Value, expected string, data interface{}) error {
- return e("Type mismatch for %s. Expected %s but found '%T'.",
- user.Type().String(), expected, data)
-}
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
deleted file mode 100644
index c811445..0000000
--- a/vendor/github.com/BurntSushi/toml/decode_meta.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package toml
-
-import "strings"
-
-// MetaData allows access to meta information about TOML data that may not
-// be inferrable via reflection. In particular, whether a key has been defined
-// and the TOML type of a key.
-type MetaData struct {
- mapping map[string]interface{}
- types map[string]tomlType
- keys []Key
- decoded map[string]bool
- context Key // Used only during decoding.
-}
-
-// IsDefined returns true if the key given exists in the TOML data. The key
-// should be specified hierarchially. e.g.,
-//
-// // access the TOML key 'a.b.c'
-// IsDefined("a", "b", "c")
-//
-// IsDefined will return false if an empty key given. Keys are case sensitive.
-func (md *MetaData) IsDefined(key ...string) bool {
- if len(key) == 0 {
- return false
- }
-
- var hash map[string]interface{}
- var ok bool
- var hashOrVal interface{} = md.mapping
- for _, k := range key {
- if hash, ok = hashOrVal.(map[string]interface{}); !ok {
- return false
- }
- if hashOrVal, ok = hash[k]; !ok {
- return false
- }
- }
- return true
-}
-
-// Type returns a string representation of the type of the key specified.
-//
-// Type will return the empty string if given an empty key or a key that
-// does not exist. Keys are case sensitive.
-func (md *MetaData) Type(key ...string) string {
- fullkey := strings.Join(key, ".")
- if typ, ok := md.types[fullkey]; ok {
- return typ.typeString()
- }
- return ""
-}
-
-// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
-// to get values of this type.
-type Key []string
-
-func (k Key) String() string {
- return strings.Join(k, ".")
-}
-
-func (k Key) add(piece string) Key {
- newKey := make(Key, len(k)+1)
- copy(newKey, k)
- newKey[len(k)] = piece
- return newKey
-}
-
-// Keys returns a slice of every key in the TOML data, including key groups.
-// Each key is itself a slice, where the first element is the top of the
-// hierarchy and the last is the most specific.
-//
-// The list will have the same order as the keys appeared in the TOML data.
-//
-// All keys returned are non-empty.
-func (md *MetaData) Keys() []Key {
- return md.keys
-}
-
-// Undecoded returns all keys that have not been decoded in the order in which
-// they appear in the original TOML document.
-//
-// This includes keys that haven't been decoded because of a Primitive value.
-// Once the Primitive value is decoded, the keys will be considered decoded.
-//
-// Also note that decoding into an empty interface will result in no decoding,
-// and so no keys will be considered decoded.
-//
-// In this sense, the Undecoded keys correspond to keys in the TOML document
-// that do not have a concrete type in your representation.
-func (md *MetaData) Undecoded() []Key {
- undecoded := make([]Key, 0, len(md.keys))
- for _, key := range md.keys {
- if !md.decoded[key.String()] {
- undecoded = append(undecoded, key)
- }
- }
- return undecoded
-}
diff --git a/vendor/github.com/BurntSushi/toml/decode_test.go b/vendor/github.com/BurntSushi/toml/decode_test.go
deleted file mode 100644
index b940333..0000000
--- a/vendor/github.com/BurntSushi/toml/decode_test.go
+++ /dev/null
@@ -1,540 +0,0 @@
-package toml
-
-import (
- "fmt"
- "log"
- "reflect"
- "testing"
- "time"
-)
-
-func init() {
- log.SetFlags(0)
-}
-
-func TestDecodeSimple(t *testing.T) {
- var testSimple = `
-age = 250
-andrew = "gallant"
-kait = "brady"
-now = 1987-07-05T05:45:00Z
-yesOrNo = true
-pi = 3.14
-colors = [
- ["red", "green", "blue"],
- ["cyan", "magenta", "yellow", "black"],
-]
-
-[My.Cats]
-plato = "cat 1"
-cauchy = "cat 2"
-`
-
- type cats struct {
- Plato string
- Cauchy string
- }
- type simple struct {
- Age int
- Colors [][]string
- Pi float64
- YesOrNo bool
- Now time.Time
- Andrew string
- Kait string
- My map[string]cats
- }
-
- var val simple
- _, err := Decode(testSimple, &val)
- if err != nil {
- t.Fatal(err)
- }
-
- now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00")
- if err != nil {
- panic(err)
- }
- var answer = simple{
- Age: 250,
- Andrew: "gallant",
- Kait: "brady",
- Now: now,
- YesOrNo: true,
- Pi: 3.14,
- Colors: [][]string{
- {"red", "green", "blue"},
- {"cyan", "magenta", "yellow", "black"},
- },
- My: map[string]cats{
- "Cats": cats{Plato: "cat 1", Cauchy: "cat 2"},
- },
- }
- if !reflect.DeepEqual(val, answer) {
- t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n",
- answer, val)
- }
-}
-
-func TestDecodeEmbedded(t *testing.T) {
- type Dog struct{ Name string }
- type Age int
-
- tests := map[string]struct {
- input string
- decodeInto interface{}
- wantDecoded interface{}
- }{
- "embedded struct": {
- input: `Name = "milton"`,
- decodeInto: &struct{ Dog }{},
- wantDecoded: &struct{ Dog }{Dog{"milton"}},
- },
- "embedded non-nil pointer to struct": {
- input: `Name = "milton"`,
- decodeInto: &struct{ *Dog }{},
- wantDecoded: &struct{ *Dog }{&Dog{"milton"}},
- },
- "embedded nil pointer to struct": {
- input: ``,
- decodeInto: &struct{ *Dog }{},
- wantDecoded: &struct{ *Dog }{nil},
- },
- "embedded int": {
- input: `Age = -5`,
- decodeInto: &struct{ Age }{},
- wantDecoded: &struct{ Age }{-5},
- },
- }
-
- for label, test := range tests {
- _, err := Decode(test.input, test.decodeInto)
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) {
- t.Errorf("%s: want decoded == %+v, got %+v",
- label, test.wantDecoded, test.decodeInto)
- }
- }
-}
-
-func TestTableArrays(t *testing.T) {
- var tomlTableArrays = `
-[[albums]]
-name = "Born to Run"
-
- [[albums.songs]]
- name = "Jungleland"
-
- [[albums.songs]]
- name = "Meeting Across the River"
-
-[[albums]]
-name = "Born in the USA"
-
- [[albums.songs]]
- name = "Glory Days"
-
- [[albums.songs]]
- name = "Dancing in the Dark"
-`
-
- type Song struct {
- Name string
- }
-
- type Album struct {
- Name string
- Songs []Song
- }
-
- type Music struct {
- Albums []Album
- }
-
- expected := Music{[]Album{
- {"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}},
- {"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}},
- }}
- var got Music
- if _, err := Decode(tomlTableArrays, &got); err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(expected, got) {
- t.Fatalf("\n%#v\n!=\n%#v\n", expected, got)
- }
-}
-
-// Case insensitive matching tests.
-// A bit more comprehensive than needed given the current implementation,
-// but implementations change.
-// Probably still missing demonstrations of some ugly corner cases regarding
-// case insensitive matching and multiple fields.
-func TestCase(t *testing.T) {
- var caseToml = `
-tOpString = "string"
-tOpInt = 1
-tOpFloat = 1.1
-tOpBool = true
-tOpdate = 2006-01-02T15:04:05Z
-tOparray = [ "array" ]
-Match = "i should be in Match only"
-MatcH = "i should be in MatcH only"
-once = "just once"
-[nEst.eD]
-nEstedString = "another string"
-`
-
- type InsensitiveEd struct {
- NestedString string
- }
-
- type InsensitiveNest struct {
- Ed InsensitiveEd
- }
-
- type Insensitive struct {
- TopString string
- TopInt int
- TopFloat float64
- TopBool bool
- TopDate time.Time
- TopArray []string
- Match string
- MatcH string
- Once string
- OncE string
- Nest InsensitiveNest
- }
-
- tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5])
- if err != nil {
- panic(err)
- }
- expected := Insensitive{
- TopString: "string",
- TopInt: 1,
- TopFloat: 1.1,
- TopBool: true,
- TopDate: tme,
- TopArray: []string{"array"},
- MatcH: "i should be in MatcH only",
- Match: "i should be in Match only",
- Once: "just once",
- OncE: "",
- Nest: InsensitiveNest{
- Ed: InsensitiveEd{NestedString: "another string"},
- },
- }
- var got Insensitive
- if _, err := Decode(caseToml, &got); err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(expected, got) {
- t.Fatalf("\n%#v\n!=\n%#v\n", expected, got)
- }
-}
-
-func TestPointers(t *testing.T) {
- type Object struct {
- Type string
- Description string
- }
-
- type Dict struct {
- NamedObject map[string]*Object
- BaseObject *Object
- Strptr *string
- Strptrs []*string
- }
- s1, s2, s3 := "blah", "abc", "def"
- expected := &Dict{
- Strptr: &s1,
- Strptrs: []*string{&s2, &s3},
- NamedObject: map[string]*Object{
- "foo": {"FOO", "fooooo!!!"},
- "bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"},
- },
- BaseObject: &Object{"BASE", "da base"},
- }
-
- ex1 := `
-Strptr = "blah"
-Strptrs = ["abc", "def"]
-
-[NamedObject.foo]
-Type = "FOO"
-Description = "fooooo!!!"
-
-[NamedObject.bar]
-Type = "BAR"
-Description = "ba-ba-ba-ba-barrrr!!!"
-
-[BaseObject]
-Type = "BASE"
-Description = "da base"
-`
- dict := new(Dict)
- _, err := Decode(ex1, dict)
- if err != nil {
- t.Errorf("Decode error: %v", err)
- }
- if !reflect.DeepEqual(expected, dict) {
- t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict)
- }
-}
-
-type sphere struct {
- Center [3]float64
- Radius float64
-}
-
-func TestDecodeSimpleArray(t *testing.T) {
- var s1 sphere
- if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestDecodeArrayWrongSize(t *testing.T) {
- var s1 sphere
- if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil {
- t.Fatal("Expected array type mismatch error")
- }
-}
-
-func TestDecodeLargeIntoSmallInt(t *testing.T) {
- type table struct {
- Value int8
- }
- var tab table
- if _, err := Decode(`value = 500`, &tab); err == nil {
- t.Fatal("Expected integer out-of-bounds error.")
- }
-}
-
-func TestDecodeSizedInts(t *testing.T) {
- type table struct {
- U8 uint8
- U16 uint16
- U32 uint32
- U64 uint64
- U uint
- I8 int8
- I16 int16
- I32 int32
- I64 int64
- I int
- }
- answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1}
- toml := `
- u8 = 1
- u16 = 1
- u32 = 1
- u64 = 1
- u = 1
- i8 = -1
- i16 = -1
- i32 = -1
- i64 = -1
- i = -1
- `
- var tab table
- if _, err := Decode(toml, &tab); err != nil {
- t.Fatal(err.Error())
- }
- if answer != tab {
- t.Fatalf("Expected %#v but got %#v", answer, tab)
- }
-}
-
-func ExampleMetaData_PrimitiveDecode() {
- var md MetaData
- var err error
-
- var tomlBlob = `
-ranking = ["Springsteen", "J Geils"]
-
-[bands.Springsteen]
-started = 1973
-albums = ["Greetings", "WIESS", "Born to Run", "Darkness"]
-
-[bands.J Geils]
-started = 1970
-albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"]
-`
-
- type band struct {
- Started int
- Albums []string
- }
- type classics struct {
- Ranking []string
- Bands map[string]Primitive
- }
-
- // Do the initial decode. Reflection is delayed on Primitive values.
- var music classics
- if md, err = Decode(tomlBlob, &music); err != nil {
- log.Fatal(err)
- }
-
- // MetaData still includes information on Primitive values.
- fmt.Printf("Is `bands.Springsteen` defined? %v\n",
- md.IsDefined("bands", "Springsteen"))
-
- // Decode primitive data into Go values.
- for _, artist := range music.Ranking {
- // A band is a primitive value, so we need to decode it to get a
- // real `band` value.
- primValue := music.Bands[artist]
-
- var aBand band
- if err = md.PrimitiveDecode(primValue, &aBand); err != nil {
- log.Fatal(err)
- }
- fmt.Printf("%s started in %d.\n", artist, aBand.Started)
- }
- // Check to see if there were any fields left undecoded.
- // Note that this won't be empty before decoding the Primitive value!
- fmt.Printf("Undecoded: %q\n", md.Undecoded())
-
- // Output:
- // Is `bands.Springsteen` defined? true
- // Springsteen started in 1973.
- // J Geils started in 1970.
- // Undecoded: []
-}
-
-func ExampleDecode() {
- var tomlBlob = `
-# Some comments.
-[alpha]
-ip = "10.0.0.1"
-
- [alpha.config]
- Ports = [ 8001, 8002 ]
- Location = "Toronto"
- Created = 1987-07-05T05:45:00Z
-
-[beta]
-ip = "10.0.0.2"
-
- [beta.config]
- Ports = [ 9001, 9002 ]
- Location = "New Jersey"
- Created = 1887-01-05T05:55:00Z
-`
-
- type serverConfig struct {
- Ports []int
- Location string
- Created time.Time
- }
-
- type server struct {
- IP string `toml:"ip"`
- Config serverConfig `toml:"config"`
- }
-
- type servers map[string]server
-
- var config servers
- if _, err := Decode(tomlBlob, &config); err != nil {
- log.Fatal(err)
- }
-
- for _, name := range []string{"alpha", "beta"} {
- s := config[name]
- fmt.Printf("Server: %s (ip: %s) in %s created on %s\n",
- name, s.IP, s.Config.Location,
- s.Config.Created.Format("2006-01-02"))
- fmt.Printf("Ports: %v\n", s.Config.Ports)
- }
-
- // Output:
- // Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05
- // Ports: [8001 8002]
- // Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05
- // Ports: [9001 9002]
-}
-
-type duration struct {
- time.Duration
-}
-
-func (d *duration) UnmarshalText(text []byte) error {
- var err error
- d.Duration, err = time.ParseDuration(string(text))
- return err
-}
-
-// Example Unmarshaler shows how to decode TOML strings into your own
-// custom data type.
-func Example_unmarshaler() {
- blob := `
-[[song]]
-name = "Thunder Road"
-duration = "4m49s"
-
-[[song]]
-name = "Stairway to Heaven"
-duration = "8m03s"
-`
- type song struct {
- Name string
- Duration duration
- }
- type songs struct {
- Song []song
- }
- var favorites songs
- if _, err := Decode(blob, &favorites); err != nil {
- log.Fatal(err)
- }
-
- // Code to implement the TextUnmarshaler interface for `duration`:
- //
- // type duration struct {
- // time.Duration
- // }
- //
- // func (d *duration) UnmarshalText(text []byte) error {
- // var err error
- // d.Duration, err = time.ParseDuration(string(text))
- // return err
- // }
-
- for _, s := range favorites.Song {
- fmt.Printf("%s (%s)\n", s.Name, s.Duration)
- }
- // Output:
- // Thunder Road (4m49s)
- // Stairway to Heaven (8m3s)
-}
-
-// Example StrictDecoding shows how to detect whether there are keys in the
-// TOML document that weren't decoded into the value given. This is useful
-// for returning an error to the user if they've included extraneous fields
-// in their configuration.
-func Example_strictDecoding() {
- var blob = `
-key1 = "value1"
-key2 = "value2"
-key3 = "value3"
-`
- type config struct {
- Key1 string
- Key3 string
- }
-
- var conf config
- md, err := Decode(blob, &conf)
- if err != nil {
- log.Fatal(err)
- }
- fmt.Printf("Undecoded keys: %q\n", md.Undecoded())
- // Output:
- // Undecoded keys: ["key2"]
-}
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
deleted file mode 100644
index fe26800..0000000
--- a/vendor/github.com/BurntSushi/toml/doc.go
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
-Package toml provides facilities for decoding and encoding TOML configuration
-files via reflection. There is also support for delaying decoding with
-the Primitive type, and querying the set of keys in a TOML document with the
-MetaData type.
-
-The specification implemented: https://github.com/mojombo/toml
-
-The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
-whether a file is a valid TOML document. It can also be used to print the
-type of each key in a TOML document.
-
-Testing
-
-There are two important types of tests used for this package. The first is
-contained inside '*_test.go' files and uses the standard Go unit testing
-framework. These tests are primarily devoted to holistically testing the
-decoder and encoder.
-
-The second type of testing is used to verify the implementation's adherence
-to the TOML specification. These tests have been factored into their own
-project: https://github.com/BurntSushi/toml-test
-
-The reason the tests are in a separate project is so that they can be used by
-any implementation of TOML. Namely, it is language agnostic.
-*/
-package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
deleted file mode 100644
index 3618713..0000000
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ /dev/null
@@ -1,515 +0,0 @@
-package toml
-
-import (
- "bufio"
- "errors"
- "fmt"
- "io"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-type tomlEncodeError struct{ error }
-
-var (
- errArrayMixedElementTypes = errors.New(
- "can't encode array with mixed element types")
- errArrayNilElement = errors.New(
- "can't encode array with nil element")
- errNonString = errors.New(
- "can't encode a map with non-string key type")
- errAnonNonStruct = errors.New(
- "can't encode an anonymous field that is not a struct")
- errArrayNoTable = errors.New(
- "TOML array element can't contain a table")
- errNoKey = errors.New(
- "top-level values must be a Go map or struct")
- errAnything = errors.New("") // used in testing
-)
-
-var quotedReplacer = strings.NewReplacer(
- "\t", "\\t",
- "\n", "\\n",
- "\r", "\\r",
- "\"", "\\\"",
- "\\", "\\\\",
-)
-
-// Encoder controls the encoding of Go values to a TOML document to some
-// io.Writer.
-//
-// The indentation level can be controlled with the Indent field.
-type Encoder struct {
- // A single indentation level. By default it is two spaces.
- Indent string
-
- // hasWritten is whether we have written any output to w yet.
- hasWritten bool
- w *bufio.Writer
-}
-
-// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
-// given. By default, a single indentation level is 2 spaces.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- w: bufio.NewWriter(w),
- Indent: " ",
- }
-}
-
-// Encode writes a TOML representation of the Go value to the underlying
-// io.Writer. If the value given cannot be encoded to a valid TOML document,
-// then an error is returned.
-//
-// The mapping between Go values and TOML values should be precisely the same
-// as for the Decode* functions. Similarly, the TextMarshaler interface is
-// supported by encoding the resulting bytes as strings. (If you want to write
-// arbitrary binary data then you will need to use something like base64 since
-// TOML does not have any binary types.)
-//
-// When encoding TOML hashes (i.e., Go maps or structs), keys without any
-// sub-hashes are encoded first.
-//
-// If a Go map is encoded, then its keys are sorted alphabetically for
-// deterministic output. More control over this behavior may be provided if
-// there is demand for it.
-//
-// Encoding Go values without a corresponding TOML representation---like map
-// types with non-string keys---will cause an error to be returned. Similarly
-// for mixed arrays/slices, arrays/slices with nil elements, embedded
-// non-struct types and nested slices containing maps or structs.
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
-// and so is []map[string][]string.)
-func (enc *Encoder) Encode(v interface{}) error {
- rv := eindirect(reflect.ValueOf(v))
- if err := enc.safeEncode(Key([]string{}), rv); err != nil {
- return err
- }
- return enc.w.Flush()
-}
-
-func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
- defer func() {
- if r := recover(); r != nil {
- if terr, ok := r.(tomlEncodeError); ok {
- err = terr.error
- return
- }
- panic(r)
- }
- }()
- enc.encode(key, rv)
- return nil
-}
-
-func (enc *Encoder) encode(key Key, rv reflect.Value) {
- // Special case. Time needs to be in ISO8601 format.
- // Special case. If we can marshal the type to text, then we used that.
- // Basically, this prevents the encoder for handling these types as
- // generic structs (or whatever the underlying type of a TextMarshaler is).
- switch rv.Interface().(type) {
- case time.Time, TextMarshaler:
- enc.keyEqElement(key, rv)
- return
- }
-
- k := rv.Kind()
- switch k {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
- reflect.Uint64,
- reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
- enc.keyEqElement(key, rv)
- case reflect.Array, reflect.Slice:
- if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
- enc.eArrayOfTables(key, rv)
- } else {
- enc.keyEqElement(key, rv)
- }
- case reflect.Interface:
- if rv.IsNil() {
- return
- }
- enc.encode(key, rv.Elem())
- case reflect.Map:
- if rv.IsNil() {
- return
- }
- enc.eTable(key, rv)
- case reflect.Ptr:
- if rv.IsNil() {
- return
- }
- enc.encode(key, rv.Elem())
- case reflect.Struct:
- enc.eTable(key, rv)
- default:
- panic(e("Unsupported type for key '%s': %s", key, k))
- }
-}
-
-// eElement encodes any value that can be an array element (primitives and
-// arrays).
-func (enc *Encoder) eElement(rv reflect.Value) {
- switch v := rv.Interface().(type) {
- case time.Time:
- // Special case time.Time as a primitive. Has to come before
- // TextMarshaler below because time.Time implements
- // encoding.TextMarshaler, but we need to always use UTC.
- enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
- return
- case TextMarshaler:
- // Special case. Use text marshaler if it's available for this value.
- if s, err := v.MarshalText(); err != nil {
- encPanic(err)
- } else {
- enc.writeQuoted(string(s))
- }
- return
- }
- switch rv.Kind() {
- case reflect.Bool:
- enc.wf(strconv.FormatBool(rv.Bool()))
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- enc.wf(strconv.FormatInt(rv.Int(), 10))
- case reflect.Uint, reflect.Uint8, reflect.Uint16,
- reflect.Uint32, reflect.Uint64:
- enc.wf(strconv.FormatUint(rv.Uint(), 10))
- case reflect.Float32:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
- case reflect.Float64:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
- case reflect.Array, reflect.Slice:
- enc.eArrayOrSliceElement(rv)
- case reflect.Interface:
- enc.eElement(rv.Elem())
- case reflect.String:
- enc.writeQuoted(rv.String())
- default:
- panic(e("Unexpected primitive type: %s", rv.Kind()))
- }
-}
-
-// By the TOML spec, all floats must have a decimal with at least one
-// number on either side.
-func floatAddDecimal(fstr string) string {
- if !strings.Contains(fstr, ".") {
- return fstr + ".0"
- }
- return fstr
-}
-
-func (enc *Encoder) writeQuoted(s string) {
- enc.wf("\"%s\"", quotedReplacer.Replace(s))
-}
-
-func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
- length := rv.Len()
- enc.wf("[")
- for i := 0; i < length; i++ {
- elem := rv.Index(i)
- enc.eElement(elem)
- if i != length-1 {
- enc.wf(", ")
- }
- }
- enc.wf("]")
-}
-
-func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
- if len(key) == 0 {
- encPanic(errNoKey)
- }
- panicIfInvalidKey(key, true)
- for i := 0; i < rv.Len(); i++ {
- trv := rv.Index(i)
- if isNil(trv) {
- continue
- }
- enc.newline()
- enc.wf("%s[[%s]]", enc.indentStr(key), key.String())
- enc.newline()
- enc.eMapOrStruct(key, trv)
- }
-}
-
-func (enc *Encoder) eTable(key Key, rv reflect.Value) {
- if len(key) == 1 {
- // Output an extra new line between top-level tables.
- // (The newline isn't written if nothing else has been written though.)
- enc.newline()
- }
- if len(key) > 0 {
- panicIfInvalidKey(key, true)
- enc.wf("%s[%s]", enc.indentStr(key), key.String())
- enc.newline()
- }
- enc.eMapOrStruct(key, rv)
-}
-
-func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
- switch rv := eindirect(rv); rv.Kind() {
- case reflect.Map:
- enc.eMap(key, rv)
- case reflect.Struct:
- enc.eStruct(key, rv)
- default:
- panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
- }
-}
-
-func (enc *Encoder) eMap(key Key, rv reflect.Value) {
- rt := rv.Type()
- if rt.Key().Kind() != reflect.String {
- encPanic(errNonString)
- }
-
- // Sort keys so that we have deterministic output. And write keys directly
- // underneath this key first, before writing sub-structs or sub-maps.
- var mapKeysDirect, mapKeysSub []string
- for _, mapKey := range rv.MapKeys() {
- k := mapKey.String()
- if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
- mapKeysSub = append(mapKeysSub, k)
- } else {
- mapKeysDirect = append(mapKeysDirect, k)
- }
- }
-
- var writeMapKeys = func(mapKeys []string) {
- sort.Strings(mapKeys)
- for _, mapKey := range mapKeys {
- mrv := rv.MapIndex(reflect.ValueOf(mapKey))
- if isNil(mrv) {
- // Don't write anything for nil fields.
- continue
- }
- enc.encode(key.add(mapKey), mrv)
- }
- }
- writeMapKeys(mapKeysDirect)
- writeMapKeys(mapKeysSub)
-}
-
-func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
- // Write keys for fields directly under this key first, because if we write
- // a field that creates a new table, then all keys under it will be in that
- // table (not the one we're writing here).
- rt := rv.Type()
- var fieldsDirect, fieldsSub [][]int
- var addFields func(rt reflect.Type, rv reflect.Value, start []int)
- addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
- for i := 0; i < rt.NumField(); i++ {
- f := rt.Field(i)
- // skip unexporded fields
- if f.PkgPath != "" {
- continue
- }
- frv := rv.Field(i)
- if f.Anonymous {
- frv := eindirect(frv)
- t := frv.Type()
- if t.Kind() != reflect.Struct {
- encPanic(errAnonNonStruct)
- }
- addFields(t, frv, f.Index)
- } else if typeIsHash(tomlTypeOfGo(frv)) {
- fieldsSub = append(fieldsSub, append(start, f.Index...))
- } else {
- fieldsDirect = append(fieldsDirect, append(start, f.Index...))
- }
- }
- }
- addFields(rt, rv, nil)
-
- var writeFields = func(fields [][]int) {
- for _, fieldIndex := range fields {
- sft := rt.FieldByIndex(fieldIndex)
- sf := rv.FieldByIndex(fieldIndex)
- if isNil(sf) {
- // Don't write anything for nil fields.
- continue
- }
-
- keyName := sft.Tag.Get("toml")
- if keyName == "-" {
- continue
- }
- if keyName == "" {
- keyName = sft.Name
- }
- enc.encode(key.add(keyName), sf)
- }
- }
- writeFields(fieldsDirect)
- writeFields(fieldsSub)
-}
-
-// tomlTypeName returns the TOML type name of the Go value's type. It is used to
-// determine whether the types of array elements are mixed (which is forbidden).
-// If the Go value is nil, then it is illegal for it to be an array element, and
-// valueIsNil is returned as true.
-
-// Returns the TOML type of a Go value. The type may be `nil`, which means
-// no concrete TOML type could be found.
-func tomlTypeOfGo(rv reflect.Value) tomlType {
- if isNil(rv) || !rv.IsValid() {
- return nil
- }
- switch rv.Kind() {
- case reflect.Bool:
- return tomlBool
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
- reflect.Uint64:
- return tomlInteger
- case reflect.Float32, reflect.Float64:
- return tomlFloat
- case reflect.Array, reflect.Slice:
- if typeEqual(tomlHash, tomlArrayType(rv)) {
- return tomlArrayHash
- } else {
- return tomlArray
- }
- case reflect.Ptr, reflect.Interface:
- return tomlTypeOfGo(rv.Elem())
- case reflect.String:
- return tomlString
- case reflect.Map:
- return tomlHash
- case reflect.Struct:
- switch rv.Interface().(type) {
- case time.Time:
- return tomlDatetime
- case TextMarshaler:
- return tomlString
- default:
- return tomlHash
- }
- default:
- panic("unexpected reflect.Kind: " + rv.Kind().String())
- }
-}
-
-// tomlArrayType returns the element type of a TOML array. The type returned
-// may be nil if it cannot be determined (e.g., a nil slice or a zero length
-// slize). This function may also panic if it finds a type that cannot be
-// expressed in TOML (such as nil elements, heterogeneous arrays or directly
-// nested arrays of tables).
-func tomlArrayType(rv reflect.Value) tomlType {
- if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
- return nil
- }
- firstType := tomlTypeOfGo(rv.Index(0))
- if firstType == nil {
- encPanic(errArrayNilElement)
- }
-
- rvlen := rv.Len()
- for i := 1; i < rvlen; i++ {
- elem := rv.Index(i)
- switch elemType := tomlTypeOfGo(elem); {
- case elemType == nil:
- encPanic(errArrayNilElement)
- case !typeEqual(firstType, elemType):
- encPanic(errArrayMixedElementTypes)
- }
- }
- // If we have a nested array, then we must make sure that the nested
- // array contains ONLY primitives.
- // This checks arbitrarily nested arrays.
- if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
- nest := tomlArrayType(eindirect(rv.Index(0)))
- if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
- encPanic(errArrayNoTable)
- }
- }
- return firstType
-}
-
-func (enc *Encoder) newline() {
- if enc.hasWritten {
- enc.wf("\n")
- }
-}
-
-func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
- if len(key) == 0 {
- encPanic(errNoKey)
- }
- panicIfInvalidKey(key, false)
- enc.wf("%s%s = ", enc.indentStr(key), key[len(key)-1])
- enc.eElement(val)
- enc.newline()
-}
-
-func (enc *Encoder) wf(format string, v ...interface{}) {
- if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
- encPanic(err)
- }
- enc.hasWritten = true
-}
-
-func (enc *Encoder) indentStr(key Key) string {
- return strings.Repeat(enc.Indent, len(key)-1)
-}
-
-func encPanic(err error) {
- panic(tomlEncodeError{err})
-}
-
-func eindirect(v reflect.Value) reflect.Value {
- switch v.Kind() {
- case reflect.Ptr, reflect.Interface:
- return eindirect(v.Elem())
- default:
- return v
- }
-}
-
-func isNil(rv reflect.Value) bool {
- switch rv.Kind() {
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return rv.IsNil()
- default:
- return false
- }
-}
-
-func panicIfInvalidKey(key Key, hash bool) {
- if hash {
- for _, k := range key {
- if !isValidTableName(k) {
- encPanic(e("Key '%s' is not a valid table name. Table names "+
- "cannot contain '[', ']' or '.'.", key.String()))
- }
- }
- } else {
- if !isValidKeyName(key[len(key)-1]) {
- encPanic(e("Key '%s' is not a name. Key names "+
- "cannot contain whitespace.", key.String()))
- }
- }
-}
-
-func isValidTableName(s string) bool {
- if len(s) == 0 {
- return false
- }
- for _, r := range s {
- if r == '[' || r == ']' || r == '.' {
- return false
- }
- }
- return true
-}
-
-func isValidKeyName(s string) bool {
- if len(s) == 0 {
- return false
- }
- return true
-}
diff --git a/vendor/github.com/BurntSushi/toml/encode_test.go b/vendor/github.com/BurntSushi/toml/encode_test.go
deleted file mode 100644
index 74a5ee5..0000000
--- a/vendor/github.com/BurntSushi/toml/encode_test.go
+++ /dev/null
@@ -1,506 +0,0 @@
-package toml
-
-import (
- "bytes"
- "fmt"
- "log"
- "net"
- "testing"
- "time"
-)
-
-func TestEncodeRoundTrip(t *testing.T) {
- type Config struct {
- Age int
- Cats []string
- Pi float64
- Perfection []int
- DOB time.Time
- Ipaddress net.IP
- }
-
- var inputs = Config{
- 13,
- []string{"one", "two", "three"},
- 3.145,
- []int{11, 2, 3, 4},
- time.Now(),
- net.ParseIP("192.168.59.254"),
- }
-
- var firstBuffer bytes.Buffer
- e := NewEncoder(&firstBuffer)
- err := e.Encode(inputs)
- if err != nil {
- t.Fatal(err)
- }
- var outputs Config
- if _, err := Decode(firstBuffer.String(), &outputs); err != nil {
- log.Printf("Could not decode:\n-----\n%s\n-----\n",
- firstBuffer.String())
- t.Fatal(err)
- }
-
- // could test each value individually, but I'm lazy
- var secondBuffer bytes.Buffer
- e2 := NewEncoder(&secondBuffer)
- err = e2.Encode(outputs)
- if err != nil {
- t.Fatal(err)
- }
- if firstBuffer.String() != secondBuffer.String() {
- t.Error(
- firstBuffer.String(),
- "\n\n is not identical to\n\n",
- secondBuffer.String())
- }
-}
-
-// XXX(burntsushi)
-// I think these tests probably should be removed. They are good, but they
-// ought to be obsolete by toml-test.
-func TestEncode(t *testing.T) {
- type Embedded struct {
- Int int `toml:"_int"`
- }
- type NonStruct int
-
- date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600))
- dateStr := "2014-05-11T19:30:40Z"
-
- tests := map[string]struct {
- input interface{}
- wantOutput string
- wantError error
- }{
- "bool field": {
- input: struct {
- BoolTrue bool
- BoolFalse bool
- }{true, false},
- wantOutput: "BoolTrue = true\nBoolFalse = false\n",
- },
- "int fields": {
- input: struct {
- Int int
- Int8 int8
- Int16 int16
- Int32 int32
- Int64 int64
- }{1, 2, 3, 4, 5},
- wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n",
- },
- "uint fields": {
- input: struct {
- Uint uint
- Uint8 uint8
- Uint16 uint16
- Uint32 uint32
- Uint64 uint64
- }{1, 2, 3, 4, 5},
- wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" +
- "\nUint64 = 5\n",
- },
- "float fields": {
- input: struct {
- Float32 float32
- Float64 float64
- }{1.5, 2.5},
- wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n",
- },
- "string field": {
- input: struct{ String string }{"foo"},
- wantOutput: "String = \"foo\"\n",
- },
- "string field and unexported field": {
- input: struct {
- String string
- unexported int
- }{"foo", 0},
- wantOutput: "String = \"foo\"\n",
- },
- "datetime field in UTC": {
- input: struct{ Date time.Time }{date},
- wantOutput: fmt.Sprintf("Date = %s\n", dateStr),
- },
- "datetime field as primitive": {
- // Using a map here to fail if isStructOrMap() returns true for
- // time.Time.
- input: map[string]interface{}{
- "Date": date,
- "Int": 1,
- },
- wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr),
- },
- "array fields": {
- input: struct {
- IntArray0 [0]int
- IntArray3 [3]int
- }{[0]int{}, [3]int{1, 2, 3}},
- wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n",
- },
- "slice fields": {
- input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{
- nil, []int{}, []int{1, 2, 3},
- },
- wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n",
- },
- "datetime slices": {
- input: struct{ DatetimeSlice []time.Time }{
- []time.Time{date, date},
- },
- wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n",
- dateStr, dateStr),
- },
- "nested arrays and slices": {
- input: struct {
- SliceOfArrays [][2]int
- ArrayOfSlices [2][]int
- SliceOfArraysOfSlices [][2][]int
- ArrayOfSlicesOfArrays [2][][2]int
- SliceOfMixedArrays [][2]interface{}
- ArrayOfMixedSlices [2][]interface{}
- }{
- [][2]int{{1, 2}, {3, 4}},
- [2][]int{{1, 2}, {3, 4}},
- [][2][]int{
- {
- {1, 2}, {3, 4},
- },
- {
- {5, 6}, {7, 8},
- },
- },
- [2][][2]int{
- {
- {1, 2}, {3, 4},
- },
- {
- {5, 6}, {7, 8},
- },
- },
- [][2]interface{}{
- {1, 2}, {"a", "b"},
- },
- [2][]interface{}{
- {1, 2}, {"a", "b"},
- },
- },
- wantOutput: `SliceOfArrays = [[1, 2], [3, 4]]
-ArrayOfSlices = [[1, 2], [3, 4]]
-SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
-ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
-SliceOfMixedArrays = [[1, 2], ["a", "b"]]
-ArrayOfMixedSlices = [[1, 2], ["a", "b"]]
-`,
- },
- "empty slice": {
- input: struct{ Empty []interface{} }{[]interface{}{}},
- wantOutput: "Empty = []\n",
- },
- "(error) slice with element type mismatch (string and integer)": {
- input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}},
- wantError: errArrayMixedElementTypes,
- },
- "(error) slice with element type mismatch (integer and float)": {
- input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}},
- wantError: errArrayMixedElementTypes,
- },
- "slice with elems of differing Go types, same TOML types": {
- input: struct {
- MixedInts []interface{}
- MixedFloats []interface{}
- }{
- []interface{}{
- int(1), int8(2), int16(3), int32(4), int64(5),
- uint(1), uint8(2), uint16(3), uint32(4), uint64(5),
- },
- []interface{}{float32(1.5), float64(2.5)},
- },
- wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" +
- "MixedFloats = [1.5, 2.5]\n",
- },
- "(error) slice w/ element type mismatch (one is nested array)": {
- input: struct{ Mixed []interface{} }{
- []interface{}{1, []interface{}{2}},
- },
- wantError: errArrayMixedElementTypes,
- },
- "(error) slice with 1 nil element": {
- input: struct{ NilElement1 []interface{} }{[]interface{}{nil}},
- wantError: errArrayNilElement,
- },
- "(error) slice with 1 nil element (and other non-nil elements)": {
- input: struct{ NilElement []interface{} }{
- []interface{}{1, nil},
- },
- wantError: errArrayNilElement,
- },
- "simple map": {
- input: map[string]int{"a": 1, "b": 2},
- wantOutput: "a = 1\nb = 2\n",
- },
- "map with interface{} value type": {
- input: map[string]interface{}{"a": 1, "b": "c"},
- wantOutput: "a = 1\nb = \"c\"\n",
- },
- "map with interface{} value type, some of which are structs": {
- input: map[string]interface{}{
- "a": struct{ Int int }{2},
- "b": 1,
- },
- wantOutput: "b = 1\n\n[a]\n Int = 2\n",
- },
- "nested map": {
- input: map[string]map[string]int{
- "a": {"b": 1},
- "c": {"d": 2},
- },
- wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n",
- },
- "nested struct": {
- input: struct{ Struct struct{ Int int } }{
- struct{ Int int }{1},
- },
- wantOutput: "[Struct]\n Int = 1\n",
- },
- "nested struct and non-struct field": {
- input: struct {
- Struct struct{ Int int }
- Bool bool
- }{struct{ Int int }{1}, true},
- wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n",
- },
- "2 nested structs": {
- input: struct{ Struct1, Struct2 struct{ Int int } }{
- struct{ Int int }{1}, struct{ Int int }{2},
- },
- wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n",
- },
- "deeply nested structs": {
- input: struct {
- Struct1, Struct2 struct{ Struct3 *struct{ Int int } }
- }{
- struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}},
- struct{ Struct3 *struct{ Int int } }{nil},
- },
- wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" +
- "\n\n[Struct2]\n",
- },
- "nested struct with nil struct elem": {
- input: struct {
- Struct struct{ Inner *struct{ Int int } }
- }{
- struct{ Inner *struct{ Int int } }{nil},
- },
- wantOutput: "[Struct]\n",
- },
- "nested struct with no fields": {
- input: struct {
- Struct struct{ Inner struct{} }
- }{
- struct{ Inner struct{} }{struct{}{}},
- },
- wantOutput: "[Struct]\n [Struct.Inner]\n",
- },
- "struct with tags": {
- input: struct {
- Struct struct {
- Int int `toml:"_int"`
- } `toml:"_struct"`
- Bool bool `toml:"_bool"`
- }{
- struct {
- Int int `toml:"_int"`
- }{1}, true,
- },
- wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n",
- },
- "embedded struct": {
- input: struct{ Embedded }{Embedded{1}},
- wantOutput: "_int = 1\n",
- },
- "embedded *struct": {
- input: struct{ *Embedded }{&Embedded{1}},
- wantOutput: "_int = 1\n",
- },
- "nested embedded struct": {
- input: struct {
- Struct struct{ Embedded } `toml:"_struct"`
- }{struct{ Embedded }{Embedded{1}}},
- wantOutput: "[_struct]\n _int = 1\n",
- },
- "nested embedded *struct": {
- input: struct {
- Struct struct{ *Embedded } `toml:"_struct"`
- }{struct{ *Embedded }{&Embedded{1}}},
- wantOutput: "[_struct]\n _int = 1\n",
- },
- "array of tables": {
- input: struct {
- Structs []*struct{ Int int } `toml:"struct"`
- }{
- []*struct{ Int int }{{1}, {3}},
- },
- wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n",
- },
- "array of tables order": {
- input: map[string]interface{}{
- "map": map[string]interface{}{
- "zero": 5,
- "arr": []map[string]int{
- map[string]int{
- "friend": 5,
- },
- },
- },
- },
- wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n",
- },
- "(error) top-level slice": {
- input: []struct{ Int int }{{1}, {2}, {3}},
- wantError: errNoKey,
- },
- "(error) slice of slice": {
- input: struct {
- Slices [][]struct{ Int int }
- }{
- [][]struct{ Int int }{{{1}}, {{2}}, {{3}}},
- },
- wantError: errArrayNoTable,
- },
- "(error) map no string key": {
- input: map[int]string{1: ""},
- wantError: errNonString,
- },
- "(error) anonymous non-struct": {
- input: struct{ NonStruct }{5},
- wantError: errAnonNonStruct,
- },
- "(error) empty key name": {
- input: map[string]int{"": 1},
- wantError: errAnything,
- },
- "(error) empty map name": {
- input: map[string]interface{}{
- "": map[string]int{"v": 1},
- },
- wantError: errAnything,
- },
- }
- for label, test := range tests {
- encodeExpected(t, label, test.input, test.wantOutput, test.wantError)
- }
-}
-
-func TestEncodeNestedTableArrays(t *testing.T) {
- type song struct {
- Name string `toml:"name"`
- }
- type album struct {
- Name string `toml:"name"`
- Songs []song `toml:"songs"`
- }
- type springsteen struct {
- Albums []album `toml:"albums"`
- }
- value := springsteen{
- []album{
- {"Born to Run",
- []song{{"Jungleland"}, {"Meeting Across the River"}}},
- {"Born in the USA",
- []song{{"Glory Days"}, {"Dancing in the Dark"}}},
- },
- }
- expected := `[[albums]]
- name = "Born to Run"
-
- [[albums.songs]]
- name = "Jungleland"
-
- [[albums.songs]]
- name = "Meeting Across the River"
-
-[[albums]]
- name = "Born in the USA"
-
- [[albums.songs]]
- name = "Glory Days"
-
- [[albums.songs]]
- name = "Dancing in the Dark"
-`
- encodeExpected(t, "nested table arrays", value, expected, nil)
-}
-
-func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) {
- type Alpha struct {
- V int
- }
- type Beta struct {
- V int
- }
- type Conf struct {
- V int
- A Alpha
- B []Beta
- }
-
- val := Conf{
- V: 1,
- A: Alpha{2},
- B: []Beta{{3}},
- }
- expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n"
- encodeExpected(t, "array hash with normal hash order", val, expected, nil)
-}
-
-func encodeExpected(
- t *testing.T, label string, val interface{}, wantStr string, wantErr error,
-) {
- var buf bytes.Buffer
- enc := NewEncoder(&buf)
- err := enc.Encode(val)
- if err != wantErr {
- if wantErr != nil {
- if wantErr == errAnything && err != nil {
- return
- }
- t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err)
- } else {
- t.Errorf("%s: Encode failed: %s", label, err)
- }
- }
- if err != nil {
- return
- }
- if got := buf.String(); wantStr != got {
- t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n",
- label, wantStr, got)
- }
-}
-
-func ExampleEncoder_Encode() {
- date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC")
- var config = map[string]interface{}{
- "date": date,
- "counts": []int{1, 1, 2, 3, 5, 8},
- "hash": map[string]string{
- "key1": "val1",
- "key2": "val2",
- },
- }
- buf := new(bytes.Buffer)
- if err := NewEncoder(buf).Encode(config); err != nil {
- log.Fatal(err)
- }
- fmt.Println(buf.String())
-
- // Output:
- // counts = [1, 1, 2, 3, 5, 8]
- // date = 2010-03-14T18:00:00Z
- //
- // [hash]
- // key1 = "val1"
- // key2 = "val2"
-}
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go
deleted file mode 100644
index 140c44c..0000000
--- a/vendor/github.com/BurntSushi/toml/encoding_types.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build go1.2
-
-package toml
-
-// In order to support Go 1.1, we define our own TextMarshaler and
-// TextUnmarshaler types. For Go 1.2+, we just alias them with the
-// standard library interfaces.
-
-import (
- "encoding"
-)
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler encoding.TextMarshaler
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
deleted file mode 100644
index fb285e7..0000000
--- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !go1.2
-
-package toml
-
-// These interfaces were introduced in Go 1.2, so we add them manually when
-// compiling for Go 1.1.
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler interface {
- MarshalText() (text []byte, err error)
-}
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextUnmarshaler interface {
- UnmarshalText(text []byte) error
-}
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
deleted file mode 100644
index 3821fa2..0000000
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ /dev/null
@@ -1,734 +0,0 @@
-package toml
-
-import (
- "fmt"
- "strings"
- "unicode/utf8"
-)
-
-type itemType int
-
-const (
- itemError itemType = iota
- itemNIL // used in the parser to indicate no type
- itemEOF
- itemText
- itemString
- itemBool
- itemInteger
- itemFloat
- itemDatetime
- itemArray // the start of an array
- itemArrayEnd
- itemTableStart
- itemTableEnd
- itemArrayTableStart
- itemArrayTableEnd
- itemKeyStart
- itemCommentStart
-)
-
-const (
- eof = 0
- tableStart = '['
- tableEnd = ']'
- arrayTableStart = '['
- arrayTableEnd = ']'
- tableSep = '.'
- keySep = '='
- arrayStart = '['
- arrayEnd = ']'
- arrayValTerm = ','
- commentStart = '#'
- stringStart = '"'
- stringEnd = '"'
-)
-
-type stateFn func(lx *lexer) stateFn
-
-type lexer struct {
- input string
- start int
- pos int
- width int
- line int
- state stateFn
- items chan item
-
- // A stack of state functions used to maintain context.
- // The idea is to reuse parts of the state machine in various places.
- // For example, values can appear at the top level or within arbitrarily
- // nested arrays. The last state on the stack is used after a value has
- // been lexed. Similarly for comments.
- stack []stateFn
-}
-
-type item struct {
- typ itemType
- val string
- line int
-}
-
-func (lx *lexer) nextItem() item {
- for {
- select {
- case item := <-lx.items:
- return item
- default:
- lx.state = lx.state(lx)
- }
- }
-}
-
-func lex(input string) *lexer {
- lx := &lexer{
- input: input + "\n",
- state: lexTop,
- line: 1,
- items: make(chan item, 10),
- stack: make([]stateFn, 0, 10),
- }
- return lx
-}
-
-func (lx *lexer) push(state stateFn) {
- lx.stack = append(lx.stack, state)
-}
-
-func (lx *lexer) pop() stateFn {
- if len(lx.stack) == 0 {
- return lx.errorf("BUG in lexer: no states to pop.")
- }
- last := lx.stack[len(lx.stack)-1]
- lx.stack = lx.stack[0 : len(lx.stack)-1]
- return last
-}
-
-func (lx *lexer) current() string {
- return lx.input[lx.start:lx.pos]
-}
-
-func (lx *lexer) emit(typ itemType) {
- lx.items <- item{typ, lx.current(), lx.line}
- lx.start = lx.pos
-}
-
-func (lx *lexer) emitTrim(typ itemType) {
- lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
- lx.start = lx.pos
-}
-
-func (lx *lexer) next() (r rune) {
- if lx.pos >= len(lx.input) {
- lx.width = 0
- return eof
- }
-
- if lx.input[lx.pos] == '\n' {
- lx.line++
- }
- r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
- lx.pos += lx.width
- return r
-}
-
-// ignore skips over the pending input before this point.
-func (lx *lexer) ignore() {
- lx.start = lx.pos
-}
-
-// backup steps back one rune. Can be called only once per call of next.
-func (lx *lexer) backup() {
- lx.pos -= lx.width
- if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
- lx.line--
- }
-}
-
-// accept consumes the next rune if it's equal to `valid`.
-func (lx *lexer) accept(valid rune) bool {
- if lx.next() == valid {
- return true
- }
- lx.backup()
- return false
-}
-
-// peek returns but does not consume the next rune in the input.
-func (lx *lexer) peek() rune {
- r := lx.next()
- lx.backup()
- return r
-}
-
-// errorf stops all lexing by emitting an error and returning `nil`.
-// Note that any value that is a character is escaped if it's a special
-// character (new lines, tabs, etc.).
-func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
- lx.items <- item{
- itemError,
- fmt.Sprintf(format, values...),
- lx.line,
- }
- return nil
-}
-
-// lexTop consumes elements at the top level of TOML data.
-func lexTop(lx *lexer) stateFn {
- r := lx.next()
- if isWhitespace(r) || isNL(r) {
- return lexSkip(lx, lexTop)
- }
-
- switch r {
- case commentStart:
- lx.push(lexTop)
- return lexCommentStart
- case tableStart:
- return lexTableStart
- case eof:
- if lx.pos > lx.start {
- return lx.errorf("Unexpected EOF.")
- }
- lx.emit(itemEOF)
- return nil
- }
-
- // At this point, the only valid item can be a key, so we back up
- // and let the key lexer do the rest.
- lx.backup()
- lx.push(lexTopEnd)
- return lexKeyStart
-}
-
-// lexTopEnd is entered whenever a top-level item has been consumed. (A value
-// or a table.) It must see only whitespace, and will turn back to lexTop
-// upon a new line. If it sees EOF, it will quit the lexer successfully.
-func lexTopEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == commentStart:
- // a comment will read to a new line for us.
- lx.push(lexTop)
- return lexCommentStart
- case isWhitespace(r):
- return lexTopEnd
- case isNL(r):
- lx.ignore()
- return lexTop
- case r == eof:
- lx.ignore()
- return lexTop
- }
- return lx.errorf("Expected a top-level item to end with a new line, "+
- "comment or EOF, but got %q instead.", r)
-}
-
-// lexTable lexes the beginning of a table. Namely, it makes sure that
-// it starts with a character other than '.' and ']'.
-// It assumes that '[' has already been consumed.
-// It also handles the case that this is an item in an array of tables.
-// e.g., '[[name]]'.
-func lexTableStart(lx *lexer) stateFn {
- if lx.peek() == arrayTableStart {
- lx.next()
- lx.emit(itemArrayTableStart)
- lx.push(lexArrayTableEnd)
- } else {
- lx.emit(itemTableStart)
- lx.push(lexTableEnd)
- }
- return lexTableNameStart
-}
-
-func lexTableEnd(lx *lexer) stateFn {
- lx.emit(itemTableEnd)
- return lexTopEnd
-}
-
-func lexArrayTableEnd(lx *lexer) stateFn {
- if r := lx.next(); r != arrayTableEnd {
- return lx.errorf("Expected end of table array name delimiter %q, "+
- "but got %q instead.", arrayTableEnd, r)
- }
- lx.emit(itemArrayTableEnd)
- return lexTopEnd
-}
-
-func lexTableNameStart(lx *lexer) stateFn {
- switch lx.next() {
- case tableEnd, eof:
- return lx.errorf("Unexpected end of table. (Tables cannot " +
- "be empty.)")
- case tableSep:
- return lx.errorf("Unexpected table separator. (Tables cannot " +
- "be empty.)")
- }
- return lexTableName
-}
-
-// lexTableName lexes the name of a table. It assumes that at least one
-// valid character for the table has already been read.
-func lexTableName(lx *lexer) stateFn {
- switch lx.peek() {
- case eof:
- return lx.errorf("Unexpected end of table name %q.", lx.current())
- case tableStart:
- return lx.errorf("Table names cannot contain %q or %q.",
- tableStart, tableEnd)
- case tableEnd:
- lx.emit(itemText)
- lx.next()
- return lx.pop()
- case tableSep:
- lx.emit(itemText)
- lx.next()
- lx.ignore()
- return lexTableNameStart
- }
- lx.next()
- return lexTableName
-}
-
-// lexKeyStart consumes a key name up until the first non-whitespace character.
-// lexKeyStart will ignore whitespace.
-func lexKeyStart(lx *lexer) stateFn {
- r := lx.peek()
- switch {
- case r == keySep:
- return lx.errorf("Unexpected key separator %q.", keySep)
- case isWhitespace(r) || isNL(r):
- lx.next()
- return lexSkip(lx, lexKeyStart)
- }
-
- lx.ignore()
- lx.emit(itemKeyStart)
- lx.next()
- return lexKey
-}
-
-// lexKey consumes the text of a key. Assumes that the first character (which
-// is not whitespace) has already been consumed.
-func lexKey(lx *lexer) stateFn {
- r := lx.peek()
-
- // Keys cannot contain a '#' character.
- if r == commentStart {
- return lx.errorf("Key cannot contain a '#' character.")
- }
-
- // XXX: Possible divergence from spec?
- // "Keys start with the first non-whitespace character and end with the
- // last non-whitespace character before the equals sign."
- // Note here that whitespace is either a tab or a space.
- // But we'll call it quits if we see a new line too.
- if isNL(r) {
- lx.emitTrim(itemText)
- return lexKeyEnd
- }
-
- // Let's also call it quits if we see an equals sign.
- if r == keySep {
- lx.emitTrim(itemText)
- return lexKeyEnd
- }
-
- lx.next()
- return lexKey
-}
-
-// lexKeyEnd consumes the end of a key (up to the key separator).
-// Assumes that any whitespace after a key has been consumed.
-func lexKeyEnd(lx *lexer) stateFn {
- r := lx.next()
- if r == keySep {
- return lexSkip(lx, lexValue)
- }
- return lx.errorf("Expected key separator %q, but got %q instead.",
- keySep, r)
-}
-
-// lexValue starts the consumption of a value anywhere a value is expected.
-// lexValue will ignore whitespace.
-// After a value is lexed, the last state on the next is popped and returned.
-func lexValue(lx *lexer) stateFn {
- // We allow whitespace to precede a value, but NOT new lines.
- // In array syntax, the array states are responsible for ignoring new lines.
- r := lx.next()
- if isWhitespace(r) {
- return lexSkip(lx, lexValue)
- }
-
- switch {
- case r == arrayStart:
- lx.ignore()
- lx.emit(itemArray)
- return lexArrayValue
- case r == stringStart:
- lx.ignore() // ignore the '"'
- return lexString
- case r == 't':
- return lexTrue
- case r == 'f':
- return lexFalse
- case r == '-':
- return lexNumberStart
- case isDigit(r):
- lx.backup() // avoid an extra state and use the same as above
- return lexNumberOrDateStart
- case r == '.': // special error case, be kind to users
- return lx.errorf("Floats must start with a digit, not '.'.")
- }
- return lx.errorf("Expected value but found %q instead.", r)
-}
-
-// lexArrayValue consumes one value in an array. It assumes that '[' or ','
-// have already been consumed. All whitespace and new lines are ignored.
-func lexArrayValue(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r) || isNL(r):
- return lexSkip(lx, lexArrayValue)
- case r == commentStart:
- lx.push(lexArrayValue)
- return lexCommentStart
- case r == arrayValTerm:
- return lx.errorf("Unexpected array value terminator %q.",
- arrayValTerm)
- case r == arrayEnd:
- return lexArrayEnd
- }
-
- lx.backup()
- lx.push(lexArrayValueEnd)
- return lexValue
-}
-
-// lexArrayValueEnd consumes the cruft between values of an array. Namely,
-// it ignores whitespace and expects either a ',' or a ']'.
-func lexArrayValueEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r) || isNL(r):
- return lexSkip(lx, lexArrayValueEnd)
- case r == commentStart:
- lx.push(lexArrayValueEnd)
- return lexCommentStart
- case r == arrayValTerm:
- lx.ignore()
- return lexArrayValue // move on to the next value
- case r == arrayEnd:
- return lexArrayEnd
- }
- return lx.errorf("Expected an array value terminator %q or an array "+
- "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
-}
-
-// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
-// just been consumed.
-func lexArrayEnd(lx *lexer) stateFn {
- lx.ignore()
- lx.emit(itemArrayEnd)
- return lx.pop()
-}
-
-// lexString consumes the inner contents of a string. It assumes that the
-// beginning '"' has already been consumed and ignored.
-func lexString(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isNL(r):
- return lx.errorf("Strings cannot contain new lines.")
- case r == '\\':
- return lexStringEscape
- case r == stringEnd:
- lx.backup()
- lx.emit(itemString)
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- return lexString
-}
-
-// lexStringEscape consumes an escaped character. It assumes that the preceding
-// '\\' has already been consumed.
-func lexStringEscape(lx *lexer) stateFn {
- r := lx.next()
- switch r {
- case 'b':
- fallthrough
- case 't':
- fallthrough
- case 'n':
- fallthrough
- case 'f':
- fallthrough
- case 'r':
- fallthrough
- case '"':
- fallthrough
- case '/':
- fallthrough
- case '\\':
- return lexString
- case 'u':
- return lexStringUnicode
- }
- return lx.errorf("Invalid escape character %q. Only the following "+
- "escape characters are allowed: "+
- "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, and \\uXXXX.", r)
-}
-
-// lexStringBinary consumes two hexadecimal digits following '\x'. It assumes
-// that the '\x' has already been consumed.
-func lexStringUnicode(lx *lexer) stateFn {
- var r rune
-
- for i := 0; i < 4; i++ {
- r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf("Expected four hexadecimal digits after '\\x', "+
- "but got '%s' instead.", lx.current())
- }
- }
- return lexString
-}
-
-// lexNumberOrDateStart consumes either a (positive) integer, float or datetime.
-// It assumes that NO negative sign has been consumed.
-func lexNumberOrDateStart(lx *lexer) stateFn {
- r := lx.next()
- if !isDigit(r) {
- if r == '.' {
- return lx.errorf("Floats must start with a digit, not '.'.")
- } else {
- return lx.errorf("Expected a digit but got %q.", r)
- }
- }
- return lexNumberOrDate
-}
-
-// lexNumberOrDate consumes either a (positive) integer, float or datetime.
-func lexNumberOrDate(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == '-':
- if lx.pos-lx.start != 5 {
- return lx.errorf("All ISO8601 dates must be in full Zulu form.")
- }
- return lexDateAfterYear
- case isDigit(r):
- return lexNumberOrDate
- case r == '.':
- return lexFloatStart
- }
-
- lx.backup()
- lx.emit(itemInteger)
- return lx.pop()
-}
-
-// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
-// It assumes that "YYYY-" has already been consumed.
-func lexDateAfterYear(lx *lexer) stateFn {
- formats := []rune{
- // digits are '0'.
- // everything else is direct equality.
- '0', '0', '-', '0', '0',
- 'T',
- '0', '0', ':', '0', '0', ':', '0', '0',
- 'Z',
- }
- for _, f := range formats {
- r := lx.next()
- if f == '0' {
- if !isDigit(r) {
- return lx.errorf("Expected digit in ISO8601 datetime, "+
- "but found %q instead.", r)
- }
- } else if f != r {
- return lx.errorf("Expected %q in ISO8601 datetime, "+
- "but found %q instead.", f, r)
- }
- }
- lx.emit(itemDatetime)
- return lx.pop()
-}
-
-// lexNumberStart consumes either an integer or a float. It assumes that a
-// negative sign has already been read, but that *no* digits have been consumed.
-// lexNumberStart will move to the appropriate integer or float states.
-func lexNumberStart(lx *lexer) stateFn {
- // we MUST see a digit. Even floats have to start with a digit.
- r := lx.next()
- if !isDigit(r) {
- if r == '.' {
- return lx.errorf("Floats must start with a digit, not '.'.")
- } else {
- return lx.errorf("Expected a digit but got %q.", r)
- }
- }
- return lexNumber
-}
-
-// lexNumber consumes an integer or a float after seeing the first digit.
-func lexNumber(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isDigit(r):
- return lexNumber
- case r == '.':
- return lexFloatStart
- }
-
- lx.backup()
- lx.emit(itemInteger)
- return lx.pop()
-}
-
-// lexFloatStart starts the consumption of digits of a float after a '.'.
-// Namely, at least one digit is required.
-func lexFloatStart(lx *lexer) stateFn {
- r := lx.next()
- if !isDigit(r) {
- return lx.errorf("Floats must have a digit after the '.', but got "+
- "%q instead.", r)
- }
- return lexFloat
-}
-
-// lexFloat consumes the digits of a float after a '.'.
-// Assumes that one digit has been consumed after a '.' already.
-func lexFloat(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexFloat
- }
-
- lx.backup()
- lx.emit(itemFloat)
- return lx.pop()
-}
-
-// lexConst consumes the s[1:] in s. It assumes that s[0] has already been
-// consumed.
-func lexConst(lx *lexer, s string) stateFn {
- for i := range s[1:] {
- if r := lx.next(); r != rune(s[i+1]) {
- return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
- s[:i]+string(r))
- }
- }
- return nil
-}
-
-// lexTrue consumes the "rue" in "true". It assumes that 't' has already
-// been consumed.
-func lexTrue(lx *lexer) stateFn {
- if fn := lexConst(lx, "true"); fn != nil {
- return fn
- }
- lx.emit(itemBool)
- return lx.pop()
-}
-
-// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
-// been consumed.
-func lexFalse(lx *lexer) stateFn {
- if fn := lexConst(lx, "false"); fn != nil {
- return fn
- }
- lx.emit(itemBool)
- return lx.pop()
-}
-
-// lexCommentStart begins the lexing of a comment. It will emit
-// itemCommentStart and consume no characters, passing control to lexComment.
-func lexCommentStart(lx *lexer) stateFn {
- lx.ignore()
- lx.emit(itemCommentStart)
- return lexComment
-}
-
-// lexComment lexes an entire comment. It assumes that '#' has been consumed.
-// It will consume *up to* the first new line character, and pass control
-// back to the last state on the stack.
-func lexComment(lx *lexer) stateFn {
- r := lx.peek()
- if isNL(r) || r == eof {
- lx.emit(itemText)
- return lx.pop()
- }
- lx.next()
- return lexComment
-}
-
-// lexSkip ignores all slurped input and moves on to the next state.
-func lexSkip(lx *lexer, nextState stateFn) stateFn {
- return func(lx *lexer) stateFn {
- lx.ignore()
- return nextState
- }
-}
-
-// isWhitespace returns true if `r` is a whitespace character according
-// to the spec.
-func isWhitespace(r rune) bool {
- return r == '\t' || r == ' '
-}
-
-func isNL(r rune) bool {
- return r == '\n' || r == '\r'
-}
-
-func isDigit(r rune) bool {
- return r >= '0' && r <= '9'
-}
-
-func isHexadecimal(r rune) bool {
- return (r >= '0' && r <= '9') ||
- (r >= 'a' && r <= 'f') ||
- (r >= 'A' && r <= 'F')
-}
-
-func (itype itemType) String() string {
- switch itype {
- case itemError:
- return "Error"
- case itemNIL:
- return "NIL"
- case itemEOF:
- return "EOF"
- case itemText:
- return "Text"
- case itemString:
- return "String"
- case itemBool:
- return "Bool"
- case itemInteger:
- return "Integer"
- case itemFloat:
- return "Float"
- case itemDatetime:
- return "DateTime"
- case itemTableStart:
- return "TableStart"
- case itemTableEnd:
- return "TableEnd"
- case itemKeyStart:
- return "KeyStart"
- case itemArray:
- return "Array"
- case itemArrayEnd:
- return "ArrayEnd"
- case itemCommentStart:
- return "CommentStart"
- }
- panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
-}
-
-func (item item) String() string {
- return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
-}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
deleted file mode 100644
index 43afe3c..0000000
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ /dev/null
@@ -1,417 +0,0 @@
-package toml
-
-import (
- "fmt"
- "log"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-)
-
-type parser struct {
- mapping map[string]interface{}
- types map[string]tomlType
- lx *lexer
-
- // A list of keys in the order that they appear in the TOML data.
- ordered []Key
-
- // the full key for the current hash in scope
- context Key
-
- // the base key name for everything except hashes
- currentKey string
-
- // rough approximation of line number
- approxLine int
-
- // A map of 'key.group.names' to whether they were created implicitly.
- implicits map[string]bool
-}
-
-type parseError string
-
-func (pe parseError) Error() string {
- return string(pe)
-}
-
-func parse(data string) (p *parser, err error) {
- defer func() {
- if r := recover(); r != nil {
- var ok bool
- if err, ok = r.(parseError); ok {
- return
- }
- panic(r)
- }
- }()
-
- p = &parser{
- mapping: make(map[string]interface{}),
- types: make(map[string]tomlType),
- lx: lex(data),
- ordered: make([]Key, 0),
- implicits: make(map[string]bool),
- }
- for {
- item := p.next()
- if item.typ == itemEOF {
- break
- }
- p.topLevel(item)
- }
-
- return p, nil
-}
-
-func (p *parser) panicf(format string, v ...interface{}) {
- msg := fmt.Sprintf("Near line %d, key '%s': %s",
- p.approxLine, p.current(), fmt.Sprintf(format, v...))
- panic(parseError(msg))
-}
-
-func (p *parser) next() item {
- it := p.lx.nextItem()
- if it.typ == itemError {
- p.panicf("Near line %d: %s", it.line, it.val)
- }
- return it
-}
-
-func (p *parser) bug(format string, v ...interface{}) {
- log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...))
-}
-
-func (p *parser) expect(typ itemType) item {
- it := p.next()
- p.assertEqual(typ, it.typ)
- return it
-}
-
-func (p *parser) assertEqual(expected, got itemType) {
- if expected != got {
- p.bug("Expected '%s' but got '%s'.", expected, got)
- }
-}
-
-func (p *parser) topLevel(item item) {
- switch item.typ {
- case itemCommentStart:
- p.approxLine = item.line
- p.expect(itemText)
- case itemTableStart:
- kg := p.expect(itemText)
- p.approxLine = kg.line
-
- key := make(Key, 0)
- for ; kg.typ == itemText; kg = p.next() {
- key = append(key, kg.val)
- }
- p.assertEqual(itemTableEnd, kg.typ)
-
- p.establishContext(key, false)
- p.setType("", tomlHash)
- p.ordered = append(p.ordered, key)
- case itemArrayTableStart:
- kg := p.expect(itemText)
- p.approxLine = kg.line
-
- key := make(Key, 0)
- for ; kg.typ == itemText; kg = p.next() {
- key = append(key, kg.val)
- }
- p.assertEqual(itemArrayTableEnd, kg.typ)
-
- p.establishContext(key, true)
- p.setType("", tomlArrayHash)
- p.ordered = append(p.ordered, key)
- case itemKeyStart:
- kname := p.expect(itemText)
- p.currentKey = kname.val
- p.approxLine = kname.line
-
- val, typ := p.value(p.next())
- p.setValue(p.currentKey, val)
- p.setType(p.currentKey, typ)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
-
- p.currentKey = ""
- default:
- p.bug("Unexpected type at top level: %s", item.typ)
- }
-}
-
-// value translates an expected value from the lexer into a Go value wrapped
-// as an empty interface.
-func (p *parser) value(it item) (interface{}, tomlType) {
- switch it.typ {
- case itemString:
- return p.replaceUnicode(replaceEscapes(it.val)), p.typeOfPrimitive(it)
- case itemBool:
- switch it.val {
- case "true":
- return true, p.typeOfPrimitive(it)
- case "false":
- return false, p.typeOfPrimitive(it)
- }
- p.bug("Expected boolean value, but got '%s'.", it.val)
- case itemInteger:
- num, err := strconv.ParseInt(it.val, 10, 64)
- if err != nil {
- // See comment below for floats describing why we make a
- // distinction between a bug and a user error.
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Integer '%s' is out of the range of 64-bit "+
- "signed integers.", it.val)
- } else {
- p.bug("Expected integer value, but got '%s'.", it.val)
- }
- }
- return num, p.typeOfPrimitive(it)
- case itemFloat:
- num, err := strconv.ParseFloat(it.val, 64)
- if err != nil {
- // Distinguish float values. Normally, it'd be a bug if the lexer
- // provides an invalid float, but it's possible that the float is
- // out of range of valid values (which the lexer cannot determine).
- // So mark the former as a bug but the latter as a legitimate user
- // error.
- //
- // This is also true for integers.
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Float '%s' is out of the range of 64-bit "+
- "IEEE-754 floating-point numbers.", it.val)
- } else {
- p.bug("Expected float value, but got '%s'.", it.val)
- }
- }
- return num, p.typeOfPrimitive(it)
- case itemDatetime:
- t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
- if err != nil {
- p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val)
- }
- return t, p.typeOfPrimitive(it)
- case itemArray:
- array := make([]interface{}, 0)
- types := make([]tomlType, 0)
-
- for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
- if it.typ == itemCommentStart {
- p.expect(itemText)
- continue
- }
-
- val, typ := p.value(it)
- array = append(array, val)
- types = append(types, typ)
- }
- return array, p.typeOfArray(types)
- }
- p.bug("Unexpected value type: %s", it.typ)
- panic("unreachable")
-}
-
-// establishContext sets the current context of the parser,
-// where the context is either a hash or an array of hashes. Which one is
-// set depends on the value of the `array` parameter.
-//
-// Establishing the context also makes sure that the key isn't a duplicate, and
-// will create implicit hashes automatically.
-func (p *parser) establishContext(key Key, array bool) {
- var ok bool
-
- // Always start at the top level and drill down for our context.
- hashContext := p.mapping
- keyContext := make(Key, 0)
-
- // We only need implicit hashes for key[0:-1]
- for _, k := range key[0 : len(key)-1] {
- _, ok = hashContext[k]
- keyContext = append(keyContext, k)
-
- // No key? Make an implicit hash and move on.
- if !ok {
- p.addImplicit(keyContext)
- hashContext[k] = make(map[string]interface{})
- }
-
- // If the hash context is actually an array of tables, then set
- // the hash context to the last element in that array.
- //
- // Otherwise, it better be a table, since this MUST be a key group (by
- // virtue of it not being the last element in a key).
- switch t := hashContext[k].(type) {
- case []map[string]interface{}:
- hashContext = t[len(t)-1]
- case map[string]interface{}:
- hashContext = t
- default:
- p.panicf("Key '%s' was already created as a hash.", keyContext)
- }
- }
-
- p.context = keyContext
- if array {
- // If this is the first element for this array, then allocate a new
- // list of tables for it.
- k := key[len(key)-1]
- if _, ok := hashContext[k]; !ok {
- hashContext[k] = make([]map[string]interface{}, 0, 5)
- }
-
- // Add a new table. But make sure the key hasn't already been used
- // for something else.
- if hash, ok := hashContext[k].([]map[string]interface{}); ok {
- hashContext[k] = append(hash, make(map[string]interface{}))
- } else {
- p.panicf("Key '%s' was already created and cannot be used as "+
- "an array.", keyContext)
- }
- } else {
- p.setValue(key[len(key)-1], make(map[string]interface{}))
- }
- p.context = append(p.context, key[len(key)-1])
-}
-
-// setValue sets the given key to the given value in the current context.
-// It will make sure that the key hasn't already been defined, account for
-// implicit key groups.
-func (p *parser) setValue(key string, value interface{}) {
- var tmpHash interface{}
- var ok bool
-
- hash := p.mapping
- keyContext := make(Key, 0)
- for _, k := range p.context {
- keyContext = append(keyContext, k)
- if tmpHash, ok = hash[k]; !ok {
- p.bug("Context for key '%s' has not been established.", keyContext)
- }
- switch t := tmpHash.(type) {
- case []map[string]interface{}:
- // The context is a table of hashes. Pick the most recent table
- // defined as the current hash.
- hash = t[len(t)-1]
- case map[string]interface{}:
- hash = t
- default:
- p.bug("Expected hash to have type 'map[string]interface{}', but "+
- "it has '%T' instead.", tmpHash)
- }
- }
- keyContext = append(keyContext, key)
-
- if _, ok := hash[key]; ok {
- // Typically, if the given key has already been set, then we have
- // to raise an error since duplicate keys are disallowed. However,
- // it's possible that a key was previously defined implicitly. In this
- // case, it is allowed to be redefined concretely. (See the
- // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
- //
- // But we have to make sure to stop marking it as an implicit. (So that
- // another redefinition provokes an error.)
- //
- // Note that since it has already been defined (as a hash), we don't
- // want to overwrite it. So our business is done.
- if p.isImplicit(keyContext) {
- p.removeImplicit(keyContext)
- return
- }
-
- // Otherwise, we have a concrete key trying to override a previous
- // key, which is *always* wrong.
- p.panicf("Key '%s' has already been defined.", keyContext)
- }
- hash[key] = value
-}
-
-// setType sets the type of a particular value at a given key.
-// It should be called immediately AFTER setValue.
-//
-// Note that if `key` is empty, then the type given will be applied to the
-// current context (which is either a table or an array of tables).
-func (p *parser) setType(key string, typ tomlType) {
- keyContext := make(Key, 0, len(p.context)+1)
- for _, k := range p.context {
- keyContext = append(keyContext, k)
- }
- if len(key) > 0 { // allow type setting for hashes
- keyContext = append(keyContext, key)
- }
- p.types[keyContext.String()] = typ
-}
-
-// addImplicit sets the given Key as having been created implicitly.
-func (p *parser) addImplicit(key Key) {
- p.implicits[key.String()] = true
-}
-
-// removeImplicit stops tagging the given key as having been implicitly created.
-func (p *parser) removeImplicit(key Key) {
- p.implicits[key.String()] = false
-}
-
-// isImplicit returns true if the key group pointed to by the key was created
-// implicitly.
-func (p *parser) isImplicit(key Key) bool {
- return p.implicits[key.String()]
-}
-
-// current returns the full key name of the current context.
-func (p *parser) current() string {
- if len(p.currentKey) == 0 {
- return p.context.String()
- }
- if len(p.context) == 0 {
- return p.currentKey
- }
- return fmt.Sprintf("%s.%s", p.context, p.currentKey)
-}
-
-func replaceEscapes(s string) string {
- return strings.NewReplacer(
- "\\b", "\u0008",
- "\\t", "\u0009",
- "\\n", "\u000A",
- "\\f", "\u000C",
- "\\r", "\u000D",
- "\\\"", "\u0022",
- "\\/", "\u002F",
- "\\\\", "\u005C",
- ).Replace(s)
-}
-
-func (p *parser) replaceUnicode(s string) string {
- indexEsc := func() int {
- return strings.Index(s, "\\u")
- }
- for i := indexEsc(); i != -1; i = indexEsc() {
- asciiBytes := s[i+2 : i+6]
- s = strings.Replace(s, s[i:i+6], p.asciiEscapeToUnicode(asciiBytes), -1)
- }
- return s
-}
-
-func (p *parser) asciiEscapeToUnicode(s string) string {
- hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
- if err != nil {
- p.bug("Could not parse '%s' as a hexadecimal number, but the "+
- "lexer claims it's OK: %s", s, err)
- }
-
- // BUG(burntsushi)
- // I honestly don't understand how this works. I can't seem
- // to find a way to make this fail. I figured this would fail on invalid
- // UTF-8 characters like U+DCFF, but it doesn't.
- r := string(rune(hex))
- if !utf8.ValidString(r) {
- p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
- }
- return string(r)
-}
diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim
deleted file mode 100644
index 562164b..0000000
--- a/vendor/github.com/BurntSushi/toml/session.vim
+++ /dev/null
@@ -1 +0,0 @@
-au BufWritePost *.go silent!make tags > /dev/null 2>&1
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
deleted file mode 100644
index 79dac6b..0000000
--- a/vendor/github.com/BurntSushi/toml/type_check.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package toml
-
-// tomlType represents any Go type that corresponds to a TOML type.
-// While the first draft of the TOML spec has a simplistic type system that
-// probably doesn't need this level of sophistication, we seem to be militating
-// toward adding real composite types.
-type tomlType interface {
- typeString() string
-}
-
-// typeEqual accepts any two types and returns true if they are equal.
-func typeEqual(t1, t2 tomlType) bool {
- if t1 == nil || t2 == nil {
- return false
- }
- return t1.typeString() == t2.typeString()
-}
-
-func typeIsHash(t tomlType) bool {
- return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
-}
-
-type tomlBaseType string
-
-func (btype tomlBaseType) typeString() string {
- return string(btype)
-}
-
-func (btype tomlBaseType) String() string {
- return btype.typeString()
-}
-
-var (
- tomlInteger tomlBaseType = "Integer"
- tomlFloat tomlBaseType = "Float"
- tomlDatetime tomlBaseType = "Datetime"
- tomlString tomlBaseType = "String"
- tomlBool tomlBaseType = "Bool"
- tomlArray tomlBaseType = "Array"
- tomlHash tomlBaseType = "Hash"
- tomlArrayHash tomlBaseType = "ArrayHash"
-)
-
-// typeOfPrimitive returns a tomlType of any primitive value in TOML.
-// Primitive values are: Integer, Float, Datetime, String and Bool.
-//
-// Passing a lexer item other than the following will cause a BUG message
-// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
-func (p *parser) typeOfPrimitive(lexItem item) tomlType {
- switch lexItem.typ {
- case itemInteger:
- return tomlInteger
- case itemFloat:
- return tomlFloat
- case itemDatetime:
- return tomlDatetime
- case itemString:
- return tomlString
- case itemBool:
- return tomlBool
- }
- p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
- panic("unreachable")
-}
-
-// typeOfArray returns a tomlType for an array given a list of types of its
-// values.
-//
-// In the current spec, if an array is homogeneous, then its type is always
-// "Array". If the array is not homogeneous, an error is generated.
-func (p *parser) typeOfArray(types []tomlType) tomlType {
- // Empty arrays are cool.
- if len(types) == 0 {
- return tomlArray
- }
-
- theType := types[0]
- for _, t := range types[1:] {
- if !typeEqual(theType, t) {
- p.panicf("Array contains values of type '%s' and '%s', but arrays "+
- "must be homogeneous.", theType, t)
- }
- }
- return tomlArray
-}
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
deleted file mode 100644
index 7592f87..0000000
--- a/vendor/github.com/BurntSushi/toml/type_fields.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package toml
-
-// Struct field handling is adapted from code in encoding/json:
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the Go distribution.
-
-import (
- "reflect"
- "sort"
- "sync"
-)
-
-// A field represents a single field found in a struct.
-type field struct {
- name string // the name of the field (`toml` tag included)
- tag bool // whether field has a `toml` tag
- index []int // represents the depth of an anonymous field
- typ reflect.Type // the type of the field
-}
-
-// byName sorts field by name, breaking ties with depth,
-// then breaking ties with "name came from toml tag", then
-// breaking ties with index sequence.
-type byName []field
-
-func (x byName) Len() int { return len(x) }
-
-func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byName) Less(i, j int) bool {
- if x[i].name != x[j].name {
- return x[i].name < x[j].name
- }
- if len(x[i].index) != len(x[j].index) {
- return len(x[i].index) < len(x[j].index)
- }
- if x[i].tag != x[j].tag {
- return x[i].tag
- }
- return byIndex(x).Less(i, j)
-}
-
-// byIndex sorts field by index sequence.
-type byIndex []field
-
-func (x byIndex) Len() int { return len(x) }
-
-func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byIndex) Less(i, j int) bool {
- for k, xik := range x[i].index {
- if k >= len(x[j].index) {
- return false
- }
- if xik != x[j].index[k] {
- return xik < x[j].index[k]
- }
- }
- return len(x[i].index) < len(x[j].index)
-}
-
-// typeFields returns a list of fields that TOML should recognize for the given
-// type. The algorithm is breadth-first search over the set of structs to
-// include - the top struct and then any reachable anonymous structs.
-func typeFields(t reflect.Type) []field {
- // Anonymous fields to explore at the current level and the next.
- current := []field{}
- next := []field{{typ: t}}
-
- // Count of queued names for current level and the next.
- count := map[reflect.Type]int{}
- nextCount := map[reflect.Type]int{}
-
- // Types already visited at an earlier level.
- visited := map[reflect.Type]bool{}
-
- // Fields found.
- var fields []field
-
- for len(next) > 0 {
- current, next = next, current[:0]
- count, nextCount = nextCount, map[reflect.Type]int{}
-
- for _, f := range current {
- if visited[f.typ] {
- continue
- }
- visited[f.typ] = true
-
- // Scan f.typ for fields to include.
- for i := 0; i < f.typ.NumField(); i++ {
- sf := f.typ.Field(i)
- if sf.PkgPath != "" { // unexported
- continue
- }
- name := sf.Tag.Get("toml")
- if name == "-" {
- continue
- }
- index := make([]int, len(f.index)+1)
- copy(index, f.index)
- index[len(f.index)] = i
-
- ft := sf.Type
- if ft.Name() == "" && ft.Kind() == reflect.Ptr {
- // Follow pointer.
- ft = ft.Elem()
- }
-
- // Record found field and index sequence.
- if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
- tagged := name != ""
- if name == "" {
- name = sf.Name
- }
- fields = append(fields, field{name, tagged, index, ft})
- if count[f.typ] > 1 {
- // If there were multiple instances, add a second,
- // so that the annihilation code will see a duplicate.
- // It only cares about the distinction between 1 or 2,
- // so don't bother generating any more copies.
- fields = append(fields, fields[len(fields)-1])
- }
- continue
- }
-
- // Record new anonymous struct to explore in next round.
- nextCount[ft]++
- if nextCount[ft] == 1 {
- f := field{name: ft.Name(), index: index, typ: ft}
- next = append(next, f)
- }
- }
- }
- }
-
- sort.Sort(byName(fields))
-
- // Delete all fields that are hidden by the Go rules for embedded fields,
- // except that fields with TOML tags are promoted.
-
- // The fields are sorted in primary order of name, secondary order
- // of field index length. Loop over names; for each name, delete
- // hidden fields by choosing the one dominant field that survives.
- out := fields[:0]
- for advance, i := 0, 0; i < len(fields); i += advance {
- // One iteration per name.
- // Find the sequence of fields with the name of this first field.
- fi := fields[i]
- name := fi.name
- for advance = 1; i+advance < len(fields); advance++ {
- fj := fields[i+advance]
- if fj.name != name {
- break
- }
- }
- if advance == 1 { // Only one field with this name
- out = append(out, fi)
- continue
- }
- dominant, ok := dominantField(fields[i : i+advance])
- if ok {
- out = append(out, dominant)
- }
- }
-
- fields = out
- sort.Sort(byIndex(fields))
-
- return fields
-}
-
-// dominantField looks through the fields, all of which are known to
-// have the same name, to find the single field that dominates the
-// others using Go's embedding rules, modified by the presence of
-// TOML tags. If there are multiple top-level fields, the boolean
-// will be false: This condition is an error in Go and we skip all
-// the fields.
-func dominantField(fields []field) (field, bool) {
- // The fields are sorted in increasing index-length order. The winner
- // must therefore be one with the shortest index length. Drop all
- // longer entries, which is easy: just truncate the slice.
- length := len(fields[0].index)
- tagged := -1 // Index of first tagged field.
- for i, f := range fields {
- if len(f.index) > length {
- fields = fields[:i]
- break
- }
- if f.tag {
- if tagged >= 0 {
- // Multiple tagged fields at the same level: conflict.
- // Return no field.
- return field{}, false
- }
- tagged = i
- }
- }
- if tagged >= 0 {
- return fields[tagged], true
- }
- // All remaining fields have the same length. If there's more than one,
- // we have a conflict (two fields named "X" at the same level) and we
- // return no field.
- if len(fields) > 1 {
- return field{}, false
- }
- return fields[0], true
-}
-
-var fieldCache struct {
- sync.RWMutex
- m map[reflect.Type][]field
-}
-
-// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
-func cachedTypeFields(t reflect.Type) []field {
- fieldCache.RLock()
- f := fieldCache.m[t]
- fieldCache.RUnlock()
- if f != nil {
- return f
- }
-
- // Compute fields without lock.
- // Might duplicate effort but won't hold other computations back.
- f = typeFields(t)
- if f == nil {
- f = []field{}
- }
-
- fieldCache.Lock()
- if fieldCache.m == nil {
- fieldCache.m = map[reflect.Type][]field{}
- }
- fieldCache.m[t] = f
- fieldCache.Unlock()
- return f
-}
diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore
new file mode 100644
index 0000000..1241112
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/.gitignore
@@ -0,0 +1,6 @@
+testdata/conf_out.ini
+ini.sublime-project
+ini.sublime-workspace
+testdata/conf_reflect.ini
+.idea
+/.vscode
diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE
new file mode 100644
index 0000000..d361bbc
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright 2014 Unknwon
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile
new file mode 100644
index 0000000..f3b0dae
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/Makefile
@@ -0,0 +1,15 @@
+.PHONY: build test bench vet coverage
+
+build: vet bench
+
+test:
+ go test -v -cover -race
+
+bench:
+ go test -v -cover -test.bench=. -test.benchmem
+
+vet:
+ go vet
+
+coverage:
+ go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
new file mode 100644
index 0000000..5d65658
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README.md
@@ -0,0 +1,43 @@
+# INI
+
+[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/go-ini/ini/Go?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=workflow%3AGo)
+[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini)
+[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc)
+[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini)
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+## Features
+
+- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites.
+- Read with recursion values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+The minimum requirement of Go is **1.6**.
+
+```sh
+$ go get gopkg.in/ini.v1
+```
+
+Please add `-u` flag to update in the future.
+
+## Getting Help
+
+- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- 中国大陆镜像:https://ini.unknwon.cn
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/github.com/go-ini/ini/codecov.yml b/vendor/github.com/go-ini/ini/codecov.yml
new file mode 100644
index 0000000..fc947f2
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/codecov.yml
@@ -0,0 +1,9 @@
+coverage:
+ range: "60...95"
+ status:
+ project:
+ default:
+ threshold: 1%
+
+comment:
+ layout: 'diff, files'
diff --git a/vendor/github.com/go-ini/ini/data_source.go b/vendor/github.com/go-ini/ini/data_source.go
new file mode 100644
index 0000000..c3a541f
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/data_source.go
@@ -0,0 +1,76 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+)
+
+var (
+ _ dataSource = (*sourceFile)(nil)
+ _ dataSource = (*sourceData)(nil)
+ _ dataSource = (*sourceReadCloser)(nil)
+)
+
+// dataSource is an interface that returns object which can be read and closed.
+type dataSource interface {
+ ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+ name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+ return os.Open(s.name)
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+ data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with Close method.
+type sourceReadCloser struct {
+ reader io.ReadCloser
+}
+
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+ return s.reader, nil
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+ switch s := source.(type) {
+ case string:
+ return sourceFile{s}, nil
+ case []byte:
+ return &sourceData{s}, nil
+ case io.ReadCloser:
+ return &sourceReadCloser{s}, nil
+ case io.Reader:
+ return &sourceReadCloser{ioutil.NopCloser(s)}, nil
+ default:
+ return nil, fmt.Errorf("error parsing data source: unknown type %q", s)
+ }
+}
diff --git a/vendor/github.com/go-ini/ini/deprecated.go b/vendor/github.com/go-ini/ini/deprecated.go
new file mode 100644
index 0000000..e8bda06
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/deprecated.go
@@ -0,0 +1,25 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+const (
+ // Deprecated: Use "DefaultSection" instead.
+ DEFAULT_SECTION = DefaultSection
+)
+
+var (
+ // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+ AllCapsUnderscore = SnackCase
+)
diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go
new file mode 100644
index 0000000..d88347c
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/error.go
@@ -0,0 +1,34 @@
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "fmt"
+)
+
+// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one.
+type ErrDelimiterNotFound struct {
+ Line string
+}
+
+// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound.
+func IsErrDelimiterNotFound(err error) bool {
+ _, ok := err.(ErrDelimiterNotFound)
+ return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+ return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go
new file mode 100644
index 0000000..b96d172
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/file.go
@@ -0,0 +1,517 @@
+// Copyright 2017 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "sync"
+)
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+ options LoadOptions
+ dataSources []dataSource
+
+ // Should make things safe, but sometimes doesn't matter.
+ BlockMode bool
+ lock sync.RWMutex
+
+ // To keep data in order.
+ sectionList []string
+ // To keep track of the index of a section with same name.
+ // This meta list is only used with non-unique section names are allowed.
+ sectionIndexes []int
+
+ // Actual data is stored here.
+ sections map[string][]*Section
+
+ NameMapper
+ ValueMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
+ if len(opts.KeyValueDelimiters) == 0 {
+ opts.KeyValueDelimiters = "=:"
+ }
+ if len(opts.KeyValueDelimiterOnWrite) == 0 {
+ opts.KeyValueDelimiterOnWrite = "="
+ }
+ if len(opts.ChildSectionDelimiter) == 0 {
+ opts.ChildSectionDelimiter = "."
+ }
+
+ return &File{
+ BlockMode: true,
+ dataSources: dataSources,
+ sections: make(map[string][]*Section),
+ options: opts,
+ }
+}
+
+// Empty returns an empty file object.
+func Empty(opts ...LoadOptions) *File {
+ var opt LoadOptions
+ if len(opts) > 0 {
+ opt = opts[0]
+ }
+
+ // Ignore error here, we are sure our data is good.
+ f, _ := LoadSources(opt, []byte(""))
+ return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ return nil, errors.New("empty section name")
+ }
+
+ if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) {
+ return f.sections[name][0], nil
+ }
+
+ f.sectionList = append(f.sectionList, name)
+
+ // NOTE: Append to indexes must happen before appending to sections,
+ // otherwise index will have off-by-one problem.
+ f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name]))
+
+ sec := newSection(f, name)
+ f.sections[name] = append(f.sections[name], sec)
+
+ return sec, nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+ section, err := f.NewSection(name)
+ if err != nil {
+ return nil, err
+ }
+
+ section.isRawSection = true
+ section.rawBody = body
+ return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+ for _, name := range names {
+ if _, err = f.NewSection(name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSection returns section by given name.
+func (f *File) GetSection(name string) (*Section, error) {
+ secs, err := f.SectionsByName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return secs[0], err
+}
+
+// SectionsByName returns all sections with given name.
+func (f *File) SectionsByName(name string) ([]*Section, error) {
+ if len(name) == 0 {
+ name = DefaultSection
+ }
+ if f.options.Insensitive || f.options.InsensitiveSections {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ secs := f.sections[name]
+ if len(secs) == 0 {
+ return nil, fmt.Errorf("section %q does not exist", name)
+ }
+
+ return secs, nil
+}
+
+// Section assumes named section exists and returns a zero-value when not.
+func (f *File) Section(name string) *Section {
+ sec, err := f.GetSection(name)
+ if err != nil {
+ // Note: It's OK here because the only possible error is empty section name,
+ // but if it's empty, this piece of code won't be executed.
+ sec, _ = f.NewSection(name)
+ return sec
+ }
+ return sec
+}
+
+// SectionWithIndex assumes named section exists and returns a new section when not.
+func (f *File) SectionWithIndex(name string, index int) *Section {
+ secs, err := f.SectionsByName(name)
+ if err != nil || len(secs) <= index {
+ // NOTE: It's OK here because the only possible error is empty section name,
+ // but if it's empty, this piece of code won't be executed.
+ newSec, _ := f.NewSection(name)
+ return newSec
+ }
+
+ return secs[index]
+}
+
+// Sections returns a list of Section stored in the current instance.
+func (f *File) Sections() []*Section {
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sections := make([]*Section, len(f.sectionList))
+ for i, name := range f.sectionList {
+ sections[i] = f.sections[name][f.sectionIndexes[i]]
+ }
+ return sections
+}
+
+// ChildSections returns a list of child sections of given section name.
+func (f *File) ChildSections(name string) []*Section {
+ return f.Section(name).ChildSections()
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+ list := make([]string, len(f.sectionList))
+ copy(list, f.sectionList)
+ return list
+}
+
+// DeleteSection deletes a section or all sections with given name.
+func (f *File) DeleteSection(name string) {
+ secs, err := f.SectionsByName(name)
+ if err != nil {
+ return
+ }
+
+ for i := 0; i < len(secs); i++ {
+ // For non-unique sections, it is always needed to remove the first one so
+ // in the next iteration, the subsequent section continue having index 0.
+ // Ignoring the error as index 0 never returns an error.
+ _ = f.DeleteSectionWithIndex(name, 0)
+ }
+}
+
+// DeleteSectionWithIndex deletes a section with given name and index.
+// The index is the zero-based occurrence among sections sharing the same
+// name and may only be non-zero when AllowNonUniqueSections is enabled.
+func (f *File) DeleteSectionWithIndex(name string, index int) error {
+	if !f.options.AllowNonUniqueSections && index != 0 {
+		return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled")
+	}
+
+	if len(name) == 0 {
+		name = DefaultSection
+	}
+	if f.options.Insensitive || f.options.InsensitiveSections {
+		name = strings.ToLower(name)
+	}
+
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	// Count occurrences of the sections
+	occurrences := 0
+
+	// Iterate over a copy because f.sectionList is mutated inside the loop.
+	sectionListCopy := make([]string, len(f.sectionList))
+	copy(sectionListCopy, f.sectionList)
+
+	for i, s := range sectionListCopy {
+		if s != name {
+			continue
+		}
+
+		if occurrences == index {
+			if len(f.sections[name]) <= 1 {
+				delete(f.sections, name) // The last one in the map
+			} else {
+				f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...)
+			}
+
+			// Fix section lists
+			f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+			f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...)
+
+		} else if occurrences > index {
+			// Fix the indices of all following sections with this name.
+			// i-1 compensates for the element removed from the slices above.
+			f.sectionIndexes[i-1]--
+		}
+
+		occurrences++
+	}
+
+	return nil
+}
+
+// reload reads and parses a single data source into f.
+func (f *File) reload(s dataSource) error {
+	r, err := s.ReadCloser()
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+	for _, s := range f.dataSources {
+		if err = f.reload(s); err != nil {
+			// In loose mode, we create an empty default section for nonexistent files.
+			if os.IsNotExist(err) && f.options.Loose {
+				_ = f.parse(bytes.NewBuffer(nil))
+				continue
+			}
+			return err
+		}
+		if f.options.ShortCircuit {
+			// Stop after the first successfully loaded source.
+			return nil
+		}
+	}
+	return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+	ds, err := parseDataSource(source)
+	if err != nil {
+		return err
+	}
+	f.dataSources = append(f.dataSources, ds)
+	for _, s := range others {
+		ds, err = parseDataSource(s)
+		if err != nil {
+			// NOTE(review): sources appended so far are kept even on error.
+			return err
+		}
+		f.dataSources = append(f.dataSources, ds)
+	}
+	return f.Reload()
+}
+
+// writeToBuffer serializes the whole file into a buffer using the given
+// value indention, honoring the package-level formatting switches
+// (PrettyFormat, PrettyEqual, PrettySection, DefaultHeader).
+func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
+	equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight
+
+	if PrettyFormat || PrettyEqual {
+		equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite)
+	}
+
+	// Use buffer to make sure target is safe until finish encoding.
+	buf := bytes.NewBuffer(nil)
+	for i, sname := range f.sectionList {
+		sec := f.SectionWithIndex(sname, f.sectionIndexes[i])
+		if len(sec.Comment) > 0 {
+			// Support multiline comments
+			lines := strings.Split(sec.Comment, LineBreak)
+			for i := range lines {
+				// NOTE(review): lines[i][0] is indexed without a length check;
+				// an empty line inside a comment would panic — confirm upstream.
+				if lines[i][0] != '#' && lines[i][0] != ';' {
+					lines[i] = "; " + lines[i]
+				} else {
+					lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
+				}
+
+				if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
+					return nil, err
+				}
+			}
+		}
+
+		if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) {
+			if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+				return nil, err
+			}
+		} else {
+			// Write nothing if default section is empty
+			if len(sec.keyList) == 0 {
+				continue
+			}
+		}
+
+		if sec.isRawSection {
+			if _, err := buf.WriteString(sec.rawBody); err != nil {
+				return nil, err
+			}
+
+			if PrettySection {
+				// Put a line between sections
+				if _, err := buf.WriteString(LineBreak); err != nil {
+					return nil, err
+				}
+			}
+			continue
+		}
+
+		// Count and generate alignment length and buffer spaces using the
+		// longest key. Keys may be modified if they contain certain characters so
+		// we need to take that into account in our calculation.
+		alignLength := 0
+		if PrettyFormat {
+			for _, kname := range sec.keyList {
+				keyLength := len(kname)
+				// First case will surround key by ` and second by """
+				if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) {
+					keyLength += 2
+				} else if strings.Contains(kname, "`") {
+					keyLength += 6
+				}
+
+				if keyLength > alignLength {
+					alignLength = keyLength
+				}
+			}
+		}
+		alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
+	KeyList:
+		for _, kname := range sec.keyList {
+			key := sec.Key(kname)
+			if len(key.Comment) > 0 {
+				if len(indent) > 0 && sname != DefaultSection {
+					buf.WriteString(indent)
+				}
+
+				// Support multiline comments
+				lines := strings.Split(key.Comment, LineBreak)
+				for i := range lines {
+					if lines[i][0] != '#' && lines[i][0] != ';' {
+						lines[i] = "; " + strings.TrimSpace(lines[i])
+					} else {
+						lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
+					}
+
+					if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
+						return nil, err
+					}
+				}
+			}
+
+			if len(indent) > 0 && sname != DefaultSection {
+				buf.WriteString(indent)
+			}
+
+			switch {
+			case key.isAutoIncrement:
+				kname = "-"
+			case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters):
+				kname = "`" + kname + "`"
+			case strings.Contains(kname, "`"):
+				kname = `"""` + kname + `"""`
+			}
+
+			for _, val := range key.ValueWithShadows() {
+				if _, err := buf.WriteString(kname); err != nil {
+					return nil, err
+				}
+
+				if key.isBooleanType {
+					if kname != sec.keyList[len(sec.keyList)-1] {
+						buf.WriteString(LineBreak)
+					}
+					continue KeyList
+				}
+
+				// Write out alignment spaces before "=" sign
+				if PrettyFormat {
+					buf.Write(alignSpaces[:alignLength-len(kname)])
+				}
+
+				// In case key value contains "\n", "`", "\"", "#" or ";"
+				if strings.ContainsAny(val, "\n`") {
+					val = `"""` + val + `"""`
+				} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
+					val = "`" + val + "`"
+				} else if len(strings.TrimSpace(val)) != len(val) {
+					val = `"` + val + `"`
+				}
+				if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
+					return nil, err
+				}
+			}
+
+			for _, val := range key.nestedValues {
+				if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil {
+					return nil, err
+				}
+			}
+		}
+
+		if PrettySection {
+			// Put a line between sections
+			if _, err := buf.WriteString(LineBreak); err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	return buf, nil
+}
+
+// WriteToIndent writes content into io.Writer with given indention.
+// If PrettyFormat has been set to be true,
+// it will align "=" sign with spaces under each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
+	buf, err := f.writeToBuffer(indent)
+	if err != nil {
+		return 0, err
+	}
+	return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+	return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to file system with given value indention.
+func (f *File) SaveToIndent(filename, indent string) error {
+	// Note: Because we are truncating with os.Create,
+	// so it's safer to save to a temporary file location and rename after done.
+	buf, err := f.writeToBuffer(indent)
+	if err != nil {
+		return err
+	}
+
+	// NOTE(review): despite the comment above, this writes the target file
+	// directly with mode 0666 (umask-adjusted) rather than via a temp file.
+	return ioutil.WriteFile(filename, buf.Bytes(), 0666)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+	return f.SaveToIndent(filename, "")
+}
diff --git a/vendor/github.com/go-ini/ini/helper.go b/vendor/github.com/go-ini/ini/helper.go
new file mode 100644
index 0000000..f9d80a6
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/helper.go
@@ -0,0 +1,24 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+// inSlice reports whether str equals any element of s.
+func inSlice(str string, s []string) bool {
+	for _, v := range s {
+		if str == v {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
new file mode 100644
index 0000000..23f0742
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -0,0 +1,176 @@
+// +build go1.6
+
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+ "os"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+const (
+	// DefaultSection is the name of default section. You can use this constant or the string literal.
+	// In most of cases, an empty string is all you need to access the section.
+	DefaultSection = "DEFAULT"
+
+	// Maximum allowed depth when recursively substituting variable names.
+	depthValues = 99
+)
+
+var (
+	// LineBreak is the delimiter to determine or compose a new line.
+	// This variable will be changed to "\r\n" automatically on Windows at package init time.
+	LineBreak = "\n"
+
+	// Variable regexp pattern: %(variable)s
+	varPattern = regexp.MustCompile(`%\(([^)]+)\)s`)
+
+	// DefaultHeader explicitly writes default section header.
+	DefaultHeader = false
+
+	// PrettySection indicates whether to put a line between sections.
+	PrettySection = true
+	// PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output
+	// or reduce all possible spaces for compact format.
+	PrettyFormat = true
+	// PrettyEqual places spaces around "=" sign even when PrettyFormat is false.
+	PrettyEqual = false
+	// DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled.
+	DefaultFormatLeft = ""
+	// DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled.
+	DefaultFormatRight = ""
+)
+
+// inTest reports whether the running binary is a `go test` binary, in which
+// case init below leaves LineBreak as "\n" even on Windows.
+var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
+
+func init() {
+	if runtime.GOOS == "windows" && !inTest {
+		LineBreak = "\r\n"
+	}
+}
+
+// LoadOptions contains all customized options used for load data source(s).
+type LoadOptions struct {
+	// Loose indicates whether the parser should ignore nonexistent files or return error.
+	Loose bool
+	// Insensitive indicates whether the parser forces all section and key names to lowercase.
+	Insensitive bool
+	// InsensitiveSections indicates whether the parser forces all section to lowercase.
+	InsensitiveSections bool
+	// InsensitiveKeys indicates whether the parser forces all key names to lowercase.
+	InsensitiveKeys bool
+	// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+	IgnoreContinuation bool
+	// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
+	IgnoreInlineComment bool
+	// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
+	SkipUnrecognizableLines bool
+	// ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source.
+	ShortCircuit bool
+	// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
+	// This type of keys are mostly used in my.cnf.
+	AllowBooleanKeys bool
+	// AllowShadows indicates whether to keep track of keys with same name under same section.
+	AllowShadows bool
+	// AllowNestedValues indicates whether to allow AWS-like nested values.
+	// Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
+	AllowNestedValues bool
+	// AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
+	// Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
+	// Relevant quote: Values can also span multiple lines, as long as they are indented deeper
+	// than the first line of the value.
+	AllowPythonMultilineValues bool
+	// SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
+	// Docs: https://docs.python.org/2/library/configparser.html
+	// Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
+	// In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
+	SpaceBeforeInlineComment bool
+	// UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
+	// when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
+	UnescapeValueDoubleQuotes bool
+	// UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format
+	// when value is NOT surrounded by any quotes.
+	// Note: UNSTABLE, behavior might change to only unescape inside double quotes but may not be necessary at all.
+	UnescapeValueCommentSymbols bool
+	// UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
+	// conform to key/value pairs. Specify the names of those blocks here.
+	UnparseableSections []string
+	// KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
+	KeyValueDelimiters string
+	// KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. By default, it is "=".
+	KeyValueDelimiterOnWrite string
+	// ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".".
+	ChildSectionDelimiter string
+	// PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
+	PreserveSurroundedQuote bool
+	// DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values).
+	DebugFunc DebugFunc
+	// ReaderBufferSize is the buffer size of the reader in bytes.
+	ReaderBufferSize int
+	// AllowNonUniqueSections indicates whether to allow sections with the same name multiple times.
+	AllowNonUniqueSections bool
+}
+
+// DebugFunc is the type of function called to log parse events.
+type DebugFunc func(message string)
+
+// LoadSources allows caller to apply customized options for loading from data source(s).
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+	sources := make([]dataSource, len(others)+1)
+	sources[0], err = parseDataSource(source)
+	if err != nil {
+		return nil, err
+	}
+	for i := range others {
+		sources[i+1], err = parseDataSource(others[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	f := newFile(sources, opts)
+	if err = f.Reload(); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be mixed of file name with string type, or raw data in []byte.
+// It will return error if list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly same functionality as Load function
+// except it ignores nonexistent files instead of returning error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly same functionality as Load function
+// except it forces all section and key names to be lowercased.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly same functionality as Load function
+// except it allows having shadow keys.
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
new file mode 100644
index 0000000..8baafd9
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/key.go
@@ -0,0 +1,829 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+	s               *Section
+	Comment         string
+	name            string
+	value           string
+	isAutoIncrement bool
+	isBooleanType   bool
+
+	// isShadow marks a key stored inside another key's shadows list.
+	isShadow bool
+	shadows  []*Key
+
+	nestedValues []string
+}
+
+// newKey simply return a key object with given values.
+func newKey(s *Section, name, val string) *Key {
+	return &Key{
+		s:     s,
+		name:  name,
+		value: val,
+	}
+}
+
+// addShadow appends a shadow key with the given value, silently skipping
+// values that duplicate the key's own value or an existing shadow.
+func (k *Key) addShadow(val string) error {
+	if k.isShadow {
+		return errors.New("cannot add shadow to another shadow key")
+	} else if k.isAutoIncrement || k.isBooleanType {
+		return errors.New("cannot add shadow to auto-increment or boolean key")
+	}
+
+	// Deduplicate shadows based on their values.
+	if k.value == val {
+		return nil
+	}
+	for i := range k.shadows {
+		if k.shadows[i].value == val {
+			return nil
+		}
+	}
+
+	shadow := newKey(k.s, k.name, val)
+	shadow.isShadow = true
+	k.shadows = append(k.shadows, shadow)
+	return nil
+}
+
+// AddShadow adds a new shadow key to itself.
+func (k *Key) AddShadow(val string) error {
+	if !k.s.f.options.AllowShadows {
+		return errors.New("shadow key is not allowed")
+	}
+	return k.addShadow(val)
+}
+
+// addNestedValue appends a nested value line to the key.
+func (k *Key) addNestedValue(val string) error {
+	if k.isAutoIncrement || k.isBooleanType {
+		return errors.New("cannot add nested value to auto-increment or boolean key")
+	}
+
+	k.nestedValues = append(k.nestedValues, val)
+	return nil
+}
+
+// AddNestedValue adds a nested value to the key.
+func (k *Key) AddNestedValue(val string) error {
+	if !k.s.f.options.AllowNestedValues {
+		return errors.New("nested value is not allowed")
+	}
+	return k.addNestedValue(val)
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+	return k.name
+}
+
+// Value returns raw value of key for performance purpose.
+func (k *Key) Value() string {
+	return k.value
+}
+
+// ValueWithShadows returns raw values of key and its shadows if any.
+// Values are returned untransformed (no %(var)s expansion).
+func (k *Key) ValueWithShadows() []string {
+	if len(k.shadows) == 0 {
+		return []string{k.value}
+	}
+	vals := make([]string, len(k.shadows)+1)
+	vals[0] = k.value
+	for i := range k.shadows {
+		vals[i+1] = k.shadows[i].value
+	}
+	return vals
+}
+
+// NestedValues returns nested values stored in the key.
+// It is possible returned value is nil if no nested values stored in the key.
+func (k *Key) NestedValues() []string {
+	return k.nestedValues
+}
+
+// transformValue takes a raw value and transforms to its final string.
+func (k *Key) transformValue(val string) string {
+	if k.s.f.ValueMapper != nil {
+		val = k.s.f.ValueMapper(val)
+	}
+
+	// Fail-fast if no indicate char found for recursive value
+	if !strings.Contains(val, "%") {
+		return val
+	}
+	// Expand %(name)s references at most depthValues times to bound
+	// recursion on circular references.
+	for i := 0; i < depthValues; i++ {
+		vr := varPattern.FindString(val)
+		if len(vr) == 0 {
+			break
+		}
+
+		// Take off leading '%(' and trailing ')s'.
+		noption := vr[2 : len(vr)-2]
+
+		// Search in the same section.
+		// If not found or found the key itself, then search again in default section.
+		nk, err := k.s.GetKey(noption)
+		if err != nil || k == nk {
+			nk, _ = k.s.f.Section("").GetKey(noption)
+			if nk == nil {
+				// Stop when no results found in the default section,
+				// and returns the value as-is.
+				break
+			}
+		}
+
+		// Substitute by new value and take off leading '%(' and trailing ')s'.
+		val = strings.Replace(val, vr, nk.value, -1)
+	}
+	return val
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+	return k.transformValue(k.value)
+}
+
+// Validate accepts a validate function which can
+// return modified result as key value.
+func (k *Key) Validate(fn func(string) string) string {
+	return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+	switch str {
+	case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+		return true, nil
+	case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+		return false, nil
+	}
+	return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+	return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+	return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+// Base 0 is used, so "0x" and leading-zero octal prefixes are honored.
+func (k *Key) Int() (int, error) {
+	v, err := strconv.ParseInt(k.String(), 0, 64)
+	return int(v), err
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+	return strconv.ParseInt(k.String(), 0, 64)
+}
+
+// Uint returns uint type valued.
+func (k *Key) Uint() (uint, error) {
+	u, e := strconv.ParseUint(k.String(), 0, 64)
+	return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+	return strconv.ParseUint(k.String(), 0, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+	return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+	return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+	return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+// Note: like all Must* helpers below, the default is also written back to
+// k.value, so subsequent reads return the default too.
+func (k *Key) MustString(defaultVal string) string {
+	val := k.String()
+	if len(val) == 0 {
+		k.value = defaultVal
+		return defaultVal
+	}
+	return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+	val, err := k.Bool()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatBool(defaultVal[0])
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustFloat64 always returns value without error,
+// it returns 0.0 if error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+	val, err := k.Float64()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustInt always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+	val, err := k.Int()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustInt64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+	val, err := k.Int64()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatInt(defaultVal[0], 10)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustUint always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+	val, err := k.Uint()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustUint64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
+	val, err := k.Uint64()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatUint(defaultVal[0], 10)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustDuration always returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
+	val, err := k.Duration()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = defaultVal[0].String()
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustTimeFormat always parses with given format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
+	val, err := k.TimeFormat(format)
+	if len(defaultVal) > 0 && err != nil {
+		k.value = defaultVal[0].Format(format)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustTime always parses with RFC3339 format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
+	return k.MustTimeFormat(time.RFC3339, defaultVal...)
+}
+
+// In always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+// Candidate comparison is by exact (transformed) string equality.
+func (k *Key) In(defaultVal string, candidates []string) string {
+	val := k.String()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InFloat64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
+	val := k.MustFloat64()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InInt always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt(defaultVal int, candidates []int) int {
+	val := k.MustInt()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InInt64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
+	val := k.MustInt64()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InUint always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
+	val := k.MustUint()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InUint64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
+	val := k.MustUint64()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InTimeFormat always parses with given format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
+	val := k.MustTimeFormat(format)
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InTime always parses with RFC3339 format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
+	return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
+}
+
+// RangeFloat64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
+	val := k.MustFloat64()
+	if val < min || val > max {
+		return defaultVal
+	}
+	return val
+}
+
+// RangeInt checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt(defaultVal, min, max int) int {
+	val := k.MustInt()
+	if val < min || val > max {
+		return defaultVal
+	}
+	return val
+}
+
+// RangeInt64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
+	val := k.MustInt64()
+	if val < min || val > max {
+		return defaultVal
+	}
+	return val
+}
+
+// RangeTimeFormat checks if value with given format is in given range inclusively,
+// and returns default value if it's not.
+// Comparison is done on Unix seconds, so sub-second precision is ignored.
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
+	val := k.MustTimeFormat(format)
+	if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
+		return defaultVal
+	}
+	return val
+}
+
+// RangeTime checks if value with RFC3339 format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
+	return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
+}
+
+// Strings returns list of string divided by given delimiter.
+// A backslash escapes the delimiter (and itself); elements are trimmed of
+// surrounding whitespace.
+func (k *Key) Strings(delim string) []string {
+	str := k.String()
+	if len(str) == 0 {
+		return []string{}
+	}
+
+	runes := []rune(str)
+	vals := make([]string, 0, 2)
+	var buf bytes.Buffer
+	escape := false
+	idx := 0
+	for {
+		if escape {
+			escape = false
+			if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
+				buf.WriteRune('\\')
+			}
+			buf.WriteRune(runes[idx])
+		} else {
+			if runes[idx] == '\\' {
+				escape = true
+			} else if strings.HasPrefix(string(runes[idx:]), delim) {
+				idx += len(delim) - 1
+				vals = append(vals, strings.TrimSpace(buf.String()))
+				buf.Reset()
+			} else {
+				buf.WriteRune(runes[idx])
+			}
+		}
+		idx++
+		if idx == len(runes) {
+			break
+		}
+	}
+
+	if buf.Len() > 0 {
+		vals = append(vals, strings.TrimSpace(buf.String()))
+	}
+
+	return vals
+}
+
+// StringsWithShadows returns list of string divided by given delimiter.
+// Shadows will also be appended if any.
+func (k *Key) StringsWithShadows(delim string) []string {
+	vals := k.ValueWithShadows()
+	results := make([]string, 0, len(vals)*2)
+	for i := range vals {
+		// NOTE(review): this checks len(vals), not len(vals[i]); the branch
+		// can never trigger inside the loop — confirm intent upstream.
+		if len(vals) == 0 {
+			continue
+		}
+
+		results = append(results, strings.Split(vals[i], delim)...)
+	}
+
+	for i := range results {
+		results[i] = k.transformValue(strings.TrimSpace(results[i]))
+	}
+	return results
+}
+
+// The typed accessors below split the raw value on delim via Strings and
+// delegate to the corresponding parse* helper with addInvalid=true, so each
+// unparseable entry is kept as the type's zero value.
+
+// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Float64s(delim string) []float64 {
+ vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Ints(delim string) []int {
+ vals, _ := k.parseInts(k.Strings(delim), true, false)
+ return vals
+}
+
+// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Int64s(delim string) []int64 {
+ vals, _ := k.parseInt64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uints(delim string) []uint {
+ vals, _ := k.parseUints(k.Strings(delim), true, false)
+ return vals
+}
+
+// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uint64s(delim string) []uint64 {
+ vals, _ := k.parseUint64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Bools(delim string) []bool {
+ vals, _ := k.parseBools(k.Strings(delim), true, false)
+ return vals
+}
+
+// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) TimesFormat(format, delim string) []time.Time {
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
+ return vals
+}
+
+// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) Times(delim string) []time.Time {
+ return k.TimesFormat(time.RFC3339, delim)
+}
+
+// The Valid* accessors call the parse* helpers with addInvalid=false, so
+// unparseable entries are silently dropped from the result.
+
+// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
+// it will not be included to result list.
+func (k *Key) ValidFloat64s(delim string) []float64 {
+ vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
+// not be included to result list.
+func (k *Key) ValidInts(delim string) []int {
+ vals, _ := k.parseInts(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
+// then it will not be included to result list.
+func (k *Key) ValidInt64s(delim string) []int64 {
+ vals, _ := k.parseInt64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
+// then it will not be included to result list.
+func (k *Key) ValidUints(delim string) []uint {
+ vals, _ := k.parseUints(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
+// integer, then it will not be included to result list.
+func (k *Key) ValidUint64s(delim string) []uint64 {
+ vals, _ := k.parseUint64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidBools returns list of bool divided by given delimiter. If some value cannot be parsed as a
+// bool, then it will not be included to result list.
+func (k *Key) ValidBools(delim string) []bool {
+ vals, _ := k.parseBools(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimes(delim string) []time.Time {
+ return k.ValidTimesFormat(time.RFC3339, delim)
+}
+
+// The Strict* accessors call the parse* helpers with returnOnInvalid=true and
+// therefore fail fast, returning the first parse error encountered.
+
+// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
+ return k.parseFloat64s(k.Strings(delim), false, true)
+}
+
+// StrictInts returns list of int divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInts(delim string) ([]int, error) {
+ return k.parseInts(k.Strings(delim), false, true)
+}
+
+// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInt64s(delim string) ([]int64, error) {
+ return k.parseInt64s(k.Strings(delim), false, true)
+}
+
+// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUints(delim string) ([]uint, error) {
+ return k.parseUints(k.Strings(delim), false, true)
+}
+
+// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
+ return k.parseUint64s(k.Strings(delim), false, true)
+}
+
+// StrictBools returns list of bool divided by given delimiter or error on first invalid input.
+func (k *Key) StrictBools(delim string) ([]bool, error) {
+ return k.parseBools(k.Strings(delim), false, true)
+}
+
+// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
+ return k.parseTimesFormat(format, k.Strings(delim), false, true)
+}
+
+// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
+ return k.StrictTimesFormat(time.RFC3339, delim)
+}
+
+// Each parse* helper below wraps a strconv/time parser in a Parser closure and
+// funnels it through doParse; see doParse for addInvalid/returnOnInvalid
+// semantics.
+
+// parseBools transforms strings to bools.
+func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) {
+ vals := make([]bool, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := parseBool(str)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(bool))
+ }
+ }
+ return vals, err
+}
+
+// parseFloat64s transforms strings to float64s.
+func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
+ vals := make([]float64, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseFloat(str, 64)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(float64))
+ }
+ }
+ return vals, err
+}
+
+// parseInts transforms strings to ints.
+func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
+ vals := make([]int, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ // Base 0: also accepts "0x" hex and leading-zero octal forms.
+ val, err := strconv.ParseInt(str, 0, 64)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ // NOTE(review): int(...) truncates values outside the int32 range on
+ // 32-bit platforms — TODO confirm this is acceptable.
+ vals = append(vals, int(val.(int64)))
+ }
+ }
+ return vals, err
+}
+
+// parseInt64s transforms strings to int64s.
+func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
+ vals := make([]int64, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseInt(str, 0, 64)
+ return val, err
+ }
+
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(int64))
+ }
+ }
+ return vals, err
+}
+
+// parseUints transforms strings to uints.
+func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
+ vals := make([]uint, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseUint(str, 0, 64)
+ return val, err
+ }
+
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, uint(val.(uint64)))
+ }
+ }
+ return vals, err
+}
+
+// parseUint64s transforms strings to uint64s.
+func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
+ vals := make([]uint64, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseUint(str, 0, 64)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(uint64))
+ }
+ }
+ return vals, err
+}
+
+
+// Parser converts a single raw string into a typed value; on failure it
+// returns the parse error (the value is then whatever the underlying
+// strconv/time call produced — typically the type's zero value).
+type Parser func(str string) (interface{}, error)
+
+
+// parseTimesFormat transforms strings to times in given format.
+func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
+ vals := make([]time.Time, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := time.Parse(format, str)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(time.Time))
+ }
+ }
+ return vals, err
+}
+
+
+// doParse transforms strings to different types using parser. Behavior flags:
+//   - returnOnInvalid: abort and return the first parse error encountered.
+//   - addInvalid: keep the value produced by a failed parse (the zero value)
+//     instead of dropping the entry. With both flags false, invalid entries
+//     are simply skipped and a nil error is returned.
+func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) {
+ vals := make([]interface{}, 0, len(strs))
+ for _, str := range strs {
+ val, err := parser(str)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// SetValue changes key value.
+// It takes the file's write lock when BlockMode is on and keeps the owning
+// section's name->value hash in sync with the new value.
+func (k *Key) SetValue(v string) {
+ if k.s.f.BlockMode {
+ k.s.f.lock.Lock()
+ defer k.s.f.lock.Unlock()
+ }
+
+ k.value = v
+ k.s.keysHash[k.name] = v
+}
diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go
new file mode 100644
index 0000000..6514716
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/parser.go
@@ -0,0 +1,535 @@
+// Copyright 2015 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+// minReaderBufferSize is the floor newParser enforces for the bufio reader.
+const minReaderBufferSize = 4096
+
+// pythonMultiline captures (leading indent, rest of line) for python-style
+// continuation lines; see readPythonMultilines.
+var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`)
+
+// parserOptions mirrors the subset of the file's load options consumed by the
+// parser; File.parse copies the corresponding fields in.
+type parserOptions struct {
+ IgnoreContinuation bool
+ IgnoreInlineComment bool
+ AllowPythonMultilineValues bool
+ SpaceBeforeInlineComment bool
+ UnescapeValueDoubleQuotes bool
+ UnescapeValueCommentSymbols bool
+ PreserveSurroundedQuote bool
+ DebugFunc DebugFunc
+ ReaderBufferSize int
+}
+
+// parser holds the streaming state for a single parse run.
+type parser struct {
+ buf *bufio.Reader
+ options parserOptions
+
+ // isEOF is set by readUntil once the underlying reader is exhausted.
+ isEOF bool
+ // count numbers auto-increment ("-") keys; reset to 1 on each new section.
+ count int
+ // comment accumulates comment lines until attached to the next key/section.
+ comment *bytes.Buffer
+}
+
+// debug forwards a formatted message to the user-supplied DebugFunc, if any.
+func (p *parser) debug(format string, args ...interface{}) {
+ if p.options.DebugFunc != nil {
+ p.options.DebugFunc(fmt.Sprintf(format, args...))
+ }
+}
+
+// newParser wraps r in a buffered reader of at least minReaderBufferSize
+// bytes and starts the auto-increment key counter at 1.
+func newParser(r io.Reader, opts parserOptions) *parser {
+ size := opts.ReaderBufferSize
+ if size < minReaderBufferSize {
+ size = minReaderBufferSize
+ }
+
+ return &parser{
+ buf: bufio.NewReaderSize(r, size),
+ options: opts,
+ count: 1,
+ comment: &bytes.Buffer{},
+ }
+}
+
+// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format.
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+// The recognized BOM bytes are consumed from the reader; anything else is
+// left untouched.
+func (p *parser) BOM() error {
+ mask, err := p.buf.Peek(2)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 2 {
+ return nil
+ }
+
+ switch {
+ // UTF-16 BE BOM (0xFE 0xFF).
+ case mask[0] == 254 && mask[1] == 255:
+ fallthrough
+ // UTF-16 LE BOM (0xFF 0xFE): consume the two BOM bytes.
+ case mask[0] == 255 && mask[1] == 254:
+ _, err = p.buf.Read(mask)
+ if err != nil {
+ return err
+ }
+ // Possible UTF-8 BOM (0xEF 0xBB ...): confirm third byte 0xBF first.
+ case mask[0] == 239 && mask[1] == 187:
+ mask, err := p.buf.Peek(3)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 3 {
+ return nil
+ }
+ if mask[2] == 191 {
+ _, err = p.buf.Read(mask)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// readUntil reads through delim (inclusive); on io.EOF it flags p.isEOF and
+// returns the remaining partial data instead of an error.
+func (p *parser) readUntil(delim byte) ([]byte, error) {
+ data, err := p.buf.ReadBytes(delim)
+ if err != nil {
+ if err == io.EOF {
+ p.isEOF = true
+ } else {
+ return nil, err
+ }
+ }
+ return data, nil
+}
+
+// cleanComment returns the suffix of in starting at the first '#' or ';'
+// (i.e. the inline comment, marker included) and whether one was found.
+func cleanComment(in []byte) ([]byte, bool) {
+ i := bytes.IndexAny(in, "#;")
+ if i == -1 {
+ return nil, false
+ }
+ return in[i:], true
+}
+
+// readKeyName extracts the key name from a raw line, returning the trimmed
+// name and the offset just past the key-value delimiter (-1 on error).
+// Key names may be wrapped in `, " or """ quotes.
+func readKeyName(delimiters string, in []byte) (string, int, error) {
+ line := string(in)
+
+ // Check if key name surrounded by quotes.
+ var keyQuote string
+ if line[0] == '"' {
+ if len(line) > 6 && string(line[0:3]) == `"""` {
+ keyQuote = `"""`
+ } else {
+ keyQuote = `"`
+ }
+ } else if line[0] == '`' {
+ keyQuote = "`"
+ }
+
+ // Get out key name
+ var endIdx int
+ if len(keyQuote) > 0 {
+ startIdx := len(keyQuote)
+ // FIXME: fail case -> """"""name"""=value
+ pos := strings.Index(line[startIdx:], keyQuote)
+ if pos == -1 {
+ return "", -1, fmt.Errorf("missing closing key quote: %s", line)
+ }
+ pos += startIdx
+
+ // Find key-value delimiter
+ // NOTE(review): pos was already advanced by startIdx above, yet the
+ // search below starts at pos+startIdx (skipping startIdx extra bytes)
+ // and the returned offset adds startIdx back. This matches upstream,
+ // but verify behavior when text sits between the closing quote and the
+ // delimiter.
+ i := strings.IndexAny(line[pos+startIdx:], delimiters)
+ if i < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ endIdx = pos + i
+ return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
+ }
+
+ // Unquoted key: name runs up to the first delimiter character.
+ endIdx = strings.IndexAny(line, delimiters)
+ if endIdx < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
+}
+
+// readMultilines keeps consuming lines until the closing valQuote is found,
+// appending them to val. Text after the closing quote may carry an inline
+// comment, which is buffered. Hitting EOF before the closing quote is an
+// error.
+func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := string(data)
+
+ pos := strings.LastIndex(next, valQuote)
+ if pos > -1 {
+ val += next[:pos]
+
+ comment, has := cleanComment([]byte(next[pos:]))
+ if has {
+ p.comment.Write(bytes.TrimSpace(comment))
+ }
+ break
+ }
+ val += next
+ if p.isEOF {
+ return "", fmt.Errorf("missing closing key quote from %q to %q", line, next)
+ }
+ }
+ return val, nil
+}
+
+// readContinuationLines joins backslash-continued lines into val, stopping at
+// the first empty line or a line that does not end in '\'. The trailing
+// backslash itself is stripped from each continued line.
+func (p *parser) readContinuationLines(val string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := strings.TrimSpace(string(data))
+
+ if len(next) == 0 {
+ break
+ }
+ val += next
+ if val[len(val)-1] != '\\' {
+ break
+ }
+ val = val[:len(val)-1]
+ }
+ return val, nil
+}
+
+// hasSurroundedQuote check if and only if the first and last characters
+// are quotes \" or \'.
+// It returns false if any other parts also contain same kind of quotes.
+// e.g. `"abc"` -> true, `"a"b"` -> false, `"` -> false.
+func hasSurroundedQuote(in string, quote byte) bool {
+ return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
+ strings.IndexByte(in[1:], quote) == len(in)-2
+}
+
+// readValue extracts a key's value from the remainder of a line, handling
+// quoted, multi-line, backslash-continued, inline-commented and python-style
+// values according to the parser options.
+func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
+
+ line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
+ if len(line) == 0 {
+ // Empty value: may still start a python-style multiline block.
+ if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' {
+ return p.readPythonMultilines(line, bufferSize)
+ }
+ return "", nil
+ }
+
+ // Detect a quoting style: `"""`, backtick, or (optionally) double quote.
+ var valQuote string
+ if len(line) > 3 && string(line[0:3]) == `"""` {
+ valQuote = `"""`
+ } else if line[0] == '`' {
+ valQuote = "`"
+ } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' {
+ valQuote = `"`
+ }
+
+ if len(valQuote) > 0 {
+ startIdx := len(valQuote)
+ pos := strings.LastIndex(line[startIdx:], valQuote)
+ // Check for multi-line value
+ if pos == -1 {
+ return p.readMultilines(line, line[startIdx:], valQuote)
+ }
+
+ if p.options.UnescapeValueDoubleQuotes && valQuote == `"` {
+ return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
+ }
+ return line[startIdx : pos+startIdx], nil
+ }
+
+ lastChar := line[len(line)-1]
+ // Won't be able to reach here if value only contains whitespace
+ line = strings.TrimSpace(line)
+ trimmedLastChar := line[len(line)-1]
+
+ // Check continuation lines when desired
+ if !p.options.IgnoreContinuation && trimmedLastChar == '\\' {
+ return p.readContinuationLines(line[:len(line)-1])
+ }
+
+ // Check if ignore inline comment
+ if !p.options.IgnoreInlineComment {
+ var i int
+ if p.options.SpaceBeforeInlineComment {
+ i = strings.Index(line, " #")
+ if i == -1 {
+ i = strings.Index(line, " ;")
+ }
+
+ } else {
+ i = strings.IndexAny(line, "#;")
+ }
+
+ if i > -1 {
+ // Buffer the comment text and drop it from the value.
+ p.comment.WriteString(line[i:])
+ line = strings.TrimSpace(line[:i])
+ }
+
+ }
+
+ // Trim single and double quotes
+ if (hasSurroundedQuote(line, '\'') ||
+ hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote {
+ line = line[1 : len(line)-1]
+ } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols {
+ if strings.Contains(line, `\;`) {
+ line = strings.Replace(line, `\;`, ";", -1)
+ }
+ if strings.Contains(line, `\#`) {
+ line = strings.Replace(line, `\#`, "#", -1)
+ }
+ } else if p.options.AllowPythonMultilineValues && lastChar == '\n' {
+ return p.readPythonMultilines(line, bufferSize)
+ }
+
+ return line, nil
+}
+
+// readPythonMultilines consumes subsequent lines that are indented further
+// than the key (python/ConfigParser style) by peeking ahead in a copy of the
+// buffer and discarding matched lines from the real reader in lockstep. The
+// first matched line fixes the expected indent; a shallower line ends the
+// value.
+func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) {
+ parserBufferPeekResult, _ := p.buf.Peek(bufferSize)
+ peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
+
+ indentSize := 0
+ for {
+ peekData, peekErr := peekBuffer.ReadBytes('\n')
+ if peekErr != nil {
+ if peekErr == io.EOF {
+ p.debug("readPythonMultilines: io.EOF, peekData: %q, line: %q", string(peekData), line)
+ return line, nil
+ }
+
+ p.debug("readPythonMultilines: failed to peek with error: %v", peekErr)
+ return "", peekErr
+ }
+
+ p.debug("readPythonMultilines: parsing %q", string(peekData))
+
+ peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
+ p.debug("readPythonMultilines: matched %d parts", len(peekMatches))
+ for n, v := range peekMatches {
+ p.debug("  %d: %q", n, v)
+ }
+
+ // Return if not a Python multiline value.
+ if len(peekMatches) != 3 {
+ p.debug("readPythonMultilines: end of value, got: %q", line)
+ return line, nil
+ }
+
+ // Determine indent size and line prefix.
+ currentIndentSize := len(peekMatches[1])
+ if indentSize < 1 {
+ indentSize = currentIndentSize
+ p.debug("readPythonMultilines: indent size is %d", indentSize)
+ }
+
+ // Make sure each line is indented at least as far as first line.
+ if currentIndentSize < indentSize {
+ p.debug("readPythonMultilines: end of value, current indent: %d, expected indent: %d, line: %q", currentIndentSize, indentSize, line)
+ return line, nil
+ }
+
+ // Advance the parser reader (buffer) in-sync with the peek buffer.
+ _, err := p.buf.Discard(len(peekData))
+ if err != nil {
+ p.debug("readPythonMultilines: failed to skip to the end, returning error")
+ return "", err
+ }
+
+ // Handle indented empty line.
+ line += "\n" + peekMatches[1][indentSize:] + peekMatches[2]
+ }
+}
+
+// parse parses data through an io.Reader.
+// It streams the input line by line, dispatching on the first character:
+// comments are buffered, "[...]" lines open sections, and everything else is
+// treated as a key/value pair (or raw text inside unparseable sections).
+func (f *File) parse(reader io.Reader) (err error) {
+ p := newParser(reader, parserOptions{
+ IgnoreContinuation: f.options.IgnoreContinuation,
+ IgnoreInlineComment: f.options.IgnoreInlineComment,
+ AllowPythonMultilineValues: f.options.AllowPythonMultilineValues,
+ SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment,
+ UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes,
+ UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols,
+ PreserveSurroundedQuote: f.options.PreserveSurroundedQuote,
+ DebugFunc: f.options.DebugFunc,
+ ReaderBufferSize: f.options.ReaderBufferSize,
+ })
+ if err = p.BOM(); err != nil {
+ return fmt.Errorf("BOM: %v", err)
+ }
+
+ // Ignore error because default section name is never empty string.
+ name := DefaultSection
+ if f.options.Insensitive || f.options.InsensitiveSections {
+ name = strings.ToLower(DefaultSection)
+ }
+ section, _ := f.NewSection(name)
+
+ // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key
+ var isLastValueEmpty bool
+ var lastRegularKey *Key
+
+ var line []byte
+ var inUnparseableSection bool
+
+ // NOTE: Iterate and increase `currentPeekSize` until
+ // the size of the parser buffer is found.
+ // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
+ parserBufferSize := 0
+ // NOTE: Peek 4kb at a time.
+ currentPeekSize := minReaderBufferSize
+
+ if f.options.AllowPythonMultilineValues {
+ for {
+ peekBytes, _ := p.buf.Peek(currentPeekSize)
+ peekBytesLength := len(peekBytes)
+
+ if parserBufferSize >= peekBytesLength {
+ break
+ }
+
+ currentPeekSize *= 2
+ parserBufferSize = peekBytesLength
+ }
+ }
+
+ for !p.isEOF {
+ line, err = p.readUntil('\n')
+ if err != nil {
+ return err
+ }
+
+ // Indented follow-up lines after an empty-valued key become nested
+ // values of that key.
+ if f.options.AllowNestedValues &&
+ isLastValueEmpty && len(line) > 0 {
+ if line[0] == ' ' || line[0] == '\t' {
+ err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ }
+
+ line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+ if len(line) == 0 {
+ continue
+ }
+
+ // Comments
+ if line[0] == '#' || line[0] == ';' {
+ // Note: we do not care ending line break,
+ // it is needed for adding second line,
+ // so just clean it once at the end when set to value.
+ p.comment.Write(line)
+ continue
+ }
+
+ // Section
+ if line[0] == '[' {
+ // Read to the next ']' (TODO: support quoted strings)
+ closeIdx := bytes.LastIndexByte(line, ']')
+ if closeIdx == -1 {
+ return fmt.Errorf("unclosed section: %s", line)
+ }
+
+ name := string(line[1:closeIdx])
+ section, err = f.NewSection(name)
+ if err != nil {
+ return err
+ }
+
+ comment, has := cleanComment(line[closeIdx+1:])
+ if has {
+ p.comment.Write(comment)
+ }
+
+ section.Comment = strings.TrimSpace(p.comment.String())
+
+ // Reset auto-counter and comments
+ p.comment.Reset()
+ p.count = 1
+
+ inUnparseableSection = false
+ for i := range f.options.UnparseableSections {
+ if f.options.UnparseableSections[i] == name ||
+ ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) {
+ inUnparseableSection = true
+ // NOTE(review): `continue` only skips to the next candidate
+ // name; `break` would be the conventional exit once matched
+ // (behavior appears equivalent, just extra iterations).
+ continue
+ }
+ }
+ continue
+ }
+
+ if inUnparseableSection {
+ section.isRawSection = true
+ section.rawBody += string(line)
+ continue
+ }
+
+ kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line)
+ if err != nil {
+ // Treat as boolean key when desired, and whole line is key name.
+ if IsErrDelimiterNotFound(err) {
+ switch {
+ case f.options.AllowBooleanKeys:
+ kname, err := p.readValue(line, parserBufferSize)
+ if err != nil {
+ return err
+ }
+ key, err := section.NewBooleanKey(kname)
+ if err != nil {
+ return err
+ }
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ continue
+
+ case f.options.SkipUnrecognizableLines:
+ continue
+ }
+ }
+ return err
+ }
+
+ // Auto increment.
+ isAutoIncr := false
+ if kname == "-" {
+ isAutoIncr = true
+ kname = "#" + strconv.Itoa(p.count)
+ p.count++
+ }
+
+ value, err := p.readValue(line[offset:], parserBufferSize)
+ if err != nil {
+ return err
+ }
+ isLastValueEmpty = len(value) == 0
+
+ key, err := section.NewKey(kname, value)
+ if err != nil {
+ return err
+ }
+ key.isAutoIncrement = isAutoIncr
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ lastRegularKey = key
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go
new file mode 100644
index 0000000..afaa97c
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/section.go
@@ -0,0 +1,256 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Section represents a config section.
+type Section struct {
+ // f is a back-reference to the owning File (options, lock, name mapper).
+ f *File
+ Comment string
+ name string
+ keys map[string]*Key
+ // keyList preserves key insertion order for ordered iteration.
+ keyList []string
+ // keysHash maps key name to its raw string value.
+ keysHash map[string]string
+
+ // isRawSection/rawBody hold unparsed text for "unparseable" sections.
+ isRawSection bool
+ rawBody string
+}
+
+// newSection allocates an empty section bound to f.
+func newSection(f *File, name string) *Section {
+ return &Section{
+ f: f,
+ name: name,
+ keys: make(map[string]*Key),
+ keyList: make([]string, 0, 10),
+ keysHash: make(map[string]string),
+ }
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+ return s.name
+}
+
+// Body returns rawBody of Section if the section was marked as unparseable.
+// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
+// For regular (parsed) sections rawBody is empty, so this returns "".
+func (s *Section) Body() string {
+ return strings.TrimSpace(s.rawBody)
+}
+
+// SetBody updates body content only if section is raw; it is a no-op for
+// regular sections.
+func (s *Section) SetBody(body string) {
+ if !s.isRawSection {
+ return
+ }
+ s.rawBody = body
+}
+
+// NewKey creates a new key to given section.
+// If a key with the same name already exists: with AllowShadows the value is
+// appended as a shadow; otherwise the existing key's value is overwritten
+// (last one wins). Takes the file's write lock when BlockMode is on.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new key: empty key name")
+ } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
+ name = strings.ToLower(name)
+ }
+
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ if inSlice(name, s.keyList) {
+ if s.f.options.AllowShadows {
+ if err := s.keys[name].addShadow(val); err != nil {
+ return nil, err
+ }
+ } else {
+ s.keys[name].value = val
+ s.keysHash[name] = val
+ }
+ return s.keys[name], nil
+ }
+
+ s.keyList = append(s.keyList, name)
+ s.keys[name] = newKey(s, name, val)
+ s.keysHash[name] = val
+ return s.keys[name], nil
+}
+
+// NewBooleanKey creates a new boolean type key to given section.
+// The key is stored with the literal value "true" and flagged as boolean.
+func (s *Section) NewBooleanKey(name string) (*Key, error) {
+ key, err := s.NewKey(name, "true")
+ if err != nil {
+ return nil, err
+ }
+
+ key.isBooleanType = true
+ return key, nil
+}
+
+// GetKey returns key in section by given name.
+// If the key is absent, ancestor sections (split on the child-section
+// delimiter) are searched recursively. The read lock is released before that
+// recursion to avoid holding it across nested GetKey calls.
+func (s *Section) GetKey(name string) (*Key, error) {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ }
+ if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
+ name = strings.ToLower(name)
+ }
+ key := s.keys[name]
+ if s.f.BlockMode {
+ s.f.lock.RUnlock()
+ }
+
+ if key == nil {
+ // Check if it is a child-section.
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ // Missing intermediate section: keep climbing.
+ continue
+ }
+ return sec.GetKey(name)
+ }
+ break
+ }
+ return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name)
+ }
+ return key, nil
+}
+
+// HasKey returns true if section contains a key with given name.
+func (s *Section) HasKey(name string) bool {
+ key, _ := s.GetKey(name)
+ return key != nil
+}
+
+// Deprecated: Use "HasKey" instead.
+func (s *Section) Haskey(name string) bool {
+ return s.HasKey(name)
+}
+
+// HasValue returns true if section contains given raw value.
+// Linear scan over all keys; holds the read lock when BlockMode is on.
+func (s *Section) HasValue(value string) bool {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ for _, k := range s.keys {
+ if value == k.value {
+ return true
+ }
+ }
+ return false
+}
+
+// Key assumes named Key exists in section and returns a zero-value when not.
+// A missing key is materialized with an empty value so chained calls never
+// receive nil.
+func (s *Section) Key(name string) *Key {
+ key, err := s.GetKey(name)
+ if err != nil {
+ // It's OK here because the only possible error is empty key name,
+ // but if it's empty, this piece of code won't be executed.
+ key, _ = s.NewKey(name, "")
+ return key
+ }
+ return key
+}
+
+// Keys returns list of keys of section, in insertion order.
+func (s *Section) Keys() []*Key {
+ keys := make([]*Key, len(s.keyList))
+ for i := range s.keyList {
+ keys[i] = s.Key(s.keyList[i])
+ }
+ return keys
+}
+
+// ParentKeys returns list of keys of parent section.
+// It walks every ancestor named by the child-section delimiter, accumulating
+// each ancestor's keys; missing intermediate sections are skipped.
+func (s *Section) ParentKeys() []*Key {
+ var parentKeys []*Key
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ parentKeys = append(parentKeys, sec.Keys()...)
+ } else {
+ break
+ }
+
+ }
+ return parentKeys
+}
+
+// KeyStrings returns list of key names of section (a copy, safe to mutate).
+func (s *Section) KeyStrings() []string {
+ list := make([]string, len(s.keyList))
+ copy(list, s.keyList)
+ return list
+}
+
+// KeysHash returns keys hash consisting of names and values.
+// The returned map is a copy, so callers may modify it freely.
+func (s *Section) KeysHash() map[string]string {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ hash := map[string]string{}
+ for key, value := range s.keysHash {
+ hash[key] = value
+ }
+ return hash
+}
+
+// DeleteKey deletes a key from section.
+// Removes the name from the ordered list and both maps; no-op if absent.
+func (s *Section) DeleteKey(name string) {
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ for i, k := range s.keyList {
+ if k == name {
+ s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+ delete(s.keys, name)
+ delete(s.keysHash, name)
+ return
+ }
+ }
+}
+
+// ChildSections returns a list of child sections of current section.
+// For example, "[parent.child1]" and "[parent.child12]" are child sections
+// of section "[parent]".
+// Matching is by name prefix, so grandchildren are included as well.
+func (s *Section) ChildSections() []*Section {
+ prefix := s.name + s.f.options.ChildSectionDelimiter
+ children := make([]*Section, 0, 3)
+ for _, name := range s.f.sectionList {
+ if strings.HasPrefix(name, prefix) {
+ children = append(children, s.f.sections[name]...)
+ }
+ }
+ return children
+}
diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go
new file mode 100644
index 0000000..a486b2f
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/struct.go
@@ -0,0 +1,747 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+ "unicode"
+)
+
+// NameMapper represents a ini tag name mapper.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+ // SnackCase converts to format SNACK_CASE.
+ SnackCase NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ }
+ newstr = append(newstr, unicode.ToUpper(chr))
+ }
+ return string(newstr)
+ }
+ // TitleUnderscore converts to format title_underscore.
+ TitleUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ chr -= 'A' - 'a'
+ }
+ newstr = append(newstr, chr)
+ }
+ return string(newstr)
+ }
+)
+
+func (s *Section) parseFieldName(raw, actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ if s.f.NameMapper != nil {
+ return s.f.NameMapper(raw)
+ }
+ return raw
+}
+
+func parseDelim(actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ return ","
+}
+
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setSliceWithProperType sets proper values to slice based on its type.
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+ var strs []string
+ if allowShadow {
+ strs = key.StringsWithShadows(delim)
+ } else {
+ strs = key.Strings(delim)
+ }
+
+ numVals := len(strs)
+ if numVals == 0 {
+ return nil
+ }
+
+ var vals interface{}
+ var err error
+
+ sliceOf := field.Type().Elem().Kind()
+ switch sliceOf {
+ case reflect.String:
+ vals = strs
+ case reflect.Int:
+ vals, err = key.parseInts(strs, true, false)
+ case reflect.Int64:
+ vals, err = key.parseInt64s(strs, true, false)
+ case reflect.Uint:
+ vals, err = key.parseUints(strs, true, false)
+ case reflect.Uint64:
+ vals, err = key.parseUint64s(strs, true, false)
+ case reflect.Float64:
+ vals, err = key.parseFloat64s(strs, true, false)
+ case reflect.Bool:
+ vals, err = key.parseBools(strs, true, false)
+ case reflectTime:
+ vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+ if err != nil && isStrict {
+ return err
+ }
+
+ slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+ for i := 0; i < numVals; i++ {
+ switch sliceOf {
+ case reflect.String:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
+ case reflect.Int:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
+ case reflect.Int64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
+ case reflect.Uint:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
+ case reflect.Uint64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
+ case reflect.Float64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
+ case reflect.Bool:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i]))
+ case reflectTime:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
+ }
+ }
+ field.Set(slice)
+ return nil
+}
+
+func wrapStrictError(err error, isStrict bool) error {
+ if isStrict {
+ return err
+ }
+ return nil
+}
+
+// setWithProperType sets proper value to field based on its type,
+// but it does not return error for failing parsing,
+// because we want to use default value that is already assigned to struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+ vt := t
+ isPtr := t.Kind() == reflect.Ptr
+ if isPtr {
+ vt = t.Elem()
+ }
+ switch vt.Kind() {
+ case reflect.String:
+ stringVal := key.String()
+ if isPtr {
+ field.Set(reflect.ValueOf(&stringVal))
+ } else if len(stringVal) > 0 {
+ field.SetString(key.String())
+ }
+ case reflect.Bool:
+ boolVal, err := key.Bool()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ field.Set(reflect.ValueOf(&boolVal))
+ } else {
+ field.SetBool(boolVal)
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ // ParseDuration will not return err for `0`, so check the type name
+ if vt.Name() == "Duration" {
+ durationVal, err := key.Duration()
+ if err != nil {
+ if intVal, err := key.Int64(); err == nil {
+ field.SetInt(intVal)
+ return nil
+ }
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ field.Set(reflect.ValueOf(&durationVal))
+ } else if int64(durationVal) > 0 {
+ field.Set(reflect.ValueOf(durationVal))
+ }
+ return nil
+ }
+
+ intVal, err := key.Int64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ pv := reflect.New(t.Elem())
+ pv.Elem().SetInt(intVal)
+ field.Set(pv)
+ } else {
+ field.SetInt(intVal)
+ }
+ // byte is an alias for uint8, so supporting uint8 breaks support for byte
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ durationVal, err := key.Duration()
+ // Skip zero value
+ if err == nil && uint64(durationVal) > 0 {
+ if isPtr {
+ field.Set(reflect.ValueOf(&durationVal))
+ } else {
+ field.Set(reflect.ValueOf(durationVal))
+ }
+ return nil
+ }
+
+ uintVal, err := key.Uint64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ pv := reflect.New(t.Elem())
+ pv.Elem().SetUint(uintVal)
+ field.Set(pv)
+ } else {
+ field.SetUint(uintVal)
+ }
+
+ case reflect.Float32, reflect.Float64:
+ floatVal, err := key.Float64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ pv := reflect.New(t.Elem())
+ pv.Elem().SetFloat(floatVal)
+ field.Set(pv)
+ } else {
+ field.SetFloat(floatVal)
+ }
+ case reflectTime:
+ timeVal, err := key.Time()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ field.Set(reflect.ValueOf(&timeVal))
+ } else {
+ field.Set(reflect.ValueOf(timeVal))
+ }
+ case reflect.Slice:
+ return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
+ default:
+ return fmt.Errorf("unsupported type %q", t)
+ }
+ return nil
+}
+
+func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) {
+ opts := strings.SplitN(tag, ",", 5)
+ rawName = opts[0]
+ for _, opt := range opts[1:] {
+ omitEmpty = omitEmpty || (opt == "omitempty")
+ allowShadow = allowShadow || (opt == "allowshadow")
+ allowNonUnique = allowNonUnique || (opt == "nonunique")
+ extends = extends || (opt == "extends")
+ }
+ return rawName, omitEmpty, allowShadow, allowNonUnique, extends
+}
+
+// mapToField maps the given value to the matching field of the given section.
+// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added.
+func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
+ fieldName := s.parseFieldName(tpField.Name, rawName)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ isStruct := tpField.Type.Kind() == reflect.Struct
+ isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct
+ isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+ if isAnonymousPtr {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+
+ if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) {
+ if isStructPtr && field.IsNil() {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+ fieldSection := s
+ if rawName != "" {
+ sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName
+ if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) {
+ fieldSection = secs[sectionIndex]
+ }
+ }
+ if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil {
+ return fmt.Errorf("map to field %q: %v", fieldName, err)
+ }
+ } else if isAnonymousPtr || isStruct || isStructPtr {
+ if secs, err := s.f.SectionsByName(fieldName); err == nil {
+ if len(secs) <= sectionIndex {
+ return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName)
+ }
+ // Only set the field to non-nil struct value if we have a section for it.
+ // Otherwise, we end up with a non-nil struct ptr even though there is no data.
+ if isStructPtr && field.IsNil() {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+ if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil {
+ return fmt.Errorf("map to field %q: %v", fieldName, err)
+ }
+ continue
+ }
+ }
+
+ // Map non-unique sections
+ if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
+ newField, err := s.mapToSlice(fieldName, field, isStrict)
+ if err != nil {
+ return fmt.Errorf("map to slice %q: %v", fieldName, err)
+ }
+
+ field.Set(newField)
+ continue
+ }
+
+ if key, err := s.GetKey(fieldName); err == nil {
+ delim := parseDelim(tpField.Tag.Get("delim"))
+ if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
+ return fmt.Errorf("set field %q: %v", fieldName, err)
+ }
+ }
+ }
+ return nil
+}
+
+// mapToSlice maps all sections with the same name and returns the new value.
+// The type of the Value must be a slice.
+func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) {
+ secs, err := s.f.SectionsByName(secName)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ typ := val.Type().Elem()
+ for i, sec := range secs {
+ elem := reflect.New(typ)
+ if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil {
+ return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err)
+ }
+
+ val = reflect.Append(val, elem.Elem())
+ }
+ return val, nil
+}
+
+// mapTo maps a section to object v.
+func (s *Section) mapTo(v interface{}, isStrict bool) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("not a pointer to a struct")
+ }
+
+ if typ.Kind() == reflect.Slice {
+ newField, err := s.mapToSlice(s.name, val, isStrict)
+ if err != nil {
+ return err
+ }
+
+ val.Set(newField)
+ return nil
+ }
+
+ return s.mapToField(val, isStrict, 0, s.name)
+}
+
+// MapTo maps section to given struct.
+func (s *Section) MapTo(v interface{}) error {
+ return s.mapTo(v, false)
+}
+
+// StrictMapTo maps section to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func (s *Section) StrictMapTo(v interface{}) error {
+ return s.mapTo(v, true)
+}
+
+// MapTo maps file to given struct.
+func (f *File) MapTo(v interface{}) error {
+ return f.Section("").MapTo(v)
+}
+
+// StrictMapTo maps file to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func (f *File) StrictMapTo(v interface{}) error {
+ return f.Section("").StrictMapTo(v)
+}
+
+// MapToWithMapper maps data sources to given struct with name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.MapTo(v)
+}
+
+// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
+// which returns all possible error including value parsing error.
+func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.StrictMapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+ return MapToWithMapper(v, nil, source, others...)
+}
+
+// StrictMapTo maps data sources to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func StrictMapTo(v, source interface{}, others ...interface{}) error {
+ return StrictMapToWithMapper(v, nil, source, others...)
+}
+
+// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
+ slice := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+ sliceOf := field.Type().Elem().Kind()
+
+ if allowShadow {
+ var keyWithShadows *Key
+ for i := 0; i < field.Len(); i++ {
+ var val string
+ switch sliceOf {
+ case reflect.String:
+ val = slice.Index(i).String()
+ case reflect.Int, reflect.Int64:
+ val = fmt.Sprint(slice.Index(i).Int())
+ case reflect.Uint, reflect.Uint64:
+ val = fmt.Sprint(slice.Index(i).Uint())
+ case reflect.Float64:
+ val = fmt.Sprint(slice.Index(i).Float())
+ case reflect.Bool:
+ val = fmt.Sprint(slice.Index(i).Bool())
+ case reflectTime:
+ val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339)
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+
+ if i == 0 {
+ keyWithShadows = newKey(key.s, key.name, val)
+ } else {
+ _ = keyWithShadows.AddShadow(val)
+ }
+ }
+ *key = *keyWithShadows
+ return nil
+ }
+
+ var buf bytes.Buffer
+ for i := 0; i < field.Len(); i++ {
+ switch sliceOf {
+ case reflect.String:
+ buf.WriteString(slice.Index(i).String())
+ case reflect.Int, reflect.Int64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+ case reflect.Uint, reflect.Uint64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+ case reflect.Float64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+ case reflect.Bool:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Bool()))
+ case reflectTime:
+ buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+ buf.WriteString(delim)
+ }
+ key.SetValue(buf.String()[:buf.Len()-len(delim)])
+ return nil
+}
+
+// reflectWithProperType does the opposite thing as setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
+ switch t.Kind() {
+ case reflect.String:
+ key.SetValue(field.String())
+ case reflect.Bool:
+ key.SetValue(fmt.Sprint(field.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ key.SetValue(fmt.Sprint(field.Int()))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ key.SetValue(fmt.Sprint(field.Uint()))
+ case reflect.Float32, reflect.Float64:
+ key.SetValue(fmt.Sprint(field.Float()))
+ case reflectTime:
+ key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
+ case reflect.Slice:
+ return reflectSliceWithProperType(key, field, delim, allowShadow)
+ case reflect.Ptr:
+ if !field.IsNil() {
+ return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow)
+ }
+ default:
+ return fmt.Errorf("unsupported type %q", t)
+ }
+ return nil
+}
+
+// CR: copied from encoding/json/encode.go with modifications of time.Time support.
+// TODO: add more test coverage.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflectTime:
+ t, ok := v.Interface().(time.Time)
+ return ok && t.IsZero()
+ }
+ return false
+}
+
+// StructReflector is the interface implemented by struct types that can extract themselves into INI objects.
+type StructReflector interface {
+ ReflectINIStruct(*File) error
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ if !val.Field(i).CanInterface() {
+ continue
+ }
+
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
+ if omitEmpty && isEmptyValue(field) {
+ continue
+ }
+
+ if r, ok := field.Interface().(StructReflector); ok {
+ return r.ReflectINIStruct(s.f)
+ }
+
+ fieldName := s.parseFieldName(tpField.Name, rawName)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) {
+ if err := s.reflectFrom(field); err != nil {
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
+ }
+ continue
+ }
+
+ if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) ||
+ (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+ // Note: The only error here is section doesn't exist.
+ sec, err := s.f.GetSection(fieldName)
+ if err != nil {
+ // Note: fieldName can never be empty here, ignore error.
+ sec, _ = s.f.NewSection(fieldName)
+ }
+
+ // Add comment from comment tag
+ if len(sec.Comment) == 0 {
+ sec.Comment = tpField.Tag.Get("comment")
+ }
+
+ if err = sec.reflectFrom(field); err != nil {
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
+ }
+ continue
+ }
+
+ if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
+ slice := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+ sliceOf := field.Type().Elem().Kind()
+
+ for i := 0; i < field.Len(); i++ {
+ if sliceOf != reflect.Struct && sliceOf != reflect.Ptr {
+ return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName)
+ }
+
+ sec, err := s.f.NewSection(fieldName)
+ if err != nil {
+ return err
+ }
+
+ // Add comment from comment tag
+ if len(sec.Comment) == 0 {
+ sec.Comment = tpField.Tag.Get("comment")
+ }
+
+ if err := sec.reflectFrom(slice.Index(i)); err != nil {
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
+ }
+ }
+ continue
+ }
+
+ // Note: Same reason as section.
+ key, err := s.GetKey(fieldName)
+ if err != nil {
+ key, _ = s.NewKey(fieldName, "")
+ }
+
+ // Add comment from comment tag
+ if len(key.Comment) == 0 {
+ key.Comment = tpField.Tag.Get("comment")
+ }
+
+ delim := parseDelim(tpField.Tag.Get("delim"))
+ if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
+ return fmt.Errorf("reflect field %q: %v", fieldName, err)
+ }
+
+ }
+ return nil
+}
+
+// ReflectFrom reflects section from given struct. It overwrites existing ones.
+func (s *Section) ReflectFrom(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+
+ if s.name != DefaultSection && s.f.options.AllowNonUniqueSections &&
+ (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) {
+ // Clear sections to make sure none exists before adding the new ones
+ s.f.DeleteSection(s.name)
+
+ if typ.Kind() == reflect.Ptr {
+ sec, err := s.f.NewSection(s.name)
+ if err != nil {
+ return err
+ }
+ return sec.reflectFrom(val.Elem())
+ }
+
+ slice := val.Slice(0, val.Len())
+ sliceOf := val.Type().Elem().Kind()
+ if sliceOf != reflect.Ptr {
+ return fmt.Errorf("not a slice of pointers")
+ }
+
+ for i := 0; i < slice.Len(); i++ {
+ sec, err := s.f.NewSection(s.name)
+ if err != nil {
+ return err
+ }
+
+ err = sec.reflectFrom(slice.Index(i))
+ if err != nil {
+ return fmt.Errorf("reflect from %dth field: %v", i, err)
+ }
+ }
+
+ return nil
+ }
+
+ if typ.Kind() == reflect.Ptr {
+ val = val.Elem()
+ } else {
+ return errors.New("not a pointer to a struct")
+ }
+
+ return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+ return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+ cfg.NameMapper = mapper
+ return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+ return ReflectFromWithMapper(cfg, v, nil)
+}
diff --git a/vendor/github.com/siddontang/go-mysql/LICENSE b/vendor/github.com/go-mysql-org/go-mysql/LICENSE
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/LICENSE
rename to vendor/github.com/go-mysql-org/go-mysql/LICENSE
diff --git a/vendor/github.com/siddontang/go-mysql/client/auth.go b/vendor/github.com/go-mysql-org/go-mysql/client/auth.go
similarity index 94%
rename from vendor/github.com/siddontang/go-mysql/client/auth.go
rename to vendor/github.com/go-mysql-org/go-mysql/client/auth.go
index 5ba9c9f..01217f8 100644
--- a/vendor/github.com/siddontang/go-mysql/client/auth.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/client/auth.go
@@ -6,9 +6,9 @@ import (
"encoding/binary"
"fmt"
- "github.com/juju/errors"
- . "github.com/siddontang/go-mysql/mysql"
- "github.com/siddontang/go-mysql/packet"
+ . "github.com/go-mysql-org/go-mysql/mysql"
+ "github.com/go-mysql-org/go-mysql/packet"
+ "github.com/pingcap/errors"
)
const defaultAuthPluginName = AUTH_NATIVE_PASSWORD
@@ -34,7 +34,7 @@ func (c *Conn) readInitialHandshake() error {
}
if data[0] == ERR_HEADER {
- return errors.New("read initial handshake error")
+ return errors.Annotate(c.handleErrorPacket(data), "read initial handshake error")
}
if data[0] < MinProtocolVersion {
@@ -46,7 +46,7 @@ func (c *Conn) readInitialHandshake() error {
pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1
// connection id length is 4
- c.connectionID = uint32(binary.LittleEndian.Uint32(data[pos : pos+4]))
+ c.connectionID = binary.LittleEndian.Uint32(data[pos : pos+4])
pos += 4
c.salt = []byte{}
@@ -106,7 +106,7 @@ func (c *Conn) readInitialHandshake() error {
// generate auth response data according to auth plugin
//
// NOTE: the returned boolean value indicates whether to add a \NUL to the end of data.
-// it is quite tricky because MySQl server expects different formats of responses in different auth situations.
+// it is quite tricky because MySQL server expects different formats of responses in different auth situations.
// here the \NUL needs to be added when sending back the empty password or cleartext password in 'sha256_password'
// authentication.
func (c *Conn) genAuthResponse(authData []byte) ([]byte, bool, error) {
@@ -199,7 +199,7 @@ func (c *Conn) writeAuthHandshake() error {
// Charset [1 byte]
// use default collation id 33 here, is utf-8
- data[12] = byte(DEFAULT_COLLATION_ID)
+ data[12] = DEFAULT_COLLATION_ID
// SSL Connection Request Packet
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
diff --git a/vendor/github.com/siddontang/go-mysql/client/conn.go b/vendor/github.com/go-mysql-org/go-mysql/client/conn.go
similarity index 78%
rename from vendor/github.com/siddontang/go-mysql/client/conn.go
rename to vendor/github.com/go-mysql-org/go-mysql/client/conn.go
index b015b43..1b37444 100644
--- a/vendor/github.com/siddontang/go-mysql/client/conn.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/client/conn.go
@@ -7,9 +7,9 @@ import (
"strings"
"time"
- "github.com/juju/errors"
- . "github.com/siddontang/go-mysql/mysql"
- "github.com/siddontang/go-mysql/packet"
+ . "github.com/go-mysql-org/go-mysql/mysql"
+ "github.com/go-mysql-org/go-mysql/packet"
+ "github.com/pingcap/errors"
)
type Conn struct {
@@ -33,6 +33,9 @@ type Conn struct {
connectionID uint32
}
+// This function will be called for every row in resultset from ExecuteSelectStreaming.
+type SelectPerRowCallback func(row []FieldValue) error
+
func getNetProto(addr string) string {
proto := "tcp"
if strings.Contains(addr, "/") {
@@ -54,7 +57,12 @@ func Connect(addr string, user string, password string, dbName string, options .
return nil, errors.Trace(err)
}
- c.Conn = packet.NewConn(conn)
+ if c.tlsConfig != nil {
+ c.Conn = packet.NewTLSConn(conn)
+ } else {
+ c.Conn = packet.NewConn(conn)
+ }
+
c.user = user
c.password = password
c.db = dbName
@@ -112,13 +120,13 @@ func (c *Conn) Ping() error {
return nil
}
-// use default SSL
+// UseSSL: use default SSL
// pass to options when connect
func (c *Conn) UseSSL(insecureSkipVerify bool) {
c.tlsConfig = &tls.Config{InsecureSkipVerify: insecureSkipVerify}
}
-// use user-specified TLS config
+// SetTLSConfig: use user-specified TLS config
// pass to options when connect
func (c *Conn) SetTLSConfig(config *tls.Config) {
c.tlsConfig = config
@@ -160,6 +168,28 @@ func (c *Conn) Execute(command string, args ...interface{}) (*Result, error) {
}
}
+// ExecuteSelectStreaming will call perRowCallback for every row in resultset
+// WITHOUT saving any row data to Result.{Values/RawPkg/RowDatas} fields.
+//
+// ExecuteSelectStreaming should be used only for SELECT queries with a large response resultset for memory preserving.
+//
+// Example:
+//
+// var result mysql.Result
+// conn.ExecuteSelectStreaming(`SELECT ... LIMIT 100500`, &result, func(row []mysql.FieldValue) error {
+// // Use the row as you want.
+// // You must not save FieldValue.AsString() value after this callback is done. Copy it if you need.
+// return nil
+// })
+//
+func (c *Conn) ExecuteSelectStreaming(command string, result *Result, perRowCallback SelectPerRowCallback) error {
+ if err := c.writeCommandStr(COM_QUERY, command); err != nil {
+ return errors.Trace(err)
+ }
+
+ return c.readResultStreaming(false, result, perRowCallback)
+}
+
func (c *Conn) Begin() error {
_, err := c.exec("BEGIN")
return errors.Trace(err)
diff --git a/vendor/github.com/go-mysql-org/go-mysql/client/pool.go b/vendor/github.com/go-mysql-org/go-mysql/client/pool.go
new file mode 100644
index 0000000..c8c1f45
--- /dev/null
+++ b/vendor/github.com/go-mysql-org/go-mysql/client/pool.go
@@ -0,0 +1,476 @@
+package client
+
+import (
+ "context"
+ "math"
+ "math/rand"
+ "sync"
+ "time"
+
+ "github.com/pingcap/errors"
+)
+
+/*
+Pool for efficient reuse of connections.
+
+Usage:
+ pool := client.NewPool(log.Debugf, 100, 400, 5, `127.0.0.1:3306`, `username`, `userpwd`, `dbname`)
+ ...
+ conn, _ := pool.GetConn(ctx)
+ defer pool.PutConn(conn)
+ conn.Execute/conn.Begin/etc...
+*/
+
+type (
+ Timestamp int64
+
+ LogFunc func(format string, args ...interface{})
+
+ Pool struct {
+ logFunc LogFunc
+ minAlive int
+ maxAlive int
+ maxIdle int
+ idleCloseTimeout Timestamp
+ idlePingTimeout Timestamp
+ connect func() (*Conn, error)
+
+ synchro struct {
+ sync.Mutex
+ idleConnections []Connection
+ stats ConnectionStats
+ }
+
+ readyConnection chan Connection
+ }
+
+ ConnectionStats struct {
+ // Uses internally
+ TotalCount int
+
+ // Only for stats
+ IdleCount int
+ CreatedCount int64
+ }
+
+ Connection struct {
+ conn *Conn
+ lastUseAt Timestamp
+ }
+)
+
+var (
+ // MaxIdleTimeoutWithoutPing - If the connection has been idle for more than this time,
+ // then ping will be performed before use to check if it alive
+ MaxIdleTimeoutWithoutPing = 10 * time.Second
+
+ // DefaultIdleTimeout - If the connection has been idle for more than this time,
+ // we can close it (but we should remember about Pool.minAlive)
+ DefaultIdleTimeout = 30 * time.Second
+
+ // MaxNewConnectionAtOnce - If we need to create new connections,
+ // then we will create no more than this number of connections at a time.
+ // This restriction will be ignored on pool initialization.
+ MaxNewConnectionAtOnce = 5
+)
+
+// NewPool initializes new connection pool and uses params: addr, user, password, dbName and options.
+// minAlive specifies the minimum number of open connections that the pool will try to maintain.
+// maxAlive specifies the maximum number of open connections
+// (for internal reasons, may be greater by 1 inside newConnectionProducer).
+// maxIdle specifies the maximum number of idle connections (see DefaultIdleTimeout).
+func NewPool(
+ logFunc LogFunc,
+ minAlive int,
+ maxAlive int,
+ maxIdle int,
+ addr string,
+ user string,
+ password string,
+ dbName string,
+ options ...func(conn *Conn),
+) *Pool {
+ if minAlive > maxAlive {
+ minAlive = maxAlive
+ }
+ if maxIdle > maxAlive {
+ maxIdle = maxAlive
+ }
+ if maxIdle <= minAlive {
+ maxIdle = minAlive
+ }
+
+ pool := &Pool{
+ logFunc: logFunc,
+ minAlive: minAlive,
+ maxAlive: maxAlive,
+ maxIdle: maxIdle,
+
+ idleCloseTimeout: Timestamp(math.Ceil(DefaultIdleTimeout.Seconds())),
+ idlePingTimeout: Timestamp(math.Ceil(MaxIdleTimeoutWithoutPing.Seconds())),
+
+ connect: func() (*Conn, error) {
+ return Connect(addr, user, password, dbName, options...)
+ },
+
+ readyConnection: make(chan Connection),
+ }
+
+ pool.synchro.idleConnections = make([]Connection, 0, pool.maxIdle)
+
+ go pool.newConnectionProducer()
+
+ if pool.minAlive > 0 {
+ pool.logFunc(`Pool: Setup %d new connections (minimal pool size)...`, pool.minAlive)
+ pool.startNewConnections(pool.minAlive)
+ }
+
+ go pool.closeOldIdleConnections()
+
+ return pool
+}
+
+func (pool *Pool) GetStats(stats *ConnectionStats) {
+ pool.synchro.Lock()
+
+ *stats = pool.synchro.stats
+
+ stats.IdleCount = len(pool.synchro.idleConnections)
+
+ pool.synchro.Unlock()
+}
+
+// GetConn returns connection from the pool or create new
+func (pool *Pool) GetConn(ctx context.Context) (*Conn, error) {
+ for {
+ connection, err := pool.getConnection(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // For long time idle connections, we do a ping check
+ if delta := pool.nowTs() - connection.lastUseAt; delta > pool.idlePingTimeout {
+ if err := pool.ping(connection.conn); err != nil {
+ pool.closeConn(connection.conn)
+ continue
+ }
+ }
+
+ return connection.conn, nil
+ }
+}
+
+// PutConn returns working connection back to pool
+func (pool *Pool) PutConn(conn *Conn) {
+ pool.putConnection(Connection{
+ conn: conn,
+ lastUseAt: pool.nowTs(),
+ })
+}
+
+// DropConn closes the connection without any checks
+func (pool *Pool) DropConn(conn *Conn) {
+ pool.closeConn(conn)
+}
+
+func (pool *Pool) putConnection(connection Connection) {
+ pool.synchro.Lock()
+ defer pool.synchro.Unlock()
+
+ // If someone is already waiting for a connection, then we return it to him
+ select {
+ case pool.readyConnection <- connection:
+ return
+ default:
+ }
+
+ // Nobody needs this connection
+
+ pool.putConnectionUnsafe(connection)
+}
+
+func (pool *Pool) nowTs() Timestamp {
+ return Timestamp(time.Now().Unix())
+}
+
+func (pool *Pool) getConnection(ctx context.Context) (Connection, error) {
+ pool.synchro.Lock()
+
+ connection := pool.getIdleConnectionUnsafe()
+ if connection.conn != nil {
+ pool.synchro.Unlock()
+ return connection, nil
+ }
+ pool.synchro.Unlock()
+
+ // No idle connections are available
+
+ select {
+ case connection := <-pool.readyConnection:
+ return connection, nil
+
+ case <-ctx.Done():
+ return Connection{}, ctx.Err()
+ }
+}
+
+func (pool *Pool) putConnectionUnsafe(connection Connection) {
+ if len(pool.synchro.idleConnections) == cap(pool.synchro.idleConnections) {
+ pool.synchro.stats.TotalCount--
+ _ = connection.conn.Close() // Could it be more effective to close older connections?
+ } else {
+ pool.synchro.idleConnections = append(pool.synchro.idleConnections, connection)
+ }
+}
+
+func (pool *Pool) newConnectionProducer() {
+ var connection Connection
+ var err error
+
+ for {
+ connection.conn = nil
+
+ pool.synchro.Lock()
+
+ connection = pool.getIdleConnectionUnsafe()
+ if connection.conn == nil {
+ if pool.synchro.stats.TotalCount >= pool.maxAlive {
+ // Can't create more connections
+ pool.synchro.Unlock()
+ time.Sleep(10 * time.Millisecond)
+ continue
+ }
+ pool.synchro.stats.TotalCount++ // "Reserving" new connection
+ }
+
+ pool.synchro.Unlock()
+
+ if connection.conn == nil {
+ connection, err = pool.createNewConnection()
+ if err != nil {
+ pool.synchro.Lock()
+ pool.synchro.stats.TotalCount-- // Bad luck, should try again
+ pool.synchro.Unlock()
+
+ time.Sleep(time.Duration(10+rand.Intn(90)) * time.Millisecond)
+ continue
+ }
+ }
+
+ pool.readyConnection <- connection
+ }
+}
+
+func (pool *Pool) createNewConnection() (Connection, error) {
+ var connection Connection
+ var err error
+
+ connection.conn, err = pool.connect()
+ if err != nil {
+ return Connection{}, errors.Errorf(`Could not connect to mysql: %s`, err)
+ }
+ connection.lastUseAt = pool.nowTs()
+
+ pool.synchro.Lock()
+ pool.synchro.stats.CreatedCount++
+ pool.synchro.Unlock()
+
+ return connection, nil
+}
+
+func (pool *Pool) getIdleConnectionUnsafe() Connection {
+ cnt := len(pool.synchro.idleConnections)
+ if cnt == 0 {
+ return Connection{}
+ }
+
+ last := cnt - 1
+ connection := pool.synchro.idleConnections[last]
+ pool.synchro.idleConnections[last].conn = nil
+ pool.synchro.idleConnections = pool.synchro.idleConnections[:last]
+
+ return connection
+}
+
+func (pool *Pool) closeOldIdleConnections() {
+ var toPing []Connection
+
+ ticker := time.NewTicker(5 * time.Second)
+
+ for range ticker.C {
+ toPing = pool.getOldIdleConnections(toPing[:0])
+ if len(toPing) == 0 {
+ continue
+ }
+ pool.recheckConnections(toPing)
+
+ if !pool.spawnConnectionsIfNeeded() {
+ pool.closeIdleConnectionsIfCan()
+ }
+ }
+}
+
+func (pool *Pool) getOldIdleConnections(dst []Connection) []Connection {
+ dst = dst[:0]
+
+ pool.synchro.Lock()
+
+ synchro := &pool.synchro
+
+ idleCnt := len(synchro.idleConnections)
+ checkBefore := pool.nowTs() - pool.idlePingTimeout
+
+ for i := idleCnt - 1; i >= 0; i-- {
+ if synchro.idleConnections[i].lastUseAt > checkBefore {
+ continue
+ }
+
+ dst = append(dst, synchro.idleConnections[i])
+
+ last := idleCnt - 1
+ if i < last {
+ // Removing an item from the middle of a slice
+ synchro.idleConnections[i], synchro.idleConnections[last] = synchro.idleConnections[last], synchro.idleConnections[i]
+ }
+
+ synchro.idleConnections[last].conn = nil
+ synchro.idleConnections = synchro.idleConnections[:last]
+ idleCnt--
+ }
+
+ pool.synchro.Unlock()
+
+ return dst
+}
+
+func (pool *Pool) recheckConnections(connections []Connection) {
+ const workerCnt = 2 // Heuristic :)
+
+ queue := make(chan Connection, len(connections))
+ for _, connection := range connections {
+ queue <- connection
+ }
+ close(queue)
+
+ var wg sync.WaitGroup
+ wg.Add(workerCnt)
+ for worker := 0; worker < workerCnt; worker++ {
+ go func() {
+ defer wg.Done()
+ for connection := range queue {
+ if err := pool.ping(connection.conn); err != nil {
+ pool.closeConn(connection.conn)
+ } else {
+ pool.putConnection(connection)
+ }
+ }
+ }()
+ }
+
+ wg.Wait()
+}
+
+// spawnConnectionsIfNeeded creates new connections if there are not enough of them and returns true in this case
+func (pool *Pool) spawnConnectionsIfNeeded() bool {
+ pool.synchro.Lock()
+ totalCount := pool.synchro.stats.TotalCount
+ idleCount := len(pool.synchro.idleConnections)
+ needSpanNew := pool.minAlive - totalCount
+ pool.synchro.Unlock()
+
+ if needSpanNew <= 0 {
+ return false
+ }
+
+ // Не хватает соединений, нужно создать еще
+
+ if needSpanNew > MaxNewConnectionAtOnce {
+ needSpanNew = MaxNewConnectionAtOnce
+ }
+
+ pool.logFunc(`Pool: Setup %d new connections (total: %d idle: %d)...`, needSpanNew, totalCount, idleCount)
+ pool.startNewConnections(needSpanNew)
+
+ return true
+}
+
+func (pool *Pool) closeIdleConnectionsIfCan() {
+ pool.synchro.Lock()
+
+ canCloseCnt := pool.synchro.stats.TotalCount - pool.minAlive
+ canCloseCnt-- // -1 to account for an open but unused connection (pool.readyConnection <- connection in newConnectionProducer)
+
+ idleCnt := len(pool.synchro.idleConnections)
+
+ inFly := pool.synchro.stats.TotalCount - idleCnt
+
+ // We can close no more than 10% connections at a time, but at least 1, if possible
+ idleCanCloseCnt := idleCnt / 10
+ if idleCanCloseCnt == 0 {
+ idleCanCloseCnt = 1
+ }
+ if canCloseCnt > idleCanCloseCnt {
+ canCloseCnt = idleCanCloseCnt
+ }
+ if canCloseCnt <= 0 {
+ pool.synchro.Unlock()
+ return
+ }
+
+ closeFromIdx := idleCnt - canCloseCnt
+ if closeFromIdx < 0 {
+ // If there are enough requests in the "flight" now, then we can close all unnecessary
+ closeFromIdx = 0
+ }
+
+ toClose := append([]Connection{}, pool.synchro.idleConnections[closeFromIdx:]...)
+
+ for i := closeFromIdx; i < idleCnt; i++ {
+ pool.synchro.idleConnections[i].conn = nil
+ }
+ pool.synchro.idleConnections = pool.synchro.idleConnections[:closeFromIdx]
+
+ pool.synchro.Unlock()
+
+ pool.logFunc(`Pool: Close %d idle connections (in fly %d)`, len(toClose), inFly)
+ for _, connection := range toClose {
+ pool.closeConn(connection.conn)
+ }
+}
+
+func (pool *Pool) closeConn(conn *Conn) {
+ pool.synchro.Lock()
+ pool.synchro.stats.TotalCount--
+ pool.synchro.Unlock()
+
+ _ = conn.Close() // Closing is not an instant action, so do it outside the lock
+}
+
+func (pool *Pool) startNewConnections(count int) {
+ connections := make([]Connection, 0, count)
+ for i := 0; i < count; i++ {
+ if conn, err := pool.createNewConnection(); err == nil {
+ pool.synchro.Lock()
+ pool.synchro.stats.TotalCount++
+ pool.synchro.Unlock()
+ connections = append(connections, conn)
+ }
+ }
+
+ pool.synchro.Lock()
+ for _, connection := range connections {
+ pool.putConnectionUnsafe(connection)
+ }
+ pool.synchro.Unlock()
+}
+
+func (pool *Pool) ping(conn *Conn) error {
+ deadline := time.Now().Add(100 * time.Millisecond)
+ _ = conn.SetWriteDeadline(deadline)
+ _ = conn.SetReadDeadline(deadline)
+ err := conn.Ping()
+ if err != nil {
+ pool.logFunc(`Pool: ping query fail: %s`, err.Error())
+ }
+ return err
+}
diff --git a/vendor/github.com/siddontang/go-mysql/client/req.go b/vendor/github.com/go-mysql-org/go-mysql/client/req.go
similarity index 81%
rename from vendor/github.com/siddontang/go-mysql/client/req.go
rename to vendor/github.com/go-mysql-org/go-mysql/client/req.go
index dde03e7..df3cee2 100644
--- a/vendor/github.com/siddontang/go-mysql/client/req.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/client/req.go
@@ -1,5 +1,9 @@
package client
+import (
+ "github.com/go-mysql-org/go-mysql/utils"
+)
+
func (c *Conn) writeCommand(command byte) error {
c.ResetSequence()
@@ -16,28 +20,20 @@ func (c *Conn) writeCommandBuf(command byte, arg []byte) error {
c.ResetSequence()
length := len(arg) + 1
-
- data := make([]byte, length+4)
-
+ data := utils.ByteSliceGet(length + 4)
data[4] = command
copy(data[5:], arg)
- return c.WritePacket(data)
+ err := c.WritePacket(data)
+
+ utils.ByteSlicePut(data)
+
+ return err
}
func (c *Conn) writeCommandStr(command byte, arg string) error {
- c.ResetSequence()
-
- length := len(arg) + 1
-
- data := make([]byte, length+4)
-
- data[4] = command
-
- copy(data[5:], arg)
-
- return c.WritePacket(data)
+ return c.writeCommandBuf(command, utils.StringToByteSlice(arg))
}
func (c *Conn) writeCommandUint32(command byte, arg uint32) error {
diff --git a/vendor/github.com/siddontang/go-mysql/client/resp.go b/vendor/github.com/go-mysql-org/go-mysql/client/resp.go
similarity index 62%
rename from vendor/github.com/siddontang/go-mysql/client/resp.go
rename to vendor/github.com/go-mysql-org/go-mysql/client/resp.go
index 71aa1bc..cc94414 100644
--- a/vendor/github.com/siddontang/go-mysql/client/resp.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/client/resp.go
@@ -1,16 +1,15 @@
package client
-import "C"
import (
- "encoding/binary"
-
"bytes"
"crypto/rsa"
"crypto/x509"
+ "encoding/binary"
"encoding/pem"
- "github.com/juju/errors"
- . "github.com/siddontang/go-mysql/mysql"
+ . "github.com/go-mysql-org/go-mysql/mysql"
+ "github.com/go-mysql-org/go-mysql/utils"
+ "github.com/pingcap/errors"
"github.com/siddontang/go/hack"
)
@@ -40,7 +39,7 @@ func (c *Conn) handleOKPacket(data []byte) (*Result, error) {
var n int
var pos = 1
- r := new(Result)
+ r := &Result{Resultset: &Resultset{}}
r.AffectedRows, _, n = LengthEncodedInt(data[pos:])
pos += n
@@ -102,6 +101,10 @@ func (c *Conn) handleAuthResult() error {
}
c.authPluginName = switchToPlugin
auth, addNull, err := c.genAuthResponse(data)
+ if err != nil {
+ return err
+ }
+
if err = c.WriteAuthSwitchPacket(auth, addNull); err != nil {
return err
}
@@ -139,7 +142,7 @@ func (c *Conn) handleAuthResult() error {
}
}
} else {
- errors.Errorf("invalid packet")
+ return errors.Errorf("invalid packet %x", data[0])
}
} else if c.authPluginName == AUTH_SHA256_PASSWORD {
if len(data) == 0 {
@@ -170,7 +173,6 @@ func (c *Conn) readAuthResult() ([]byte, string, error) {
// see: https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
// packet indicator
switch data[0] {
-
case OK_HEADER:
_, err := c.handleOKPacket(data)
return nil, "", err
@@ -213,31 +215,61 @@ func (c *Conn) readOK() (*Result, error) {
}
func (c *Conn) readResult(binary bool) (*Result, error) {
- data, err := c.ReadPacket()
+ firstPkgBuf, err := c.ReadPacketReuseMem(utils.ByteSliceGet(16)[:0])
+ defer utils.ByteSlicePut(firstPkgBuf)
+
if err != nil {
return nil, errors.Trace(err)
}
- if data[0] == OK_HEADER {
- return c.handleOKPacket(data)
- } else if data[0] == ERR_HEADER {
- return nil, c.handleErrorPacket(data)
- } else if data[0] == LocalInFile_HEADER {
+ if firstPkgBuf[0] == OK_HEADER {
+ return c.handleOKPacket(firstPkgBuf)
+ } else if firstPkgBuf[0] == ERR_HEADER {
+ return nil, c.handleErrorPacket(append([]byte{}, firstPkgBuf...))
+ } else if firstPkgBuf[0] == LocalInFile_HEADER {
return nil, ErrMalformPacket
}
- return c.readResultset(data, binary)
+ return c.readResultset(firstPkgBuf, binary)
+}
+
+func (c *Conn) readResultStreaming(binary bool, result *Result, perRowCb SelectPerRowCallback) error {
+ firstPkgBuf, err := c.ReadPacketReuseMem(utils.ByteSliceGet(16)[:0])
+ defer utils.ByteSlicePut(firstPkgBuf)
+
+ if err != nil {
+ return errors.Trace(err)
+ }
+
+ if firstPkgBuf[0] == OK_HEADER {
+ // https://dev.mysql.com/doc/internals/en/com-query-response.html
+ // 14.6.4.1 COM_QUERY Response
+ // If the number of columns in the resultset is 0, this is a OK_Packet.
+
+ okResult, err := c.handleOKPacket(firstPkgBuf)
+ if err != nil {
+ return errors.Trace(err)
+ }
+
+ result.Status = okResult.Status
+ result.AffectedRows = okResult.AffectedRows
+ result.InsertId = okResult.InsertId
+ if result.Resultset == nil {
+ result.Resultset = NewResultset(0)
+ } else {
+ result.Reset(0)
+ }
+ return nil
+ } else if firstPkgBuf[0] == ERR_HEADER {
+ return c.handleErrorPacket(append([]byte{}, firstPkgBuf...))
+ } else if firstPkgBuf[0] == LocalInFile_HEADER {
+ return ErrMalformPacket
+ }
+
+ return c.readResultsetStreaming(firstPkgBuf, binary, result, perRowCb)
}
func (c *Conn) readResultset(data []byte, binary bool) (*Result, error) {
- result := &Result{
- Status: 0,
- InsertId: 0,
- AffectedRows: 0,
-
- Resultset: &Resultset{},
- }
-
// column count
count, _, n := LengthEncodedInt(data)
@@ -245,8 +277,9 @@ func (c *Conn) readResultset(data []byte, binary bool) (*Result, error) {
return nil, ErrMalformPacket
}
- result.Fields = make([]*Field, count)
- result.FieldNames = make(map[string]int, count)
+ result := &Result{
+ Resultset: NewResultset(int(count)),
+ }
if err := c.readResultColumns(result); err != nil {
return nil, errors.Trace(err)
@@ -259,15 +292,42 @@ func (c *Conn) readResultset(data []byte, binary bool) (*Result, error) {
return result, nil
}
+func (c *Conn) readResultsetStreaming(data []byte, binary bool, result *Result, perRowCb SelectPerRowCallback) error {
+ columnCount, _, n := LengthEncodedInt(data)
+
+ if n-len(data) != 0 {
+ return ErrMalformPacket
+ }
+
+ if result.Resultset == nil {
+ result.Resultset = NewResultset(int(columnCount))
+ } else {
+ // Reuse memory if can
+ result.Reset(int(columnCount))
+ }
+
+ if err := c.readResultColumns(result); err != nil {
+ return errors.Trace(err)
+ }
+
+ if err := c.readResultRowsStreaming(result, binary, perRowCb); err != nil {
+ return errors.Trace(err)
+ }
+
+ return nil
+}
+
func (c *Conn) readResultColumns(result *Result) (err error) {
var i int = 0
var data []byte
for {
- data, err = c.ReadPacket()
+ rawPkgLen := len(result.RawPkg)
+ result.RawPkg, err = c.ReadPacketReuseMem(result.RawPkg)
if err != nil {
return
}
+ data = result.RawPkg[rawPkgLen:]
// EOF Packet
if c.isEOFPacket(data) {
@@ -285,7 +345,10 @@ func (c *Conn) readResultColumns(result *Result) (err error) {
return
}
- result.Fields[i], err = FieldData(data).Parse()
+ if result.Fields[i] == nil {
+ result.Fields[i] = &Field{}
+ }
+ err = result.Fields[i].Parse(data)
if err != nil {
return
}
@@ -300,11 +363,12 @@ func (c *Conn) readResultRows(result *Result, isBinary bool) (err error) {
var data []byte
for {
- data, err = c.ReadPacket()
-
+ rawPkgLen := len(result.RawPkg)
+ result.RawPkg, err = c.ReadPacketReuseMem(result.RawPkg)
if err != nil {
return
}
+ data = result.RawPkg[rawPkgLen:]
// EOF Packet
if c.isEOFPacket(data) {
@@ -318,13 +382,21 @@ func (c *Conn) readResultRows(result *Result, isBinary bool) (err error) {
break
}
+ if data[0] == ERR_HEADER {
+ return c.handleErrorPacket(data)
+ }
+
result.RowDatas = append(result.RowDatas, data)
}
- result.Values = make([][]interface{}, len(result.RowDatas))
+ if cap(result.Values) < len(result.RowDatas) {
+ result.Values = make([][]FieldValue, len(result.RowDatas))
+ } else {
+ result.Values = result.Values[:len(result.RowDatas)]
+ }
for i := range result.Values {
- result.Values[i], err = result.RowDatas[i].Parse(result.Fields, isBinary)
+ result.Values[i], err = result.RowDatas[i].Parse(result.Fields, isBinary, result.Values[i])
if err != nil {
return errors.Trace(err)
@@ -333,3 +405,47 @@ func (c *Conn) readResultRows(result *Result, isBinary bool) (err error) {
return nil
}
+
+func (c *Conn) readResultRowsStreaming(result *Result, isBinary bool, perRowCb SelectPerRowCallback) (err error) {
+ var (
+ data []byte
+ row []FieldValue
+ )
+
+ for {
+ data, err = c.ReadPacketReuseMem(data[:0])
+ if err != nil {
+ return
+ }
+
+ // EOF Packet
+ if c.isEOFPacket(data) {
+ if c.capability&CLIENT_PROTOCOL_41 > 0 {
+ // result.Warnings = binary.LittleEndian.Uint16(data[1:])
+ // todo add strict_mode, warning will be treat as error
+ result.Status = binary.LittleEndian.Uint16(data[3:])
+ c.status = result.Status
+ }
+
+ break
+ }
+
+ if data[0] == ERR_HEADER {
+ return c.handleErrorPacket(data)
+ }
+
+ // Parse this row
+ row, err = RowData(data).Parse(result.Fields, isBinary, row)
+ if err != nil {
+ return errors.Trace(err)
+ }
+
+ // Send the row to "userland" code
+ err = perRowCb(row)
+ if err != nil {
+ return errors.Trace(err)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/siddontang/go-mysql/client/stmt.go b/vendor/github.com/go-mysql-org/go-mysql/client/stmt.go
similarity index 93%
rename from vendor/github.com/siddontang/go-mysql/client/stmt.go
rename to vendor/github.com/go-mysql-org/go-mysql/client/stmt.go
index fd2dfd7..239da78 100644
--- a/vendor/github.com/siddontang/go-mysql/client/stmt.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/client/stmt.go
@@ -5,14 +5,13 @@ import (
"fmt"
"math"
- "github.com/juju/errors"
- . "github.com/siddontang/go-mysql/mysql"
+ . "github.com/go-mysql-org/go-mysql/mysql"
+ "github.com/pingcap/errors"
)
type Stmt struct {
- conn *Conn
- id uint32
- query string
+ conn *Conn
+ id uint32
params int
columns int
@@ -55,7 +54,7 @@ func (s *Stmt) write(args ...interface{}) error {
//NULL-bitmap, length: (num-params+7)
nullBitmap := make([]byte, (paramsNum+7)>>3)
- var length int = int(1 + 4 + 1 + 4 + ((paramsNum + 7) >> 3) + 1 + (paramsNum << 1))
+ length := 1 + 4 + 1 + 4 + ((paramsNum + 7) >> 3) + 1 + (paramsNum << 1)
var newParamBoundFlag byte = 0
@@ -91,11 +90,11 @@ func (s *Stmt) write(args ...interface{}) error {
case uint16:
paramTypes[i<<1] = MYSQL_TYPE_SHORT
paramTypes[(i<<1)+1] = 0x80
- paramValues[i] = Uint16ToBytes(uint16(v))
+ paramValues[i] = Uint16ToBytes(v)
case uint32:
paramTypes[i<<1] = MYSQL_TYPE_LONG
paramTypes[(i<<1)+1] = 0x80
- paramValues[i] = Uint32ToBytes(uint32(v))
+ paramValues[i] = Uint32ToBytes(v)
case uint:
paramTypes[i<<1] = MYSQL_TYPE_LONGLONG
paramTypes[(i<<1)+1] = 0x80
@@ -103,14 +102,13 @@ func (s *Stmt) write(args ...interface{}) error {
case uint64:
paramTypes[i<<1] = MYSQL_TYPE_LONGLONG
paramTypes[(i<<1)+1] = 0x80
- paramValues[i] = Uint64ToBytes(uint64(v))
+ paramValues[i] = Uint64ToBytes(v)
case bool:
paramTypes[i<<1] = MYSQL_TYPE_TINY
if v {
paramValues[i] = []byte{1}
} else {
paramValues[i] = []byte{0}
-
}
case float32:
paramTypes[i<<1] = MYSQL_TYPE_FLOAT
diff --git a/vendor/github.com/siddontang/go-mysql/client/tls.go b/vendor/github.com/go-mysql-org/go-mysql/client/tls.go
similarity index 91%
rename from vendor/github.com/siddontang/go-mysql/client/tls.go
rename to vendor/github.com/go-mysql-org/go-mysql/client/tls.go
index 3772a50..f652399 100644
--- a/vendor/github.com/siddontang/go-mysql/client/tls.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/client/tls.go
@@ -5,7 +5,7 @@ import (
"crypto/x509"
)
-// generate TLS config for client side
+// NewClientTLSConfig: generate TLS config for client side
// if insecureSkipVerify is set to true, serverName will not be validated
func NewClientTLSConfig(caPem, certPem, keyPem []byte, insecureSkipVerify bool, serverName string) *tls.Config {
pool := x509.NewCertPool()
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/const.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/const.go
similarity index 95%
rename from vendor/github.com/siddontang/go-mysql/mysql/const.go
rename to vendor/github.com/go-mysql-org/go-mysql/mysql/const.go
index 256d163..9b78791 100644
--- a/vendor/github.com/siddontang/go-mysql/mysql/const.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/const.go
@@ -18,10 +18,10 @@ const (
)
const (
- AUTH_MYSQL_OLD_PASSWORD = "mysql_old_password"
- AUTH_NATIVE_PASSWORD = "mysql_native_password"
+ AUTH_MYSQL_OLD_PASSWORD = "mysql_old_password"
+ AUTH_NATIVE_PASSWORD = "mysql_native_password"
AUTH_CACHING_SHA2_PASSWORD = "caching_sha2_password"
- AUTH_SHA256_PASSWORD = "sha256_password"
+ AUTH_SHA256_PASSWORD = "sha256_password"
)
const (
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/errcode.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/errcode.go
similarity index 77%
rename from vendor/github.com/siddontang/go-mysql/mysql/errcode.go
rename to vendor/github.com/go-mysql-org/go-mysql/mysql/errcode.go
index 8acff1a..1be5c44 100644
--- a/vendor/github.com/siddontang/go-mysql/mysql/errcode.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/errcode.go
@@ -1,870 +1,870 @@
package mysql
const (
- ER_ERROR_FIRST uint16 = 1000
- ER_HASHCHK = 1000
- ER_NISAMCHK = 1001
- ER_NO = 1002
- ER_YES = 1003
- ER_CANT_CREATE_FILE = 1004
- ER_CANT_CREATE_TABLE = 1005
- ER_CANT_CREATE_DB = 1006
- ER_DB_CREATE_EXISTS = 1007
- ER_DB_DROP_EXISTS = 1008
- ER_DB_DROP_DELETE = 1009
- ER_DB_DROP_RMDIR = 1010
- ER_CANT_DELETE_FILE = 1011
- ER_CANT_FIND_SYSTEM_REC = 1012
- ER_CANT_GET_STAT = 1013
- ER_CANT_GET_WD = 1014
- ER_CANT_LOCK = 1015
- ER_CANT_OPEN_FILE = 1016
- ER_FILE_NOT_FOUND = 1017
- ER_CANT_READ_DIR = 1018
- ER_CANT_SET_WD = 1019
- ER_CHECKREAD = 1020
- ER_DISK_FULL = 1021
- ER_DUP_KEY = 1022
- ER_ERROR_ON_CLOSE = 1023
- ER_ERROR_ON_READ = 1024
- ER_ERROR_ON_RENAME = 1025
- ER_ERROR_ON_WRITE = 1026
- ER_FILE_USED = 1027
- ER_FILSORT_ABORT = 1028
- ER_FORM_NOT_FOUND = 1029
- ER_GET_ERRNO = 1030
- ER_ILLEGAL_HA = 1031
- ER_KEY_NOT_FOUND = 1032
- ER_NOT_FORM_FILE = 1033
- ER_NOT_KEYFILE = 1034
- ER_OLD_KEYFILE = 1035
- ER_OPEN_AS_READONLY = 1036
- ER_OUTOFMEMORY = 1037
- ER_OUT_OF_SORTMEMORY = 1038
- ER_UNEXPECTED_EOF = 1039
- ER_CON_COUNT_ERROR = 1040
- ER_OUT_OF_RESOURCES = 1041
- ER_BAD_HOST_ERROR = 1042
- ER_HANDSHAKE_ERROR = 1043
- ER_DBACCESS_DENIED_ERROR = 1044
- ER_ACCESS_DENIED_ERROR = 1045
- ER_NO_DB_ERROR = 1046
- ER_UNKNOWN_COM_ERROR = 1047
- ER_BAD_NULL_ERROR = 1048
- ER_BAD_DB_ERROR = 1049
- ER_TABLE_EXISTS_ERROR = 1050
- ER_BAD_TABLE_ERROR = 1051
- ER_NON_UNIQ_ERROR = 1052
- ER_SERVER_SHUTDOWN = 1053
- ER_BAD_FIELD_ERROR = 1054
- ER_WRONG_FIELD_WITH_GROUP = 1055
- ER_WRONG_GROUP_FIELD = 1056
- ER_WRONG_SUM_SELECT = 1057
- ER_WRONG_VALUE_COUNT = 1058
- ER_TOO_LONG_IDENT = 1059
- ER_DUP_FIELDNAME = 1060
- ER_DUP_KEYNAME = 1061
- ER_DUP_ENTRY = 1062
- ER_WRONG_FIELD_SPEC = 1063
- ER_PARSE_ERROR = 1064
- ER_EMPTY_QUERY = 1065
- ER_NONUNIQ_TABLE = 1066
- ER_INVALID_DEFAULT = 1067
- ER_MULTIPLE_PRI_KEY = 1068
- ER_TOO_MANY_KEYS = 1069
- ER_TOO_MANY_KEY_PARTS = 1070
- ER_TOO_LONG_KEY = 1071
- ER_KEY_COLUMN_DOES_NOT_EXITS = 1072
- ER_BLOB_USED_AS_KEY = 1073
- ER_TOO_BIG_FIELDLENGTH = 1074
- ER_WRONG_AUTO_KEY = 1075
- ER_READY = 1076
- ER_NORMAL_SHUTDOWN = 1077
- ER_GOT_SIGNAL = 1078
- ER_SHUTDOWN_COMPLETE = 1079
- ER_FORCING_CLOSE = 1080
- ER_IPSOCK_ERROR = 1081
- ER_NO_SUCH_INDEX = 1082
- ER_WRONG_FIELD_TERMINATORS = 1083
- ER_BLOBS_AND_NO_TERMINATED = 1084
- ER_TEXTFILE_NOT_READABLE = 1085
- ER_FILE_EXISTS_ERROR = 1086
- ER_LOAD_INFO = 1087
- ER_ALTER_INFO = 1088
- ER_WRONG_SUB_KEY = 1089
- ER_CANT_REMOVE_ALL_FIELDS = 1090
- ER_CANT_DROP_FIELD_OR_KEY = 1091
- ER_INSERT_INFO = 1092
- ER_UPDATE_TABLE_USED = 1093
- ER_NO_SUCH_THREAD = 1094
- ER_KILL_DENIED_ERROR = 1095
- ER_NO_TABLES_USED = 1096
- ER_TOO_BIG_SET = 1097
- ER_NO_UNIQUE_LOGFILE = 1098
- ER_TABLE_NOT_LOCKED_FOR_WRITE = 1099
- ER_TABLE_NOT_LOCKED = 1100
- ER_BLOB_CANT_HAVE_DEFAULT = 1101
- ER_WRONG_DB_NAME = 1102
- ER_WRONG_TABLE_NAME = 1103
- ER_TOO_BIG_SELECT = 1104
- ER_UNKNOWN_ERROR = 1105
- ER_UNKNOWN_PROCEDURE = 1106
- ER_WRONG_PARAMCOUNT_TO_PROCEDURE = 1107
- ER_WRONG_PARAMETERS_TO_PROCEDURE = 1108
- ER_UNKNOWN_TABLE = 1109
- ER_FIELD_SPECIFIED_TWICE = 1110
- ER_INVALID_GROUP_FUNC_USE = 1111
- ER_UNSUPPORTED_EXTENSION = 1112
- ER_TABLE_MUST_HAVE_COLUMNS = 1113
- ER_RECORD_FILE_FULL = 1114
- ER_UNKNOWN_CHARACTER_SET = 1115
- ER_TOO_MANY_TABLES = 1116
- ER_TOO_MANY_FIELDS = 1117
- ER_TOO_BIG_ROWSIZE = 1118
- ER_STACK_OVERRUN = 1119
- ER_WRONG_OUTER_JOIN = 1120
- ER_NULL_COLUMN_IN_INDEX = 1121
- ER_CANT_FIND_UDF = 1122
- ER_CANT_INITIALIZE_UDF = 1123
- ER_UDF_NO_PATHS = 1124
- ER_UDF_EXISTS = 1125
- ER_CANT_OPEN_LIBRARY = 1126
- ER_CANT_FIND_DL_ENTRY = 1127
- ER_FUNCTION_NOT_DEFINED = 1128
- ER_HOST_IS_BLOCKED = 1129
- ER_HOST_NOT_PRIVILEGED = 1130
- ER_PASSWORD_ANONYMOUS_USER = 1131
- ER_PASSWORD_NOT_ALLOWED = 1132
- ER_PASSWORD_NO_MATCH = 1133
- ER_UPDATE_INFO = 1134
- ER_CANT_CREATE_THREAD = 1135
- ER_WRONG_VALUE_COUNT_ON_ROW = 1136
- ER_CANT_REOPEN_TABLE = 1137
- ER_INVALID_USE_OF_NULL = 1138
- ER_REGEXP_ERROR = 1139
- ER_MIX_OF_GROUP_FUNC_AND_FIELDS = 1140
- ER_NONEXISTING_GRANT = 1141
- ER_TABLEACCESS_DENIED_ERROR = 1142
- ER_COLUMNACCESS_DENIED_ERROR = 1143
- ER_ILLEGAL_GRANT_FOR_TABLE = 1144
- ER_GRANT_WRONG_HOST_OR_USER = 1145
- ER_NO_SUCH_TABLE = 1146
- ER_NONEXISTING_TABLE_GRANT = 1147
- ER_NOT_ALLOWED_COMMAND = 1148
- ER_SYNTAX_ERROR = 1149
- ER_DELAYED_CANT_CHANGE_LOCK = 1150
- ER_TOO_MANY_DELAYED_THREADS = 1151
- ER_ABORTING_CONNECTION = 1152
- ER_NET_PACKET_TOO_LARGE = 1153
- ER_NET_READ_ERROR_FROM_PIPE = 1154
- ER_NET_FCNTL_ERROR = 1155
- ER_NET_PACKETS_OUT_OF_ORDER = 1156
- ER_NET_UNCOMPRESS_ERROR = 1157
- ER_NET_READ_ERROR = 1158
- ER_NET_READ_INTERRUPTED = 1159
- ER_NET_ERROR_ON_WRITE = 1160
- ER_NET_WRITE_INTERRUPTED = 1161
- ER_TOO_LONG_STRING = 1162
- ER_TABLE_CANT_HANDLE_BLOB = 1163
- ER_TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164
- ER_DELAYED_INSERT_TABLE_LOCKED = 1165
- ER_WRONG_COLUMN_NAME = 1166
- ER_WRONG_KEY_COLUMN = 1167
- ER_WRONG_MRG_TABLE = 1168
- ER_DUP_UNIQUE = 1169
- ER_BLOB_KEY_WITHOUT_LENGTH = 1170
- ER_PRIMARY_CANT_HAVE_NULL = 1171
- ER_TOO_MANY_ROWS = 1172
- ER_REQUIRES_PRIMARY_KEY = 1173
- ER_NO_RAID_COMPILED = 1174
- ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175
- ER_KEY_DOES_NOT_EXITS = 1176
- ER_CHECK_NO_SUCH_TABLE = 1177
- ER_CHECK_NOT_IMPLEMENTED = 1178
- ER_CANT_DO_THIS_DURING_AN_TRANSACTION = 1179
- ER_ERROR_DURING_COMMIT = 1180
- ER_ERROR_DURING_ROLLBACK = 1181
- ER_ERROR_DURING_FLUSH_LOGS = 1182
- ER_ERROR_DURING_CHECKPOINT = 1183
- ER_NEW_ABORTING_CONNECTION = 1184
- ER_DUMP_NOT_IMPLEMENTED = 1185
- ER_FLUSH_MASTER_BINLOG_CLOSED = 1186
- ER_INDEX_REBUILD = 1187
- ER_MASTER = 1188
- ER_MASTER_NET_READ = 1189
- ER_MASTER_NET_WRITE = 1190
- ER_FT_MATCHING_KEY_NOT_FOUND = 1191
- ER_LOCK_OR_ACTIVE_TRANSACTION = 1192
- ER_UNKNOWN_SYSTEM_VARIABLE = 1193
- ER_CRASHED_ON_USAGE = 1194
- ER_CRASHED_ON_REPAIR = 1195
- ER_WARNING_NOT_COMPLETE_ROLLBACK = 1196
- ER_TRANS_CACHE_FULL = 1197
- ER_SLAVE_MUST_STOP = 1198
- ER_SLAVE_NOT_RUNNING = 1199
- ER_BAD_SLAVE = 1200
- ER_MASTER_INFO = 1201
- ER_SLAVE_THREAD = 1202
- ER_TOO_MANY_USER_CONNECTIONS = 1203
- ER_SET_CONSTANTS_ONLY = 1204
- ER_LOCK_WAIT_TIMEOUT = 1205
- ER_LOCK_TABLE_FULL = 1206
- ER_READ_ONLY_TRANSACTION = 1207
- ER_DROP_DB_WITH_READ_LOCK = 1208
- ER_CREATE_DB_WITH_READ_LOCK = 1209
- ER_WRONG_ARGUMENTS = 1210
- ER_NO_PERMISSION_TO_CREATE_USER = 1211
- ER_UNION_TABLES_IN_DIFFERENT_DIR = 1212
- ER_LOCK_DEADLOCK = 1213
- ER_TABLE_CANT_HANDLE_FT = 1214
- ER_CANNOT_ADD_FOREIGN = 1215
- ER_NO_REFERENCED_ROW = 1216
- ER_ROW_IS_REFERENCED = 1217
- ER_CONNECT_TO_MASTER = 1218
- ER_QUERY_ON_MASTER = 1219
- ER_ERROR_WHEN_EXECUTING_COMMAND = 1220
- ER_WRONG_USAGE = 1221
- ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222
- ER_CANT_UPDATE_WITH_READLOCK = 1223
- ER_MIXING_NOT_ALLOWED = 1224
- ER_DUP_ARGUMENT = 1225
- ER_USER_LIMIT_REACHED = 1226
- ER_SPECIFIC_ACCESS_DENIED_ERROR = 1227
- ER_LOCAL_VARIABLE = 1228
- ER_GLOBAL_VARIABLE = 1229
- ER_NO_DEFAULT = 1230
- ER_WRONG_VALUE_FOR_VAR = 1231
- ER_WRONG_TYPE_FOR_VAR = 1232
- ER_VAR_CANT_BE_READ = 1233
- ER_CANT_USE_OPTION_HERE = 1234
- ER_NOT_SUPPORTED_YET = 1235
- ER_MASTER_FATAL_ERROR_READING_BINLOG = 1236
- ER_SLAVE_IGNORED_TABLE = 1237
- ER_INCORRECT_GLOBAL_LOCAL_VAR = 1238
- ER_WRONG_FK_DEF = 1239
- ER_KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240
- ER_OPERAND_COLUMNS = 1241
- ER_SUBQUERY_NO_1_ROW = 1242
- ER_UNKNOWN_STMT_HANDLER = 1243
- ER_CORRUPT_HELP_DB = 1244
- ER_CYCLIC_REFERENCE = 1245
- ER_AUTO_CONVERT = 1246
- ER_ILLEGAL_REFERENCE = 1247
- ER_DERIVED_MUST_HAVE_ALIAS = 1248
- ER_SELECT_REDUCED = 1249
- ER_TABLENAME_NOT_ALLOWED_HERE = 1250
- ER_NOT_SUPPORTED_AUTH_MODE = 1251
- ER_SPATIAL_CANT_HAVE_NULL = 1252
- ER_COLLATION_CHARSET_MISMATCH = 1253
- ER_SLAVE_WAS_RUNNING = 1254
- ER_SLAVE_WAS_NOT_RUNNING = 1255
- ER_TOO_BIG_FOR_UNCOMPRESS = 1256
- ER_ZLIB_Z_MEM_ERROR = 1257
- ER_ZLIB_Z_BUF_ERROR = 1258
- ER_ZLIB_Z_DATA_ERROR = 1259
- ER_CUT_VALUE_GROUP_CONCAT = 1260
- ER_WARN_TOO_FEW_RECORDS = 1261
- ER_WARN_TOO_MANY_RECORDS = 1262
- ER_WARN_NULL_TO_NOTNULL = 1263
- ER_WARN_DATA_OUT_OF_RANGE = 1264
- WARN_DATA_TRUNCATED = 1265
- ER_WARN_USING_OTHER_HANDLER = 1266
- ER_CANT_AGGREGATE_2COLLATIONS = 1267
- ER_DROP_USER = 1268
- ER_REVOKE_GRANTS = 1269
- ER_CANT_AGGREGATE_3COLLATIONS = 1270
- ER_CANT_AGGREGATE_NCOLLATIONS = 1271
- ER_VARIABLE_IS_NOT_STRUCT = 1272
- ER_UNKNOWN_COLLATION = 1273
- ER_SLAVE_IGNORED_SSL_PARAMS = 1274
- ER_SERVER_IS_IN_SECURE_AUTH_MODE = 1275
- ER_WARN_FIELD_RESOLVED = 1276
- ER_BAD_SLAVE_UNTIL_COND = 1277
- ER_MISSING_SKIP_SLAVE = 1278
- ER_UNTIL_COND_IGNORED = 1279
- ER_WRONG_NAME_FOR_INDEX = 1280
- ER_WRONG_NAME_FOR_CATALOG = 1281
- ER_WARN_QC_RESIZE = 1282
- ER_BAD_FT_COLUMN = 1283
- ER_UNKNOWN_KEY_CACHE = 1284
- ER_WARN_HOSTNAME_WONT_WORK = 1285
- ER_UNKNOWN_STORAGE_ENGINE = 1286
- ER_WARN_DEPRECATED_SYNTAX = 1287
- ER_NON_UPDATABLE_TABLE = 1288
- ER_FEATURE_DISABLED = 1289
- ER_OPTION_PREVENTS_STATEMENT = 1290
- ER_DUPLICATED_VALUE_IN_TYPE = 1291
- ER_TRUNCATED_WRONG_VALUE = 1292
- ER_TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293
- ER_INVALID_ON_UPDATE = 1294
- ER_UNSUPPORTED_PS = 1295
- ER_GET_ERRMSG = 1296
- ER_GET_TEMPORARY_ERRMSG = 1297
- ER_UNKNOWN_TIME_ZONE = 1298
- ER_WARN_INVALID_TIMESTAMP = 1299
- ER_INVALID_CHARACTER_STRING = 1300
- ER_WARN_ALLOWED_PACKET_OVERFLOWED = 1301
- ER_CONFLICTING_DECLARATIONS = 1302
- ER_SP_NO_RECURSIVE_CREATE = 1303
- ER_SP_ALREADY_EXISTS = 1304
- ER_SP_DOES_NOT_EXIST = 1305
- ER_SP_DROP_FAILED = 1306
- ER_SP_STORE_FAILED = 1307
- ER_SP_LILABEL_MISMATCH = 1308
- ER_SP_LABEL_REDEFINE = 1309
- ER_SP_LABEL_MISMATCH = 1310
- ER_SP_UNINIT_VAR = 1311
- ER_SP_BADSELECT = 1312
- ER_SP_BADRETURN = 1313
- ER_SP_BADSTATEMENT = 1314
- ER_UPDATE_LOG_DEPRECATED_IGNORED = 1315
- ER_UPDATE_LOG_DEPRECATED_TRANSLATED = 1316
- ER_QUERY_INTERRUPTED = 1317
- ER_SP_WRONG_NO_OF_ARGS = 1318
- ER_SP_COND_MISMATCH = 1319
- ER_SP_NORETURN = 1320
- ER_SP_NORETURNEND = 1321
- ER_SP_BAD_CURSOR_QUERY = 1322
- ER_SP_BAD_CURSOR_SELECT = 1323
- ER_SP_CURSOR_MISMATCH = 1324
- ER_SP_CURSOR_ALREADY_OPEN = 1325
- ER_SP_CURSOR_NOT_OPEN = 1326
- ER_SP_UNDECLARED_VAR = 1327
- ER_SP_WRONG_NO_OF_FETCH_ARGS = 1328
- ER_SP_FETCH_NO_DATA = 1329
- ER_SP_DUP_PARAM = 1330
- ER_SP_DUP_VAR = 1331
- ER_SP_DUP_COND = 1332
- ER_SP_DUP_CURS = 1333
- ER_SP_CANT_ALTER = 1334
- ER_SP_SUBSELECT_NYI = 1335
- ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336
- ER_SP_VARCOND_AFTER_CURSHNDLR = 1337
- ER_SP_CURSOR_AFTER_HANDLER = 1338
- ER_SP_CASE_NOT_FOUND = 1339
- ER_FPARSER_TOO_BIG_FILE = 1340
- ER_FPARSER_BAD_HEADER = 1341
- ER_FPARSER_EOF_IN_COMMENT = 1342
- ER_FPARSER_ERROR_IN_PARAMETER = 1343
- ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344
- ER_VIEW_NO_EXPLAIN = 1345
- ER_FRM_UNKNOWN_TYPE = 1346
- ER_WRONG_OBJECT = 1347
- ER_NONUPDATEABLE_COLUMN = 1348
- ER_VIEW_SELECT_DERIVED = 1349
- ER_VIEW_SELECT_CLAUSE = 1350
- ER_VIEW_SELECT_VARIABLE = 1351
- ER_VIEW_SELECT_TMPTABLE = 1352
- ER_VIEW_WRONG_LIST = 1353
- ER_WARN_VIEW_MERGE = 1354
- ER_WARN_VIEW_WITHOUT_KEY = 1355
- ER_VIEW_INVALID = 1356
- ER_SP_NO_DROP_SP = 1357
- ER_SP_GOTO_IN_HNDLR = 1358
- ER_TRG_ALREADY_EXISTS = 1359
- ER_TRG_DOES_NOT_EXIST = 1360
- ER_TRG_ON_VIEW_OR_TEMP_TABLE = 1361
- ER_TRG_CANT_CHANGE_ROW = 1362
- ER_TRG_NO_SUCH_ROW_IN_TRG = 1363
- ER_NO_DEFAULT_FOR_FIELD = 1364
- ER_DIVISION_BY_ZERO = 1365
- ER_TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366
- ER_ILLEGAL_VALUE_FOR_TYPE = 1367
- ER_VIEW_NONUPD_CHECK = 1368
- ER_VIEW_CHECK_FAILED = 1369
- ER_PROCACCESS_DENIED_ERROR = 1370
- ER_RELAY_LOG_FAIL = 1371
- ER_PASSWD_LENGTH = 1372
- ER_UNKNOWN_TARGET_BINLOG = 1373
- ER_IO_ERR_LOG_INDEX_READ = 1374
- ER_BINLOG_PURGE_PROHIBITED = 1375
- ER_FSEEK_FAIL = 1376
- ER_BINLOG_PURGE_FATAL_ERR = 1377
- ER_LOG_IN_USE = 1378
- ER_LOG_PURGE_UNKNOWN_ERR = 1379
- ER_RELAY_LOG_INIT = 1380
- ER_NO_BINARY_LOGGING = 1381
- ER_RESERVED_SYNTAX = 1382
- ER_WSAS_FAILED = 1383
- ER_DIFF_GROUPS_PROC = 1384
- ER_NO_GROUP_FOR_PROC = 1385
- ER_ORDER_WITH_PROC = 1386
- ER_LOGGING_PROHIBIT_CHANGING_OF = 1387
- ER_NO_FILE_MAPPING = 1388
- ER_WRONG_MAGIC = 1389
- ER_PS_MANY_PARAM = 1390
- ER_KEY_PART_0 = 1391
- ER_VIEW_CHECKSUM = 1392
- ER_VIEW_MULTIUPDATE = 1393
- ER_VIEW_NO_INSERT_FIELD_LIST = 1394
- ER_VIEW_DELETE_MERGE_VIEW = 1395
- ER_CANNOT_USER = 1396
- ER_XAER_NOTA = 1397
- ER_XAER_INVAL = 1398
- ER_XAER_RMFAIL = 1399
- ER_XAER_OUTSIDE = 1400
- ER_XAER_RMERR = 1401
- ER_XA_RBROLLBACK = 1402
- ER_NONEXISTING_PROC_GRANT = 1403
- ER_PROC_AUTO_GRANT_FAIL = 1404
- ER_PROC_AUTO_REVOKE_FAIL = 1405
- ER_DATA_TOO_LONG = 1406
- ER_SP_BAD_SQLSTATE = 1407
- ER_STARTUP = 1408
- ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409
- ER_CANT_CREATE_USER_WITH_GRANT = 1410
- ER_WRONG_VALUE_FOR_TYPE = 1411
- ER_TABLE_DEF_CHANGED = 1412
- ER_SP_DUP_HANDLER = 1413
- ER_SP_NOT_VAR_ARG = 1414
- ER_SP_NO_RETSET = 1415
- ER_CANT_CREATE_GEOMETRY_OBJECT = 1416
- ER_FAILED_ROUTINE_BREAK_BINLOG = 1417
- ER_BINLOG_UNSAFE_ROUTINE = 1418
- ER_BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419
- ER_EXEC_STMT_WITH_OPEN_CURSOR = 1420
- ER_STMT_HAS_NO_OPEN_CURSOR = 1421
- ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422
- ER_NO_DEFAULT_FOR_VIEW_FIELD = 1423
- ER_SP_NO_RECURSION = 1424
- ER_TOO_BIG_SCALE = 1425
- ER_TOO_BIG_PRECISION = 1426
- ER_M_BIGGER_THAN_D = 1427
- ER_WRONG_LOCK_OF_SYSTEM_TABLE = 1428
- ER_CONNECT_TO_FOREIGN_DATA_SOURCE = 1429
- ER_QUERY_ON_FOREIGN_DATA_SOURCE = 1430
- ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431
- ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432
- ER_FOREIGN_DATA_STRING_INVALID = 1433
- ER_CANT_CREATE_FEDERATED_TABLE = 1434
- ER_TRG_IN_WRONG_SCHEMA = 1435
- ER_STACK_OVERRUN_NEED_MORE = 1436
- ER_TOO_LONG_BODY = 1437
- ER_WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438
- ER_TOO_BIG_DISPLAYWIDTH = 1439
- ER_XAER_DUPID = 1440
- ER_DATETIME_FUNCTION_OVERFLOW = 1441
- ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442
- ER_VIEW_PREVENT_UPDATE = 1443
- ER_PS_NO_RECURSION = 1444
- ER_SP_CANT_SET_AUTOCOMMIT = 1445
- ER_MALFORMED_DEFINER = 1446
- ER_VIEW_FRM_NO_USER = 1447
- ER_VIEW_OTHER_USER = 1448
- ER_NO_SUCH_USER = 1449
- ER_FORBID_SCHEMA_CHANGE = 1450
- ER_ROW_IS_REFERENCED_2 = 1451
- ER_NO_REFERENCED_ROW_2 = 1452
- ER_SP_BAD_VAR_SHADOW = 1453
- ER_TRG_NO_DEFINER = 1454
- ER_OLD_FILE_FORMAT = 1455
- ER_SP_RECURSION_LIMIT = 1456
- ER_SP_PROC_TABLE_CORRUPT = 1457
- ER_SP_WRONG_NAME = 1458
- ER_TABLE_NEEDS_UPGRADE = 1459
- ER_SP_NO_AGGREGATE = 1460
- ER_MAX_PREPARED_STMT_COUNT_REACHED = 1461
- ER_VIEW_RECURSIVE = 1462
- ER_NON_GROUPING_FIELD_USED = 1463
- ER_TABLE_CANT_HANDLE_SPKEYS = 1464
- ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465
- ER_REMOVED_SPACES = 1466
- ER_AUTOINC_READ_FAILED = 1467
- ER_USERNAME = 1468
- ER_HOSTNAME = 1469
- ER_WRONG_STRING_LENGTH = 1470
- ER_NON_INSERTABLE_TABLE = 1471
- ER_ADMIN_WRONG_MRG_TABLE = 1472
- ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT = 1473
- ER_NAME_BECOMES_EMPTY = 1474
- ER_AMBIGUOUS_FIELD_TERM = 1475
- ER_FOREIGN_SERVER_EXISTS = 1476
- ER_FOREIGN_SERVER_DOESNT_EXIST = 1477
- ER_ILLEGAL_HA_CREATE_OPTION = 1478
- ER_PARTITION_REQUIRES_VALUES_ERROR = 1479
- ER_PARTITION_WRONG_VALUES_ERROR = 1480
- ER_PARTITION_MAXVALUE_ERROR = 1481
- ER_PARTITION_SUBPARTITION_ERROR = 1482
- ER_PARTITION_SUBPART_MIX_ERROR = 1483
- ER_PARTITION_WRONG_NO_PART_ERROR = 1484
- ER_PARTITION_WRONG_NO_SUBPART_ERROR = 1485
- ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR = 1486
- ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR = 1487
- ER_FIELD_NOT_FOUND_PART_ERROR = 1488
- ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR = 1489
- ER_INCONSISTENT_PARTITION_INFO_ERROR = 1490
- ER_PARTITION_FUNC_NOT_ALLOWED_ERROR = 1491
- ER_PARTITIONS_MUST_BE_DEFINED_ERROR = 1492
- ER_RANGE_NOT_INCREASING_ERROR = 1493
- ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR = 1494
- ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR = 1495
- ER_PARTITION_ENTRY_ERROR = 1496
- ER_MIX_HANDLER_ERROR = 1497
- ER_PARTITION_NOT_DEFINED_ERROR = 1498
- ER_TOO_MANY_PARTITIONS_ERROR = 1499
- ER_SUBPARTITION_ERROR = 1500
- ER_CANT_CREATE_HANDLER_FILE = 1501
- ER_BLOB_FIELD_IN_PART_FUNC_ERROR = 1502
- ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF = 1503
- ER_NO_PARTS_ERROR = 1504
- ER_PARTITION_MGMT_ON_NONPARTITIONED = 1505
- ER_FOREIGN_KEY_ON_PARTITIONED = 1506
- ER_DROP_PARTITION_NON_EXISTENT = 1507
- ER_DROP_LAST_PARTITION = 1508
- ER_COALESCE_ONLY_ON_HASH_PARTITION = 1509
- ER_REORG_HASH_ONLY_ON_SAME_NO = 1510
- ER_REORG_NO_PARAM_ERROR = 1511
- ER_ONLY_ON_RANGE_LIST_PARTITION = 1512
- ER_ADD_PARTITION_SUBPART_ERROR = 1513
- ER_ADD_PARTITION_NO_NEW_PARTITION = 1514
- ER_COALESCE_PARTITION_NO_PARTITION = 1515
- ER_REORG_PARTITION_NOT_EXIST = 1516
- ER_SAME_NAME_PARTITION = 1517
- ER_NO_BINLOG_ERROR = 1518
- ER_CONSECUTIVE_REORG_PARTITIONS = 1519
- ER_REORG_OUTSIDE_RANGE = 1520
- ER_PARTITION_FUNCTION_FAILURE = 1521
- ER_PART_STATE_ERROR = 1522
- ER_LIMITED_PART_RANGE = 1523
- ER_PLUGIN_IS_NOT_LOADED = 1524
- ER_WRONG_VALUE = 1525
- ER_NO_PARTITION_FOR_GIVEN_VALUE = 1526
- ER_FILEGROUP_OPTION_ONLY_ONCE = 1527
- ER_CREATE_FILEGROUP_FAILED = 1528
- ER_DROP_FILEGROUP_FAILED = 1529
- ER_TABLESPACE_AUTO_EXTEND_ERROR = 1530
- ER_WRONG_SIZE_NUMBER = 1531
- ER_SIZE_OVERFLOW_ERROR = 1532
- ER_ALTER_FILEGROUP_FAILED = 1533
- ER_BINLOG_ROW_LOGGING_FAILED = 1534
- ER_BINLOG_ROW_WRONG_TABLE_DEF = 1535
- ER_BINLOG_ROW_RBR_TO_SBR = 1536
- ER_EVENT_ALREADY_EXISTS = 1537
- ER_EVENT_STORE_FAILED = 1538
- ER_EVENT_DOES_NOT_EXIST = 1539
- ER_EVENT_CANT_ALTER = 1540
- ER_EVENT_DROP_FAILED = 1541
- ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG = 1542
- ER_EVENT_ENDS_BEFORE_STARTS = 1543
- ER_EVENT_EXEC_TIME_IN_THE_PAST = 1544
- ER_EVENT_OPEN_TABLE_FAILED = 1545
- ER_EVENT_NEITHER_M_EXPR_NOR_M_AT = 1546
- ER_OBSOLETE_COL_COUNT_DOESNT_MATCH_CORRUPTED = 1547
- ER_OBSOLETE_CANNOT_LOAD_FROM_TABLE = 1548
- ER_EVENT_CANNOT_DELETE = 1549
- ER_EVENT_COMPILE_ERROR = 1550
- ER_EVENT_SAME_NAME = 1551
- ER_EVENT_DATA_TOO_LONG = 1552
- ER_DROP_INDEX_FK = 1553
- ER_WARN_DEPRECATED_SYNTAX_WITH_VER = 1554
- ER_CANT_WRITE_LOCK_LOG_TABLE = 1555
- ER_CANT_LOCK_LOG_TABLE = 1556
- ER_FOREIGN_DUPLICATE_KEY_OLD_UNUSED = 1557
- ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE = 1558
- ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR = 1559
- ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1560
- ER_NDB_CANT_SWITCH_BINLOG_FORMAT = 1561
- ER_PARTITION_NO_TEMPORARY = 1562
- ER_PARTITION_CONST_DOMAIN_ERROR = 1563
- ER_PARTITION_FUNCTION_IS_NOT_ALLOWED = 1564
- ER_DDL_LOG_ERROR = 1565
- ER_NULL_IN_VALUES_LESS_THAN = 1566
- ER_WRONG_PARTITION_NAME = 1567
- ER_CANT_CHANGE_TX_CHARACTERISTICS = 1568
- ER_DUP_ENTRY_AUTOINCREMENT_CASE = 1569
- ER_EVENT_MODIFY_QUEUE_ERROR = 1570
- ER_EVENT_SET_VAR_ERROR = 1571
- ER_PARTITION_MERGE_ERROR = 1572
- ER_CANT_ACTIVATE_LOG = 1573
- ER_RBR_NOT_AVAILABLE = 1574
- ER_BASE64_DECODE_ERROR = 1575
- ER_EVENT_RECURSION_FORBIDDEN = 1576
- ER_EVENTS_DB_ERROR = 1577
- ER_ONLY_INTEGERS_ALLOWED = 1578
- ER_UNSUPORTED_LOG_ENGINE = 1579
- ER_BAD_LOG_STATEMENT = 1580
- ER_CANT_RENAME_LOG_TABLE = 1581
- ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT = 1582
- ER_WRONG_PARAMETERS_TO_NATIVE_FCT = 1583
- ER_WRONG_PARAMETERS_TO_STORED_FCT = 1584
- ER_NATIVE_FCT_NAME_COLLISION = 1585
- ER_DUP_ENTRY_WITH_KEY_NAME = 1586
- ER_BINLOG_PURGE_EMFILE = 1587
- ER_EVENT_CANNOT_CREATE_IN_THE_PAST = 1588
- ER_EVENT_CANNOT_ALTER_IN_THE_PAST = 1589
- ER_SLAVE_INCIDENT = 1590
- ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT = 1591
- ER_BINLOG_UNSAFE_STATEMENT = 1592
- ER_SLAVE_FATAL_ERROR = 1593
- ER_SLAVE_RELAY_LOG_READ_FAILURE = 1594
- ER_SLAVE_RELAY_LOG_WRITE_FAILURE = 1595
- ER_SLAVE_CREATE_EVENT_FAILURE = 1596
- ER_SLAVE_MASTER_COM_FAILURE = 1597
- ER_BINLOG_LOGGING_IMPOSSIBLE = 1598
- ER_VIEW_NO_CREATION_CTX = 1599
- ER_VIEW_INVALID_CREATION_CTX = 1600
- ER_SR_INVALID_CREATION_CTX = 1601
- ER_TRG_CORRUPTED_FILE = 1602
- ER_TRG_NO_CREATION_CTX = 1603
- ER_TRG_INVALID_CREATION_CTX = 1604
- ER_EVENT_INVALID_CREATION_CTX = 1605
- ER_TRG_CANT_OPEN_TABLE = 1606
- ER_CANT_CREATE_SROUTINE = 1607
- ER_NEVER_USED = 1608
- ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT = 1609
- ER_SLAVE_CORRUPT_EVENT = 1610
- ER_LOAD_DATA_INVALID_COLUMN = 1611
- ER_LOG_PURGE_NO_FILE = 1612
- ER_XA_RBTIMEOUT = 1613
- ER_XA_RBDEADLOCK = 1614
- ER_NEED_REPREPARE = 1615
- ER_DELAYED_NOT_SUPPORTED = 1616
- WARN_NO_MASTER_INFO = 1617
- WARN_OPTION_IGNORED = 1618
- WARN_PLUGIN_DELETE_BUILTIN = 1619
- WARN_PLUGIN_BUSY = 1620
- ER_VARIABLE_IS_READONLY = 1621
- ER_WARN_ENGINE_TRANSACTION_ROLLBACK = 1622
- ER_SLAVE_HEARTBEAT_FAILURE = 1623
- ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE = 1624
- ER_NDB_REPLICATION_SCHEMA_ERROR = 1625
- ER_CONFLICT_FN_PARSE_ERROR = 1626
- ER_EXCEPTIONS_WRITE_ERROR = 1627
- ER_TOO_LONG_TABLE_COMMENT = 1628
- ER_TOO_LONG_FIELD_COMMENT = 1629
- ER_FUNC_INEXISTENT_NAME_COLLISION = 1630
- ER_DATABASE_NAME = 1631
- ER_TABLE_NAME = 1632
- ER_PARTITION_NAME = 1633
- ER_SUBPARTITION_NAME = 1634
- ER_TEMPORARY_NAME = 1635
- ER_RENAMED_NAME = 1636
- ER_TOO_MANY_CONCURRENT_TRXS = 1637
- WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED = 1638
- ER_DEBUG_SYNC_TIMEOUT = 1639
- ER_DEBUG_SYNC_HIT_LIMIT = 1640
- ER_DUP_SIGNAL_SET = 1641
- ER_SIGNAL_WARN = 1642
- ER_SIGNAL_NOT_FOUND = 1643
- ER_SIGNAL_EXCEPTION = 1644
- ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER = 1645
- ER_SIGNAL_BAD_CONDITION_TYPE = 1646
- WARN_COND_ITEM_TRUNCATED = 1647
- ER_COND_ITEM_TOO_LONG = 1648
- ER_UNKNOWN_LOCALE = 1649
- ER_SLAVE_IGNORE_SERVER_IDS = 1650
- ER_QUERY_CACHE_DISABLED = 1651
- ER_SAME_NAME_PARTITION_FIELD = 1652
- ER_PARTITION_COLUMN_LIST_ERROR = 1653
- ER_WRONG_TYPE_COLUMN_VALUE_ERROR = 1654
- ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR = 1655
- ER_MAXVALUE_IN_VALUES_IN = 1656
- ER_TOO_MANY_VALUES_ERROR = 1657
- ER_ROW_SINGLE_PARTITION_FIELD_ERROR = 1658
- ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD = 1659
- ER_PARTITION_FIELDS_TOO_LONG = 1660
- ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE = 1661
- ER_BINLOG_ROW_MODE_AND_STMT_ENGINE = 1662
- ER_BINLOG_UNSAFE_AND_STMT_ENGINE = 1663
- ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE = 1664
- ER_BINLOG_STMT_MODE_AND_ROW_ENGINE = 1665
- ER_BINLOG_ROW_INJECTION_AND_STMT_MODE = 1666
- ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1667
- ER_BINLOG_UNSAFE_LIMIT = 1668
- ER_BINLOG_UNSAFE_INSERT_DELAYED = 1669
- ER_BINLOG_UNSAFE_SYSTEM_TABLE = 1670
- ER_BINLOG_UNSAFE_AUTOINC_COLUMNS = 1671
- ER_BINLOG_UNSAFE_UDF = 1672
- ER_BINLOG_UNSAFE_SYSTEM_VARIABLE = 1673
- ER_BINLOG_UNSAFE_SYSTEM_FUNCTION = 1674
- ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS = 1675
- ER_MESSAGE_AND_STATEMENT = 1676
- ER_SLAVE_CONVERSION_FAILED = 1677
- ER_SLAVE_CANT_CREATE_CONVERSION = 1678
- ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1679
- ER_PATH_LENGTH = 1680
- ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT = 1681
- ER_WRONG_NATIVE_TABLE_STRUCTURE = 1682
- ER_WRONG_PERFSCHEMA_USAGE = 1683
- ER_WARN_I_S_SKIPPED_TABLE = 1684
- ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1685
- ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1686
- ER_SPATIAL_MUST_HAVE_GEOM_COL = 1687
- ER_TOO_LONG_INDEX_COMMENT = 1688
- ER_LOCK_ABORTED = 1689
- ER_DATA_OUT_OF_RANGE = 1690
- ER_WRONG_SPVAR_TYPE_IN_LIMIT = 1691
- ER_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1692
- ER_BINLOG_UNSAFE_MIXED_STATEMENT = 1693
- ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1694
- ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1695
- ER_FAILED_READ_FROM_PAR_FILE = 1696
- ER_VALUES_IS_NOT_INT_TYPE_ERROR = 1697
- ER_ACCESS_DENIED_NO_PASSWORD_ERROR = 1698
- ER_SET_PASSWORD_AUTH_PLUGIN = 1699
- ER_GRANT_PLUGIN_USER_EXISTS = 1700
- ER_TRUNCATE_ILLEGAL_FK = 1701
- ER_PLUGIN_IS_PERMANENT = 1702
- ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN = 1703
- ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX = 1704
- ER_STMT_CACHE_FULL = 1705
- ER_MULTI_UPDATE_KEY_CONFLICT = 1706
- ER_TABLE_NEEDS_REBUILD = 1707
- WARN_OPTION_BELOW_LIMIT = 1708
- ER_INDEX_COLUMN_TOO_LONG = 1709
- ER_ERROR_IN_TRIGGER_BODY = 1710
- ER_ERROR_IN_UNKNOWN_TRIGGER_BODY = 1711
- ER_INDEX_CORRUPT = 1712
- ER_UNDO_RECORD_TOO_BIG = 1713
- ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT = 1714
- ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE = 1715
- ER_BINLOG_UNSAFE_REPLACE_SELECT = 1716
- ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT = 1717
- ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT = 1718
- ER_BINLOG_UNSAFE_UPDATE_IGNORE = 1719
- ER_PLUGIN_NO_UNINSTALL = 1720
- ER_PLUGIN_NO_INSTALL = 1721
- ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT = 1722
- ER_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC = 1723
- ER_BINLOG_UNSAFE_INSERT_TWO_KEYS = 1724
- ER_TABLE_IN_FK_CHECK = 1725
- ER_UNSUPPORTED_ENGINE = 1726
- ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST = 1727
- ER_CANNOT_LOAD_FROM_TABLE_V2 = 1728
- ER_MASTER_DELAY_VALUE_OUT_OF_RANGE = 1729
- ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT = 1730
- ER_PARTITION_EXCHANGE_DIFFERENT_OPTION = 1731
- ER_PARTITION_EXCHANGE_PART_TABLE = 1732
- ER_PARTITION_EXCHANGE_TEMP_TABLE = 1733
- ER_PARTITION_INSTEAD_OF_SUBPARTITION = 1734
- ER_UNKNOWN_PARTITION = 1735
- ER_TABLES_DIFFERENT_METADATA = 1736
- ER_ROW_DOES_NOT_MATCH_PARTITION = 1737
- ER_BINLOG_CACHE_SIZE_GREATER_THAN_MAX = 1738
- ER_WARN_INDEX_NOT_APPLICABLE = 1739
- ER_PARTITION_EXCHANGE_FOREIGN_KEY = 1740
- ER_NO_SUCH_KEY_VALUE = 1741
- ER_RPL_INFO_DATA_TOO_LONG = 1742
- ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE = 1743
- ER_BINLOG_READ_EVENT_CHECKSUM_FAILURE = 1744
- ER_BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX = 1745
- ER_CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT = 1746
- ER_PARTITION_CLAUSE_ON_NONPARTITIONED = 1747
- ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET = 1748
- ER_NO_SUCH_PARTITION__UNUSED = 1749
- ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE = 1750
- ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE = 1751
- ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE = 1752
- ER_MTS_FEATURE_IS_NOT_SUPPORTED = 1753
- ER_MTS_UPDATED_DBS_GREATER_MAX = 1754
- ER_MTS_CANT_PARALLEL = 1755
- ER_MTS_INCONSISTENT_DATA = 1756
- ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING = 1757
- ER_DA_INVALID_CONDITION_NUMBER = 1758
- ER_INSECURE_PLAIN_TEXT = 1759
- ER_INSECURE_CHANGE_MASTER = 1760
- ER_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO = 1761
- ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO = 1762
- ER_SQLTHREAD_WITH_SECURE_SLAVE = 1763
- ER_TABLE_HAS_NO_FT = 1764
- ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER = 1765
- ER_VARIABLE_NOT_SETTABLE_IN_TRANSACTION = 1766
- ER_GTID_NEXT_IS_NOT_IN_GTID_NEXT_LIST = 1767
- ER_CANT_CHANGE_GTID_NEXT_IN_TRANSACTION_WHEN_GTID_NEXT_LIST_IS_NULL = 1768
- ER_SET_STATEMENT_CANNOT_INVOKE_FUNCTION = 1769
- ER_GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL = 1770
- ER_SKIPPING_LOGGED_TRANSACTION = 1771
- ER_MALFORMED_GTID_SET_SPECIFICATION = 1772
- ER_MALFORMED_GTID_SET_ENCODING = 1773
- ER_MALFORMED_GTID_SPECIFICATION = 1774
- ER_GNO_EXHAUSTED = 1775
- ER_BAD_SLAVE_AUTO_POSITION = 1776
- ER_AUTO_POSITION_REQUIRES_GTID_MODE_ON = 1777
- ER_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET = 1778
- ER_GTID_MODE_2_OR_3_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON = 1779
- ER_GTID_MODE_REQUIRES_BINLOG = 1780
- ER_CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF = 1781
- ER_CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON = 1782
- ER_CANT_SET_GTID_NEXT_LIST_TO_NON_NULL_WHEN_GTID_MODE_IS_OFF = 1783
- ER_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF = 1784
- ER_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE = 1785
- ER_GTID_UNSAFE_CREATE_SELECT = 1786
- ER_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION = 1787
- ER_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME = 1788
- ER_MASTER_HAS_PURGED_REQUIRED_GTIDS = 1789
- ER_CANT_SET_GTID_NEXT_WHEN_OWNING_GTID = 1790
- ER_UNKNOWN_EXPLAIN_FORMAT = 1791
- ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION = 1792
- ER_TOO_LONG_TABLE_PARTITION_COMMENT = 1793
- ER_SLAVE_CONFIGURATION = 1794
- ER_INNODB_FT_LIMIT = 1795
- ER_INNODB_NO_FT_TEMP_TABLE = 1796
- ER_INNODB_FT_WRONG_DOCID_COLUMN = 1797
- ER_INNODB_FT_WRONG_DOCID_INDEX = 1798
- ER_INNODB_ONLINE_LOG_TOO_BIG = 1799
- ER_UNKNOWN_ALTER_ALGORITHM = 1800
- ER_UNKNOWN_ALTER_LOCK = 1801
- ER_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS = 1802
- ER_MTS_RECOVERY_FAILURE = 1803
- ER_MTS_RESET_WORKERS = 1804
- ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2 = 1805
- ER_SLAVE_SILENT_RETRY_TRANSACTION = 1806
- ER_DISCARD_FK_CHECKS_RUNNING = 1807
- ER_TABLE_SCHEMA_MISMATCH = 1808
- ER_TABLE_IN_SYSTEM_TABLESPACE = 1809
- ER_IO_READ_ERROR = 1810
- ER_IO_WRITE_ERROR = 1811
- ER_TABLESPACE_MISSING = 1812
- ER_TABLESPACE_EXISTS = 1813
- ER_TABLESPACE_DISCARDED = 1814
- ER_INTERNAL_ERROR = 1815
- ER_INNODB_IMPORT_ERROR = 1816
- ER_INNODB_INDEX_CORRUPT = 1817
- ER_INVALID_YEAR_COLUMN_LENGTH = 1818
- ER_NOT_VALID_PASSWORD = 1819
- ER_MUST_CHANGE_PASSWORD = 1820
- ER_FK_NO_INDEX_CHILD = 1821
- ER_FK_NO_INDEX_PARENT = 1822
- ER_FK_FAIL_ADD_SYSTEM = 1823
- ER_FK_CANNOT_OPEN_PARENT = 1824
- ER_FK_INCORRECT_OPTION = 1825
- ER_FK_DUP_NAME = 1826
- ER_PASSWORD_FORMAT = 1827
- ER_FK_COLUMN_CANNOT_DROP = 1828
- ER_FK_COLUMN_CANNOT_DROP_CHILD = 1829
- ER_FK_COLUMN_NOT_NULL = 1830
- ER_DUP_INDEX = 1831
- ER_FK_COLUMN_CANNOT_CHANGE = 1832
- ER_FK_COLUMN_CANNOT_CHANGE_CHILD = 1833
- ER_FK_CANNOT_DELETE_PARENT = 1834
- ER_MALFORMED_PACKET = 1835
- ER_READ_ONLY_MODE = 1836
- ER_GTID_NEXT_TYPE_UNDEFINED_GROUP = 1837
- ER_VARIABLE_NOT_SETTABLE_IN_SP = 1838
- ER_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF = 1839
- ER_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY = 1840
- ER_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY = 1841
- ER_GTID_PURGED_WAS_CHANGED = 1842
- ER_GTID_EXECUTED_WAS_CHANGED = 1843
- ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES = 1844
- ER_ALTER_OPERATION_NOT_SUPPORTED = 1845
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON = 1846
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY = 1847
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION = 1848
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME = 1849
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE = 1850
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK = 1851
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_IGNORE = 1852
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK = 1853
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC = 1854
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS = 1855
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS = 1856
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS = 1857
- ER_SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE = 1858
- ER_DUP_UNKNOWN_IN_INDEX = 1859
- ER_IDENT_CAUSES_TOO_LONG_PATH = 1860
- ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL = 1861
- ER_MUST_CHANGE_PASSWORD_LOGIN = 1862
- ER_ROW_IN_WRONG_PARTITION = 1863
- ER_ERROR_LAST = 1863
+ ER_ERROR_FIRST = 1000
+ ER_HASHCHK = 1000
+ ER_NISAMCHK = 1001
+ ER_NO = 1002
+ ER_YES = 1003
+ ER_CANT_CREATE_FILE = 1004
+ ER_CANT_CREATE_TABLE = 1005
+ ER_CANT_CREATE_DB = 1006
+ ER_DB_CREATE_EXISTS = 1007
+ ER_DB_DROP_EXISTS = 1008
+ ER_DB_DROP_DELETE = 1009
+ ER_DB_DROP_RMDIR = 1010
+ ER_CANT_DELETE_FILE = 1011
+ ER_CANT_FIND_SYSTEM_REC = 1012
+ ER_CANT_GET_STAT = 1013
+ ER_CANT_GET_WD = 1014
+ ER_CANT_LOCK = 1015
+ ER_CANT_OPEN_FILE = 1016
+ ER_FILE_NOT_FOUND = 1017
+ ER_CANT_READ_DIR = 1018
+ ER_CANT_SET_WD = 1019
+ ER_CHECKREAD = 1020
+ ER_DISK_FULL = 1021
+ ER_DUP_KEY = 1022
+ ER_ERROR_ON_CLOSE = 1023
+ ER_ERROR_ON_READ = 1024
+ ER_ERROR_ON_RENAME = 1025
+ ER_ERROR_ON_WRITE = 1026
+ ER_FILE_USED = 1027
+ ER_FILSORT_ABORT = 1028
+ ER_FORM_NOT_FOUND = 1029
+ ER_GET_ERRNO = 1030
+ ER_ILLEGAL_HA = 1031
+ ER_KEY_NOT_FOUND = 1032
+ ER_NOT_FORM_FILE = 1033
+ ER_NOT_KEYFILE = 1034
+ ER_OLD_KEYFILE = 1035
+ ER_OPEN_AS_READONLY = 1036
+ ER_OUTOFMEMORY = 1037
+ ER_OUT_OF_SORTMEMORY = 1038
+ ER_UNEXPECTED_EOF = 1039
+ ER_CON_COUNT_ERROR = 1040
+ ER_OUT_OF_RESOURCES = 1041
+ ER_BAD_HOST_ERROR = 1042
+ ER_HANDSHAKE_ERROR = 1043
+ ER_DBACCESS_DENIED_ERROR = 1044
+ ER_ACCESS_DENIED_ERROR = 1045
+ ER_NO_DB_ERROR = 1046
+ ER_UNKNOWN_COM_ERROR = 1047
+ ER_BAD_NULL_ERROR = 1048
+ ER_BAD_DB_ERROR = 1049
+ ER_TABLE_EXISTS_ERROR = 1050
+ ER_BAD_TABLE_ERROR = 1051
+ ER_NON_UNIQ_ERROR = 1052
+ ER_SERVER_SHUTDOWN = 1053
+ ER_BAD_FIELD_ERROR = 1054
+ ER_WRONG_FIELD_WITH_GROUP = 1055
+ ER_WRONG_GROUP_FIELD = 1056
+ ER_WRONG_SUM_SELECT = 1057
+ ER_WRONG_VALUE_COUNT = 1058
+ ER_TOO_LONG_IDENT = 1059
+ ER_DUP_FIELDNAME = 1060
+ ER_DUP_KEYNAME = 1061
+ ER_DUP_ENTRY = 1062
+ ER_WRONG_FIELD_SPEC = 1063
+ ER_PARSE_ERROR = 1064
+ ER_EMPTY_QUERY = 1065
+ ER_NONUNIQ_TABLE = 1066
+ ER_INVALID_DEFAULT = 1067
+ ER_MULTIPLE_PRI_KEY = 1068
+ ER_TOO_MANY_KEYS = 1069
+ ER_TOO_MANY_KEY_PARTS = 1070
+ ER_TOO_LONG_KEY = 1071
+ ER_KEY_COLUMN_DOES_NOT_EXITS = 1072
+ ER_BLOB_USED_AS_KEY = 1073
+ ER_TOO_BIG_FIELDLENGTH = 1074
+ ER_WRONG_AUTO_KEY = 1075
+ ER_READY = 1076
+ ER_NORMAL_SHUTDOWN = 1077
+ ER_GOT_SIGNAL = 1078
+ ER_SHUTDOWN_COMPLETE = 1079
+ ER_FORCING_CLOSE = 1080
+ ER_IPSOCK_ERROR = 1081
+ ER_NO_SUCH_INDEX = 1082
+ ER_WRONG_FIELD_TERMINATORS = 1083
+ ER_BLOBS_AND_NO_TERMINATED = 1084
+ ER_TEXTFILE_NOT_READABLE = 1085
+ ER_FILE_EXISTS_ERROR = 1086
+ ER_LOAD_INFO = 1087
+ ER_ALTER_INFO = 1088
+ ER_WRONG_SUB_KEY = 1089
+ ER_CANT_REMOVE_ALL_FIELDS = 1090
+ ER_CANT_DROP_FIELD_OR_KEY = 1091
+ ER_INSERT_INFO = 1092
+ ER_UPDATE_TABLE_USED = 1093
+ ER_NO_SUCH_THREAD = 1094
+ ER_KILL_DENIED_ERROR = 1095
+ ER_NO_TABLES_USED = 1096
+ ER_TOO_BIG_SET = 1097
+ ER_NO_UNIQUE_LOGFILE = 1098
+ ER_TABLE_NOT_LOCKED_FOR_WRITE = 1099
+ ER_TABLE_NOT_LOCKED = 1100
+ ER_BLOB_CANT_HAVE_DEFAULT = 1101
+ ER_WRONG_DB_NAME = 1102
+ ER_WRONG_TABLE_NAME = 1103
+ ER_TOO_BIG_SELECT = 1104
+ ER_UNKNOWN_ERROR = 1105
+ ER_UNKNOWN_PROCEDURE = 1106
+ ER_WRONG_PARAMCOUNT_TO_PROCEDURE = 1107
+ ER_WRONG_PARAMETERS_TO_PROCEDURE = 1108
+ ER_UNKNOWN_TABLE = 1109
+ ER_FIELD_SPECIFIED_TWICE = 1110
+ ER_INVALID_GROUP_FUNC_USE = 1111
+ ER_UNSUPPORTED_EXTENSION = 1112
+ ER_TABLE_MUST_HAVE_COLUMNS = 1113
+ ER_RECORD_FILE_FULL = 1114
+ ER_UNKNOWN_CHARACTER_SET = 1115
+ ER_TOO_MANY_TABLES = 1116
+ ER_TOO_MANY_FIELDS = 1117
+ ER_TOO_BIG_ROWSIZE = 1118
+ ER_STACK_OVERRUN = 1119
+ ER_WRONG_OUTER_JOIN = 1120
+ ER_NULL_COLUMN_IN_INDEX = 1121
+ ER_CANT_FIND_UDF = 1122
+ ER_CANT_INITIALIZE_UDF = 1123
+ ER_UDF_NO_PATHS = 1124
+ ER_UDF_EXISTS = 1125
+ ER_CANT_OPEN_LIBRARY = 1126
+ ER_CANT_FIND_DL_ENTRY = 1127
+ ER_FUNCTION_NOT_DEFINED = 1128
+ ER_HOST_IS_BLOCKED = 1129
+ ER_HOST_NOT_PRIVILEGED = 1130
+ ER_PASSWORD_ANONYMOUS_USER = 1131
+ ER_PASSWORD_NOT_ALLOWED = 1132
+ ER_PASSWORD_NO_MATCH = 1133
+ ER_UPDATE_INFO = 1134
+ ER_CANT_CREATE_THREAD = 1135
+ ER_WRONG_VALUE_COUNT_ON_ROW = 1136
+ ER_CANT_REOPEN_TABLE = 1137
+ ER_INVALID_USE_OF_NULL = 1138
+ ER_REGEXP_ERROR = 1139
+ ER_MIX_OF_GROUP_FUNC_AND_FIELDS = 1140
+ ER_NONEXISTING_GRANT = 1141
+ ER_TABLEACCESS_DENIED_ERROR = 1142
+ ER_COLUMNACCESS_DENIED_ERROR = 1143
+ ER_ILLEGAL_GRANT_FOR_TABLE = 1144
+ ER_GRANT_WRONG_HOST_OR_USER = 1145
+ ER_NO_SUCH_TABLE = 1146
+ ER_NONEXISTING_TABLE_GRANT = 1147
+ ER_NOT_ALLOWED_COMMAND = 1148
+ ER_SYNTAX_ERROR = 1149
+ ER_DELAYED_CANT_CHANGE_LOCK = 1150
+ ER_TOO_MANY_DELAYED_THREADS = 1151
+ ER_ABORTING_CONNECTION = 1152
+ ER_NET_PACKET_TOO_LARGE = 1153
+ ER_NET_READ_ERROR_FROM_PIPE = 1154
+ ER_NET_FCNTL_ERROR = 1155
+ ER_NET_PACKETS_OUT_OF_ORDER = 1156
+ ER_NET_UNCOMPRESS_ERROR = 1157
+ ER_NET_READ_ERROR = 1158
+ ER_NET_READ_INTERRUPTED = 1159
+ ER_NET_ERROR_ON_WRITE = 1160
+ ER_NET_WRITE_INTERRUPTED = 1161
+ ER_TOO_LONG_STRING = 1162
+ ER_TABLE_CANT_HANDLE_BLOB = 1163
+ ER_TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164
+ ER_DELAYED_INSERT_TABLE_LOCKED = 1165
+ ER_WRONG_COLUMN_NAME = 1166
+ ER_WRONG_KEY_COLUMN = 1167
+ ER_WRONG_MRG_TABLE = 1168
+ ER_DUP_UNIQUE = 1169
+ ER_BLOB_KEY_WITHOUT_LENGTH = 1170
+ ER_PRIMARY_CANT_HAVE_NULL = 1171
+ ER_TOO_MANY_ROWS = 1172
+ ER_REQUIRES_PRIMARY_KEY = 1173
+ ER_NO_RAID_COMPILED = 1174
+ ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175
+ ER_KEY_DOES_NOT_EXITS = 1176
+ ER_CHECK_NO_SUCH_TABLE = 1177
+ ER_CHECK_NOT_IMPLEMENTED = 1178
+ ER_CANT_DO_THIS_DURING_AN_TRANSACTION = 1179
+ ER_ERROR_DURING_COMMIT = 1180
+ ER_ERROR_DURING_ROLLBACK = 1181
+ ER_ERROR_DURING_FLUSH_LOGS = 1182
+ ER_ERROR_DURING_CHECKPOINT = 1183
+ ER_NEW_ABORTING_CONNECTION = 1184
+ ER_DUMP_NOT_IMPLEMENTED = 1185
+ ER_FLUSH_MASTER_BINLOG_CLOSED = 1186
+ ER_INDEX_REBUILD = 1187
+ ER_MASTER = 1188
+ ER_MASTER_NET_READ = 1189
+ ER_MASTER_NET_WRITE = 1190
+ ER_FT_MATCHING_KEY_NOT_FOUND = 1191
+ ER_LOCK_OR_ACTIVE_TRANSACTION = 1192
+ ER_UNKNOWN_SYSTEM_VARIABLE = 1193
+ ER_CRASHED_ON_USAGE = 1194
+ ER_CRASHED_ON_REPAIR = 1195
+ ER_WARNING_NOT_COMPLETE_ROLLBACK = 1196
+ ER_TRANS_CACHE_FULL = 1197
+ ER_SLAVE_MUST_STOP = 1198
+ ER_SLAVE_NOT_RUNNING = 1199
+ ER_BAD_SLAVE = 1200
+ ER_MASTER_INFO = 1201
+ ER_SLAVE_THREAD = 1202
+ ER_TOO_MANY_USER_CONNECTIONS = 1203
+ ER_SET_CONSTANTS_ONLY = 1204
+ ER_LOCK_WAIT_TIMEOUT = 1205
+ ER_LOCK_TABLE_FULL = 1206
+ ER_READ_ONLY_TRANSACTION = 1207
+ ER_DROP_DB_WITH_READ_LOCK = 1208
+ ER_CREATE_DB_WITH_READ_LOCK = 1209
+ ER_WRONG_ARGUMENTS = 1210
+ ER_NO_PERMISSION_TO_CREATE_USER = 1211
+ ER_UNION_TABLES_IN_DIFFERENT_DIR = 1212
+ ER_LOCK_DEADLOCK = 1213
+ ER_TABLE_CANT_HANDLE_FT = 1214
+ ER_CANNOT_ADD_FOREIGN = 1215
+ ER_NO_REFERENCED_ROW = 1216
+ ER_ROW_IS_REFERENCED = 1217
+ ER_CONNECT_TO_MASTER = 1218
+ ER_QUERY_ON_MASTER = 1219
+ ER_ERROR_WHEN_EXECUTING_COMMAND = 1220
+ ER_WRONG_USAGE = 1221
+ ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222
+ ER_CANT_UPDATE_WITH_READLOCK = 1223
+ ER_MIXING_NOT_ALLOWED = 1224
+ ER_DUP_ARGUMENT = 1225
+ ER_USER_LIMIT_REACHED = 1226
+ ER_SPECIFIC_ACCESS_DENIED_ERROR = 1227
+ ER_LOCAL_VARIABLE = 1228
+ ER_GLOBAL_VARIABLE = 1229
+ ER_NO_DEFAULT = 1230
+ ER_WRONG_VALUE_FOR_VAR = 1231
+ ER_WRONG_TYPE_FOR_VAR = 1232
+ ER_VAR_CANT_BE_READ = 1233
+ ER_CANT_USE_OPTION_HERE = 1234
+ ER_NOT_SUPPORTED_YET = 1235
+ ER_MASTER_FATAL_ERROR_READING_BINLOG = 1236
+ ER_SLAVE_IGNORED_TABLE = 1237
+ ER_INCORRECT_GLOBAL_LOCAL_VAR = 1238
+ ER_WRONG_FK_DEF = 1239
+ ER_KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240
+ ER_OPERAND_COLUMNS = 1241
+ ER_SUBQUERY_NO_1_ROW = 1242
+ ER_UNKNOWN_STMT_HANDLER = 1243
+ ER_CORRUPT_HELP_DB = 1244
+ ER_CYCLIC_REFERENCE = 1245
+ ER_AUTO_CONVERT = 1246
+ ER_ILLEGAL_REFERENCE = 1247
+ ER_DERIVED_MUST_HAVE_ALIAS = 1248
+ ER_SELECT_REDUCED = 1249
+ ER_TABLENAME_NOT_ALLOWED_HERE = 1250
+ ER_NOT_SUPPORTED_AUTH_MODE = 1251
+ ER_SPATIAL_CANT_HAVE_NULL = 1252
+ ER_COLLATION_CHARSET_MISMATCH = 1253
+ ER_SLAVE_WAS_RUNNING = 1254
+ ER_SLAVE_WAS_NOT_RUNNING = 1255
+ ER_TOO_BIG_FOR_UNCOMPRESS = 1256
+ ER_ZLIB_Z_MEM_ERROR = 1257
+ ER_ZLIB_Z_BUF_ERROR = 1258
+ ER_ZLIB_Z_DATA_ERROR = 1259
+ ER_CUT_VALUE_GROUP_CONCAT = 1260
+ ER_WARN_TOO_FEW_RECORDS = 1261
+ ER_WARN_TOO_MANY_RECORDS = 1262
+ ER_WARN_NULL_TO_NOTNULL = 1263
+ ER_WARN_DATA_OUT_OF_RANGE = 1264
+ WARN_DATA_TRUNCATED = 1265
+ ER_WARN_USING_OTHER_HANDLER = 1266
+ ER_CANT_AGGREGATE_2COLLATIONS = 1267
+ ER_DROP_USER = 1268
+ ER_REVOKE_GRANTS = 1269
+ ER_CANT_AGGREGATE_3COLLATIONS = 1270
+ ER_CANT_AGGREGATE_NCOLLATIONS = 1271
+ ER_VARIABLE_IS_NOT_STRUCT = 1272
+ ER_UNKNOWN_COLLATION = 1273
+ ER_SLAVE_IGNORED_SSL_PARAMS = 1274
+ ER_SERVER_IS_IN_SECURE_AUTH_MODE = 1275
+ ER_WARN_FIELD_RESOLVED = 1276
+ ER_BAD_SLAVE_UNTIL_COND = 1277
+ ER_MISSING_SKIP_SLAVE = 1278
+ ER_UNTIL_COND_IGNORED = 1279
+ ER_WRONG_NAME_FOR_INDEX = 1280
+ ER_WRONG_NAME_FOR_CATALOG = 1281
+ ER_WARN_QC_RESIZE = 1282
+ ER_BAD_FT_COLUMN = 1283
+ ER_UNKNOWN_KEY_CACHE = 1284
+ ER_WARN_HOSTNAME_WONT_WORK = 1285
+ ER_UNKNOWN_STORAGE_ENGINE = 1286
+ ER_WARN_DEPRECATED_SYNTAX = 1287
+ ER_NON_UPDATABLE_TABLE = 1288
+ ER_FEATURE_DISABLED = 1289
+ ER_OPTION_PREVENTS_STATEMENT = 1290
+ ER_DUPLICATED_VALUE_IN_TYPE = 1291
+ ER_TRUNCATED_WRONG_VALUE = 1292
+ ER_TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293
+ ER_INVALID_ON_UPDATE = 1294
+ ER_UNSUPPORTED_PS = 1295
+ ER_GET_ERRMSG = 1296
+ ER_GET_TEMPORARY_ERRMSG = 1297
+ ER_UNKNOWN_TIME_ZONE = 1298
+ ER_WARN_INVALID_TIMESTAMP = 1299
+ ER_INVALID_CHARACTER_STRING = 1300
+ ER_WARN_ALLOWED_PACKET_OVERFLOWED = 1301
+ ER_CONFLICTING_DECLARATIONS = 1302
+ ER_SP_NO_RECURSIVE_CREATE = 1303
+ ER_SP_ALREADY_EXISTS = 1304
+ ER_SP_DOES_NOT_EXIST = 1305
+ ER_SP_DROP_FAILED = 1306
+ ER_SP_STORE_FAILED = 1307
+ ER_SP_LILABEL_MISMATCH = 1308
+ ER_SP_LABEL_REDEFINE = 1309
+ ER_SP_LABEL_MISMATCH = 1310
+ ER_SP_UNINIT_VAR = 1311
+ ER_SP_BADSELECT = 1312
+ ER_SP_BADRETURN = 1313
+ ER_SP_BADSTATEMENT = 1314
+ ER_UPDATE_LOG_DEPRECATED_IGNORED = 1315
+ ER_UPDATE_LOG_DEPRECATED_TRANSLATED = 1316
+ ER_QUERY_INTERRUPTED = 1317
+ ER_SP_WRONG_NO_OF_ARGS = 1318
+ ER_SP_COND_MISMATCH = 1319
+ ER_SP_NORETURN = 1320
+ ER_SP_NORETURNEND = 1321
+ ER_SP_BAD_CURSOR_QUERY = 1322
+ ER_SP_BAD_CURSOR_SELECT = 1323
+ ER_SP_CURSOR_MISMATCH = 1324
+ ER_SP_CURSOR_ALREADY_OPEN = 1325
+ ER_SP_CURSOR_NOT_OPEN = 1326
+ ER_SP_UNDECLARED_VAR = 1327
+ ER_SP_WRONG_NO_OF_FETCH_ARGS = 1328
+ ER_SP_FETCH_NO_DATA = 1329
+ ER_SP_DUP_PARAM = 1330
+ ER_SP_DUP_VAR = 1331
+ ER_SP_DUP_COND = 1332
+ ER_SP_DUP_CURS = 1333
+ ER_SP_CANT_ALTER = 1334
+ ER_SP_SUBSELECT_NYI = 1335
+ ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336
+ ER_SP_VARCOND_AFTER_CURSHNDLR = 1337
+ ER_SP_CURSOR_AFTER_HANDLER = 1338
+ ER_SP_CASE_NOT_FOUND = 1339
+ ER_FPARSER_TOO_BIG_FILE = 1340
+ ER_FPARSER_BAD_HEADER = 1341
+ ER_FPARSER_EOF_IN_COMMENT = 1342
+ ER_FPARSER_ERROR_IN_PARAMETER = 1343
+ ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344
+ ER_VIEW_NO_EXPLAIN = 1345
+ ER_FRM_UNKNOWN_TYPE = 1346
+ ER_WRONG_OBJECT = 1347
+ ER_NONUPDATEABLE_COLUMN = 1348
+ ER_VIEW_SELECT_DERIVED = 1349
+ ER_VIEW_SELECT_CLAUSE = 1350
+ ER_VIEW_SELECT_VARIABLE = 1351
+ ER_VIEW_SELECT_TMPTABLE = 1352
+ ER_VIEW_WRONG_LIST = 1353
+ ER_WARN_VIEW_MERGE = 1354
+ ER_WARN_VIEW_WITHOUT_KEY = 1355
+ ER_VIEW_INVALID = 1356
+ ER_SP_NO_DROP_SP = 1357
+ ER_SP_GOTO_IN_HNDLR = 1358
+ ER_TRG_ALREADY_EXISTS = 1359
+ ER_TRG_DOES_NOT_EXIST = 1360
+ ER_TRG_ON_VIEW_OR_TEMP_TABLE = 1361
+ ER_TRG_CANT_CHANGE_ROW = 1362
+ ER_TRG_NO_SUCH_ROW_IN_TRG = 1363
+ ER_NO_DEFAULT_FOR_FIELD = 1364
+ ER_DIVISION_BY_ZERO = 1365
+ ER_TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366
+ ER_ILLEGAL_VALUE_FOR_TYPE = 1367
+ ER_VIEW_NONUPD_CHECK = 1368
+ ER_VIEW_CHECK_FAILED = 1369
+ ER_PROCACCESS_DENIED_ERROR = 1370
+ ER_RELAY_LOG_FAIL = 1371
+ ER_PASSWD_LENGTH = 1372
+ ER_UNKNOWN_TARGET_BINLOG = 1373
+ ER_IO_ERR_LOG_INDEX_READ = 1374
+ ER_BINLOG_PURGE_PROHIBITED = 1375
+ ER_FSEEK_FAIL = 1376
+ ER_BINLOG_PURGE_FATAL_ERR = 1377
+ ER_LOG_IN_USE = 1378
+ ER_LOG_PURGE_UNKNOWN_ERR = 1379
+ ER_RELAY_LOG_INIT = 1380
+ ER_NO_BINARY_LOGGING = 1381
+ ER_RESERVED_SYNTAX = 1382
+ ER_WSAS_FAILED = 1383
+ ER_DIFF_GROUPS_PROC = 1384
+ ER_NO_GROUP_FOR_PROC = 1385
+ ER_ORDER_WITH_PROC = 1386
+ ER_LOGGING_PROHIBIT_CHANGING_OF = 1387
+ ER_NO_FILE_MAPPING = 1388
+ ER_WRONG_MAGIC = 1389
+ ER_PS_MANY_PARAM = 1390
+ ER_KEY_PART_0 = 1391
+ ER_VIEW_CHECKSUM = 1392
+ ER_VIEW_MULTIUPDATE = 1393
+ ER_VIEW_NO_INSERT_FIELD_LIST = 1394
+ ER_VIEW_DELETE_MERGE_VIEW = 1395
+ ER_CANNOT_USER = 1396
+ ER_XAER_NOTA = 1397
+ ER_XAER_INVAL = 1398
+ ER_XAER_RMFAIL = 1399
+ ER_XAER_OUTSIDE = 1400
+ ER_XAER_RMERR = 1401
+ ER_XA_RBROLLBACK = 1402
+ ER_NONEXISTING_PROC_GRANT = 1403
+ ER_PROC_AUTO_GRANT_FAIL = 1404
+ ER_PROC_AUTO_REVOKE_FAIL = 1405
+ ER_DATA_TOO_LONG = 1406
+ ER_SP_BAD_SQLSTATE = 1407
+ ER_STARTUP = 1408
+ ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409
+ ER_CANT_CREATE_USER_WITH_GRANT = 1410
+ ER_WRONG_VALUE_FOR_TYPE = 1411
+ ER_TABLE_DEF_CHANGED = 1412
+ ER_SP_DUP_HANDLER = 1413
+ ER_SP_NOT_VAR_ARG = 1414
+ ER_SP_NO_RETSET = 1415
+ ER_CANT_CREATE_GEOMETRY_OBJECT = 1416
+ ER_FAILED_ROUTINE_BREAK_BINLOG = 1417
+ ER_BINLOG_UNSAFE_ROUTINE = 1418
+ ER_BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419
+ ER_EXEC_STMT_WITH_OPEN_CURSOR = 1420
+ ER_STMT_HAS_NO_OPEN_CURSOR = 1421
+ ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422
+ ER_NO_DEFAULT_FOR_VIEW_FIELD = 1423
+ ER_SP_NO_RECURSION = 1424
+ ER_TOO_BIG_SCALE = 1425
+ ER_TOO_BIG_PRECISION = 1426
+ ER_M_BIGGER_THAN_D = 1427
+ ER_WRONG_LOCK_OF_SYSTEM_TABLE = 1428
+ ER_CONNECT_TO_FOREIGN_DATA_SOURCE = 1429
+ ER_QUERY_ON_FOREIGN_DATA_SOURCE = 1430
+ ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431
+ ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432
+ ER_FOREIGN_DATA_STRING_INVALID = 1433
+ ER_CANT_CREATE_FEDERATED_TABLE = 1434
+ ER_TRG_IN_WRONG_SCHEMA = 1435
+ ER_STACK_OVERRUN_NEED_MORE = 1436
+ ER_TOO_LONG_BODY = 1437
+ ER_WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438
+ ER_TOO_BIG_DISPLAYWIDTH = 1439
+ ER_XAER_DUPID = 1440
+ ER_DATETIME_FUNCTION_OVERFLOW = 1441
+ ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442
+ ER_VIEW_PREVENT_UPDATE = 1443
+ ER_PS_NO_RECURSION = 1444
+ ER_SP_CANT_SET_AUTOCOMMIT = 1445
+ ER_MALFORMED_DEFINER = 1446
+ ER_VIEW_FRM_NO_USER = 1447
+ ER_VIEW_OTHER_USER = 1448
+ ER_NO_SUCH_USER = 1449
+ ER_FORBID_SCHEMA_CHANGE = 1450
+ ER_ROW_IS_REFERENCED_2 = 1451
+ ER_NO_REFERENCED_ROW_2 = 1452
+ ER_SP_BAD_VAR_SHADOW = 1453
+ ER_TRG_NO_DEFINER = 1454
+ ER_OLD_FILE_FORMAT = 1455
+ ER_SP_RECURSION_LIMIT = 1456
+ ER_SP_PROC_TABLE_CORRUPT = 1457
+ ER_SP_WRONG_NAME = 1458
+ ER_TABLE_NEEDS_UPGRADE = 1459
+ ER_SP_NO_AGGREGATE = 1460
+ ER_MAX_PREPARED_STMT_COUNT_REACHED = 1461
+ ER_VIEW_RECURSIVE = 1462
+ ER_NON_GROUPING_FIELD_USED = 1463
+ ER_TABLE_CANT_HANDLE_SPKEYS = 1464
+ ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465
+ ER_REMOVED_SPACES = 1466
+ ER_AUTOINC_READ_FAILED = 1467
+ ER_USERNAME = 1468
+ ER_HOSTNAME = 1469
+ ER_WRONG_STRING_LENGTH = 1470
+ ER_NON_INSERTABLE_TABLE = 1471
+ ER_ADMIN_WRONG_MRG_TABLE = 1472
+ ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT = 1473
+ ER_NAME_BECOMES_EMPTY = 1474
+ ER_AMBIGUOUS_FIELD_TERM = 1475
+ ER_FOREIGN_SERVER_EXISTS = 1476
+ ER_FOREIGN_SERVER_DOESNT_EXIST = 1477
+ ER_ILLEGAL_HA_CREATE_OPTION = 1478
+ ER_PARTITION_REQUIRES_VALUES_ERROR = 1479
+ ER_PARTITION_WRONG_VALUES_ERROR = 1480
+ ER_PARTITION_MAXVALUE_ERROR = 1481
+ ER_PARTITION_SUBPARTITION_ERROR = 1482
+ ER_PARTITION_SUBPART_MIX_ERROR = 1483
+ ER_PARTITION_WRONG_NO_PART_ERROR = 1484
+ ER_PARTITION_WRONG_NO_SUBPART_ERROR = 1485
+ ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR = 1486
+ ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR = 1487
+ ER_FIELD_NOT_FOUND_PART_ERROR = 1488
+ ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR = 1489
+ ER_INCONSISTENT_PARTITION_INFO_ERROR = 1490
+ ER_PARTITION_FUNC_NOT_ALLOWED_ERROR = 1491
+ ER_PARTITIONS_MUST_BE_DEFINED_ERROR = 1492
+ ER_RANGE_NOT_INCREASING_ERROR = 1493
+ ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR = 1494
+ ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR = 1495
+ ER_PARTITION_ENTRY_ERROR = 1496
+ ER_MIX_HANDLER_ERROR = 1497
+ ER_PARTITION_NOT_DEFINED_ERROR = 1498
+ ER_TOO_MANY_PARTITIONS_ERROR = 1499
+ ER_SUBPARTITION_ERROR = 1500
+ ER_CANT_CREATE_HANDLER_FILE = 1501
+ ER_BLOB_FIELD_IN_PART_FUNC_ERROR = 1502
+ ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF = 1503
+ ER_NO_PARTS_ERROR = 1504
+ ER_PARTITION_MGMT_ON_NONPARTITIONED = 1505
+ ER_FOREIGN_KEY_ON_PARTITIONED = 1506
+ ER_DROP_PARTITION_NON_EXISTENT = 1507
+ ER_DROP_LAST_PARTITION = 1508
+ ER_COALESCE_ONLY_ON_HASH_PARTITION = 1509
+ ER_REORG_HASH_ONLY_ON_SAME_NO = 1510
+ ER_REORG_NO_PARAM_ERROR = 1511
+ ER_ONLY_ON_RANGE_LIST_PARTITION = 1512
+ ER_ADD_PARTITION_SUBPART_ERROR = 1513
+ ER_ADD_PARTITION_NO_NEW_PARTITION = 1514
+ ER_COALESCE_PARTITION_NO_PARTITION = 1515
+ ER_REORG_PARTITION_NOT_EXIST = 1516
+ ER_SAME_NAME_PARTITION = 1517
+ ER_NO_BINLOG_ERROR = 1518
+ ER_CONSECUTIVE_REORG_PARTITIONS = 1519
+ ER_REORG_OUTSIDE_RANGE = 1520
+ ER_PARTITION_FUNCTION_FAILURE = 1521
+ ER_PART_STATE_ERROR = 1522
+ ER_LIMITED_PART_RANGE = 1523
+ ER_PLUGIN_IS_NOT_LOADED = 1524
+ ER_WRONG_VALUE = 1525
+ ER_NO_PARTITION_FOR_GIVEN_VALUE = 1526
+ ER_FILEGROUP_OPTION_ONLY_ONCE = 1527
+ ER_CREATE_FILEGROUP_FAILED = 1528
+ ER_DROP_FILEGROUP_FAILED = 1529
+ ER_TABLESPACE_AUTO_EXTEND_ERROR = 1530
+ ER_WRONG_SIZE_NUMBER = 1531
+ ER_SIZE_OVERFLOW_ERROR = 1532
+ ER_ALTER_FILEGROUP_FAILED = 1533
+ ER_BINLOG_ROW_LOGGING_FAILED = 1534
+ ER_BINLOG_ROW_WRONG_TABLE_DEF = 1535
+ ER_BINLOG_ROW_RBR_TO_SBR = 1536
+ ER_EVENT_ALREADY_EXISTS = 1537
+ ER_EVENT_STORE_FAILED = 1538
+ ER_EVENT_DOES_NOT_EXIST = 1539
+ ER_EVENT_CANT_ALTER = 1540
+ ER_EVENT_DROP_FAILED = 1541
+ ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG = 1542
+ ER_EVENT_ENDS_BEFORE_STARTS = 1543
+ ER_EVENT_EXEC_TIME_IN_THE_PAST = 1544
+ ER_EVENT_OPEN_TABLE_FAILED = 1545
+ ER_EVENT_NEITHER_M_EXPR_NOR_M_AT = 1546
+ ER_OBSOLETE_COL_COUNT_DOESNT_MATCH_CORRUPTED = 1547
+ ER_OBSOLETE_CANNOT_LOAD_FROM_TABLE = 1548
+ ER_EVENT_CANNOT_DELETE = 1549
+ ER_EVENT_COMPILE_ERROR = 1550
+ ER_EVENT_SAME_NAME = 1551
+ ER_EVENT_DATA_TOO_LONG = 1552
+ ER_DROP_INDEX_FK = 1553
+ ER_WARN_DEPRECATED_SYNTAX_WITH_VER = 1554
+ ER_CANT_WRITE_LOCK_LOG_TABLE = 1555
+ ER_CANT_LOCK_LOG_TABLE = 1556
+ ER_FOREIGN_DUPLICATE_KEY_OLD_UNUSED = 1557
+ ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE = 1558
+ ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR = 1559
+ ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1560
+ ER_NDB_CANT_SWITCH_BINLOG_FORMAT = 1561
+ ER_PARTITION_NO_TEMPORARY = 1562
+ ER_PARTITION_CONST_DOMAIN_ERROR = 1563
+ ER_PARTITION_FUNCTION_IS_NOT_ALLOWED = 1564
+ ER_DDL_LOG_ERROR = 1565
+ ER_NULL_IN_VALUES_LESS_THAN = 1566
+ ER_WRONG_PARTITION_NAME = 1567
+ ER_CANT_CHANGE_TX_CHARACTERISTICS = 1568
+ ER_DUP_ENTRY_AUTOINCREMENT_CASE = 1569
+ ER_EVENT_MODIFY_QUEUE_ERROR = 1570
+ ER_EVENT_SET_VAR_ERROR = 1571
+ ER_PARTITION_MERGE_ERROR = 1572
+ ER_CANT_ACTIVATE_LOG = 1573
+ ER_RBR_NOT_AVAILABLE = 1574
+ ER_BASE64_DECODE_ERROR = 1575
+ ER_EVENT_RECURSION_FORBIDDEN = 1576
+ ER_EVENTS_DB_ERROR = 1577
+ ER_ONLY_INTEGERS_ALLOWED = 1578
+ ER_UNSUPORTED_LOG_ENGINE = 1579
+ ER_BAD_LOG_STATEMENT = 1580
+ ER_CANT_RENAME_LOG_TABLE = 1581
+ ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT = 1582
+ ER_WRONG_PARAMETERS_TO_NATIVE_FCT = 1583
+ ER_WRONG_PARAMETERS_TO_STORED_FCT = 1584
+ ER_NATIVE_FCT_NAME_COLLISION = 1585
+ ER_DUP_ENTRY_WITH_KEY_NAME = 1586
+ ER_BINLOG_PURGE_EMFILE = 1587
+ ER_EVENT_CANNOT_CREATE_IN_THE_PAST = 1588
+ ER_EVENT_CANNOT_ALTER_IN_THE_PAST = 1589
+ ER_SLAVE_INCIDENT = 1590
+ ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT = 1591
+ ER_BINLOG_UNSAFE_STATEMENT = 1592
+ ER_SLAVE_FATAL_ERROR = 1593
+ ER_SLAVE_RELAY_LOG_READ_FAILURE = 1594
+ ER_SLAVE_RELAY_LOG_WRITE_FAILURE = 1595
+ ER_SLAVE_CREATE_EVENT_FAILURE = 1596
+ ER_SLAVE_MASTER_COM_FAILURE = 1597
+ ER_BINLOG_LOGGING_IMPOSSIBLE = 1598
+ ER_VIEW_NO_CREATION_CTX = 1599
+ ER_VIEW_INVALID_CREATION_CTX = 1600
+ ER_SR_INVALID_CREATION_CTX = 1601
+ ER_TRG_CORRUPTED_FILE = 1602
+ ER_TRG_NO_CREATION_CTX = 1603
+ ER_TRG_INVALID_CREATION_CTX = 1604
+ ER_EVENT_INVALID_CREATION_CTX = 1605
+ ER_TRG_CANT_OPEN_TABLE = 1606
+ ER_CANT_CREATE_SROUTINE = 1607
+ ER_NEVER_USED = 1608
+ ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT = 1609
+ ER_SLAVE_CORRUPT_EVENT = 1610
+ ER_LOAD_DATA_INVALID_COLUMN = 1611
+ ER_LOG_PURGE_NO_FILE = 1612
+ ER_XA_RBTIMEOUT = 1613
+ ER_XA_RBDEADLOCK = 1614
+ ER_NEED_REPREPARE = 1615
+ ER_DELAYED_NOT_SUPPORTED = 1616
+ WARN_NO_MASTER_INFO = 1617
+ WARN_OPTION_IGNORED = 1618
+ WARN_PLUGIN_DELETE_BUILTIN = 1619
+ WARN_PLUGIN_BUSY = 1620
+ ER_VARIABLE_IS_READONLY = 1621
+ ER_WARN_ENGINE_TRANSACTION_ROLLBACK = 1622
+ ER_SLAVE_HEARTBEAT_FAILURE = 1623
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE = 1624
+ ER_NDB_REPLICATION_SCHEMA_ERROR = 1625
+ ER_CONFLICT_FN_PARSE_ERROR = 1626
+ ER_EXCEPTIONS_WRITE_ERROR = 1627
+ ER_TOO_LONG_TABLE_COMMENT = 1628
+ ER_TOO_LONG_FIELD_COMMENT = 1629
+ ER_FUNC_INEXISTENT_NAME_COLLISION = 1630
+ ER_DATABASE_NAME = 1631
+ ER_TABLE_NAME = 1632
+ ER_PARTITION_NAME = 1633
+ ER_SUBPARTITION_NAME = 1634
+ ER_TEMPORARY_NAME = 1635
+ ER_RENAMED_NAME = 1636
+ ER_TOO_MANY_CONCURRENT_TRXS = 1637
+ WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED = 1638
+ ER_DEBUG_SYNC_TIMEOUT = 1639
+ ER_DEBUG_SYNC_HIT_LIMIT = 1640
+ ER_DUP_SIGNAL_SET = 1641
+ ER_SIGNAL_WARN = 1642
+ ER_SIGNAL_NOT_FOUND = 1643
+ ER_SIGNAL_EXCEPTION = 1644
+ ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER = 1645
+ ER_SIGNAL_BAD_CONDITION_TYPE = 1646
+ WARN_COND_ITEM_TRUNCATED = 1647
+ ER_COND_ITEM_TOO_LONG = 1648
+ ER_UNKNOWN_LOCALE = 1649
+ ER_SLAVE_IGNORE_SERVER_IDS = 1650
+ ER_QUERY_CACHE_DISABLED = 1651
+ ER_SAME_NAME_PARTITION_FIELD = 1652
+ ER_PARTITION_COLUMN_LIST_ERROR = 1653
+ ER_WRONG_TYPE_COLUMN_VALUE_ERROR = 1654
+ ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR = 1655
+ ER_MAXVALUE_IN_VALUES_IN = 1656
+ ER_TOO_MANY_VALUES_ERROR = 1657
+ ER_ROW_SINGLE_PARTITION_FIELD_ERROR = 1658
+ ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD = 1659
+ ER_PARTITION_FIELDS_TOO_LONG = 1660
+ ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE = 1661
+ ER_BINLOG_ROW_MODE_AND_STMT_ENGINE = 1662
+ ER_BINLOG_UNSAFE_AND_STMT_ENGINE = 1663
+ ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE = 1664
+ ER_BINLOG_STMT_MODE_AND_ROW_ENGINE = 1665
+ ER_BINLOG_ROW_INJECTION_AND_STMT_MODE = 1666
+ ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1667
+ ER_BINLOG_UNSAFE_LIMIT = 1668
+ ER_BINLOG_UNSAFE_INSERT_DELAYED = 1669
+ ER_BINLOG_UNSAFE_SYSTEM_TABLE = 1670
+ ER_BINLOG_UNSAFE_AUTOINC_COLUMNS = 1671
+ ER_BINLOG_UNSAFE_UDF = 1672
+ ER_BINLOG_UNSAFE_SYSTEM_VARIABLE = 1673
+ ER_BINLOG_UNSAFE_SYSTEM_FUNCTION = 1674
+ ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS = 1675
+ ER_MESSAGE_AND_STATEMENT = 1676
+ ER_SLAVE_CONVERSION_FAILED = 1677
+ ER_SLAVE_CANT_CREATE_CONVERSION = 1678
+ ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1679
+ ER_PATH_LENGTH = 1680
+ ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT = 1681
+ ER_WRONG_NATIVE_TABLE_STRUCTURE = 1682
+ ER_WRONG_PERFSCHEMA_USAGE = 1683
+ ER_WARN_I_S_SKIPPED_TABLE = 1684
+ ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1685
+ ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1686
+ ER_SPATIAL_MUST_HAVE_GEOM_COL = 1687
+ ER_TOO_LONG_INDEX_COMMENT = 1688
+ ER_LOCK_ABORTED = 1689
+ ER_DATA_OUT_OF_RANGE = 1690
+ ER_WRONG_SPVAR_TYPE_IN_LIMIT = 1691
+ ER_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1692
+ ER_BINLOG_UNSAFE_MIXED_STATEMENT = 1693
+ ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1694
+ ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1695
+ ER_FAILED_READ_FROM_PAR_FILE = 1696
+ ER_VALUES_IS_NOT_INT_TYPE_ERROR = 1697
+ ER_ACCESS_DENIED_NO_PASSWORD_ERROR = 1698
+ ER_SET_PASSWORD_AUTH_PLUGIN = 1699
+ ER_GRANT_PLUGIN_USER_EXISTS = 1700
+ ER_TRUNCATE_ILLEGAL_FK = 1701
+ ER_PLUGIN_IS_PERMANENT = 1702
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN = 1703
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX = 1704
+ ER_STMT_CACHE_FULL = 1705
+ ER_MULTI_UPDATE_KEY_CONFLICT = 1706
+ ER_TABLE_NEEDS_REBUILD = 1707
+ WARN_OPTION_BELOW_LIMIT = 1708
+ ER_INDEX_COLUMN_TOO_LONG = 1709
+ ER_ERROR_IN_TRIGGER_BODY = 1710
+ ER_ERROR_IN_UNKNOWN_TRIGGER_BODY = 1711
+ ER_INDEX_CORRUPT = 1712
+ ER_UNDO_RECORD_TOO_BIG = 1713
+ ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT = 1714
+ ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE = 1715
+ ER_BINLOG_UNSAFE_REPLACE_SELECT = 1716
+ ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT = 1717
+ ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT = 1718
+ ER_BINLOG_UNSAFE_UPDATE_IGNORE = 1719
+ ER_PLUGIN_NO_UNINSTALL = 1720
+ ER_PLUGIN_NO_INSTALL = 1721
+ ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT = 1722
+ ER_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC = 1723
+ ER_BINLOG_UNSAFE_INSERT_TWO_KEYS = 1724
+ ER_TABLE_IN_FK_CHECK = 1725
+ ER_UNSUPPORTED_ENGINE = 1726
+ ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST = 1727
+ ER_CANNOT_LOAD_FROM_TABLE_V2 = 1728
+ ER_MASTER_DELAY_VALUE_OUT_OF_RANGE = 1729
+ ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT = 1730
+ ER_PARTITION_EXCHANGE_DIFFERENT_OPTION = 1731
+ ER_PARTITION_EXCHANGE_PART_TABLE = 1732
+ ER_PARTITION_EXCHANGE_TEMP_TABLE = 1733
+ ER_PARTITION_INSTEAD_OF_SUBPARTITION = 1734
+ ER_UNKNOWN_PARTITION = 1735
+ ER_TABLES_DIFFERENT_METADATA = 1736
+ ER_ROW_DOES_NOT_MATCH_PARTITION = 1737
+ ER_BINLOG_CACHE_SIZE_GREATER_THAN_MAX = 1738
+ ER_WARN_INDEX_NOT_APPLICABLE = 1739
+ ER_PARTITION_EXCHANGE_FOREIGN_KEY = 1740
+ ER_NO_SUCH_KEY_VALUE = 1741
+ ER_RPL_INFO_DATA_TOO_LONG = 1742
+ ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE = 1743
+ ER_BINLOG_READ_EVENT_CHECKSUM_FAILURE = 1744
+ ER_BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX = 1745
+ ER_CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT = 1746
+ ER_PARTITION_CLAUSE_ON_NONPARTITIONED = 1747
+ ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET = 1748
+ ER_NO_SUCH_PARTITION__UNUSED = 1749
+ ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE = 1750
+ ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE = 1751
+ ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE = 1752
+ ER_MTS_FEATURE_IS_NOT_SUPPORTED = 1753
+ ER_MTS_UPDATED_DBS_GREATER_MAX = 1754
+ ER_MTS_CANT_PARALLEL = 1755
+ ER_MTS_INCONSISTENT_DATA = 1756
+ ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING = 1757
+ ER_DA_INVALID_CONDITION_NUMBER = 1758
+ ER_INSECURE_PLAIN_TEXT = 1759
+ ER_INSECURE_CHANGE_MASTER = 1760
+ ER_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO = 1761
+ ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO = 1762
+ ER_SQLTHREAD_WITH_SECURE_SLAVE = 1763
+ ER_TABLE_HAS_NO_FT = 1764
+ ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER = 1765
+ ER_VARIABLE_NOT_SETTABLE_IN_TRANSACTION = 1766
+ ER_GTID_NEXT_IS_NOT_IN_GTID_NEXT_LIST = 1767
+ ER_CANT_CHANGE_GTID_NEXT_IN_TRANSACTION_WHEN_GTID_NEXT_LIST_IS_NULL = 1768
+ ER_SET_STATEMENT_CANNOT_INVOKE_FUNCTION = 1769
+ ER_GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL = 1770
+ ER_SKIPPING_LOGGED_TRANSACTION = 1771
+ ER_MALFORMED_GTID_SET_SPECIFICATION = 1772
+ ER_MALFORMED_GTID_SET_ENCODING = 1773
+ ER_MALFORMED_GTID_SPECIFICATION = 1774
+ ER_GNO_EXHAUSTED = 1775
+ ER_BAD_SLAVE_AUTO_POSITION = 1776
+ ER_AUTO_POSITION_REQUIRES_GTID_MODE_ON = 1777
+ ER_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET = 1778
+ ER_GTID_MODE_2_OR_3_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON = 1779
+ ER_GTID_MODE_REQUIRES_BINLOG = 1780
+ ER_CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF = 1781
+ ER_CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON = 1782
+ ER_CANT_SET_GTID_NEXT_LIST_TO_NON_NULL_WHEN_GTID_MODE_IS_OFF = 1783
+ ER_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF = 1784
+ ER_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE = 1785
+ ER_GTID_UNSAFE_CREATE_SELECT = 1786
+ ER_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION = 1787
+ ER_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME = 1788
+ ER_MASTER_HAS_PURGED_REQUIRED_GTIDS = 1789
+ ER_CANT_SET_GTID_NEXT_WHEN_OWNING_GTID = 1790
+ ER_UNKNOWN_EXPLAIN_FORMAT = 1791
+ ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION = 1792
+ ER_TOO_LONG_TABLE_PARTITION_COMMENT = 1793
+ ER_SLAVE_CONFIGURATION = 1794
+ ER_INNODB_FT_LIMIT = 1795
+ ER_INNODB_NO_FT_TEMP_TABLE = 1796
+ ER_INNODB_FT_WRONG_DOCID_COLUMN = 1797
+ ER_INNODB_FT_WRONG_DOCID_INDEX = 1798
+ ER_INNODB_ONLINE_LOG_TOO_BIG = 1799
+ ER_UNKNOWN_ALTER_ALGORITHM = 1800
+ ER_UNKNOWN_ALTER_LOCK = 1801
+ ER_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS = 1802
+ ER_MTS_RECOVERY_FAILURE = 1803
+ ER_MTS_RESET_WORKERS = 1804
+ ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2 = 1805
+ ER_SLAVE_SILENT_RETRY_TRANSACTION = 1806
+ ER_DISCARD_FK_CHECKS_RUNNING = 1807
+ ER_TABLE_SCHEMA_MISMATCH = 1808
+ ER_TABLE_IN_SYSTEM_TABLESPACE = 1809
+ ER_IO_READ_ERROR = 1810
+ ER_IO_WRITE_ERROR = 1811
+ ER_TABLESPACE_MISSING = 1812
+ ER_TABLESPACE_EXISTS = 1813
+ ER_TABLESPACE_DISCARDED = 1814
+ ER_INTERNAL_ERROR = 1815
+ ER_INNODB_IMPORT_ERROR = 1816
+ ER_INNODB_INDEX_CORRUPT = 1817
+ ER_INVALID_YEAR_COLUMN_LENGTH = 1818
+ ER_NOT_VALID_PASSWORD = 1819
+ ER_MUST_CHANGE_PASSWORD = 1820
+ ER_FK_NO_INDEX_CHILD = 1821
+ ER_FK_NO_INDEX_PARENT = 1822
+ ER_FK_FAIL_ADD_SYSTEM = 1823
+ ER_FK_CANNOT_OPEN_PARENT = 1824
+ ER_FK_INCORRECT_OPTION = 1825
+ ER_FK_DUP_NAME = 1826
+ ER_PASSWORD_FORMAT = 1827
+ ER_FK_COLUMN_CANNOT_DROP = 1828
+ ER_FK_COLUMN_CANNOT_DROP_CHILD = 1829
+ ER_FK_COLUMN_NOT_NULL = 1830
+ ER_DUP_INDEX = 1831
+ ER_FK_COLUMN_CANNOT_CHANGE = 1832
+ ER_FK_COLUMN_CANNOT_CHANGE_CHILD = 1833
+ ER_FK_CANNOT_DELETE_PARENT = 1834
+ ER_MALFORMED_PACKET = 1835
+ ER_READ_ONLY_MODE = 1836
+ ER_GTID_NEXT_TYPE_UNDEFINED_GROUP = 1837
+ ER_VARIABLE_NOT_SETTABLE_IN_SP = 1838
+ ER_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF = 1839
+ ER_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY = 1840
+ ER_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY = 1841
+ ER_GTID_PURGED_WAS_CHANGED = 1842
+ ER_GTID_EXECUTED_WAS_CHANGED = 1843
+ ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES = 1844
+ ER_ALTER_OPERATION_NOT_SUPPORTED = 1845
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON = 1846
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY = 1847
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION = 1848
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME = 1849
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE = 1850
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK = 1851
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_IGNORE = 1852
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK = 1853
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC = 1854
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS = 1855
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS = 1856
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS = 1857
+ ER_SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE = 1858
+ ER_DUP_UNKNOWN_IN_INDEX = 1859
+ ER_IDENT_CAUSES_TOO_LONG_PATH = 1860
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL = 1861
+ ER_MUST_CHANGE_PASSWORD_LOGIN = 1862
+ ER_ROW_IN_WRONG_PARTITION = 1863
+ ER_ERROR_LAST = 1863
)
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/errname.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/errname.go
similarity index 99%
rename from vendor/github.com/siddontang/go-mysql/mysql/errname.go
rename to vendor/github.com/go-mysql-org/go-mysql/mysql/errname.go
index 7f8575a..0ff7a13 100644
--- a/vendor/github.com/siddontang/go-mysql/mysql/errname.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/errname.go
@@ -591,7 +591,7 @@ var MySQLErrName = map[uint16]string{
ER_BINLOG_PURGE_EMFILE: "Too many files opened, please execute the command again",
ER_EVENT_CANNOT_CREATE_IN_THE_PAST: "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation.",
ER_EVENT_CANNOT_ALTER_IN_THE_PAST: "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was not changed. Specify a time in the future.",
- ER_SLAVE_INCIDENT: "The incident %s occured on the master. Message: %-.64s",
+ ER_SLAVE_INCIDENT: "The incident %s occurred on the master. Message: %-.64s",
ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT: "Table has no partition for some existing values",
ER_BINLOG_UNSAFE_STATEMENT: "Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. %s",
ER_SLAVE_FATAL_ERROR: "Fatal error: %s",
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/error.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/error.go
similarity index 91%
rename from vendor/github.com/siddontang/go-mysql/mysql/error.go
rename to vendor/github.com/go-mysql-org/go-mysql/mysql/error.go
index 876a408..abda6de 100644
--- a/vendor/github.com/siddontang/go-mysql/mysql/error.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/error.go
@@ -3,7 +3,7 @@ package mysql
import (
"fmt"
- "github.com/juju/errors"
+ "github.com/pingcap/errors"
)
var (
@@ -23,7 +23,7 @@ func (e *MyError) Error() string {
return fmt.Sprintf("ERROR %d (%s): %s", e.Code, e.State, e.Message)
}
-//default mysql error, must adapt errname message format
+// NewDefaultError: default mysql error, must adapt errname message format
func NewDefaultError(errCode uint16, args ...interface{}) *MyError {
e := new(MyError)
e.Code = errCode
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/field.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/field.go
similarity index 73%
rename from vendor/github.com/siddontang/go-mysql/mysql/field.go
rename to vendor/github.com/go-mysql-org/go-mysql/mysql/field.go
index 891f00b..e0f8e6f 100644
--- a/vendor/github.com/siddontang/go-mysql/mysql/field.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/field.go
@@ -2,6 +2,8 @@ package mysql
import (
"encoding/binary"
+
+ "github.com/go-mysql-org/go-mysql/utils"
)
type FieldData []byte
@@ -23,9 +25,23 @@ type Field struct {
DefaultValue []byte
}
-func (p FieldData) Parse() (f *Field, err error) {
- f = new(Field)
+type FieldValueType uint8
+type FieldValue struct {
+ Type FieldValueType
+ value uint64 // Also for int64 and float64
+ str []byte
+}
+
+const (
+ FieldValueTypeNull = iota
+ FieldValueTypeUnsigned
+ FieldValueTypeSigned
+ FieldValueTypeFloat
+ FieldValueTypeString
+)
+
+func (f *Field) Parse(p FieldData) (err error) {
f.Data = p
var n int
@@ -114,7 +130,15 @@ func (p FieldData) Parse() (f *Field, err error) {
f.DefaultValue = p[pos:(pos + int(f.DefaultValueLength))]
}
- return
+ return nil
+}
+
+func (p FieldData) Parse() (f *Field, err error) {
+ f = new(Field)
+ if err = f.Parse(p); err != nil {
+ return nil, err
+ }
+ return f, nil
}
func (f *Field) Dump() []byte {
@@ -155,3 +179,34 @@ func (f *Field) Dump() []byte {
return data
}
+
+func (fv *FieldValue) AsUint64() uint64 {
+ return fv.value
+}
+
+func (fv *FieldValue) AsInt64() int64 {
+ return utils.Uint64ToInt64(fv.value)
+}
+
+func (fv *FieldValue) AsFloat64() float64 {
+ return utils.Uint64ToFloat64(fv.value)
+}
+
+func (fv *FieldValue) AsString() []byte {
+ return fv.str
+}
+
+func (fv *FieldValue) Value() interface{} {
+ switch fv.Type {
+ case FieldValueTypeUnsigned:
+ return fv.AsUint64()
+ case FieldValueTypeSigned:
+ return fv.AsInt64()
+ case FieldValueTypeFloat:
+ return fv.AsFloat64()
+ case FieldValueTypeString:
+ return fv.AsString()
+ default: // FieldValueTypeNull
+ return nil
+ }
+}
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/gtid.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/gtid.go
similarity index 93%
rename from vendor/github.com/siddontang/go-mysql/mysql/gtid.go
rename to vendor/github.com/go-mysql-org/go-mysql/mysql/gtid.go
index cde9901..f35c4d7 100644
--- a/vendor/github.com/siddontang/go-mysql/mysql/gtid.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/gtid.go
@@ -1,6 +1,6 @@
package mysql
-import "github.com/juju/errors"
+import "github.com/pingcap/errors"
type GTIDSet interface {
String() string
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/mariadb_gtid.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/mariadb_gtid.go
similarity index 92%
rename from vendor/github.com/siddontang/go-mysql/mysql/mariadb_gtid.go
rename to vendor/github.com/go-mysql-org/go-mysql/mysql/mariadb_gtid.go
index 09fe7ac..8bfffd0 100644
--- a/vendor/github.com/siddontang/go-mysql/mysql/mariadb_gtid.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/mariadb_gtid.go
@@ -3,12 +3,12 @@ package mysql
import (
"bytes"
"fmt"
+ "sort"
"strconv"
"strings"
- "github.com/juju/errors"
+ "github.com/pingcap/errors"
"github.com/siddontang/go-log/log"
- "github.com/siddontang/go/hack"
)
// MariadbGTID represent mariadb gtid, [domain ID]-[server-id]-[sequence]
@@ -113,15 +113,9 @@ func ParseMariadbGTIDSet(str string) (GTIDSet, error) {
if str == "" {
return s, nil
}
-
- sp := strings.Split(str, ",")
-
- //todo, handle redundant same uuid
- for i := 0; i < len(sp); i++ {
- err := s.Update(sp[i])
- if err != nil {
- return nil, errors.Trace(err)
- }
+ err := s.Update(str)
+ if err != nil {
+ return nil, err
}
return s, nil
}
@@ -147,17 +141,29 @@ func (s *MariadbGTIDSet) AddSet(gtid *MariadbGTID) error {
// Update updates mariadb gtid set
func (s *MariadbGTIDSet) Update(GTIDStr string) error {
- gtid, err := ParseMariadbGTID(GTIDStr)
- if err != nil {
- return err
+ sp := strings.Split(GTIDStr, ",")
+ //todo, handle redundant same uuid
+ for i := 0; i < len(sp); i++ {
+ gtid, err := ParseMariadbGTID(sp[i])
+ if err != nil {
+ return errors.Trace(err)
+ }
+ err = s.AddSet(gtid)
+ if err != nil {
+ return errors.Trace(err)
+ }
}
-
- err = s.AddSet(gtid)
- return errors.Trace(err)
+ return nil
}
func (s *MariadbGTIDSet) String() string {
- return hack.String(s.Encode())
+ sets := make([]string, 0, len(s.Sets))
+ for _, set := range s.Sets {
+ sets = append(sets, set.String())
+ }
+ sort.Strings(sets)
+
+ return strings.Join(sets, ",")
}
// Encode encodes mariadb gtid set
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/mysql_gtid.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/mysql_gtid.go
similarity index 88%
rename from vendor/github.com/siddontang/go-mysql/mysql/mysql_gtid.go
rename to vendor/github.com/go-mysql-org/go-mysql/mysql/mysql_gtid.go
index a937cb8..fbd5b7b 100644
--- a/vendor/github.com/siddontang/go-mysql/mysql/mysql_gtid.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/mysql_gtid.go
@@ -9,8 +9,8 @@ import (
"strconv"
"strings"
- "github.com/juju/errors"
- "github.com/satori/go.uuid"
+ "github.com/pingcap/errors"
+ uuid "github.com/satori/go.uuid"
"github.com/siddontang/go/hack"
)
@@ -32,8 +32,10 @@ func parseInterval(str string) (i Interval, err error) {
i.Stop = i.Start + 1
case 2:
i.Start, err = strconv.ParseInt(p[0], 10, 64)
- i.Stop, err = strconv.ParseInt(p[1], 10, 64)
- i.Stop = i.Stop + 1
+ if err == nil {
+ i.Stop, err = strconv.ParseInt(p[1], 10, 64)
+ i.Stop++
+ }
default:
err = errors.Errorf("invalid interval format, must n[-n]")
}
@@ -108,7 +110,7 @@ func (s IntervalSlice) Normalize() IntervalSlice {
return n
}
-// Return true if sub in s
+// Contain returns true if sub in s
func (s IntervalSlice) Contain(sub IntervalSlice) bool {
j := 0
for i := 0; i < len(sub); i++ {
@@ -230,14 +232,14 @@ func (s *UUIDSet) String() string {
}
func (s *UUIDSet) encode(w io.Writer) {
- w.Write(s.SID.Bytes())
+ _, _ = w.Write(s.SID.Bytes())
n := int64(len(s.Intervals))
- binary.Write(w, binary.LittleEndian, n)
+ _ = binary.Write(w, binary.LittleEndian, n)
for _, i := range s.Intervals {
- binary.Write(w, binary.LittleEndian, i.Start)
- binary.Write(w, binary.LittleEndian, i.Stop)
+ _ = binary.Write(w, binary.LittleEndian, i.Start)
+ _ = binary.Write(w, binary.LittleEndian, i.Stop)
}
}
@@ -292,7 +294,7 @@ func (s *UUIDSet) Decode(data []byte) error {
func (s *UUIDSet) Clone() *UUIDSet {
clone := new(UUIDSet)
- clone.SID, _ = uuid.FromString(s.SID.String())
+ copy(clone.SID[:], s.SID[:])
clone.Intervals = s.Intervals.Normalize()
return clone
@@ -318,7 +320,6 @@ func ParseMysqlGTIDSet(str string) (GTIDSet, error) {
} else {
s.AddSet(set)
}
-
}
return s, nil
}
@@ -362,13 +363,13 @@ func (s *MysqlGTIDSet) AddSet(set *UUIDSet) {
}
func (s *MysqlGTIDSet) Update(GTIDStr string) error {
- uuidSet, err := ParseUUIDSet(GTIDStr)
+ gtidSet, err := ParseMysqlGTIDSet(GTIDStr)
if err != nil {
return err
}
-
- s.AddSet(uuidSet)
-
+ for _, uuidSet := range gtidSet.(*MysqlGTIDSet).Sets {
+ s.AddSet(uuidSet)
+ }
return nil
}
@@ -398,6 +399,10 @@ func (s *MysqlGTIDSet) Equal(o GTIDSet) bool {
return false
}
+ if len(sub.Sets) != len(s.Sets) {
+ return false
+ }
+
for key, set := range sub.Sets {
o, ok := s.Sets[key]
if !ok {
@@ -410,15 +415,28 @@ func (s *MysqlGTIDSet) Equal(o GTIDSet) bool {
}
return true
-
}
func (s *MysqlGTIDSet) String() string {
+ // there is only one element in gtid set
+ if len(s.Sets) == 1 {
+ for _, set := range s.Sets {
+ return set.String()
+ }
+ }
+
+ // sort multi set
var buf bytes.Buffer
- sep := ""
+ sets := make([]string, 0, len(s.Sets))
for _, set := range s.Sets {
+ sets = append(sets, set.String())
+ }
+ sort.Strings(sets)
+
+ sep := ""
+ for _, set := range sets {
buf.WriteString(sep)
- buf.WriteString(set.String())
+ buf.WriteString(set)
sep = ","
}
@@ -428,9 +446,9 @@ func (s *MysqlGTIDSet) String() string {
func (s *MysqlGTIDSet) Encode() []byte {
var buf bytes.Buffer
- binary.Write(&buf, binary.LittleEndian, uint64(len(s.Sets)))
+ _ = binary.Write(&buf, binary.LittleEndian, uint64(len(s.Sets)))
- for i, _ := range s.Sets {
+ for i := range s.Sets {
s.Sets[i].encode(&buf)
}
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/parse_binary.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/parse_binary.go
similarity index 96%
rename from vendor/github.com/siddontang/go-mysql/mysql/parse_binary.go
rename to vendor/github.com/go-mysql-org/go-mysql/mysql/parse_binary.go
index b9b8179..03b35ca 100644
--- a/vendor/github.com/siddontang/go-mysql/mysql/parse_binary.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/parse_binary.go
@@ -20,7 +20,7 @@ func ParseBinaryUint16(data []byte) uint16 {
}
func ParseBinaryInt24(data []byte) int32 {
- u32 := uint32(ParseBinaryUint24(data))
+ u32 := ParseBinaryUint24(data)
if u32&0x00800000 != 0 {
u32 |= 0xFF000000
}
diff --git a/vendor/github.com/go-mysql-org/go-mysql/mysql/position.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/position.go
new file mode 100644
index 0000000..c592d63
--- /dev/null
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/position.go
@@ -0,0 +1,80 @@
+package mysql
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// For binlog filename + position based replication
+type Position struct {
+ Name string
+ Pos uint32
+}
+
+func (p Position) Compare(o Position) int {
+ // First compare binlog name
+ nameCmp := CompareBinlogFileName(p.Name, o.Name)
+ if nameCmp != 0 {
+ return nameCmp
+ }
+ // Same binlog file, compare position
+ if p.Pos > o.Pos {
+ return 1
+ } else if p.Pos < o.Pos {
+ return -1
+ } else {
+ return 0
+ }
+}
+
+func (p Position) String() string {
+ return fmt.Sprintf("(%s, %d)", p.Name, p.Pos)
+}
+
+func CompareBinlogFileName(a, b string) int {
+ // sometimes it's convenient to construct a `Position` literal with no `Name`
+ if a == "" && b == "" {
+ return 0
+ } else if a == "" {
+ return -1
+ } else if b == "" {
+ return 1
+ }
+
+ splitBinlogName := func(n string) (string, int) {
+ // mysqld appends a numeric extension to the binary log base name to generate binary log file names
+ // ...
+ // If you supply an extension in the log name (for example, --log-bin=base_name.extension),
+ // the extension is silently removed and ignored.
+ // ref: https://dev.mysql.com/doc/refman/8.0/en/binary-log.html
+ i := strings.LastIndexByte(n, '.')
+ if i == -1 {
+ // try keeping backward compatibility
+ return n, 0
+ }
+
+ seq, err := strconv.Atoi(n[i+1:])
+ if err != nil {
+ panic(fmt.Sprintf("binlog file %s doesn't contain numeric extension", err))
+ }
+ return n[:i], seq
+ }
+
+ aBase, aSeq := splitBinlogName(a)
+ bBase, bSeq := splitBinlogName(b)
+
+ if aBase > bBase {
+ return 1
+ } else if aBase < bBase {
+ return -1
+ }
+
+ if aSeq > bSeq {
+ return 1
+ } else if aSeq < bSeq {
+ return -1
+ } else {
+ return 0
+ }
+}
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/result.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/result.go
similarity index 65%
rename from vendor/github.com/siddontang/go-mysql/mysql/result.go
rename to vendor/github.com/go-mysql-org/go-mysql/mysql/result.go
index d6c80e4..797a4af 100644
--- a/vendor/github.com/siddontang/go-mysql/mysql/result.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/result.go
@@ -12,3 +12,10 @@ type Result struct {
type Executer interface {
Execute(query string, args ...interface{}) (*Result, error)
}
+
+func (r *Result) Close() {
+ if r.Resultset != nil {
+ r.Resultset.returnToPool()
+ r.Resultset = nil
+ }
+}
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/resultset.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/resultset.go
similarity index 50%
rename from vendor/github.com/siddontang/go-mysql/mysql/resultset.go
rename to vendor/github.com/go-mysql-org/go-mysql/mysql/resultset.go
index b01e1a5..f244b7d 100644
--- a/vendor/github.com/siddontang/go-mysql/mysql/resultset.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/resultset.go
@@ -3,237 +3,66 @@ package mysql
import (
"fmt"
"strconv"
+ "sync"
- "github.com/juju/errors"
+ "github.com/pingcap/errors"
"github.com/siddontang/go/hack"
)
-type RowData []byte
-
-func (p RowData) Parse(f []*Field, binary bool) ([]interface{}, error) {
- if binary {
- return p.ParseBinary(f)
- } else {
- return p.ParseText(f)
- }
-}
-
-func (p RowData) ParseText(f []*Field) ([]interface{}, error) {
- data := make([]interface{}, len(f))
-
- var err error
- var v []byte
- var isNull bool
- var pos int = 0
- var n int = 0
-
- for i := range f {
- v, isNull, n, err = LengthEncodedString(p[pos:])
- if err != nil {
- return nil, errors.Trace(err)
- }
-
- pos += n
-
- if isNull {
- data[i] = nil
- } else {
- isUnsigned := f[i].Flag&UNSIGNED_FLAG != 0
-
- switch f[i].Type {
- case MYSQL_TYPE_TINY, MYSQL_TYPE_SHORT, MYSQL_TYPE_INT24,
- MYSQL_TYPE_LONGLONG, MYSQL_TYPE_YEAR:
- if isUnsigned {
- data[i], err = strconv.ParseUint(string(v), 10, 64)
- } else {
- data[i], err = strconv.ParseInt(string(v), 10, 64)
- }
- case MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE:
- data[i], err = strconv.ParseFloat(string(v), 64)
- default:
- data[i] = v
- }
-
- if err != nil {
- return nil, errors.Trace(err)
- }
- }
- }
-
- return data, nil
-}
-
-func (p RowData) ParseBinary(f []*Field) ([]interface{}, error) {
- data := make([]interface{}, len(f))
-
- if p[0] != OK_HEADER {
- return nil, ErrMalformPacket
- }
-
- pos := 1 + ((len(f) + 7 + 2) >> 3)
-
- nullBitmap := p[1:pos]
-
- var isNull bool
- var n int
- var err error
- var v []byte
- for i := range data {
- if nullBitmap[(i+2)/8]&(1<<(uint(i+2)%8)) > 0 {
- data[i] = nil
- continue
- }
-
- isUnsigned := f[i].Flag&UNSIGNED_FLAG != 0
-
- switch f[i].Type {
- case MYSQL_TYPE_NULL:
- data[i] = nil
- continue
-
- case MYSQL_TYPE_TINY:
- if isUnsigned {
- data[i] = ParseBinaryUint8(p[pos : pos+1])
- } else {
- data[i] = ParseBinaryInt8(p[pos : pos+1])
- }
- pos++
- continue
-
- case MYSQL_TYPE_SHORT, MYSQL_TYPE_YEAR:
- if isUnsigned {
- data[i] = ParseBinaryUint16(p[pos : pos+2])
- } else {
- data[i] = ParseBinaryInt16(p[pos : pos+2])
- }
- pos += 2
- continue
-
- case MYSQL_TYPE_INT24:
- if isUnsigned {
- data[i] = ParseBinaryUint24(p[pos : pos+3])
- } else {
- data[i] = ParseBinaryInt24(p[pos : pos+3])
- }
- //3 byte
- pos += 3
- continue
-
- case MYSQL_TYPE_LONG:
- if isUnsigned {
- data[i] = ParseBinaryUint32(p[pos : pos+4])
- } else {
- data[i] = ParseBinaryInt32(p[pos : pos+4])
- }
- pos += 4
- continue
-
- case MYSQL_TYPE_LONGLONG:
- if isUnsigned {
- data[i] = ParseBinaryUint64(p[pos : pos+8])
- } else {
- data[i] = ParseBinaryInt64(p[pos : pos+8])
- }
- pos += 8
- continue
-
- case MYSQL_TYPE_FLOAT:
- data[i] = ParseBinaryFloat32(p[pos : pos+4])
- pos += 4
- continue
-
- case MYSQL_TYPE_DOUBLE:
- data[i] = ParseBinaryFloat64(p[pos : pos+8])
- pos += 8
- continue
-
- case MYSQL_TYPE_DECIMAL, MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR,
- MYSQL_TYPE_BIT, MYSQL_TYPE_ENUM, MYSQL_TYPE_SET, MYSQL_TYPE_TINY_BLOB,
- MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_BLOB,
- MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_STRING, MYSQL_TYPE_GEOMETRY:
- v, isNull, n, err = LengthEncodedString(p[pos:])
- pos += n
- if err != nil {
- return nil, errors.Trace(err)
- }
-
- if !isNull {
- data[i] = v
- continue
- } else {
- data[i] = nil
- continue
- }
- case MYSQL_TYPE_DATE, MYSQL_TYPE_NEWDATE:
- var num uint64
- num, isNull, n = LengthEncodedInt(p[pos:])
-
- pos += n
-
- if isNull {
- data[i] = nil
- continue
- }
-
- data[i], err = FormatBinaryDate(int(num), p[pos:])
- pos += int(num)
-
- if err != nil {
- return nil, errors.Trace(err)
- }
-
- case MYSQL_TYPE_TIMESTAMP, MYSQL_TYPE_DATETIME:
- var num uint64
- num, isNull, n = LengthEncodedInt(p[pos:])
-
- pos += n
-
- if isNull {
- data[i] = nil
- continue
- }
-
- data[i], err = FormatBinaryDateTime(int(num), p[pos:])
- pos += int(num)
-
- if err != nil {
- return nil, errors.Trace(err)
- }
-
- case MYSQL_TYPE_TIME:
- var num uint64
- num, isNull, n = LengthEncodedInt(p[pos:])
-
- pos += n
-
- if isNull {
- data[i] = nil
- continue
- }
-
- data[i], err = FormatBinaryTime(int(num), p[pos:])
- pos += int(num)
-
- if err != nil {
- return nil, errors.Trace(err)
- }
-
- default:
- return nil, errors.Errorf("Stmt Unknown FieldType %d %s", f[i].Type, f[i].Name)
- }
- }
-
- return data, nil
-}
-
type Resultset struct {
Fields []*Field
FieldNames map[string]int
- Values [][]interface{}
+ Values [][]FieldValue
+
+ RawPkg []byte
RowDatas []RowData
}
+var (
+ resultsetPool = sync.Pool{
+ New: func() interface{} {
+ return &Resultset{}
+ },
+ }
+)
+
+func NewResultset(fieldsCount int) *Resultset {
+ r := resultsetPool.Get().(*Resultset)
+ r.Reset(fieldsCount)
+ return r
+}
+
+func (r *Resultset) returnToPool() {
+ resultsetPool.Put(r)
+}
+
+func (r *Resultset) Reset(fieldsCount int) {
+ r.RawPkg = r.RawPkg[:0]
+
+ r.Fields = r.Fields[:0]
+ r.Values = r.Values[:0]
+ r.RowDatas = r.RowDatas[:0]
+
+ if r.FieldNames != nil {
+ for k := range r.FieldNames {
+ delete(r.FieldNames, k)
+ }
+ } else {
+ r.FieldNames = make(map[string]int)
+ }
+
+ if fieldsCount == 0 {
+ return
+ }
+
+ if cap(r.Fields) < fieldsCount {
+ r.Fields = make([]*Field, fieldsCount)
+ } else {
+ r.Fields = r.Fields[:fieldsCount]
+ }
+}
+
func (r *Resultset) RowNumber() int {
return len(r.Values)
}
@@ -251,7 +80,7 @@ func (r *Resultset) GetValue(row, column int) (interface{}, error) {
return nil, errors.Errorf("invalid column index %d", column)
}
- return r.Values[row][column], nil
+ return r.Values[row][column].Value(), nil
}
func (r *Resultset) NameIndex(name string) (int, error) {
@@ -313,7 +142,7 @@ func (r *Resultset) GetUint(row, column int) (uint64, error) {
case uint32:
return uint64(v), nil
case uint64:
- return uint64(v), nil
+ return v, nil
case float32:
return uint64(v), nil
case float64:
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/resultset_helper.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/resultset_helper.go
similarity index 93%
rename from vendor/github.com/siddontang/go-mysql/mysql/resultset_helper.go
rename to vendor/github.com/go-mysql-org/go-mysql/mysql/resultset_helper.go
index 307684d..3c22c9c 100644
--- a/vendor/github.com/siddontang/go-mysql/mysql/resultset_helper.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/resultset_helper.go
@@ -4,7 +4,7 @@ import (
"math"
"strconv"
- "github.com/juju/errors"
+ "github.com/pingcap/errors"
"github.com/siddontang/go/hack"
)
@@ -17,7 +17,7 @@ func formatTextValue(value interface{}) ([]byte, error) {
case int32:
return strconv.AppendInt(nil, int64(v), 10), nil
case int64:
- return strconv.AppendInt(nil, int64(v), 10), nil
+ return strconv.AppendInt(nil, v, 10), nil
case int:
return strconv.AppendInt(nil, int64(v), 10), nil
case uint8:
@@ -27,13 +27,13 @@ func formatTextValue(value interface{}) ([]byte, error) {
case uint32:
return strconv.AppendUint(nil, uint64(v), 10), nil
case uint64:
- return strconv.AppendUint(nil, uint64(v), 10), nil
+ return strconv.AppendUint(nil, v, 10), nil
case uint:
return strconv.AppendUint(nil, uint64(v), 10), nil
case float32:
return strconv.AppendFloat(nil, float64(v), 'f', -1, 64), nil
case float64:
- return strconv.AppendFloat(nil, float64(v), 'f', -1, 64), nil
+ return strconv.AppendFloat(nil, v, 'f', -1, 64), nil
case []byte:
return v, nil
case string:
@@ -64,7 +64,7 @@ func formatBinaryValue(value interface{}) ([]byte, error) {
case uint32:
return Uint64ToBytes(uint64(v)), nil
case uint64:
- return Uint64ToBytes(uint64(v)), nil
+ return Uint64ToBytes(v), nil
case uint:
return Uint64ToBytes(uint64(v)), nil
case float32:
@@ -146,7 +146,10 @@ func BuildSimpleTextResultset(names []string, values [][]interface{}) (*Resultse
}
if r.Fields[j] == nil {
r.Fields[j] = &Field{Name: hack.Slice(names[j]), Type: typ}
- formatField(r.Fields[j], value)
+ err = formatField(r.Fields[j], value)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
} else if typ != r.Fields[j].Type {
// we got another type in the same column. in general, we treat it as an error, except
// the case, when old value was null, and the new one isn't null, so we can update
@@ -154,7 +157,10 @@ func BuildSimpleTextResultset(names []string, values [][]interface{}) (*Resultse
oldIsNull, newIsNull := r.Fields[j].Type == MYSQL_TYPE_NULL, typ == MYSQL_TYPE_NULL
if oldIsNull && !newIsNull { // old is null, new isn't, update type info.
r.Fields[j].Type = typ
- formatField(r.Fields[j], value)
+ err = formatField(r.Fields[j], value)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
} else if !oldIsNull && !newIsNull { // different non-null types, that's an error.
return nil, errors.Errorf("row types aren't consistent")
}
diff --git a/vendor/github.com/go-mysql-org/go-mysql/mysql/rowdata.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/rowdata.go
new file mode 100644
index 0000000..d3cfd0a
--- /dev/null
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/rowdata.go
@@ -0,0 +1,259 @@
+package mysql
+
+import (
+ "strconv"
+
+ "github.com/go-mysql-org/go-mysql/utils"
+ "github.com/pingcap/errors"
+)
+
+type RowData []byte
+
+func (p RowData) Parse(f []*Field, binary bool, dst []FieldValue) ([]FieldValue, error) {
+ if binary {
+ return p.ParseBinary(f, dst)
+ } else {
+ return p.ParseText(f, dst)
+ }
+}
+
+func (p RowData) ParseText(f []*Field, dst []FieldValue) ([]FieldValue, error) {
+ for len(dst) < len(f) {
+ dst = append(dst, FieldValue{})
+ }
+ data := dst[:len(f)]
+
+ var err error
+ var v []byte
+ var isNull bool
+ var pos int = 0
+ var n int = 0
+
+ for i := range f {
+ v, isNull, n, err = LengthEncodedString(p[pos:])
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ pos += n
+
+ if isNull {
+ data[i].Type = FieldValueTypeNull
+ } else {
+ isUnsigned := f[i].Flag&UNSIGNED_FLAG != 0
+
+ switch f[i].Type {
+ case MYSQL_TYPE_TINY, MYSQL_TYPE_SHORT, MYSQL_TYPE_INT24,
+ MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONG, MYSQL_TYPE_YEAR:
+ if isUnsigned {
+ var val uint64
+ data[i].Type = FieldValueTypeUnsigned
+ val, err = strconv.ParseUint(utils.ByteSliceToString(v), 10, 64)
+ data[i].value = val
+ } else {
+ var val int64
+ data[i].Type = FieldValueTypeSigned
+ val, err = strconv.ParseInt(utils.ByteSliceToString(v), 10, 64)
+ data[i].value = utils.Int64ToUint64(val)
+ }
+ case MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE:
+ var val float64
+ data[i].Type = FieldValueTypeFloat
+ val, err = strconv.ParseFloat(utils.ByteSliceToString(v), 64)
+ data[i].value = utils.Float64ToUint64(val)
+ default:
+ data[i].Type = FieldValueTypeString
+ data[i].str = append(data[i].str[:0], v...)
+ }
+
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ }
+ }
+
+ return data, nil
+}
+
+// ParseBinary parses the binary format of data
+// see https://dev.mysql.com/doc/internals/en/binary-protocol-value.html
+func (p RowData) ParseBinary(f []*Field, dst []FieldValue) ([]FieldValue, error) {
+ for len(dst) < len(f) {
+ dst = append(dst, FieldValue{})
+ }
+ data := dst[:len(f)]
+
+ if p[0] != OK_HEADER {
+ return nil, ErrMalformPacket
+ }
+
+ pos := 1 + ((len(f) + 7 + 2) >> 3)
+
+ nullBitmap := p[1:pos]
+
+ var isNull bool
+ var n int
+ var err error
+ var v []byte
+ for i := range data {
+ if nullBitmap[(i+2)/8]&(1<<(uint(i+2)%8)) > 0 {
+ data[i].Type = FieldValueTypeNull
+ continue
+ }
+
+ isUnsigned := f[i].Flag&UNSIGNED_FLAG != 0
+
+ switch f[i].Type {
+ case MYSQL_TYPE_NULL:
+ data[i].Type = FieldValueTypeNull
+ continue
+
+ case MYSQL_TYPE_TINY:
+ if isUnsigned {
+ v := ParseBinaryUint8(p[pos : pos+1])
+ data[i].Type = FieldValueTypeUnsigned
+ data[i].value = uint64(v)
+ } else {
+ v := ParseBinaryInt8(p[pos : pos+1])
+ data[i].Type = FieldValueTypeSigned
+ data[i].value = utils.Int64ToUint64(int64(v))
+ }
+ pos++
+ continue
+
+ case MYSQL_TYPE_SHORT, MYSQL_TYPE_YEAR:
+ if isUnsigned {
+ v := ParseBinaryUint16(p[pos : pos+2])
+ data[i].Type = FieldValueTypeUnsigned
+ data[i].value = uint64(v)
+ } else {
+ v := ParseBinaryInt16(p[pos : pos+2])
+ data[i].Type = FieldValueTypeSigned
+ data[i].value = utils.Int64ToUint64(int64(v))
+ }
+ pos += 2
+ continue
+
+ case MYSQL_TYPE_INT24, MYSQL_TYPE_LONG:
+ if isUnsigned {
+ v := ParseBinaryUint32(p[pos : pos+4])
+ data[i].Type = FieldValueTypeUnsigned
+ data[i].value = uint64(v)
+ } else {
+ v := ParseBinaryInt32(p[pos : pos+4])
+ data[i].Type = FieldValueTypeSigned
+ data[i].value = utils.Int64ToUint64(int64(v))
+ }
+ pos += 4
+ continue
+
+ case MYSQL_TYPE_LONGLONG:
+ if isUnsigned {
+ v := ParseBinaryUint64(p[pos : pos+8])
+ data[i].Type = FieldValueTypeUnsigned
+ data[i].value = v
+ } else {
+ v := ParseBinaryInt64(p[pos : pos+8])
+ data[i].Type = FieldValueTypeSigned
+ data[i].value = utils.Int64ToUint64(v)
+ }
+ pos += 8
+ continue
+
+ case MYSQL_TYPE_FLOAT:
+ v := ParseBinaryFloat32(p[pos : pos+4])
+ data[i].Type = FieldValueTypeFloat
+ data[i].value = utils.Float64ToUint64(float64(v))
+ pos += 4
+ continue
+
+ case MYSQL_TYPE_DOUBLE:
+ v := ParseBinaryFloat64(p[pos : pos+8])
+ data[i].Type = FieldValueTypeFloat
+ data[i].value = utils.Float64ToUint64(v)
+ pos += 8
+ continue
+
+ case MYSQL_TYPE_DECIMAL, MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR,
+ MYSQL_TYPE_BIT, MYSQL_TYPE_ENUM, MYSQL_TYPE_SET, MYSQL_TYPE_TINY_BLOB,
+ MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_BLOB,
+ MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_STRING, MYSQL_TYPE_GEOMETRY:
+ v, isNull, n, err = LengthEncodedString(p[pos:])
+ pos += n
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ if !isNull {
+ data[i].Type = FieldValueTypeString
+ data[i].str = append(data[i].str[:0], v...)
+ continue
+ } else {
+ data[i].Type = FieldValueTypeNull
+ continue
+ }
+
+ case MYSQL_TYPE_DATE, MYSQL_TYPE_NEWDATE:
+ var num uint64
+ num, isNull, n = LengthEncodedInt(p[pos:])
+
+ pos += n
+
+ if isNull {
+ data[i].Type = FieldValueTypeNull
+ continue
+ }
+
+ data[i].Type = FieldValueTypeString
+ data[i].str, err = FormatBinaryDate(int(num), p[pos:])
+ pos += int(num)
+
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ case MYSQL_TYPE_TIMESTAMP, MYSQL_TYPE_DATETIME:
+ var num uint64
+ num, isNull, n = LengthEncodedInt(p[pos:])
+
+ pos += n
+
+ if isNull {
+ data[i].Type = FieldValueTypeNull
+ continue
+ }
+
+ data[i].Type = FieldValueTypeString
+ data[i].str, err = FormatBinaryDateTime(int(num), p[pos:])
+ pos += int(num)
+
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ case MYSQL_TYPE_TIME:
+ var num uint64
+ num, isNull, n = LengthEncodedInt(p[pos:])
+
+ pos += n
+
+ if isNull {
+ data[i].Type = FieldValueTypeNull
+ continue
+ }
+
+ data[i].Type = FieldValueTypeString
+ data[i].str, err = FormatBinaryTime(int(num), p[pos:])
+ pos += int(num)
+
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ default:
+ return nil, errors.Errorf("Stmt Unknown FieldType %d %s", f[i].Type, f[i].Name)
+ }
+ }
+
+ return data, nil
+}
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/state.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/state.go
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/mysql/state.go
rename to vendor/github.com/go-mysql-org/go-mysql/mysql/state.go
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/util.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/util.go
similarity index 93%
rename from vendor/github.com/siddontang/go-mysql/mysql/util.go
rename to vendor/github.com/go-mysql-org/go-mysql/mysql/util.go
index 757910e..054fe4f 100644
--- a/vendor/github.com/siddontang/go-mysql/mysql/util.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/util.go
@@ -2,17 +2,17 @@ package mysql
import (
"crypto/rand"
+ "crypto/rsa"
"crypto/sha1"
+ "crypto/sha256"
"encoding/binary"
"fmt"
"io"
"runtime"
"strings"
- "github.com/juju/errors"
+ "github.com/pingcap/errors"
"github.com/siddontang/go/hack"
- "crypto/sha256"
- "crypto/rsa"
)
func Pstack() string {
@@ -50,7 +50,7 @@ func CalcPassword(scramble, password []byte) []byte {
return scramble
}
-// Hash password using MySQL 8+ method (SHA256)
+// CalcCachingSha2Password: Hash password using MySQL 8+ method (SHA256)
func CalcCachingSha2Password(scramble []byte, password string) []byte {
if len(password) == 0 {
return nil
@@ -78,7 +78,6 @@ func CalcCachingSha2Password(scramble []byte, password string) []byte {
return message1
}
-
func EncryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
plain := make([]byte, len(password)+1)
copy(plain, password)
@@ -90,7 +89,7 @@ func EncryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte,
return rsa.EncryptOAEP(sha1v, rand.Reader, pub, plain, nil)
}
-// encodes a uint64 value and appends it to the given bytes slice
+// AppendLengthEncodedInteger: encodes a uint64 value and appends it to the given bytes slice
func AppendLengthEncodedInteger(b []byte, n uint64) []byte {
switch {
case n <= 250:
@@ -115,7 +114,7 @@ func RandomBuf(size int) ([]byte, error) {
// avoid to generate '\0'
for i, b := range buf {
- if uint8(b) == 0 {
+ if b == 0 {
buf[i] = '0'
}
}
@@ -123,7 +122,7 @@ func RandomBuf(size int) ([]byte, error) {
return buf, nil
}
-// little endian
+// FixedLengthInt: little endian
func FixedLengthInt(buf []byte) uint64 {
var num uint64 = 0
for i, b := range buf {
@@ -132,7 +131,7 @@ func FixedLengthInt(buf []byte) uint64 {
return num
}
-// big endian
+// BFixedLengthInt: big endian
func BFixedLengthInt(buf []byte) uint64 {
var num uint64 = 0
for i, b := range buf {
@@ -162,8 +161,8 @@ func LengthEncodedInt(b []byte) (num uint64, isNull bool, n int) {
// 254: value of following 8
case 0xfe:
return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
- uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
- uint64(b[7])<<48 | uint64(b[8])<<56,
+ uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
+ uint64(b[7])<<48 | uint64(b[8])<<56,
false, 9
}
@@ -189,7 +188,7 @@ func PutLengthEncodedInt(n uint64) []byte {
return nil
}
-// returns the string read as a bytes slice, whether the value is NULL,
+// LengthEncodedString returns the string read as a bytes slice, whether the value is NULL,
// the number of bytes read and an error, in case the string is longer than
// the input slice
func LengthEncodedString(b []byte) ([]byte, bool, int, error) {
@@ -346,7 +345,7 @@ var (
EncodeMap [256]byte
)
-// only support utf-8
+// Escape: only support utf-8
func Escape(sql string) string {
dest := make([]byte, 0, 2*len(sql))
diff --git a/vendor/github.com/go-mysql-org/go-mysql/packet/conn.go b/vendor/github.com/go-mysql-org/go-mysql/packet/conn.go
new file mode 100644
index 0000000..c60f68e
--- /dev/null
+++ b/vendor/github.com/go-mysql-org/go-mysql/packet/conn.go
@@ -0,0 +1,288 @@
+package packet
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "net"
+ "sync"
+
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509"
+ "encoding/pem"
+
+ . "github.com/go-mysql-org/go-mysql/mysql"
+ "github.com/go-mysql-org/go-mysql/utils"
+ "github.com/pingcap/errors"
+)
+
+type BufPool struct {
+ pool *sync.Pool
+}
+
+func NewBufPool() *BufPool {
+ return &BufPool{
+ pool: &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ },
+ }
+}
+
+func (b *BufPool) Get() *bytes.Buffer {
+ return b.pool.Get().(*bytes.Buffer)
+}
+
+func (b *BufPool) Return(buf *bytes.Buffer) {
+ buf.Reset()
+ b.pool.Put(buf)
+}
+
+/*
+ Conn is the base class to handle MySQL protocol.
+*/
+type Conn struct {
+ net.Conn
+
+ // we removed the buffer reader because it will cause the SSLRequest to block (tls connection handshake won't be
+ // able to read the "Client Hello" data since it has been buffered into the buffer reader)
+
+ bufPool *BufPool
+ br *bufio.Reader
+ reader io.Reader
+
+ copyNBuf []byte
+
+ header [4]byte
+
+ Sequence uint8
+}
+
+func NewConn(conn net.Conn) *Conn {
+ c := new(Conn)
+ c.Conn = conn
+
+ c.bufPool = NewBufPool()
+ c.br = bufio.NewReaderSize(c, 65536) // 64kb
+ c.reader = c.br
+
+ c.copyNBuf = make([]byte, 16*1024)
+
+ return c
+}
+
+func NewTLSConn(conn net.Conn) *Conn {
+ c := new(Conn)
+ c.Conn = conn
+
+ c.bufPool = NewBufPool()
+ c.reader = c
+
+ c.copyNBuf = make([]byte, 16*1024)
+
+ return c
+}
+
+func (c *Conn) ReadPacket() ([]byte, error) {
+ return c.ReadPacketReuseMem(nil)
+}
+
+func (c *Conn) ReadPacketReuseMem(dst []byte) ([]byte, error) {
+ // Here we use `sync.Pool` to avoid allocate/destroy buffers frequently.
+ buf := utils.BytesBufferGet()
+ defer utils.BytesBufferPut(buf)
+
+ if err := c.ReadPacketTo(buf); err != nil {
+ return nil, errors.Trace(err)
+ } else {
+ result := append(dst, buf.Bytes()...)
+ return result, nil
+ }
+}
+
+func (c *Conn) copyN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
+ for n > 0 {
+ bcap := cap(c.copyNBuf)
+ if int64(bcap) > n {
+ bcap = int(n)
+ }
+ buf := c.copyNBuf[:bcap]
+
+ rd, err := io.ReadAtLeast(src, buf, bcap)
+ n -= int64(rd)
+
+ if err != nil {
+ return written, errors.Trace(err)
+ }
+
+ wr, err := dst.Write(buf)
+ written += int64(wr)
+ if err != nil {
+ return written, errors.Trace(err)
+ }
+ }
+
+ return written, nil
+}
+
+func (c *Conn) ReadPacketTo(w io.Writer) error {
+ if _, err := io.ReadFull(c.reader, c.header[:4]); err != nil {
+ return errors.Wrapf(ErrBadConn, "io.ReadFull(header) failed. err %v", err)
+ }
+
+ length := int(uint32(c.header[0]) | uint32(c.header[1])<<8 | uint32(c.header[2])<<16)
+ sequence := c.header[3]
+
+ if sequence != c.Sequence {
+ return errors.Errorf("invalid sequence %d != %d", sequence, c.Sequence)
+ }
+
+ c.Sequence++
+
+ if buf, ok := w.(*bytes.Buffer); ok {
+ // Allocate the buffer with expected length directly instead of call `grow` and migrate data many times.
+ buf.Grow(length)
+ }
+
+ if n, err := c.copyN(w, c.reader, int64(length)); err != nil {
+ return errors.Wrapf(ErrBadConn, "io.CopyN failed. err %v, copied %v, expected %v", err, n, length)
+ } else if n != int64(length) {
+ return errors.Wrapf(ErrBadConn, "io.CopyN failed(n != int64(length)). %v bytes copied, while %v expected", n, length)
+ } else {
+ if length < MaxPayloadLen {
+ return nil
+ }
+
+ if err := c.ReadPacketTo(w); err != nil {
+ return errors.Wrap(err, "ReadPacketTo failed")
+ }
+ }
+
+ return nil
+}
+
+// WritePacket: data already has 4 bytes header
+// will modify data inplace
+func (c *Conn) WritePacket(data []byte) error {
+ length := len(data) - 4
+
+ for length >= MaxPayloadLen {
+ data[0] = 0xff
+ data[1] = 0xff
+ data[2] = 0xff
+
+ data[3] = c.Sequence
+
+ if n, err := c.Write(data[:4+MaxPayloadLen]); err != nil {
+ return errors.Wrapf(ErrBadConn, "Write(payload portion) failed. err %v", err)
+ } else if n != (4 + MaxPayloadLen) {
+ return errors.Wrapf(ErrBadConn, "Write(payload portion) failed. only %v bytes written, while %v expected", n, 4+MaxPayloadLen)
+ } else {
+ c.Sequence++
+ length -= MaxPayloadLen
+ data = data[MaxPayloadLen:]
+ }
+ }
+
+ data[0] = byte(length)
+ data[1] = byte(length >> 8)
+ data[2] = byte(length >> 16)
+ data[3] = c.Sequence
+
+ if n, err := c.Write(data); err != nil {
+ return errors.Wrapf(ErrBadConn, "Write failed. err %v", err)
+ } else if n != len(data) {
+ return errors.Wrapf(ErrBadConn, "Write failed. only %v bytes written, while %v expected", n, len(data))
+ } else {
+ c.Sequence++
+ return nil
+ }
+}
+
+// WriteClearAuthPacket: Client clear text authentication packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (c *Conn) WriteClearAuthPacket(password string) error {
+ // Calculate the packet length and add a tailing 0
+ pktLen := len(password) + 1
+ data := make([]byte, 4+pktLen)
+
+ // Add the clear password [null terminated string]
+ copy(data[4:], password)
+ data[4+pktLen-1] = 0x00
+
+ return errors.Wrap(c.WritePacket(data), "WritePacket failed")
+}
+
+// WritePublicKeyAuthPacket: Caching sha2 authentication. Public key request and send encrypted password
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (c *Conn) WritePublicKeyAuthPacket(password string, cipher []byte) error {
+ // request public key
+ data := make([]byte, 4+1)
+ data[4] = 2 // cachingSha2PasswordRequestPublicKey
+ if err := c.WritePacket(data); err != nil {
+ return errors.Wrap(err, "WritePacket(single byte) failed")
+ }
+
+ data, err := c.ReadPacket()
+ if err != nil {
+ return errors.Wrap(err, "ReadPacket failed")
+ }
+
+ block, _ := pem.Decode(data[1:])
+ pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return errors.Wrap(err, "x509.ParsePKIXPublicKey failed")
+ }
+
+ plain := make([]byte, len(password)+1)
+ copy(plain, password)
+ for i := range plain {
+ j := i % len(cipher)
+ plain[i] ^= cipher[j]
+ }
+ sha1v := sha1.New()
+ enc, _ := rsa.EncryptOAEP(sha1v, rand.Reader, pub.(*rsa.PublicKey), plain, nil)
+ data = make([]byte, 4+len(enc))
+ copy(data[4:], enc)
+ return errors.Wrap(c.WritePacket(data), "WritePacket failed")
+}
+
+func (c *Conn) WriteEncryptedPassword(password string, seed []byte, pub *rsa.PublicKey) error {
+ enc, err := EncryptPassword(password, seed, pub)
+ if err != nil {
+ return errors.Wrap(err, "EncryptPassword failed")
+ }
+ return errors.Wrap(c.WriteAuthSwitchPacket(enc, false), "WriteAuthSwitchPacket failed")
+}
+
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (c *Conn) WriteAuthSwitchPacket(authData []byte, addNUL bool) error {
+ pktLen := 4 + len(authData)
+ if addNUL {
+ pktLen++
+ }
+ data := make([]byte, pktLen)
+
+ // Add the auth data [EOF]
+ copy(data[4:], authData)
+ if addNUL {
+ data[pktLen-1] = 0x00
+ }
+
+ return errors.Wrap(c.WritePacket(data), "WritePacket failed")
+}
+
+func (c *Conn) ResetSequence() {
+ c.Sequence = 0
+}
+
+func (c *Conn) Close() error {
+ c.Sequence = 0
+ if c.Conn != nil {
+ return errors.Wrap(c.Conn.Close(), "Conn.Close failed")
+ }
+ return nil
+}
diff --git a/vendor/github.com/siddontang/go-mysql/replication/backup.go b/vendor/github.com/go-mysql-org/go-mysql/replication/backup.go
similarity index 89%
rename from vendor/github.com/siddontang/go-mysql/replication/backup.go
rename to vendor/github.com/go-mysql-org/go-mysql/replication/backup.go
index 24a25ae..c299ebc 100644
--- a/vendor/github.com/siddontang/go-mysql/replication/backup.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/replication/backup.go
@@ -7,11 +7,11 @@ import (
"path"
"time"
- "github.com/juju/errors"
- . "github.com/siddontang/go-mysql/mysql"
+ . "github.com/go-mysql-org/go-mysql/mysql"
+ "github.com/pingcap/errors"
)
-// Like mysqlbinlog remote raw backup
+// StartBackup: Like mysqlbinlog remote raw backup
// Backup remote binlog from position (filename, offset) and write in backupDir
func (b *BinlogSyncer) StartBackup(backupDir string, p Position, timeout time.Duration) error {
if timeout == 0 {
@@ -22,7 +22,9 @@ func (b *BinlogSyncer) StartBackup(backupDir string, p Position, timeout time.Du
// Force use raw mode
b.parser.SetRawMode(true)
- os.MkdirAll(backupDir, 0755)
+ if err := os.MkdirAll(backupDir, 0755); err != nil {
+ return errors.Trace(err)
+ }
s, err := b.StartSync(p)
if err != nil {
@@ -82,7 +84,6 @@ func (b *BinlogSyncer) StartBackup(backupDir string, p Position, timeout time.Du
if _, err = f.Write(BinLogFileHeader); err != nil {
return errors.Trace(err)
}
-
}
if n, err := f.Write(e.RawData); err != nil {
diff --git a/vendor/github.com/siddontang/go-mysql/replication/binlogstreamer.go b/vendor/github.com/go-mysql-org/go-mysql/replication/binlogstreamer.go
similarity index 84%
rename from vendor/github.com/siddontang/go-mysql/replication/binlogstreamer.go
rename to vendor/github.com/go-mysql-org/go-mysql/replication/binlogstreamer.go
index c1e4057..56b8622 100644
--- a/vendor/github.com/siddontang/go-mysql/replication/binlogstreamer.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/replication/binlogstreamer.go
@@ -3,7 +3,8 @@ package replication
import (
"context"
"time"
- "github.com/juju/errors"
+
+ "github.com/pingcap/errors"
"github.com/siddontang/go-log/log"
)
@@ -36,9 +37,9 @@ func (s *BinlogStreamer) GetEvent(ctx context.Context) (*BinlogEvent, error) {
}
}
-// Get the binlog event with starttime, if current binlog event timestamp smaller than specify starttime
-// return nil event
-func (s *BinlogStreamer) GetEventWithStartTime(ctx context.Context,startTime time.Time) (*BinlogEvent, error) {
+// GetEventWithStartTime gets the binlog event with starttime, if current binlog event timestamp smaller than specify starttime
+// return nil event
+func (s *BinlogStreamer) GetEventWithStartTime(ctx context.Context, startTime time.Time) (*BinlogEvent, error) {
if s.err != nil {
return nil, ErrNeedSyncAgain
}
@@ -48,7 +49,7 @@ func (s *BinlogStreamer) GetEventWithStartTime(ctx context.Context,startTime tim
if int64(c.Header.Timestamp) >= startUnix {
return c, nil
}
- return nil,nil
+ return nil, nil
case s.err = <-s.ech:
return nil, s.err
case <-ctx.Done():
@@ -67,14 +68,16 @@ func (s *BinlogStreamer) DumpEvents() []*BinlogEvent {
}
func (s *BinlogStreamer) close() {
- s.closeWithError(ErrSyncClosed)
+ s.closeWithError(nil)
}
func (s *BinlogStreamer) closeWithError(err error) {
if err == nil {
err = ErrSyncClosed
+ } else {
+ log.Errorf("close sync with err: %v", err)
}
- log.Errorf("close sync with err: %v", err)
+
select {
case s.ech <- err:
default:
diff --git a/vendor/github.com/siddontang/go-mysql/replication/binlogsyncer.go b/vendor/github.com/go-mysql-org/go-mysql/replication/binlogsyncer.go
similarity index 78%
rename from vendor/github.com/siddontang/go-mysql/replication/binlogsyncer.go
rename to vendor/github.com/go-mysql-org/go-mysql/replication/binlogsyncer.go
index 552798b..a5dfcb5 100644
--- a/vendor/github.com/siddontang/go-mysql/replication/binlogsyncer.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/replication/binlogsyncer.go
@@ -7,15 +7,14 @@ import (
"fmt"
"net"
"os"
- "strings"
"sync"
"time"
- "github.com/juju/errors"
- "github.com/satori/go.uuid"
+ "github.com/go-mysql-org/go-mysql/client"
+ . "github.com/go-mysql-org/go-mysql/mysql"
+ "github.com/pingcap/errors"
+ uuid "github.com/satori/go.uuid"
"github.com/siddontang/go-log/log"
- "github.com/siddontang/go-mysql/client"
- . "github.com/siddontang/go-mysql/mysql"
)
var (
@@ -84,15 +83,31 @@ type BinlogSyncerConfig struct {
// read timeout
ReadTimeout time.Duration
- // maximum number of attempts to re-establish a broken connection
+ // maximum number of attempts to re-establish a broken connection, zero or negative number means infinite retry.
+ // this configuration will not work if DisableRetrySync is true
MaxReconnectAttempts int
+ // whether disable re-sync for broken connection
+ DisableRetrySync bool
+
// Only works when MySQL/MariaDB variable binlog_checksum=CRC32.
// For MySQL, binlog_checksum was introduced since 5.6.2, but CRC32 was set as default value since 5.6.6 .
// https://dev.mysql.com/doc/refman/5.6/en/replication-options-binary-log.html#option_mysqld_binlog-checksum
// For MariaDB, binlog_checksum was introduced since MariaDB 5.3, but CRC32 was set as default value since MariaDB 10.2.1 .
// https://mariadb.com/kb/en/library/replication-and-binary-log-server-system-variables/#binlog_checksum
VerifyChecksum bool
+
+ // DumpCommandFlag is used to send binlog dump command. Default 0, aka BINLOG_DUMP_NEVER_STOP.
+ // For MySQL, BINLOG_DUMP_NEVER_STOP and BINLOG_DUMP_NON_BLOCK are available.
+ // https://dev.mysql.com/doc/internals/en/com-binlog-dump.html#binlog-dump-non-block
+ // For MariaDB, BINLOG_DUMP_NEVER_STOP, BINLOG_DUMP_NON_BLOCK and BINLOG_SEND_ANNOTATE_ROWS_EVENT are available.
+ // https://mariadb.com/kb/en/library/com_binlog_dump/
+ // https://mariadb.com/kb/en/library/annotate_rows_event/
+ DumpCommandFlag uint16
+
+ // Option function is used for settings outside of BinlogSyncerConfig, applied between the mysql connection and COM_REGISTER_SLAVE
+ // For MariaDB: slave_gtid_ignore_duplicates, skip_replication, slave_until_gtid
+ Option func(*client.Conn) error
}
// BinlogSyncer syncs binlog event from server.
@@ -109,7 +124,7 @@ type BinlogSyncer struct {
nextPos Position
- gset GTIDSet
+ prevGset, currGset GTIDSet
running bool
@@ -137,6 +152,7 @@ func NewBinlogSyncer(cfg BinlogSyncerConfig) *BinlogSyncer {
b.cfg = cfg
b.parser = NewBinlogParser()
+ b.parser.SetFlavor(cfg.Flavor)
b.parser.SetRawMode(b.cfg.RawModeEnabled)
b.parser.SetParseTime(b.cfg.ParseTime)
b.parser.SetTimestampStringLocation(b.cfg.TimestampStringLocation)
@@ -167,7 +183,22 @@ func (b *BinlogSyncer) close() {
b.cancel()
if b.c != nil {
- b.c.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
+ err := b.c.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
+ if err != nil {
+ log.Warnf(`could not set read deadline: %s`, err)
+ }
+ }
+
+ // kill last connection id
+ if b.lastConnectionID > 0 {
+ // Use a new connection to kill the binlog syncer
+ // because calling KILL from the same connection
+ // doesn't actually disconnect it.
+ c, err := b.newConnection()
+ if err == nil {
+ b.killConnection(c, b.lastConnectionID)
+ c.Close()
+ }
}
b.wg.Wait()
@@ -193,48 +224,38 @@ func (b *BinlogSyncer) registerSlave() error {
b.c.Close()
}
- addr := ""
- if strings.Contains(b.cfg.Host, "/") {
- addr = b.cfg.Host
- } else {
- addr = fmt.Sprintf("%s:%d", b.cfg.Host, b.cfg.Port)
- }
-
- log.Infof("register slave for master server %s", addr)
var err error
- b.c, err = client.Connect(addr, b.cfg.User, b.cfg.Password, "", func(c *client.Conn) {
- c.SetTLSConfig(b.cfg.TLSConfig)
- })
+ b.c, err = b.newConnection()
if err != nil {
return errors.Trace(err)
}
+ if b.cfg.Option != nil {
+ if err = b.cfg.Option(b.c); err != nil {
+ return errors.Trace(err)
+ }
+ }
+
if len(b.cfg.Charset) != 0 {
- b.c.SetCharset(b.cfg.Charset)
+ if err = b.c.SetCharset(b.cfg.Charset); err != nil {
+ return errors.Trace(err)
+ }
}
//set read timeout
if b.cfg.ReadTimeout > 0 {
- b.c.SetReadDeadline(time.Now().Add(b.cfg.ReadTimeout))
+ _ = b.c.SetReadDeadline(time.Now().Add(b.cfg.ReadTimeout))
}
if b.cfg.RecvBufferSize > 0 {
if tcp, ok := b.c.Conn.Conn.(*net.TCPConn); ok {
- tcp.SetReadBuffer(b.cfg.RecvBufferSize)
+ _ = tcp.SetReadBuffer(b.cfg.RecvBufferSize)
}
}
// kill last connection id
if b.lastConnectionID > 0 {
- cmd := fmt.Sprintf("KILL %d", b.lastConnectionID)
- if _, err := b.c.Execute(cmd); err != nil {
- log.Errorf("kill connection %d error %v", b.lastConnectionID, err)
- // Unknown thread id
- if code := ErrorCode(err.Error()); code != ER_NO_SUCH_THREAD {
- return errors.Trace(err)
- }
- }
- log.Infof("kill last connection id %d", b.lastConnectionID)
+ b.killConnection(b.c, b.lastConnectionID)
}
// save last last connection id for kill
@@ -262,7 +283,6 @@ func (b *BinlogSyncer) registerSlave() error {
// if _, err = b.c.Execute(`SET @master_binlog_checksum=@@global.binlog_checksum`); err != nil {
// return errors.Trace(err)
// }
-
}
}
@@ -371,7 +391,7 @@ func (b *BinlogSyncer) StartSync(pos Position) (*BinlogStreamer, error) {
func (b *BinlogSyncer) StartSyncGTID(gset GTIDSet) (*BinlogStreamer, error) {
log.Infof("begin to sync binlog from GTID set %s", gset)
- b.gset = gset
+ b.prevGset = gset
b.m.Lock()
defer b.m.Unlock()
@@ -380,6 +400,10 @@ func (b *BinlogSyncer) StartSyncGTID(gset GTIDSet) (*BinlogStreamer, error) {
return nil, errors.Trace(errSyncRunning)
}
+ // establishing network connection here and will start getting binlog events from "gset + 1", thus until first
+ // MariadbGTIDEvent/GTIDEvent event is received - we effectively do not have a "current GTID"
+ b.currGset = nil
+
if err := b.prepare(); err != nil {
return nil, errors.Trace(err)
}
@@ -412,7 +436,7 @@ func (b *BinlogSyncer) writeBinlogDumpCommand(p Position) error {
binary.LittleEndian.PutUint32(data[pos:], p.Pos)
pos += 4
- binary.LittleEndian.PutUint16(data[pos:], BINLOG_DUMP_NEVER_STOP)
+ binary.LittleEndian.PutUint16(data[pos:], b.cfg.DumpCommandFlag)
pos += 2
binary.LittleEndian.PutUint32(data[pos:], b.cfg.ServerID)
@@ -564,9 +588,14 @@ func (b *BinlogSyncer) retrySync() error {
b.parser.Reset()
- if b.gset != nil {
- log.Infof("begin to re-sync from %s", b.gset.String())
- if err := b.prepareSyncGTID(b.gset); err != nil {
+ if b.prevGset != nil {
+ msg := fmt.Sprintf("begin to re-sync from %s", b.prevGset.String())
+ if b.currGset != nil {
+ msg = fmt.Sprintf("%v (last read GTID=%v)", msg, b.currGset)
+ }
+ log.Infof(msg)
+
+ if err := b.prepareSyncGTID(b.prevGset); err != nil {
return errors.Trace(err)
}
} else {
@@ -599,6 +628,10 @@ func (b *BinlogSyncer) prepareSyncPos(pos Position) error {
func (b *BinlogSyncer) prepareSyncGTID(gset GTIDSet) error {
var err error
+ // re-establishing network connection here and will start getting binlog events from "gset + 1", thus until first
+ // MariadbGTIDEvent/GTIDEvent event is received - we effectively do not have a "current GTID"
+ b.currGset = nil
+
if err = b.prepare(); err != nil {
return errors.Trace(err)
}
@@ -627,17 +660,29 @@ func (b *BinlogSyncer) onStream(s *BinlogStreamer) {
for {
data, err := b.c.ReadPacket()
+ select {
+ case <-b.ctx.Done():
+ s.close()
+ return
+ default:
+ }
+
if err != nil {
log.Error(err)
-
// we meet connection error, should re-connect again with
// last nextPos or nextGTID we got.
- if len(b.nextPos.Name) == 0 && b.gset == nil {
+ if len(b.nextPos.Name) == 0 && b.prevGset == nil {
// we can't get the correct position, close.
s.closeWithError(err)
return
}
+ if b.cfg.DisableRetrySync {
+ log.Warn("retry sync is disabled")
+ s.closeWithError(err)
+ return
+ }
+
for {
select {
case <-b.ctx.Done():
@@ -666,7 +711,7 @@ func (b *BinlogSyncer) onStream(s *BinlogStreamer) {
//set read timeout
if b.cfg.ReadTimeout > 0 {
- b.c.SetReadDeadline(time.Now().Add(b.cfg.ReadTimeout))
+ _ = b.c.SetReadDeadline(time.Now().Add(b.cfg.ReadTimeout))
}
// Reset retry count on successful packet receieve
@@ -716,33 +761,56 @@ func (b *BinlogSyncer) parseEvent(s *BinlogStreamer, data []byte) error {
// Some events like FormatDescriptionEvent return 0, ignore.
b.nextPos.Pos = e.Header.LogPos
}
+
+ getCurrentGtidSet := func() GTIDSet {
+ if b.currGset == nil {
+ return nil
+ }
+ return b.currGset.Clone()
+ }
+
+ advanceCurrentGtidSet := func(gtid string) error {
+ if b.currGset == nil {
+ b.currGset = b.prevGset.Clone()
+ }
+ prev := b.currGset.Clone()
+ err := b.currGset.Update(gtid)
+ if err == nil {
+ // right after reconnect we will see same gtid as we saw before, thus currGset will not get changed
+ if !b.currGset.Equal(prev) {
+ b.prevGset = prev
+ }
+ }
+ return err
+ }
+
switch event := e.Event.(type) {
case *RotateEvent:
b.nextPos.Name = string(event.NextLogName)
b.nextPos.Pos = uint32(event.Position)
log.Infof("rotate to %s", b.nextPos)
case *GTIDEvent:
- if b.gset == nil {
+ if b.prevGset == nil {
break
}
u, _ := uuid.FromBytes(event.SID)
- err := b.gset.Update(fmt.Sprintf("%s:%d", u.String(), event.GNO))
+ err := advanceCurrentGtidSet(fmt.Sprintf("%s:%d", u.String(), event.GNO))
if err != nil {
return errors.Trace(err)
}
case *MariadbGTIDEvent:
- if b.gset == nil {
+ if b.prevGset == nil {
break
}
GTID := event.GTID
- err := b.gset.Update(fmt.Sprintf("%d-%d-%d", GTID.DomainID, GTID.ServerID, GTID.SequenceNumber))
+ err := advanceCurrentGtidSet(fmt.Sprintf("%d-%d-%d", GTID.DomainID, GTID.ServerID, GTID.SequenceNumber))
if err != nil {
return errors.Trace(err)
}
case *XIDEvent:
- event.GSet = b.getGtidSet()
+ event.GSet = getCurrentGtidSet()
case *QueryEvent:
- event.GSet = b.getGtidSet()
+ event.GSet = getCurrentGtidSet()
}
needStop := false
@@ -766,14 +834,32 @@ func (b *BinlogSyncer) parseEvent(s *BinlogStreamer, data []byte) error {
return nil
}
-func (b *BinlogSyncer) getGtidSet() GTIDSet {
- if b.gset == nil {
- return nil
- }
- return b.gset.Clone()
-}
-
// LastConnectionID returns last connectionID.
func (b *BinlogSyncer) LastConnectionID() uint32 {
return b.lastConnectionID
}
+
+func (b *BinlogSyncer) newConnection() (*client.Conn, error) {
+ var addr string
+ if b.cfg.Port != 0 {
+ addr = fmt.Sprintf("%s:%d", b.cfg.Host, b.cfg.Port)
+ } else {
+ addr = b.cfg.Host
+ }
+
+ return client.Connect(addr, b.cfg.User, b.cfg.Password, "", func(c *client.Conn) {
+ c.SetTLSConfig(b.cfg.TLSConfig)
+ })
+}
+
+func (b *BinlogSyncer) killConnection(conn *client.Conn, id uint32) {
+ cmd := fmt.Sprintf("KILL %d", id)
+ if _, err := conn.Execute(cmd); err != nil {
+ log.Errorf("kill connection %d error %v", id, err)
+ // Unknown thread id
+ if code := ErrorCode(err.Error()); code != ER_NO_SUCH_THREAD {
+ log.Error(errors.Trace(err))
+ }
+ }
+ log.Infof("kill last connection id %d", id)
+}
diff --git a/vendor/github.com/siddontang/go-mysql/replication/const.go b/vendor/github.com/go-mysql-org/go-mysql/replication/const.go
similarity index 69%
rename from vendor/github.com/siddontang/go-mysql/replication/const.go
rename to vendor/github.com/go-mysql-org/go-mysql/replication/const.go
index ef82b6c..6230257 100644
--- a/vendor/github.com/siddontang/go-mysql/replication/const.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/replication/const.go
@@ -26,10 +26,11 @@ const (
)
const (
- BINLOG_DUMP_NEVER_STOP uint16 = 0x00
- BINLOG_DUMP_NON_BLOCK uint16 = 0x01
- BINLOG_THROUGH_POSITION uint16 = 0x02
- BINLOG_THROUGH_GTID uint16 = 0x04
+ BINLOG_DUMP_NEVER_STOP uint16 = 0x00
+ BINLOG_DUMP_NON_BLOCK uint16 = 0x01
+ BINLOG_SEND_ANNOTATE_ROWS_EVENT uint16 = 0x02
+ BINLOG_THROUGH_POSITION uint16 = 0x02
+ BINLOG_THROUGH_GTID uint16 = 0x04
)
const (
@@ -38,6 +39,15 @@ const (
BINLOG_ROW_IMAGE_NOBLOB = "NOBLOB"
)
+const (
+ BINLOG_MARIADB_FL_STANDALONE = 1 << iota /*1 - FL_STANDALONE is set when there is no terminating COMMIT event*/
+ BINLOG_MARIADB_FL_GROUP_COMMIT_ID /*2 - FL_GROUP_COMMIT_ID is set when event group is part of a group commit on the master. Groups with same commit_id are part of the same group commit.*/
+ BINLOG_MARIADB_FL_TRANSACTIONAL /*4 - FL_TRANSACTIONAL is set for an event group that can be safely rolled back (no MyISAM, eg.).*/
+ BINLOG_MARIADB_FL_ALLOW_PARALLEL /*8 - FL_ALLOW_PARALLEL reflects the (negation of the) value of @@SESSION.skip_parallel_replication at the time of commit*/
+ BINLOG_MARIADB_FL_WAITED /*16 = FL_WAITED is set if a row lock wait (or other wait) is detected during the execution of the transaction*/
+ BINLOG_MARIADB_FL_DDL /*32 - FL_DDL is set for event group containing DDL*/
+)
+
type EventType byte
const (
@@ -77,6 +87,9 @@ const (
GTID_EVENT
ANONYMOUS_GTID_EVENT
PREVIOUS_GTIDS_EVENT
+ TRANSACTION_CONTEXT_EVENT
+ VIEW_CHANGE_EVENT
+ XA_PREPARE_LOG_EVENT
)
const (
@@ -169,6 +182,12 @@ func (e EventType) String() string {
return "MariadbGTIDEvent"
case MARIADB_GTID_LIST_EVENT:
return "MariadbGTIDListEvent"
+ case TRANSACTION_CONTEXT_EVENT:
+ return "TransactionContextEvent"
+ case VIEW_CHANGE_EVENT:
+ return "ViewChangeEvent"
+ case XA_PREPARE_LOG_EVENT:
+ return "XAPrepareLogEvent"
default:
return "UnknownEvent"
@@ -183,3 +202,18 @@ const (
BINLOG_CHECKSUM_ALG_UNDEF byte = 255 // special value to tag undetermined yet checksum
// or events from checksum-unaware servers
)
+
+// These are TABLE_MAP_EVENT's optional metadata field type, from: libbinlogevents/include/rows_event.h
+const (
+ TABLE_MAP_OPT_META_SIGNEDNESS byte = iota + 1
+ TABLE_MAP_OPT_META_DEFAULT_CHARSET
+ TABLE_MAP_OPT_META_COLUMN_CHARSET
+ TABLE_MAP_OPT_META_COLUMN_NAME
+ TABLE_MAP_OPT_META_SET_STR_VALUE
+ TABLE_MAP_OPT_META_ENUM_STR_VALUE
+ TABLE_MAP_OPT_META_GEOMETRY_TYPE
+ TABLE_MAP_OPT_META_SIMPLE_PRIMARY_KEY
+ TABLE_MAP_OPT_META_PRIMARY_KEY_WITH_PREFIX
+ TABLE_MAP_OPT_META_ENUM_AND_SET_DEFAULT_CHARSET
+ TABLE_MAP_OPT_META_ENUM_AND_SET_COLUMN_CHARSET
+)
diff --git a/vendor/github.com/siddontang/go-mysql/replication/doc.go b/vendor/github.com/go-mysql-org/go-mysql/replication/doc.go
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/replication/doc.go
rename to vendor/github.com/go-mysql-org/go-mysql/replication/doc.go
diff --git a/vendor/github.com/siddontang/go-mysql/replication/event.go b/vendor/github.com/go-mysql-org/go-mysql/replication/event.go
similarity index 65%
rename from vendor/github.com/siddontang/go-mysql/replication/event.go
rename to vendor/github.com/go-mysql-org/go-mysql/replication/event.go
index 737b431..90b38d3 100644
--- a/vendor/github.com/siddontang/go-mysql/replication/event.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/replication/event.go
@@ -2,6 +2,7 @@ package replication
import (
"encoding/binary"
+ "encoding/hex"
"fmt"
"io"
"strconv"
@@ -9,9 +10,10 @@ import (
"time"
"unicode"
- "github.com/juju/errors"
- "github.com/satori/go.uuid"
- . "github.com/siddontang/go-mysql/mysql"
+ "github.com/pingcap/errors"
+ uuid "github.com/satori/go.uuid"
+
+ . "github.com/go-mysql-org/go-mysql/mysql"
)
const (
@@ -20,6 +22,7 @@ const (
LogicalTimestampTypeCode = 2
PartLogicalTimestampLength = 8
BinlogChecksumLength = 4
+ UndefinedServerVer = 999999 // UNDEFINED_SERVER_VERSION
)
type BinlogEvent struct {
@@ -98,7 +101,7 @@ func (h *EventHeader) Decode(data []byte) error {
}
func (h *EventHeader) Dump(w io.Writer) {
- fmt.Fprintf(w, "=== %s ===\n", EventType(h.EventType))
+ fmt.Fprintf(w, "=== %s ===\n", h.EventType)
fmt.Fprintf(w, "Date: %s\n", time.Unix(int64(h.Timestamp), 0).Format(TimeFormat))
fmt.Fprintf(w, "Log position: %d\n", h.LogPos)
fmt.Fprintf(w, "Event size: %d\n", h.EventSize)
@@ -217,6 +220,55 @@ func (e *RotateEvent) Dump(w io.Writer) {
fmt.Fprintln(w)
}
+type PreviousGTIDsEvent struct {
+ GTIDSets string
+}
+
+func (e *PreviousGTIDsEvent) Decode(data []byte) error {
+ var previousGTIDSets []string
+ pos := 0
+ uuidCount := binary.LittleEndian.Uint16(data[pos : pos+8])
+ pos += 8
+
+ for i := uint16(0); i < uuidCount; i++ {
+ uuid := e.decodeUuid(data[pos : pos+16])
+ pos += 16
+ sliceCount := binary.LittleEndian.Uint16(data[pos : pos+8])
+ pos += 8
+ var intervals []string
+ for i := uint16(0); i < sliceCount; i++ {
+ start := e.decodeInterval(data[pos : pos+8])
+ pos += 8
+ stop := e.decodeInterval(data[pos : pos+8])
+ pos += 8
+ interval := ""
+ if stop == start+1 {
+ interval = fmt.Sprintf("%d", start)
+ } else {
+ interval = fmt.Sprintf("%d-%d", start, stop-1)
+ }
+ intervals = append(intervals, interval)
+ }
+ previousGTIDSets = append(previousGTIDSets, fmt.Sprintf("%s:%s", uuid, strings.Join(intervals, ":")))
+ }
+ e.GTIDSets = fmt.Sprintf("%s", strings.Join(previousGTIDSets, ","))
+ return nil
+}
+
+func (e *PreviousGTIDsEvent) Dump(w io.Writer) {
+ fmt.Fprintf(w, "Previous GTID Event: %s\n", e.GTIDSets)
+ fmt.Fprintln(w)
+}
+
+func (e *PreviousGTIDsEvent) decodeUuid(data []byte) string {
+ return fmt.Sprintf("%s-%s-%s-%s-%s", hex.EncodeToString(data[0:4]), hex.EncodeToString(data[4:6]),
+ hex.EncodeToString(data[6:8]), hex.EncodeToString(data[8:10]), hex.EncodeToString(data[10:]))
+}
+
+func (e *PreviousGTIDsEvent) decodeInterval(data []byte) uint64 {
+ return binary.LittleEndian.Uint64(data)
+}
+
type XIDEvent struct {
XID uint64
@@ -258,7 +310,7 @@ func (e *QueryEvent) Decode(data []byte) error {
e.ExecutionTime = binary.LittleEndian.Uint32(data[pos:])
pos += 4
- schemaLength := uint8(data[pos])
+ schemaLength := data[pos]
pos++
e.ErrorCode = binary.LittleEndian.Uint16(data[pos:])
@@ -299,36 +351,118 @@ type GTIDEvent struct {
GNO int64
LastCommitted int64
SequenceNumber int64
+
+ // ImmediateCommitTimestamp/OriginalCommitTimestamp are introduced in MySQL-8.0.1, see:
+ // https://mysqlhighavailability.com/replication-features-in-mysql-8-0-1/
+ ImmediateCommitTimestamp uint64
+ OriginalCommitTimestamp uint64
+
+ // Total transaction length (including this GTIDEvent), introduced in MySQL-8.0.2, see:
+ // https://mysqlhighavailability.com/taking-advantage-of-new-transaction-length-metadata/
+ TransactionLength uint64
+
+ // ImmediateServerVersion/OriginalServerVersion are introduced in MySQL-8.0.14, see
+ // https://dev.mysql.com/doc/refman/8.0/en/replication-compatibility.html
+ ImmediateServerVersion uint32
+ OriginalServerVersion uint32
}
func (e *GTIDEvent) Decode(data []byte) error {
pos := 0
- e.CommitFlag = uint8(data[pos])
+ e.CommitFlag = data[pos]
pos++
e.SID = data[pos : pos+SidLength]
pos += SidLength
e.GNO = int64(binary.LittleEndian.Uint64(data[pos:]))
pos += 8
+
if len(data) >= 42 {
- if uint8(data[pos]) == LogicalTimestampTypeCode {
+ if data[pos] == LogicalTimestampTypeCode {
pos++
e.LastCommitted = int64(binary.LittleEndian.Uint64(data[pos:]))
pos += PartLogicalTimestampLength
e.SequenceNumber = int64(binary.LittleEndian.Uint64(data[pos:]))
+ pos += 8
+
+ // IMMEDIATE_COMMIT_TIMESTAMP_LENGTH = 7
+ if len(data)-pos < 7 {
+ return nil
+ }
+ e.ImmediateCommitTimestamp = FixedLengthInt(data[pos : pos+7])
+ pos += 7
+ if (e.ImmediateCommitTimestamp & (uint64(1) << 55)) != 0 {
+ // If the most significant bit is set, another 7 bytes follow representing OriginalCommitTimestamp
+ e.ImmediateCommitTimestamp &= ^(uint64(1) << 55)
+ e.OriginalCommitTimestamp = FixedLengthInt(data[pos : pos+7])
+ pos += 7
+ } else {
+ // Otherwise OriginalCommitTimestamp == ImmediateCommitTimestamp
+ e.OriginalCommitTimestamp = e.ImmediateCommitTimestamp
+ }
+
+ // TRANSACTION_LENGTH_MIN_LENGTH = 1
+ if len(data)-pos < 1 {
+ return nil
+ }
+ var n int
+ e.TransactionLength, _, n = LengthEncodedInt(data[pos:])
+ pos += n
+
+ // IMMEDIATE_SERVER_VERSION_LENGTH = 4
+ e.ImmediateServerVersion = UndefinedServerVer
+ e.OriginalServerVersion = UndefinedServerVer
+ if len(data)-pos < 4 {
+ return nil
+ }
+ e.ImmediateServerVersion = binary.LittleEndian.Uint32(data[pos:])
+ pos += 4
+ if (e.ImmediateServerVersion & (uint32(1) << 31)) != 0 {
+ // If the most significant bit is set, another 4 bytes follow representing OriginalServerVersion
+ e.ImmediateServerVersion &= ^(uint32(1) << 31)
+ e.OriginalServerVersion = binary.LittleEndian.Uint32(data[pos:])
+ pos += 4
+ } else {
+ // Otherwise OriginalServerVersion == ImmediateServerVersion
+ e.OriginalServerVersion = e.ImmediateServerVersion
+ }
}
}
return nil
}
func (e *GTIDEvent) Dump(w io.Writer) {
+ fmtTime := func(t time.Time) string {
+ if t.IsZero() {
+ return ""
+ }
+ return t.Format(time.RFC3339Nano)
+ }
+
fmt.Fprintf(w, "Commit flag: %d\n", e.CommitFlag)
u, _ := uuid.FromBytes(e.SID)
fmt.Fprintf(w, "GTID_NEXT: %s:%d\n", u.String(), e.GNO)
fmt.Fprintf(w, "LAST_COMMITTED: %d\n", e.LastCommitted)
fmt.Fprintf(w, "SEQUENCE_NUMBER: %d\n", e.SequenceNumber)
+ fmt.Fprintf(w, "Immediate commmit timestamp: %d (%s)\n", e.ImmediateCommitTimestamp, fmtTime(e.ImmediateCommitTime()))
+ fmt.Fprintf(w, "Orignal commmit timestamp: %d (%s)\n", e.OriginalCommitTimestamp, fmtTime(e.OriginalCommitTime()))
+ fmt.Fprintf(w, "Transaction length: %d\n", e.TransactionLength)
+ fmt.Fprintf(w, "Immediate server version: %d\n", e.ImmediateServerVersion)
+ fmt.Fprintf(w, "Orignal server version: %d\n", e.OriginalServerVersion)
fmt.Fprintln(w)
}
+// ImmediateCommitTime returns the commit time of this trx on the immediate server
+// or zero time if not available.
+func (e *GTIDEvent) ImmediateCommitTime() time.Time {
+ return microSecTimestampToTime(e.ImmediateCommitTimestamp)
+}
+
+// OriginalCommitTime returns the commit time of this trx on the original server
+// or zero time if not available.
+func (e *GTIDEvent) OriginalCommitTime() time.Time {
+ return microSecTimestampToTime(e.OriginalCommitTimestamp)
+}
+
type BeginLoadQueryEvent struct {
FileID uint32
BlockData []byte
@@ -372,7 +506,7 @@ func (e *ExecuteLoadQueryEvent) Decode(data []byte) error {
e.ExecutionTime = binary.LittleEndian.Uint32(data[pos:])
pos += 4
- e.SchemaLength = uint8(data[pos])
+ e.SchemaLength = data[pos]
pos++
e.ErrorCode = binary.LittleEndian.Uint16(data[pos:])
@@ -390,7 +524,7 @@ func (e *ExecuteLoadQueryEvent) Decode(data []byte) error {
e.EndPos = binary.LittleEndian.Uint32(data[pos:])
pos += 4
- e.DupHandlingFlags = uint8(data[pos])
+ e.DupHandlingFlags = data[pos]
return nil
}
@@ -440,20 +574,43 @@ func (e *MariadbBinlogCheckPointEvent) Dump(w io.Writer) {
}
type MariadbGTIDEvent struct {
- GTID MariadbGTID
+ GTID MariadbGTID
+ Flags byte
+ CommitID uint64
+}
+
+func (e *MariadbGTIDEvent) IsDDL() bool {
+ return (e.Flags & BINLOG_MARIADB_FL_DDL) != 0
+}
+
+func (e *MariadbGTIDEvent) IsStandalone() bool {
+ return (e.Flags & BINLOG_MARIADB_FL_STANDALONE) != 0
+}
+
+func (e *MariadbGTIDEvent) IsGroupCommit() bool {
+ return (e.Flags & BINLOG_MARIADB_FL_GROUP_COMMIT_ID) != 0
}
func (e *MariadbGTIDEvent) Decode(data []byte) error {
+ pos := 0
e.GTID.SequenceNumber = binary.LittleEndian.Uint64(data)
- e.GTID.DomainID = binary.LittleEndian.Uint32(data[8:])
+ pos += 8
+ e.GTID.DomainID = binary.LittleEndian.Uint32(data[pos:])
+ pos += 4
+ e.Flags = data[pos]
+ pos += 1
- // we don't care commit id now, maybe later
+ if (e.Flags & BINLOG_MARIADB_FL_GROUP_COMMIT_ID) > 0 {
+ e.CommitID = binary.LittleEndian.Uint64(data[pos:])
+ }
return nil
}
func (e *MariadbGTIDEvent) Dump(w io.Writer) {
fmt.Fprintf(w, "GTID: %v\n", e.GTID)
+ fmt.Fprintf(w, "Flags: %v\n", e.Flags)
+ fmt.Fprintf(w, "CommitID: %v\n", e.CommitID)
fmt.Fprintln(w)
}
@@ -476,6 +633,7 @@ func (e *MariadbGTIDListEvent) Decode(data []byte) error {
e.GTIDs[i].ServerID = binary.LittleEndian.Uint32(data[pos:])
pos += 4
e.GTIDs[i].SequenceNumber = binary.LittleEndian.Uint64(data[pos:])
+ pos += 8
}
return nil
diff --git a/vendor/github.com/siddontang/go-mysql/replication/generic_event.go b/vendor/github.com/go-mysql-org/go-mysql/replication/generic_event.go
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/replication/generic_event.go
rename to vendor/github.com/go-mysql-org/go-mysql/replication/generic_event.go
diff --git a/vendor/github.com/siddontang/go-mysql/replication/json_binary.go b/vendor/github.com/go-mysql-org/go-mysql/replication/json_binary.go
similarity index 94%
rename from vendor/github.com/siddontang/go-mysql/replication/json_binary.go
rename to vendor/github.com/go-mysql-org/go-mysql/replication/json_binary.go
index 6529f01..ab6c075 100644
--- a/vendor/github.com/siddontang/go-mysql/replication/json_binary.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/replication/json_binary.go
@@ -5,8 +5,8 @@ import (
"fmt"
"math"
- "github.com/juju/errors"
- . "github.com/siddontang/go-mysql/mysql"
+ . "github.com/go-mysql-org/go-mysql/mysql"
+ "github.com/pingcap/errors"
"github.com/siddontang/go/hack"
)
@@ -76,7 +76,10 @@ func (e *RowsEvent) decodeJsonBinary(data []byte) ([]byte, error) {
if len(data) == 0 {
return []byte{}, nil
}
- d := jsonBinaryDecoder{useDecimal: e.useDecimal}
+ d := jsonBinaryDecoder{
+ useDecimal: e.useDecimal,
+ ignoreDecodeErr: e.ignoreJSONDecodeErr,
+ }
if d.isDataShort(data, 1) {
return nil, d.err
@@ -91,8 +94,9 @@ func (e *RowsEvent) decodeJsonBinary(data []byte) ([]byte, error) {
}
type jsonBinaryDecoder struct {
- useDecimal bool
- err error
+ useDecimal bool
+ ignoreDecodeErr bool
+ err error
}
func (d *jsonBinaryDecoder) decodeValue(tp byte, data []byte) interface{} {
@@ -145,7 +149,14 @@ func (d *jsonBinaryDecoder) decodeObjectOrArray(data []byte, isSmall bool, isObj
count := d.decodeCount(data, isSmall)
size := d.decodeCount(data[offsetSize:], isSmall)
- if d.isDataShort(data, int(size)) {
+ if d.isDataShort(data, size) {
+ // Before MySQL 5.7.22, json type generated column may have invalid value,
+ // bug ref: https://bugs.mysql.com/bug.php?id=88791
+ // As generated column value is not used in replication, we can just ignore
+ // this error and return a dummy value for this column.
+ if d.ignoreDecodeErr {
+ d.err = nil
+ }
return nil
}
@@ -441,7 +452,6 @@ func (d *jsonBinaryDecoder) decodeDateTime(data []byte) interface{} {
frac := v % (1 << 24)
return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d.%06d", year, month, day, hour, minute, second, frac)
-
}
func (d *jsonBinaryDecoder) decodeCount(data []byte, isSmall bool) int {
diff --git a/vendor/github.com/siddontang/go-mysql/replication/parser.go b/vendor/github.com/go-mysql-org/go-mysql/replication/parser.go
similarity index 92%
rename from vendor/github.com/siddontang/go-mysql/replication/parser.go
rename to vendor/github.com/go-mysql-org/go-mysql/replication/parser.go
index 6fe1cc0..77eb7e9 100644
--- a/vendor/github.com/siddontang/go-mysql/replication/parser.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/replication/parser.go
@@ -10,7 +10,7 @@ import (
"sync/atomic"
"time"
- "github.com/juju/errors"
+ "github.com/pingcap/errors"
)
var (
@@ -19,6 +19,9 @@ var (
)
type BinlogParser struct {
+ // "mysql" or "mariadb", if not set, use "mysql" by default
+ flavor string
+
format *FormatDescriptionEvent
tables map[uint64]*TableMapEvent
@@ -32,8 +35,9 @@ type BinlogParser struct {
// used to start/stop processing
stopProcessing uint32
- useDecimal bool
- verifyChecksum bool
+ useDecimal bool
+ ignoreJSONDecodeErr bool
+ verifyChecksum bool
}
func NewBinlogParser() *BinlogParser {
@@ -76,7 +80,7 @@ func (p *BinlogParser) ParseFile(name string, offset int64, onEvent OnEventFunc)
offset = 4
} else if offset > 4 {
// FORMAT_DESCRIPTION event should be read by default always (despite that fact passed offset may be higher than 4)
- if _, err = f.Seek(4, os.SEEK_SET); err != nil {
+ if _, err = f.Seek(4, io.SeekStart); err != nil {
return errors.Errorf("seek %s to %d error %v", name, offset, err)
}
@@ -85,7 +89,7 @@ func (p *BinlogParser) ParseFile(name string, offset int64, onEvent OnEventFunc)
}
}
- if _, err = f.Seek(offset, os.SEEK_SET); err != nil {
+ if _, err = f.Seek(offset, io.SeekStart); err != nil {
return errors.Errorf("seek %s to %d error %v", name, offset, err)
}
@@ -119,7 +123,7 @@ func (p *BinlogParser) parseSingleEvent(r io.Reader, onEvent OnEventFunc) (bool,
return false, errors.Trace(err)
}
- if h.EventSize <= uint32(EventHeaderSize) {
+ if h.EventSize < uint32(EventHeaderSize) {
return false, errors.Errorf("invalid event header, event size is %d, too small", h.EventSize)
}
if n, err = io.CopyN(&buf, r, int64(h.EventSize-EventHeaderSize)); err != nil {
@@ -153,7 +157,6 @@ func (p *BinlogParser) parseSingleEvent(r io.Reader, onEvent OnEventFunc) (bool,
}
func (p *BinlogParser) ParseReader(r io.Reader, onEvent OnEventFunc) error {
-
for {
if atomic.LoadUint32(&p.stopProcessing) == 1 {
break
@@ -191,10 +194,18 @@ func (p *BinlogParser) SetUseDecimal(useDecimal bool) {
p.useDecimal = useDecimal
}
+func (p *BinlogParser) SetIgnoreJSONDecodeError(ignoreJSONDecodeErr bool) {
+ p.ignoreJSONDecodeErr = ignoreJSONDecodeErr
+}
+
func (p *BinlogParser) SetVerifyChecksum(verify bool) {
p.verifyChecksum = verify
}
+func (p *BinlogParser) SetFlavor(flavor string) {
+ p.flavor = flavor
+}
+
func (p *BinlogParser) parseHeader(data []byte) (*EventHeader, error) {
h := new(EventHeader)
err := h.Decode(data)
@@ -229,7 +240,9 @@ func (p *BinlogParser) parseEvent(h *EventHeader, data []byte, rawData []byte) (
case XID_EVENT:
e = &XIDEvent{}
case TABLE_MAP_EVENT:
- te := &TableMapEvent{}
+ te := &TableMapEvent{
+ flavor: p.flavor,
+ }
if p.format.EventTypeHeaderLengths[TABLE_MAP_EVENT-1] == 6 {
te.tableIDSize = 4
} else {
@@ -251,7 +264,7 @@ func (p *BinlogParser) parseEvent(h *EventHeader, data []byte, rawData []byte) (
case GTID_EVENT:
e = >IDEvent{}
case ANONYMOUS_GTID_EVENT:
- e = >IDEvent{}
+ e = >IDEvent{}
case BEGIN_LOAD_QUERY_EVENT:
e = &BeginLoadQueryEvent{}
case EXECUTE_LOAD_QUERY_EVENT:
@@ -266,6 +279,8 @@ func (p *BinlogParser) parseEvent(h *EventHeader, data []byte, rawData []byte) (
ee := &MariadbGTIDEvent{}
ee.GTID.ServerID = h.ServerID
e = ee
+ case PREVIOUS_GTIDS_EVENT:
+ e = &PreviousGTIDsEvent{}
default:
e = &GenericEvent{}
}
@@ -292,7 +307,7 @@ func (p *BinlogParser) parseEvent(h *EventHeader, data []byte, rawData []byte) (
return e, nil
}
-// Given the bytes for a a binary log event: return the decoded event.
+// Parse: Given the bytes for a binary log event: return the decoded event.
// With the exception of the FORMAT_DESCRIPTION_EVENT event type
// there must have previously been passed a FORMAT_DESCRIPTION_EVENT
// into the parser for this to work properly on any given event.
@@ -355,6 +370,7 @@ func (p *BinlogParser) newRowsEvent(h *EventHeader) *RowsEvent {
e.parseTime = p.parseTime
e.timestampStringLocation = p.timestampStringLocation
e.useDecimal = p.useDecimal
+ e.ignoreJSONDecodeErr = p.ignoreJSONDecodeErr
switch h.EventType {
case WRITE_ROWS_EVENTv0:
diff --git a/vendor/github.com/siddontang/go-mysql/replication/row_event.go b/vendor/github.com/go-mysql-org/go-mysql/replication/row_event.go
similarity index 51%
rename from vendor/github.com/siddontang/go-mysql/replication/row_event.go
rename to vendor/github.com/go-mysql-org/go-mysql/replication/row_event.go
index 9172f6e..d49e51a 100644
--- a/vendor/github.com/siddontang/go-mysql/replication/row_event.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/replication/row_event.go
@@ -9,16 +9,18 @@ import (
"strconv"
"time"
- "github.com/juju/errors"
+ "github.com/pingcap/errors"
"github.com/shopspring/decimal"
"github.com/siddontang/go-log/log"
- . "github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go/hack"
+
+ . "github.com/go-mysql-org/go-mysql/mysql"
)
var errMissingTableMapEvent = errors.New("invalid table id, no corresponding table map event")
type TableMapEvent struct {
+ flavor string
tableIDSize int
TableID uint64
@@ -34,6 +36,52 @@ type TableMapEvent struct {
//len = (ColumnCount + 7) / 8
NullBitmap []byte
+
+ /*
+ The followings are available only after MySQL-8.0.1 or MariaDB-10.5.0
+ see:
+ - https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_row_metadata
+ - https://mysqlhighavailability.com/more-metadata-is-written-into-binary-log/
+ - https://jira.mariadb.org/browse/MDEV-20477
+ */
+
+ // SignednessBitmap stores signedness info for numeric columns.
+ SignednessBitmap []byte
+
+ // DefaultCharset/ColumnCharset stores collation info for character columns.
+
+ // DefaultCharset[0] is the default collation of character columns.
+ // For character columns that have different charset,
+ // (character column index, column collation) pairs follows
+ DefaultCharset []uint64
+ // ColumnCharset contains collation sequence for all character columns
+ ColumnCharset []uint64
+
+ // SetStrValue stores values for set columns.
+ SetStrValue [][][]byte
+ setStrValueString [][]string
+
+ // EnumStrValue stores values for enum columns.
+ EnumStrValue [][][]byte
+ enumStrValueString [][]string
+
+ // ColumnName list all column names.
+ ColumnName [][]byte
+ columnNameString []string // the same as ColumnName in string type, just for reuse
+
+ // GeometryType stores real type for geometry columns.
+ GeometryType []uint64
+
+ // PrimaryKey is a sequence of column indexes of primary key.
+ PrimaryKey []uint64
+
+ // PrimaryKeyPrefix is the prefix length used for each column of primary key.
+ // 0 means that the whole column length is used.
+ PrimaryKeyPrefix []uint64
+
+ // EnumSetDefaultCharset/EnumSetColumnCharset is similar to DefaultCharset/ColumnCharset but for enum/set columns.
+ EnumSetDefaultCharset []uint64
+ EnumSetColumnCharset []uint64
}
func (e *TableMapEvent) Decode(data []byte) error {
@@ -88,13 +136,17 @@ func (e *TableMapEvent) Decode(data []byte) error {
e.NullBitmap = data[pos : pos+nullBitmapSize]
- // TODO: handle optional field meta
+ pos += nullBitmapSize
+
+ if err = e.decodeOptionalMeta(data[pos:]); err != nil {
+ return err
+ }
return nil
}
func bitmapByteSize(columnCount int) int {
- return int(columnCount+7) / 8
+ return (columnCount + 7) / 8
}
// see mysql sql/log_event.h
@@ -186,6 +238,169 @@ func (e *TableMapEvent) decodeMeta(data []byte) error {
return nil
}
+func (e *TableMapEvent) decodeOptionalMeta(data []byte) (err error) {
+ pos := 0
+ for pos < len(data) {
+ // optional metadata fields are stored in Type, Length, Value(TLV) format
+ // Type takes 1 byte. Length is a packed integer value. Values takes Length bytes
+ t := data[pos]
+ pos++
+
+ l, _, n := LengthEncodedInt(data[pos:])
+ pos += n
+
+ v := data[pos : pos+int(l)]
+ pos += int(l)
+
+ switch t {
+ case TABLE_MAP_OPT_META_SIGNEDNESS:
+ e.SignednessBitmap = v
+
+ case TABLE_MAP_OPT_META_DEFAULT_CHARSET:
+ e.DefaultCharset, err = e.decodeDefaultCharset(v)
+ if err != nil {
+ return err
+ }
+
+ case TABLE_MAP_OPT_META_COLUMN_CHARSET:
+ e.ColumnCharset, err = e.decodeIntSeq(v)
+ if err != nil {
+ return err
+ }
+
+ case TABLE_MAP_OPT_META_COLUMN_NAME:
+ if err = e.decodeColumnNames(v); err != nil {
+ return err
+ }
+
+ case TABLE_MAP_OPT_META_SET_STR_VALUE:
+ e.SetStrValue, err = e.decodeStrValue(v)
+ if err != nil {
+ return err
+ }
+
+ case TABLE_MAP_OPT_META_ENUM_STR_VALUE:
+ e.EnumStrValue, err = e.decodeStrValue(v)
+ if err != nil {
+ return err
+ }
+
+ case TABLE_MAP_OPT_META_GEOMETRY_TYPE:
+ e.GeometryType, err = e.decodeIntSeq(v)
+ if err != nil {
+ return err
+ }
+
+ case TABLE_MAP_OPT_META_SIMPLE_PRIMARY_KEY:
+ if err = e.decodeSimplePrimaryKey(v); err != nil {
+ return err
+ }
+
+ case TABLE_MAP_OPT_META_PRIMARY_KEY_WITH_PREFIX:
+ if err = e.decodePrimaryKeyWithPrefix(v); err != nil {
+ return err
+ }
+
+ case TABLE_MAP_OPT_META_ENUM_AND_SET_DEFAULT_CHARSET:
+ e.EnumSetDefaultCharset, err = e.decodeDefaultCharset(v)
+ if err != nil {
+ return err
+ }
+
+ case TABLE_MAP_OPT_META_ENUM_AND_SET_COLUMN_CHARSET:
+ e.EnumSetColumnCharset, err = e.decodeIntSeq(v)
+ if err != nil {
+ return err
+ }
+
+ default:
+ // Ignore for future extension
+ }
+ }
+
+ return nil
+}
+
+func (e *TableMapEvent) decodeIntSeq(v []byte) (ret []uint64, err error) {
+ p := 0
+ for p < len(v) {
+ i, _, n := LengthEncodedInt(v[p:])
+ p += n
+ ret = append(ret, i)
+ }
+ return
+}
+
+func (e *TableMapEvent) decodeDefaultCharset(v []byte) (ret []uint64, err error) {
+ ret, err = e.decodeIntSeq(v)
+ if err != nil {
+ return
+ }
+ if len(ret)%2 != 1 {
+ return nil, errors.Errorf("Expect odd item in DefaultCharset but got %d", len(ret))
+ }
+ return
+}
+
+func (e *TableMapEvent) decodeColumnNames(v []byte) error {
+ p := 0
+ e.ColumnName = make([][]byte, 0, e.ColumnCount)
+ for p < len(v) {
+ n := int(v[p])
+ p++
+ e.ColumnName = append(e.ColumnName, v[p:p+n])
+ p += n
+ }
+
+ if len(e.ColumnName) != int(e.ColumnCount) {
+ return errors.Errorf("Expect %d column names but got %d", e.ColumnCount, len(e.ColumnName))
+ }
+ return nil
+}
+
+func (e *TableMapEvent) decodeStrValue(v []byte) (ret [][][]byte, err error) {
+ p := 0
+ for p < len(v) {
+ nVal, _, n := LengthEncodedInt(v[p:])
+ p += n
+ vals := make([][]byte, 0, int(nVal))
+ for i := 0; i < int(nVal); i++ {
+ val, _, n, err := LengthEncodedString(v[p:])
+ if err != nil {
+ return nil, err
+ }
+ p += n
+ vals = append(vals, val)
+ }
+ ret = append(ret, vals)
+ }
+ return
+}
+
+func (e *TableMapEvent) decodeSimplePrimaryKey(v []byte) error {
+ p := 0
+ for p < len(v) {
+ i, _, n := LengthEncodedInt(v[p:])
+ e.PrimaryKey = append(e.PrimaryKey, i)
+ e.PrimaryKeyPrefix = append(e.PrimaryKeyPrefix, 0)
+ p += n
+ }
+ return nil
+}
+
+func (e *TableMapEvent) decodePrimaryKeyWithPrefix(v []byte) error {
+ p := 0
+ for p < len(v) {
+ i, _, n := LengthEncodedInt(v[p:])
+ e.PrimaryKey = append(e.PrimaryKey, i)
+ p += n
+ i, _, n = LengthEncodedInt(v[p:])
+ e.PrimaryKeyPrefix = append(e.PrimaryKeyPrefix, i)
+ p += n
+ }
+ return nil
+}
+
func (e *TableMapEvent) Dump(w io.Writer) {
fmt.Fprintf(w, "TableID: %d\n", e.TableID)
fmt.Fprintf(w, "TableID size: %d\n", e.tableIDSize)
@@ -195,9 +410,404 @@ func (e *TableMapEvent) Dump(w io.Writer) {
fmt.Fprintf(w, "Column count: %d\n", e.ColumnCount)
fmt.Fprintf(w, "Column type: \n%s", hex.Dump(e.ColumnType))
fmt.Fprintf(w, "NULL bitmap: \n%s", hex.Dump(e.NullBitmap))
+
+ fmt.Fprintf(w, "Signedness bitmap: \n%s", hex.Dump(e.SignednessBitmap))
+ fmt.Fprintf(w, "Default charset: %v\n", e.DefaultCharset)
+ fmt.Fprintf(w, "Column charset: %v\n", e.ColumnCharset)
+ fmt.Fprintf(w, "Set str value: %v\n", e.SetStrValueString())
+ fmt.Fprintf(w, "Enum str value: %v\n", e.EnumStrValueString())
+ fmt.Fprintf(w, "Column name: %v\n", e.ColumnNameString())
+ fmt.Fprintf(w, "Geometry type: %v\n", e.GeometryType)
+ fmt.Fprintf(w, "Primary key: %v\n", e.PrimaryKey)
+ fmt.Fprintf(w, "Primary key prefix: %v\n", e.PrimaryKeyPrefix)
+ fmt.Fprintf(w, "Enum/set default charset: %v\n", e.EnumSetDefaultCharset)
+ fmt.Fprintf(w, "Enum/set column charset: %v\n", e.EnumSetColumnCharset)
+
+ unsignedMap := e.UnsignedMap()
+ fmt.Fprintf(w, "UnsignedMap: %#v\n", unsignedMap)
+
+ collationMap := e.CollationMap()
+ fmt.Fprintf(w, "CollationMap: %#v\n", collationMap)
+
+ enumSetCollationMap := e.EnumSetCollationMap()
+ fmt.Fprintf(w, "EnumSetCollationMap: %#v\n", enumSetCollationMap)
+
+ enumStrValueMap := e.EnumStrValueMap()
+ fmt.Fprintf(w, "EnumStrValueMap: %#v\n", enumStrValueMap)
+
+ setStrValueMap := e.SetStrValueMap()
+ fmt.Fprintf(w, "SetStrValueMap: %#v\n", setStrValueMap)
+
+ geometryTypeMap := e.GeometryTypeMap()
+ fmt.Fprintf(w, "GeometryTypeMap: %#v\n", geometryTypeMap)
+
+ nameMaxLen := 0
+ for _, name := range e.ColumnName {
+ if len(name) > nameMaxLen {
+ nameMaxLen = len(name)
+ }
+ }
+ nameFmt := " %s"
+ if nameMaxLen > 0 {
+ nameFmt = fmt.Sprintf(" %%-%ds", nameMaxLen)
+ }
+
+ primaryKey := map[int]struct{}{}
+ for _, pk := range e.PrimaryKey {
+ primaryKey[int(pk)] = struct{}{}
+ }
+
+ fmt.Fprintf(w, "Columns: \n")
+ for i := 0; i < int(e.ColumnCount); i++ {
+ if len(e.ColumnName) == 0 {
+ fmt.Fprintf(w, nameFmt, "")
+ } else {
+ fmt.Fprintf(w, nameFmt, e.ColumnName[i])
+ }
+
+ fmt.Fprintf(w, " type=%-3d", e.realType(i))
+
+ if e.IsNumericColumn(i) {
+ if len(unsignedMap) == 0 {
+ fmt.Fprintf(w, " unsigned=")
+ } else if unsignedMap[i] {
+ fmt.Fprintf(w, " unsigned=yes")
+ } else {
+ fmt.Fprintf(w, " unsigned=no ")
+ }
+ }
+ if e.IsCharacterColumn(i) {
+ if len(collationMap) == 0 {
+ fmt.Fprintf(w, " collation=")
+ } else {
+ fmt.Fprintf(w, " collation=%d ", collationMap[i])
+ }
+ }
+ if e.IsEnumColumn(i) {
+ if len(enumSetCollationMap) == 0 {
+ fmt.Fprintf(w, " enum_collation=")
+ } else {
+ fmt.Fprintf(w, " enum_collation=%d", enumSetCollationMap[i])
+ }
+
+ if len(enumStrValueMap) == 0 {
+ fmt.Fprintf(w, " enum=")
+ } else {
+ fmt.Fprintf(w, " enum=%v", enumStrValueMap[i])
+ }
+ }
+ if e.IsSetColumn(i) {
+ if len(enumSetCollationMap) == 0 {
+ fmt.Fprintf(w, " set_collation=")
+ } else {
+ fmt.Fprintf(w, " set_collation=%d", enumSetCollationMap[i])
+ }
+
+ if len(setStrValueMap) == 0 {
+ fmt.Fprintf(w, " set=")
+ } else {
+ fmt.Fprintf(w, " set=%v", setStrValueMap[i])
+ }
+ }
+ if e.IsGeometryColumn(i) {
+ if len(geometryTypeMap) == 0 {
+ fmt.Fprintf(w, " geometry_type=")
+ } else {
+ fmt.Fprintf(w, " geometry_type=%v", geometryTypeMap[i])
+ }
+ }
+
+ available, nullable := e.Nullable(i)
+ if !available {
+ fmt.Fprintf(w, " null=")
+ } else if nullable {
+ fmt.Fprintf(w, " null=yes")
+ } else {
+ fmt.Fprintf(w, " null=no ")
+ }
+
+ if _, ok := primaryKey[i]; ok {
+ fmt.Fprintf(w, " pri")
+ }
+
+ fmt.Fprintf(w, "\n")
+ }
+
fmt.Fprintln(w)
}
+// Nullable returns the nullablity of the i-th column.
+// If null bits are not available, available is false.
+// i must be in range [0, ColumnCount).
+func (e *TableMapEvent) Nullable(i int) (available, nullable bool) {
+ if len(e.NullBitmap) == 0 {
+ return
+ }
+ return true, e.NullBitmap[i/8]&(1< unsigned.
+// Note that only numeric columns will be returned.
+// nil is returned if not available or no numeric columns at all.
+func (e *TableMapEvent) UnsignedMap() map[int]bool {
+ if len(e.SignednessBitmap) == 0 {
+ return nil
+ }
+ p := 0
+ ret := make(map[int]bool)
+ for i := 0; i < int(e.ColumnCount); i++ {
+ if !e.IsNumericColumn(i) {
+ continue
+ }
+ ret[i] = e.SignednessBitmap[p/8]&(1< collation id.
+// Note that only character columns will be returned.
+// nil is returned if not available or no character columns at all.
+func (e *TableMapEvent) CollationMap() map[int]uint64 {
+ return e.collationMap(e.IsCharacterColumn, e.DefaultCharset, e.ColumnCharset)
+}
+
+// EnumSetCollationMap returns a map: column index -> collation id.
+// Note that only enum or set columns will be returned.
+// nil is returned if not available or no enum/set columns at all.
+func (e *TableMapEvent) EnumSetCollationMap() map[int]uint64 {
+ return e.collationMap(e.IsEnumOrSetColumn, e.EnumSetDefaultCharset, e.EnumSetColumnCharset)
+}
+
+func (e *TableMapEvent) collationMap(includeType func(int) bool, defaultCharset, columnCharset []uint64) map[int]uint64 {
+ if len(defaultCharset) != 0 {
+ defaultCollation := defaultCharset[0]
+
+ // character column index -> collation
+ collations := make(map[int]uint64)
+ for i := 1; i < len(defaultCharset); i += 2 {
+ collations[int(defaultCharset[i])] = defaultCharset[i+1]
+ }
+
+ p := 0
+ ret := make(map[int]uint64)
+ for i := 0; i < int(e.ColumnCount); i++ {
+ if !includeType(i) {
+ continue
+ }
+
+ if collation, ok := collations[p]; ok {
+ ret[i] = collation
+ } else {
+ ret[i] = defaultCollation
+ }
+ p++
+ }
+
+ return ret
+ }
+
+ if len(columnCharset) != 0 {
+ p := 0
+ ret := make(map[int]uint64)
+ for i := 0; i < int(e.ColumnCount); i++ {
+ if !includeType(i) {
+ continue
+ }
+
+ ret[i] = columnCharset[p]
+ p++
+ }
+
+ return ret
+ }
+
+ return nil
+}
+
+// EnumStrValueMap returns a map: column index -> enum string value.
+// Note that only enum columns will be returned.
+// nil is returned if not available or no enum columns at all.
+func (e *TableMapEvent) EnumStrValueMap() map[int][]string {
+ return e.strValueMap(e.IsEnumColumn, e.EnumStrValueString())
+}
+
+// SetStrValueMap returns a map: column index -> set string value.
+// Note that only set columns will be returned.
+// nil is returned if not available or no set columns at all.
+func (e *TableMapEvent) SetStrValueMap() map[int][]string {
+ return e.strValueMap(e.IsSetColumn, e.SetStrValueString())
+}
+
+func (e *TableMapEvent) strValueMap(includeType func(int) bool, strValue [][]string) map[int][]string {
+ if len(strValue) == 0 {
+ return nil
+ }
+ p := 0
+ ret := make(map[int][]string)
+ for i := 0; i < int(e.ColumnCount); i++ {
+ if !includeType(i) {
+ continue
+ }
+ ret[i] = strValue[p]
+ p++
+ }
+ return ret
+}
+
+// GeometryTypeMap returns a map: column index -> geometry type.
+// Note that only geometry columns will be returned.
+// nil is returned if not available or no geometry columns at all.
+func (e *TableMapEvent) GeometryTypeMap() map[int]uint64 {
+ if len(e.GeometryType) == 0 {
+ return nil
+ }
+ p := 0
+ ret := make(map[int]uint64)
+ for i := 0; i < int(e.ColumnCount); i++ {
+ if !e.IsGeometryColumn(i) {
+ continue
+ }
+
+ ret[i] = e.GeometryType[p]
+ p++
+ }
+ return ret
+}
+
+// Below realType and IsXXXColumn are base from:
+// table_def::type in sql/rpl_utility.h
+// Table_map_log_event::print_columns in mysql-8.0/sql/log_event.cc and mariadb-10.5/sql/log_event_client.cc
+
+func (e *TableMapEvent) realType(i int) byte {
+ typ := e.ColumnType[i]
+
+ switch typ {
+ case MYSQL_TYPE_STRING:
+ rtyp := byte(e.ColumnMeta[i] >> 8)
+ if rtyp == MYSQL_TYPE_ENUM || rtyp == MYSQL_TYPE_SET {
+ return rtyp
+ }
+
+ case MYSQL_TYPE_DATE:
+ return MYSQL_TYPE_NEWDATE
+ }
+
+ return typ
+}
+
+func (e *TableMapEvent) IsNumericColumn(i int) bool {
+ switch e.realType(i) {
+ case MYSQL_TYPE_TINY,
+ MYSQL_TYPE_SHORT,
+ MYSQL_TYPE_INT24,
+ MYSQL_TYPE_LONG,
+ MYSQL_TYPE_LONGLONG,
+ MYSQL_TYPE_NEWDECIMAL,
+ MYSQL_TYPE_FLOAT,
+ MYSQL_TYPE_DOUBLE:
+ return true
+
+ default:
+ return false
+ }
+}
+
+// IsCharacterColumn returns true if the column type is considered as character type.
+// Note that JSON/GEOMETRY types are treated as character type in mariadb.
+// (JSON is an alias for LONGTEXT in mariadb: https://mariadb.com/kb/en/json-data-type/)
+func (e *TableMapEvent) IsCharacterColumn(i int) bool {
+ switch e.realType(i) {
+ case MYSQL_TYPE_STRING,
+ MYSQL_TYPE_VAR_STRING,
+ MYSQL_TYPE_VARCHAR,
+ MYSQL_TYPE_BLOB:
+ return true
+
+ case MYSQL_TYPE_GEOMETRY:
+ if e.flavor == "mariadb" {
+ return true
+ }
+ return false
+
+ default:
+ return false
+ }
+}
+
+func (e *TableMapEvent) IsEnumColumn(i int) bool {
+ return e.realType(i) == MYSQL_TYPE_ENUM
+}
+
+func (e *TableMapEvent) IsSetColumn(i int) bool {
+ return e.realType(i) == MYSQL_TYPE_SET
+}
+
+func (e *TableMapEvent) IsGeometryColumn(i int) bool {
+ return e.realType(i) == MYSQL_TYPE_GEOMETRY
+}
+
+func (e *TableMapEvent) IsEnumOrSetColumn(i int) bool {
+ rtyp := e.realType(i)
+ return rtyp == MYSQL_TYPE_ENUM || rtyp == MYSQL_TYPE_SET
+}
+
// RowsEventStmtEndFlag is set in the end of the statement.
const RowsEventStmtEndFlag = 0x01
@@ -228,14 +838,16 @@ type RowsEvent struct {
ColumnBitmap2 []byte
//rows: invalid: int64, float64, bool, []byte, string
- Rows [][]interface{}
+ Rows [][]interface{}
+ SkippedColumns [][]int
parseTime bool
timestampStringLocation *time.Location
useDecimal bool
+ ignoreJSONDecodeErr bool
}
-func (e *RowsEvent) Decode(data []byte) error {
+func (e *RowsEvent) Decode(data []byte) (err2 error) {
pos := 0
e.TableID = FixedLengthInt(data[0:e.tableIDSize])
pos += e.tableIDSize
@@ -279,10 +891,20 @@ func (e *RowsEvent) Decode(data []byte) error {
// ... repeat rows until event-end
defer func() {
if r := recover(); r != nil {
- log.Fatalf("parse rows event panic %v, data %q, parsed rows %#v, table map %#v\n%s", r, data, e, e.Table, Pstack())
+ errStr := fmt.Sprintf("parse rows event panic %v, data %q, parsed rows %#v, table map %#v", r, data, e, e.Table)
+ log.Errorf("%s\n%s", errStr, Pstack())
+ err2 = errors.Trace(errors.New(errStr))
}
}()
+ // Pre-allocate memory for rows.
+ rowsLen := e.ColumnCount
+ if e.needBitmap2 {
+ rowsLen += e.ColumnCount
+ }
+ e.SkippedColumns = make([][]int, 0, rowsLen)
+ e.Rows = make([][]interface{}, 0, rowsLen)
+
for pos < len(data) {
if n, err = e.decodeRows(data[pos:], e.Table, e.ColumnBitmap1); err != nil {
return errors.Trace(err)
@@ -306,6 +928,7 @@ func isBitSet(bitmap []byte, i int) bool {
func (e *RowsEvent) decodeRows(data []byte, table *TableMapEvent, bitmap []byte) (int, error) {
row := make([]interface{}, e.ColumnCount)
+ skips := make([]int, 0)
pos := 0
@@ -327,6 +950,7 @@ func (e *RowsEvent) decodeRows(data []byte, table *TableMapEvent, bitmap []byte)
var err error
for i := 0; i < int(e.ColumnCount); i++ {
if !isBitSet(bitmap, i) {
+ skips = append(skips, i)
continue
}
@@ -347,6 +971,7 @@ func (e *RowsEvent) decodeRows(data []byte, table *TableMapEvent, bitmap []byte)
}
e.Rows = append(e.Rows, row)
+ e.SkippedColumns = append(e.SkippedColumns, skips)
return pos, nil
}
@@ -376,7 +1001,7 @@ func (e *RowsEvent) decodeValue(data []byte, tp byte, meta uint16) (v interface{
if b0&0x30 != 0x30 {
length = int(uint16(b1) | (uint16((b0&0x30)^0x30) << 4))
- tp = byte(b0 | 0x30)
+ tp = b0 | 0x30
} else {
length = int(meta & 0xFF)
tp = b0
@@ -419,31 +1044,44 @@ func (e *RowsEvent) decodeValue(data []byte, tp byte, meta uint16) (v interface{
n = int(nbits+7) / 8
//use int64 for bit
- v, err = decodeBit(data, int(nbits), int(n))
+ v, err = decodeBit(data, int(nbits), n)
case MYSQL_TYPE_TIMESTAMP:
n = 4
t := binary.LittleEndian.Uint32(data)
- v = time.Unix(int64(t), 0)
+ if t == 0 {
+ v = formatZeroTime(0, 0)
+ } else {
+ v = e.parseFracTime(fracTime{
+ Time: time.Unix(int64(t), 0),
+ Dec: 0,
+ timestampStringLocation: e.timestampStringLocation,
+ })
+ }
case MYSQL_TYPE_TIMESTAMP2:
v, n, err = decodeTimestamp2(data, meta, e.timestampStringLocation)
- //v = e.parseFracTime(v)
+ v = e.parseFracTime(v)
case MYSQL_TYPE_DATETIME:
n = 8
i64 := binary.LittleEndian.Uint64(data)
-
- if i64 == 0 { // commented by Shlomi Noach. Yes I know about `git blame`
- return "0000-00-00 00:00:00", n, nil
+ if i64 == 0 {
+ v = formatZeroTime(0, 0)
+ } else {
+ d := i64 / 1000000
+ t := i64 % 1000000
+ v = e.parseFracTime(fracTime{
+ Time: time.Date(
+ int(d/10000),
+ time.Month((d%10000)/100),
+ int(d%100),
+ int(t/10000),
+ int((t%10000)/100),
+ int(t%100),
+ 0,
+ time.UTC,
+ ),
+ Dec: 0,
+ })
}
- d := i64 / 1000000
- t := i64 % 1000000
- v = time.Date(int(d/10000),
- time.Month((d%10000)/100),
- int(d%100),
- int(t/10000),
- int((t%10000)/100),
- int(t%100),
- 0,
- time.UTC).Format(TimeFormat)
case MYSQL_TYPE_DATETIME2:
v, n, err = decodeDatetime2(data, meta)
v = e.parseFracTime(v)
@@ -453,11 +1091,7 @@ func (e *RowsEvent) decodeValue(data []byte, tp byte, meta uint16) (v interface{
if i32 == 0 {
v = "00:00:00"
} else {
- sign := ""
- if i32 < 0 {
- sign = "-"
- }
- v = fmt.Sprintf("%s%02d:%02d:%02d", sign, i32/10000, (i32%10000)/100, i32%100)
+ v = fmt.Sprintf("%02d:%02d:%02d", i32/10000, (i32%10000)/100, i32%100)
}
case MYSQL_TYPE_TIME2:
v, n, err = decodeTime2(data, meta)
@@ -472,7 +1106,12 @@ func (e *RowsEvent) decodeValue(data []byte, tp byte, meta uint16) (v interface{
case MYSQL_TYPE_YEAR:
n = 1
- v = int(data[0]) + 1900
+ year := int(data[0])
+ if year == 0 {
+ v = year
+ } else {
+ v = year + 1900
+ }
case MYSQL_TYPE_ENUM:
l := meta & 0xFF
switch l {
@@ -514,14 +1153,15 @@ func (e *RowsEvent) decodeValue(data []byte, tp byte, meta uint16) (v interface{
default:
err = fmt.Errorf("unsupport type %d in binlog and don't know how to handle", tp)
}
- return
+
+ return v, n, err
}
func decodeString(data []byte, length int) (v string, n int) {
if length < 256 {
length = int(data[0])
- n = int(length) + 1
+ n = length + 1
v = hack.String(data[1:n])
} else {
length = int(binary.LittleEndian.Uint16(data[0:]))
@@ -548,9 +1188,9 @@ func decodeDecimalDecompressValue(compIndx int, data []byte, mask uint8) (size i
func decodeDecimal(data []byte, precision int, decimals int, useDecimal bool) (interface{}, int, error) {
//see python mysql replication and https://github.com/jeremycole/mysql_binlog
- integral := (precision - decimals)
- uncompIntegral := int(integral / digitsPerInteger)
- uncompFractional := int(decimals / digitsPerInteger)
+ integral := precision - decimals
+ uncompIntegral := integral / digitsPerInteger
+ uncompFractional := decimals / digitsPerInteger
compIntegral := integral - (uncompIntegral * digitsPerInteger)
compFractional := decimals - (uncompFractional * digitsPerInteger)
@@ -690,8 +1330,11 @@ func decodeTimestamp2(data []byte, dec uint16, timestampStringLocation *time.Loc
return formatZeroTime(int(usec), int(dec)), n, nil
}
- t := time.Unix(sec, usec*1000)
- return t, n, nil
+ return fracTime{
+ Time: time.Unix(sec, usec*1000),
+ Dec: int(dec),
+ timestampStringLocation: timestampStringLocation,
+ }, n, nil
}
const DATETIMEF_INT_OFS int64 = 0x8000000000
@@ -737,14 +1380,23 @@ func decodeDatetime2(data []byte, dec uint16) (interface{}, int, error) {
minute := int((hms >> 6) % (1 << 6))
hour := int((hms >> 12))
- if frac != 0 {
- return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d.%06d", year, month, day, hour, minute, second, frac), n, nil // commented by Shlomi Noach. Yes I know about `git blame`
+ // DATETIME encoding for nonfractional part after MySQL 5.6.4
+ // https://dev.mysql.com/doc/internals/en/date-and-time-data-type-representation.html
+ // integer value for 1970-01-01 00:00:00 is
+ // year*13+month = 25611 = 0b110010000001011
+ // day = 1 = 0b00001
+ // hour = 0 = 0b00000
+ // minute = 0 = 0b000000
+ // second = 0 = 0b000000
+ // integer value = 0b1100100000010110000100000000000000000 = 107420450816
+ if intPart < 107420450816 {
+ return formatBeforeUnixZeroTime(year, month, day, hour, minute, second, int(frac), int(dec)), n, nil
}
- return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second), n, nil // commented by Shlomi Noach. Yes I know about `git blame`
- // return fracTime{
- // Time: time.Date(year, time.Month(month), day, hour, minute, second, int(frac*1000), time.UTC),
- // Dec: int(dec),
- // }, n, nil
+
+ return fracTime{
+ Time: time.Date(year, time.Month(month), day, hour, minute, second, int(frac*1000), time.UTC),
+ Dec: int(dec),
+ }, n, nil
}
const TIMEF_OFS int64 = 0x800000000000
diff --git a/vendor/github.com/siddontang/go-mysql/replication/time.go b/vendor/github.com/go-mysql-org/go-mysql/replication/time.go
similarity index 61%
rename from vendor/github.com/siddontang/go-mysql/replication/time.go
rename to vendor/github.com/go-mysql-org/go-mysql/replication/time.go
index bd27c4e..2adc832 100644
--- a/vendor/github.com/siddontang/go-mysql/replication/time.go
+++ b/vendor/github.com/go-mysql-org/go-mysql/replication/time.go
@@ -39,6 +39,24 @@ func formatZeroTime(frac int, dec int) string {
return s[0 : len(s)-(6-dec)]
}
+func formatBeforeUnixZeroTime(year, month, day, hour, minute, second, frac, dec int) string {
+ if dec == 0 {
+ return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second)
+ }
+
+ s := fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d.%06d", year, month, day, hour, minute, second, frac)
+
+ // dec must < 6, if frac is 924000, but dec is 3, we must output 924 here.
+ return s[0 : len(s)-(6-dec)]
+}
+
+func microSecTimestampToTime(ts uint64) time.Time {
+ if ts == 0 {
+ return time.Time{}
+ }
+ return time.Unix(int64(ts/1000000), int64(ts%1000000)*1000)
+}
+
func init() {
fracTimeFormat = make([]string, 7)
fracTimeFormat[0] = "2006-01-02 15:04:05"
diff --git a/vendor/github.com/go-mysql-org/go-mysql/utils/byte_slice_pool.go b/vendor/github.com/go-mysql-org/go-mysql/utils/byte_slice_pool.go
new file mode 100644
index 0000000..cd51544
--- /dev/null
+++ b/vendor/github.com/go-mysql-org/go-mysql/utils/byte_slice_pool.go
@@ -0,0 +1,36 @@
+package utils
+
+import "sync"
+
+var (
+ byteSlicePool = sync.Pool{
+ New: func() interface{} {
+ return []byte{}
+ },
+ }
+ byteSliceChan = make(chan []byte, 10)
+)
+
+func ByteSliceGet(length int) (data []byte) {
+ select {
+ case data = <-byteSliceChan:
+ default:
+ data = byteSlicePool.Get().([]byte)[:0]
+ }
+
+ if cap(data) < length {
+ data = make([]byte, length)
+ } else {
+ data = data[:length]
+ }
+
+ return data
+}
+
+func ByteSlicePut(data []byte) {
+ select {
+ case byteSliceChan <- data:
+ default:
+ byteSlicePool.Put(data) //nolint:staticcheck
+ }
+}
diff --git a/vendor/github.com/go-mysql-org/go-mysql/utils/bytes_buffer_pool.go b/vendor/github.com/go-mysql-org/go-mysql/utils/bytes_buffer_pool.go
new file mode 100644
index 0000000..a1ca870
--- /dev/null
+++ b/vendor/github.com/go-mysql-org/go-mysql/utils/bytes_buffer_pool.go
@@ -0,0 +1,35 @@
+package utils
+
+import (
+ "bytes"
+ "sync"
+)
+
+var (
+ bytesBufferPool = sync.Pool{
+ New: func() interface{} {
+ return &bytes.Buffer{}
+ },
+ }
+ bytesBufferChan = make(chan *bytes.Buffer, 10)
+)
+
+func BytesBufferGet() (data *bytes.Buffer) {
+ select {
+ case data = <-bytesBufferChan:
+ default:
+ data = bytesBufferPool.Get().(*bytes.Buffer)
+ }
+
+ data.Reset()
+
+ return data
+}
+
+func BytesBufferPut(data *bytes.Buffer) {
+ select {
+ case bytesBufferChan <- data:
+ default:
+ bytesBufferPool.Put(data)
+ }
+}
diff --git a/vendor/github.com/go-mysql-org/go-mysql/utils/zeroalloc.go b/vendor/github.com/go-mysql-org/go-mysql/utils/zeroalloc.go
new file mode 100644
index 0000000..ca3798c
--- /dev/null
+++ b/vendor/github.com/go-mysql-org/go-mysql/utils/zeroalloc.go
@@ -0,0 +1,27 @@
+package utils
+
+import "unsafe"
+
+func StringToByteSlice(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(&s))
+}
+
+func ByteSliceToString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+func Uint64ToInt64(val uint64) int64 {
+ return *(*int64)(unsafe.Pointer(&val))
+}
+
+func Uint64ToFloat64(val uint64) float64 {
+ return *(*float64)(unsafe.Pointer(&val))
+}
+
+func Int64ToUint64(val int64) uint64 {
+ return *(*uint64)(unsafe.Pointer(&val))
+}
+
+func Float64ToUint64(val float64) uint64 {
+ return *(*uint64)(unsafe.Pointer(&val))
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/.github/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/.github/CONTRIBUTING.md
deleted file mode 100644
index 8fe16bc..0000000
--- a/vendor/github.com/go-sql-driver/mysql/.github/CONTRIBUTING.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Contributing Guidelines
-
-## Reporting Issues
-
-Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).
-
-## Contributing Code
-
-By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
-Don't forget to add yourself to the AUTHORS file.
-
-### Code Review
-
-Everyone is invited to review and comment on pull requests.
-If it looks fine to you, comment with "LGTM" (Looks good to me).
-
-If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes.
-
-Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM".
-
-## Development Ideas
-
-If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page.
diff --git a/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index d9771f1..0000000
--- a/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,21 +0,0 @@
-### Issue description
-Tell us what should happen and what happens instead
-
-### Example code
-```go
-If possible, please enter some example code here to reproduce the issue.
-```
-
-### Error log
-```
-If you have an error log, please paste it here.
-```
-
-### Configuration
-*Driver version (or git SHA):*
-
-*Go version:* run `go version` in your console
-
-*Server version:* E.g. MySQL 5.6, MariaDB 10.0.20
-
-*Server OS:* E.g. Debian 8.1 (Jessie), Windows 10
diff --git a/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 6f5c7eb..0000000
--- a/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,9 +0,0 @@
-### Description
-Please explain the changes you made here.
-
-### Checklist
-- [ ] Code compiles correctly
-- [ ] Created tests which fail without the change (if possible)
-- [ ] All tests passing
-- [ ] Extended the README / documentation, if necessary
-- [ ] Added myself / the copyright holder to the AUTHORS file
diff --git a/vendor/github.com/go-sql-driver/mysql/.travis.yml b/vendor/github.com/go-sql-driver/mysql/.travis.yml
deleted file mode 100644
index 56fcf25..0000000
--- a/vendor/github.com/go-sql-driver/mysql/.travis.yml
+++ /dev/null
@@ -1,129 +0,0 @@
-sudo: false
-language: go
-go:
- - 1.10.x
- - 1.11.x
- - 1.12.x
- - 1.13.x
- - master
-
-before_install:
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
-
-before_script:
- - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB" | sudo tee -a /etc/mysql/my.cnf
- - sudo service mysql restart
- - .travis/wait_mysql.sh
- - mysql -e 'create database gotest;'
-
-matrix:
- include:
- - env: DB=MYSQL8
- sudo: required
- dist: trusty
- go: 1.10.x
- services:
- - docker
- before_install:
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
- - docker pull mysql:8.0
- - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
- mysql:8.0 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
- - cp .travis/docker.cnf ~/.my.cnf
- - .travis/wait_mysql.sh
- before_script:
- - export MYSQL_TEST_USER=gotest
- - export MYSQL_TEST_PASS=secret
- - export MYSQL_TEST_ADDR=127.0.0.1:3307
- - export MYSQL_TEST_CONCURRENT=1
-
- - env: DB=MYSQL57
- sudo: required
- dist: trusty
- go: 1.10.x
- services:
- - docker
- before_install:
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
- - docker pull mysql:5.7
- - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
- mysql:5.7 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
- - cp .travis/docker.cnf ~/.my.cnf
- - .travis/wait_mysql.sh
- before_script:
- - export MYSQL_TEST_USER=gotest
- - export MYSQL_TEST_PASS=secret
- - export MYSQL_TEST_ADDR=127.0.0.1:3307
- - export MYSQL_TEST_CONCURRENT=1
-
- - env: DB=MARIA55
- sudo: required
- dist: trusty
- go: 1.10.x
- services:
- - docker
- before_install:
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
- - docker pull mariadb:5.5
- - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
- mariadb:5.5 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
- - cp .travis/docker.cnf ~/.my.cnf
- - .travis/wait_mysql.sh
- before_script:
- - export MYSQL_TEST_USER=gotest
- - export MYSQL_TEST_PASS=secret
- - export MYSQL_TEST_ADDR=127.0.0.1:3307
- - export MYSQL_TEST_CONCURRENT=1
-
- - env: DB=MARIA10_1
- sudo: required
- dist: trusty
- go: 1.10.x
- services:
- - docker
- before_install:
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
- - docker pull mariadb:10.1
- - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
- mariadb:10.1 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
- - cp .travis/docker.cnf ~/.my.cnf
- - .travis/wait_mysql.sh
- before_script:
- - export MYSQL_TEST_USER=gotest
- - export MYSQL_TEST_PASS=secret
- - export MYSQL_TEST_ADDR=127.0.0.1:3307
- - export MYSQL_TEST_CONCURRENT=1
-
- - os: osx
- osx_image: xcode10.1
- addons:
- homebrew:
- packages:
- - mysql
- update: true
- go: 1.12.x
- before_install:
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
- before_script:
- - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB\nlocal_infile=1" >> /usr/local/etc/my.cnf
- - mysql.server start
- - mysql -uroot -e 'CREATE USER gotest IDENTIFIED BY "secret"'
- - mysql -uroot -e 'GRANT ALL ON *.* TO gotest'
- - mysql -uroot -e 'create database gotest;'
- - export MYSQL_TEST_USER=gotest
- - export MYSQL_TEST_PASS=secret
- - export MYSQL_TEST_ADDR=127.0.0.1:3306
- - export MYSQL_TEST_CONCURRENT=1
-
-script:
- - go test -v -covermode=count -coverprofile=coverage.out
- - go vet ./...
- - .travis/gofmt.sh
-after_script:
- - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci
diff --git a/vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf b/vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf
deleted file mode 100644
index e57754e..0000000
--- a/vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf
+++ /dev/null
@@ -1,5 +0,0 @@
-[client]
-user = gotest
-password = secret
-host = 127.0.0.1
-port = 3307
diff --git a/vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh b/vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh
deleted file mode 100755
index 9bf0d16..0000000
--- a/vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-set -ev
-
-# Only check for go1.10+ since the gofmt style changed
-if [[ $(go version) =~ go1\.([0-9]+) ]] && ((${BASH_REMATCH[1]} >= 10)); then
- test -z "$(gofmt -d -s . | tee /dev/stderr)"
-fi
diff --git a/vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh b/vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh
deleted file mode 100755
index e87993e..0000000
--- a/vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-while :
-do
- if mysql -e 'select version()' 2>&1 | grep 'version()\|ERROR 2059 (HY000):'; then
- break
- fi
- sleep 3
-done
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
index 0896ba1..50afa2c 100644
--- a/vendor/github.com/go-sql-driver/mysql/AUTHORS
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -16,9 +16,12 @@ Achille Roussel
Alex Snast
Alexey Palazhchenko
Andrew Reid
+Animesh Ray
Arne Hormann
+Ariel Mashraki
Asta Xie
Bulat Gaifullin
+Caine Jette
Carlos Nieto
Chris Moos
Craig Wilson
@@ -53,6 +56,7 @@ Julien Schmidt
Justin Li
Justin Nuß
Kamil Dziedzic
+Kei Kamikawa
Kevin Malachowski
Kieron Woodhouse
Lennart Rudolph
@@ -75,20 +79,26 @@ Reed Allman
Richard Wilkes
Robert Russell
Runrioter Wung
+Sho Iizuka
+Sho Ikeda
Shuode Li
Simon J Mudd
Soroush Pour
Stan Putrya
Stanley Gunawan
Steven Hartland
+Tan Jinhua <312841925 at qq.com>
Thomas Wodarek
Tim Ruffles
Tom Jenkinson
Vladimir Kovpak
+Vladyslav Zhelezniak
Xiangyu Hu
Xiaobing Jiang
Xiuming Chen
+Xuehong Chan
Zhenye Xie
+Zhixin Wen
# Organizations
@@ -104,3 +114,4 @@ Multiplay Ltd.
Percona LLC
Pivotal Inc.
Stripe Inc.
+Zendesk Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
index 9cb97b3..72a738e 100644
--- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -1,3 +1,29 @@
+## Version 1.6 (2021-04-01)
+
+Changes:
+
+ - Migrate the CI service from travis-ci to GitHub Actions (#1176, #1183, #1190)
+ - `NullTime` is deprecated (#960, #1144)
+ - Reduce allocations when building SET command (#1111)
+ - Performance improvement for time formatting (#1118)
+ - Performance improvement for time parsing (#1098, #1113)
+
+New Features:
+
+ - Implement `driver.Validator` interface (#1106, #1174)
+ - Support returning `uint64` from `Valuer` in `ConvertValue` (#1143)
+ - Add `json.RawMessage` for converter and prepared statement (#1059)
+ - Interpolate `json.RawMessage` as `string` (#1058)
+ - Implements `CheckNamedValue` (#1090)
+
+Bugfixes:
+
+ - Stop rounding times (#1121, #1172)
+ - Put zero filler into the SSL handshake packet (#1066)
+ - Fix checking cancelled connections back into the connection pool (#1095)
+ - Fix remove last 0 byte for mysql_old_password when password is empty (#1133)
+
+
## Version 1.5 (2020-01-07)
Changes:
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
index d2627a4..0b13154 100644
--- a/vendor/github.com/go-sql-driver/mysql/README.md
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -35,7 +35,7 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
* Supports queries larger than 16MB
* Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
* Intelligent `LONG DATA` handling in prepared statements
- * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
+ * Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support
* Optional `time.Time` parsing
* Optional placeholder interpolation
@@ -56,15 +56,37 @@ Make sure [Git is installed](https://git-scm.com/downloads) on your machine and
_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](https://golang.org/pkg/database/sql/) API then.
Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
+
```go
-import "database/sql"
-import _ "github.com/go-sql-driver/mysql"
+import (
+ "database/sql"
+ "time"
+
+ _ "github.com/go-sql-driver/mysql"
+)
+
+// ...
db, err := sql.Open("mysql", "user:password@/dbname")
+if err != nil {
+ panic(err)
+}
+// See "Important settings" section.
+db.SetConnMaxLifetime(time.Minute * 3)
+db.SetMaxOpenConns(10)
+db.SetMaxIdleConns(10)
```
[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
+### Important settings
+
+`db.SetConnMaxLifetime()` is required to ensure connections are closed by the driver safely before connection is closed by MySQL server, OS, or other middlewares. Since some middlewares close idle connections by 5 minutes, we recommend timeout shorter than 5 minutes. This setting helps load balancing and changing system variables too.
+
+`db.SetMaxOpenConns()` is highly recommended to limit the number of connection used by the application. There is no recommended limit number because it depends on application and MySQL server.
+
+`db.SetMaxIdleConns()` is recommended to be set same to (or greater than) `db.SetMaxOpenConns()`. When it is smaller than `SetMaxOpenConns()`, connections can be opened and closed very frequently than you expect. Idle connections can be closed by the `db.SetConnMaxLifetime()`. If you want to close idle connections more rapidly, you can use `db.SetConnMaxIdleTime()` since Go 1.15.
+
### DSN (Data Source Name)
@@ -122,7 +144,7 @@ Valid Values: true, false
Default: false
```
-`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
+`allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files.
[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
##### `allowCleartextPasswords`
@@ -133,7 +155,7 @@ Valid Values: true, false
Default: false
```
-`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
+`allowCleartextPasswords=true` allows using the [cleartext client side plugin](https://dev.mysql.com/doc/en/cleartext-pluggable-authentication.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
##### `allowNativePasswords`
@@ -230,7 +252,7 @@ Default: false
If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`.
-*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
+*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are rejected as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
##### `loc`
@@ -376,7 +398,7 @@ Rules:
Examples:
* `autocommit=1`: `SET autocommit=1`
* [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
- * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
+ * [`transaction_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation): `SET transaction_isolation='REPEATABLE-READ'`
#### Examples
@@ -445,7 +467,7 @@ For this feature you need direct access to the package. Therefore you must chang
import "github.com/go-sql-driver/mysql"
```
-Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
@@ -459,8 +481,6 @@ However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` v
**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
-Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
-
### Unicode support
Since version 1.5 Go-MySQL-Driver automatically uses the collation ` utf8mb4_general_ci` by default.
@@ -477,7 +497,7 @@ To run the driver tests you may need to adjust the configuration. See the [Testi
Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated.
If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
-See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
+See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/.github/CONTRIBUTING.md) for details.
---------------------------------------
@@ -498,4 +518,3 @@ Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you
You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
-
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go
index fec7040..b2f19e8 100644
--- a/vendor/github.com/go-sql-driver/mysql/auth.go
+++ b/vendor/github.com/go-sql-driver/mysql/auth.go
@@ -15,6 +15,7 @@ import (
"crypto/sha256"
"crypto/x509"
"encoding/pem"
+ "fmt"
"sync"
)
@@ -136,10 +137,6 @@ func pwHash(password []byte) (result [2]uint32) {
// Hash password using insecure pre 4.1 method
func scrambleOldPassword(scramble []byte, password string) []byte {
- if len(password) == 0 {
- return nil
- }
-
scramble = scramble[:8]
hashPw := pwHash([]byte(password))
@@ -247,6 +244,9 @@ func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
if !mc.cfg.AllowOldPasswords {
return nil, ErrOldPassword
}
+ if len(mc.cfg.Passwd) == 0 {
+ return nil, nil
+ }
// Note: there are edge cases where this should work but doesn't;
// this is currently "wontfix":
// https://github.com/go-sql-driver/mysql/issues/184
@@ -372,7 +372,10 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
return err
}
- block, _ := pem.Decode(data[1:])
+ block, rest := pem.Decode(data[1:])
+ if block == nil {
+ return fmt.Errorf("No Pem data found, data: %s", rest)
+ }
pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
return err
diff --git a/vendor/github.com/go-sql-driver/mysql/auth_test.go b/vendor/github.com/go-sql-driver/mysql/auth_test.go
deleted file mode 100644
index 1920ef3..0000000
--- a/vendor/github.com/go-sql-driver/mysql/auth_test.go
+++ /dev/null
@@ -1,1330 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "crypto/rsa"
- "crypto/tls"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "testing"
-)
-
-var testPubKey = []byte("-----BEGIN PUBLIC KEY-----\n" +
- "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAol0Z8G8U+25Btxk/g/fm\n" +
- "UAW/wEKjQCTjkibDE4B+qkuWeiumg6miIRhtilU6m9BFmLQSy1ltYQuu4k17A4tQ\n" +
- "rIPpOQYZges/qsDFkZh3wyK5jL5WEFVdOasf6wsfszExnPmcZS4axxoYJfiuilrN\n" +
- "hnwinBAqfi3S0sw5MpSI4Zl1AbOrHG4zDI62Gti2PKiMGyYDZTS9xPrBLbN95Kby\n" +
- "FFclQLEzA9RJcS1nHFsWtRgHjGPhhjCQxEm9NQ1nePFhCfBfApyfH1VM2VCOQum6\n" +
- "Ci9bMuHWjTjckC84mzF99kOxOWVU7mwS6gnJqBzpuz8t3zq8/iQ2y7QrmZV+jTJP\n" +
- "WQIDAQAB\n" +
- "-----END PUBLIC KEY-----\n")
-
-var testPubKeyRSA *rsa.PublicKey
-
-func init() {
- block, _ := pem.Decode(testPubKey)
- pub, err := x509.ParsePKIXPublicKey(block.Bytes)
- if err != nil {
- panic(err)
- }
- testPubKeyRSA = pub.(*rsa.PublicKey)
-}
-
-func TestScrambleOldPass(t *testing.T) {
- scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2}
- vectors := []struct {
- pass string
- out string
- }{
- {" pass", "47575c5a435b4251"},
- {"pass ", "47575c5a435b4251"},
- {"123\t456", "575c47505b5b5559"},
- {"C0mpl!ca ted#PASS123", "5d5d554849584a45"},
- }
- for _, tuple := range vectors {
- ours := scrambleOldPassword(scramble, tuple.pass)
- if tuple.out != fmt.Sprintf("%x", ours) {
- t.Errorf("Failed old password %q", tuple.pass)
- }
- }
-}
-
-func TestScrambleSHA256Pass(t *testing.T) {
- scramble := []byte{10, 47, 74, 111, 75, 73, 34, 48, 88, 76, 114, 74, 37, 13, 3, 80, 82, 2, 23, 21}
- vectors := []struct {
- pass string
- out string
- }{
- {"secret", "f490e76f66d9d86665ce54d98c78d0acfe2fb0b08b423da807144873d30b312c"},
- {"secret2", "abc3934a012cf342e876071c8ee202de51785b430258a7a0138bc79c4d800bc6"},
- }
- for _, tuple := range vectors {
- ours := scrambleSHA256Password(scramble, tuple.pass)
- if tuple.out != fmt.Sprintf("%x", ours) {
- t.Errorf("Failed SHA256 password %q", tuple.pass)
- }
- }
-}
-
-func TestAuthFastCachingSHA256PasswordCached(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
-
- authData := []byte{90, 105, 74, 126, 30, 48, 37, 56, 3, 23, 115, 127, 69,
- 22, 41, 84, 32, 123, 43, 118}
- plugin := "caching_sha2_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{102, 32, 5, 35, 143, 161, 140, 241, 171, 232, 56,
- 139, 43, 14, 107, 196, 249, 170, 147, 60, 220, 204, 120, 178, 214, 15,
- 184, 150, 26, 61, 57, 235}
- if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 2, 0, 0, 2, 1, 3, // Fast Auth Success
- 7, 0, 0, 3, 0, 0, 0, 2, 0, 0, 0, // OK
- }
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-}
-
-func TestAuthFastCachingSHA256PasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = ""
-
- authData := []byte{90, 105, 74, 126, 30, 48, 37, 56, 3, 23, 115, 127, 69,
- 22, 41, 84, 32, 123, 43, 118}
- plugin := "caching_sha2_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- if writtenAuthRespLen != 0 {
- t.Fatalf("unexpected written auth response (%d bytes): %v",
- writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
- }
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-}
-
-func TestAuthFastCachingSHA256PasswordFullRSA(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
-
- authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
- 62, 94, 83, 80, 52, 85}
- plugin := "caching_sha2_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{171, 201, 138, 146, 89, 159, 11, 170, 0, 67, 165,
- 49, 175, 94, 218, 68, 177, 109, 110, 86, 34, 33, 44, 190, 67, 240, 70,
- 110, 40, 139, 124, 41}
- if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 2, 0, 0, 2, 1, 4, // Perform Full Authentication
- }
- conn.queuedReplies = [][]byte{
- // pub key response
- append([]byte{byte(1 + len(testPubKey)), 1, 0, 4, 1}, testPubKey...),
-
- // OK
- {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 3
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- if !bytes.HasPrefix(conn.written, []byte{1, 0, 0, 3, 2, 0, 1, 0, 5}) {
- t.Errorf("unexpected written data: %v", conn.written)
- }
-}
-
-func TestAuthFastCachingSHA256PasswordFullRSAWithKey(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
- mc.cfg.pubKey = testPubKeyRSA
-
- authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
- 62, 94, 83, 80, 52, 85}
- plugin := "caching_sha2_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{171, 201, 138, 146, 89, 159, 11, 170, 0, 67, 165,
- 49, 175, 94, 218, 68, 177, 109, 110, 86, 34, 33, 44, 190, 67, 240, 70,
- 110, 40, 139, 124, 41}
- if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 2, 0, 0, 2, 1, 4, // Perform Full Authentication
- }
- conn.queuedReplies = [][]byte{
- // OK
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 2
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- if !bytes.HasPrefix(conn.written, []byte{0, 1, 0, 3}) {
- t.Errorf("unexpected written data: %v", conn.written)
- }
-}
-
-func TestAuthFastCachingSHA256PasswordFullSecure(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
-
- authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
- 62, 94, 83, 80, 52, 85}
- plugin := "caching_sha2_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // Hack to make the caching_sha2_password plugin believe that the connection
- // is secure
- mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{171, 201, 138, 146, 89, 159, 11, 170, 0, 67, 165,
- 49, 175, 94, 218, 68, 177, 109, 110, 86, 34, 33, 44, 190, 67, 240, 70,
- 110, 40, 139, 124, 41}
- if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 2, 0, 0, 2, 1, 4, // Perform Full Authentication
- }
- conn.queuedReplies = [][]byte{
- // OK
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 3
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- if !bytes.Equal(conn.written, []byte{7, 0, 0, 3, 115, 101, 99, 114, 101, 116, 0}) {
- t.Errorf("unexpected written data: %v", conn.written)
- }
-}
-
-func TestAuthFastCleartextPasswordNotAllowed(t *testing.T) {
- _, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
-
- authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
- 103, 26, 95, 81, 17, 24, 21}
- plugin := "mysql_clear_password"
-
- // Send Client Authentication Packet
- _, err := mc.auth(authData, plugin)
- if err != ErrCleartextPassword {
- t.Errorf("expected ErrCleartextPassword, got %v", err)
- }
-}
-
-func TestAuthFastCleartextPassword(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
- mc.cfg.AllowCleartextPasswords = true
-
- authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
- 103, 26, 95, 81, 17, 24, 21}
- plugin := "mysql_clear_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{115, 101, 99, 114, 101, 116, 0}
- if writtenAuthRespLen != 7 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
- }
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-}
-
-func TestAuthFastCleartextPasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = ""
- mc.cfg.AllowCleartextPasswords = true
-
- authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
- 103, 26, 95, 81, 17, 24, 21}
- plugin := "mysql_clear_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{0}
- if writtenAuthRespLen != 1 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
- }
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-}
-
-func TestAuthFastNativePasswordNotAllowed(t *testing.T) {
- _, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
- mc.cfg.AllowNativePasswords = false
-
- authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
- 103, 26, 95, 81, 17, 24, 21}
- plugin := "mysql_native_password"
-
- // Send Client Authentication Packet
- _, err := mc.auth(authData, plugin)
- if err != ErrNativePassword {
- t.Errorf("expected ErrNativePassword, got %v", err)
- }
-}
-
-func TestAuthFastNativePassword(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
-
- authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
- 103, 26, 95, 81, 17, 24, 21}
- plugin := "mysql_native_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{53, 177, 140, 159, 251, 189, 127, 53, 109, 252,
- 172, 50, 211, 192, 240, 164, 26, 48, 207, 45}
- if writtenAuthRespLen != 20 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
- }
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-}
-
-func TestAuthFastNativePasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = ""
-
- authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
- 103, 26, 95, 81, 17, 24, 21}
- plugin := "mysql_native_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- if writtenAuthRespLen != 0 {
- t.Fatalf("unexpected written auth response (%d bytes): %v",
- writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
- }
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-}
-
-func TestAuthFastSHA256PasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = ""
-
- authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
- 62, 94, 83, 80, 52, 85}
- plugin := "sha256_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{0}
- if writtenAuthRespLen != 1 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response (pub key response)
- conn.data = append([]byte{byte(1 + len(testPubKey)), 1, 0, 2, 1}, testPubKey...)
- conn.queuedReplies = [][]byte{
- // OK
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 2
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- if !bytes.HasPrefix(conn.written, []byte{0, 1, 0, 3}) {
- t.Errorf("unexpected written data: %v", conn.written)
- }
-}
-
-func TestAuthFastSHA256PasswordRSA(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
-
- authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
- 62, 94, 83, 80, 52, 85}
- plugin := "sha256_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{1}
- if writtenAuthRespLen != 1 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response (pub key response)
- conn.data = append([]byte{byte(1 + len(testPubKey)), 1, 0, 2, 1}, testPubKey...)
- conn.queuedReplies = [][]byte{
- // OK
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 2
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- if !bytes.HasPrefix(conn.written, []byte{0, 1, 0, 3}) {
- t.Errorf("unexpected written data: %v", conn.written)
- }
-}
-
-func TestAuthFastSHA256PasswordRSAWithKey(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
- mc.cfg.pubKey = testPubKeyRSA
-
- authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
- 62, 94, 83, 80, 52, 85}
- plugin := "sha256_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // auth response (OK)
- conn.data = []byte{7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0}
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-}
-
-func TestAuthFastSHA256PasswordSecure(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
-
- // hack to make the caching_sha2_password plugin believe that the connection
- // is secure
- mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
-
- authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
- 62, 94, 83, 80, 52, 85}
- plugin := "sha256_password"
-
- // send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // unset TLS config to prevent the actual establishment of a TLS wrapper
- mc.cfg.tls = nil
-
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{115, 101, 99, 114, 101, 116, 0}
- if writtenAuthRespLen != 7 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response (OK)
- conn.data = []byte{7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0}
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- if !bytes.Equal(conn.written, []byte{}) {
- t.Errorf("unexpected written data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchCachingSHA256PasswordCached(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = "secret"
-
- // auth switch request
- conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
- 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
- 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
- 50, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}, // OK
- }
- conn.maxReads = 3
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{
- // 1. Packet: Hash
- 32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
- 54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
- 153, 9, 130,
- }
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchCachingSHA256PasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = ""
-
- // auth switch request
- conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
- 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
- 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
- 50, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{0, 0, 0, 3}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchCachingSHA256PasswordFullRSA(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = "secret"
-
- // auth switch request
- conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
- 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
- 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
- 50, 0}
-
- conn.queuedReplies = [][]byte{
- // Perform Full Authentication
- {2, 0, 0, 4, 1, 4},
-
- // Pub Key Response
- append([]byte{byte(1 + len(testPubKey)), 1, 0, 6, 1}, testPubKey...),
-
- // OK
- {7, 0, 0, 8, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 4
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReplyPrefix := []byte{
- // 1. Packet: Hash
- 32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
- 54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
- 153, 9, 130,
-
- // 2. Packet: Pub Key Request
- 1, 0, 0, 5, 2,
-
- // 3. Packet: Encrypted Password
- 0, 1, 0, 7, // [changing bytes]
- }
- if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchCachingSHA256PasswordFullRSAWithKey(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = "secret"
- mc.cfg.pubKey = testPubKeyRSA
-
- // auth switch request
- conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
- 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
- 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
- 50, 0}
-
- conn.queuedReplies = [][]byte{
- // Perform Full Authentication
- {2, 0, 0, 4, 1, 4},
-
- // OK
- {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 3
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReplyPrefix := []byte{
- // 1. Packet: Hash
- 32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
- 54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
- 153, 9, 130,
-
- // 2. Packet: Encrypted Password
- 0, 1, 0, 5, // [changing bytes]
- }
- if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchCachingSHA256PasswordFullSecure(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = "secret"
-
- // Hack to make the caching_sha2_password plugin believe that the connection
- // is secure
- mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
-
- // auth switch request
- conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
- 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
- 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
- 50, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{
- {2, 0, 0, 4, 1, 4}, // Perform Full Authentication
- {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0}, // OK
- }
- conn.maxReads = 3
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{
- // 1. Packet: Hash
- 32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
- 54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
- 153, 9, 130,
-
- // 2. Packet: Cleartext password
- 7, 0, 0, 5, 115, 101, 99, 114, 101, 116, 0,
- }
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchCleartextPasswordNotAllowed(t *testing.T) {
- conn, mc := newRWMockConn(2)
-
- conn.data = []byte{22, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 99, 108,
- 101, 97, 114, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0}
- conn.maxReads = 1
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
- err := mc.handleAuthResult(authData, plugin)
- if err != ErrCleartextPassword {
- t.Errorf("expected ErrCleartextPassword, got %v", err)
- }
-}
-
-func TestAuthSwitchCleartextPassword(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowCleartextPasswords = true
- mc.cfg.Passwd = "secret"
-
- // auth switch request
- conn.data = []byte{22, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 99, 108,
- 101, 97, 114, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{7, 0, 0, 3, 115, 101, 99, 114, 101, 116, 0}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchCleartextPasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowCleartextPasswords = true
- mc.cfg.Passwd = ""
-
- // auth switch request
- conn.data = []byte{22, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 99, 108,
- 101, 97, 114, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{1, 0, 0, 3, 0}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchNativePasswordNotAllowed(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowNativePasswords = false
-
- conn.data = []byte{44, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 110, 97,
- 116, 105, 118, 101, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 96,
- 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31, 48, 31, 89, 39, 55,
- 31, 0}
- conn.maxReads = 1
- authData := []byte{96, 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31,
- 48, 31, 89, 39, 55, 31}
- plugin := "caching_sha2_password"
- err := mc.handleAuthResult(authData, plugin)
- if err != ErrNativePassword {
- t.Errorf("expected ErrNativePassword, got %v", err)
- }
-}
-
-func TestAuthSwitchNativePassword(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowNativePasswords = true
- mc.cfg.Passwd = "secret"
-
- // auth switch request
- conn.data = []byte{44, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 110, 97,
- 116, 105, 118, 101, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 96,
- 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31, 48, 31, 89, 39, 55,
- 31, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{96, 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31,
- 48, 31, 89, 39, 55, 31}
- plugin := "caching_sha2_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{20, 0, 0, 3, 202, 41, 195, 164, 34, 226, 49, 103,
- 21, 211, 167, 199, 227, 116, 8, 48, 57, 71, 149, 146}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchNativePasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowNativePasswords = true
- mc.cfg.Passwd = ""
-
- // auth switch request
- conn.data = []byte{44, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 110, 97,
- 116, 105, 118, 101, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 96,
- 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31, 48, 31, 89, 39, 55,
- 31, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{96, 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31,
- 48, 31, 89, 39, 55, 31}
- plugin := "caching_sha2_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{0, 0, 0, 3}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchOldPasswordNotAllowed(t *testing.T) {
- conn, mc := newRWMockConn(2)
-
- conn.data = []byte{41, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 111, 108,
- 100, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 95, 84, 103, 43, 61,
- 49, 123, 61, 91, 50, 40, 113, 35, 84, 96, 101, 92, 123, 121, 107, 0}
- conn.maxReads = 1
- authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
- 84, 96, 101, 92, 123, 121, 107}
- plugin := "mysql_native_password"
- err := mc.handleAuthResult(authData, plugin)
- if err != ErrOldPassword {
- t.Errorf("expected ErrOldPassword, got %v", err)
- }
-}
-
-// Same to TestAuthSwitchOldPasswordNotAllowed, but use OldAuthSwitch request.
-func TestOldAuthSwitchNotAllowed(t *testing.T) {
- conn, mc := newRWMockConn(2)
-
- // OldAuthSwitch request
- conn.data = []byte{1, 0, 0, 2, 0xfe}
- conn.maxReads = 1
- authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
- 84, 96, 101, 92, 123, 121, 107}
- plugin := "mysql_native_password"
- err := mc.handleAuthResult(authData, plugin)
- if err != ErrOldPassword {
- t.Errorf("expected ErrOldPassword, got %v", err)
- }
-}
-
-func TestAuthSwitchOldPassword(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowOldPasswords = true
- mc.cfg.Passwd = "secret"
-
- // auth switch request
- conn.data = []byte{41, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 111, 108,
- 100, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 95, 84, 103, 43, 61,
- 49, 123, 61, 91, 50, 40, 113, 35, 84, 96, 101, 92, 123, 121, 107, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{{8, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
- 84, 96, 101, 92, 123, 121, 107}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{9, 0, 0, 3, 86, 83, 83, 79, 74, 78, 65, 66, 0}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-// Same to TestAuthSwitchOldPassword, but use OldAuthSwitch request.
-func TestOldAuthSwitch(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowOldPasswords = true
- mc.cfg.Passwd = "secret"
-
- // OldAuthSwitch request
- conn.data = []byte{1, 0, 0, 2, 0xfe}
-
- // auth response
- conn.queuedReplies = [][]byte{{8, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
- 84, 96, 101, 92, 123, 121, 107}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{9, 0, 0, 3, 86, 83, 83, 79, 74, 78, 65, 66, 0}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-func TestAuthSwitchOldPasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowOldPasswords = true
- mc.cfg.Passwd = ""
-
- // auth switch request
- conn.data = []byte{41, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 111, 108,
- 100, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 95, 84, 103, 43, 61,
- 49, 123, 61, 91, 50, 40, 113, 35, 84, 96, 101, 92, 123, 121, 107, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{{8, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
- 84, 96, 101, 92, 123, 121, 107}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{1, 0, 0, 3, 0}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-// Same to TestAuthSwitchOldPasswordEmpty, but use OldAuthSwitch request.
-func TestOldAuthSwitchPasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowOldPasswords = true
- mc.cfg.Passwd = ""
-
- // OldAuthSwitch request.
- conn.data = []byte{1, 0, 0, 2, 0xfe}
-
- // auth response
- conn.queuedReplies = [][]byte{{8, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
- 84, 96, 101, 92, 123, 121, 107}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{1, 0, 0, 3, 0}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchSHA256PasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = ""
-
- // auth switch request
- conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
- 115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
- 33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
-
- conn.queuedReplies = [][]byte{
- // OK
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 3
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReplyPrefix := []byte{
- // 1. Packet: Empty Password
- 1, 0, 0, 3, 0,
- }
- if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchSHA256PasswordRSA(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = "secret"
-
- // auth switch request
- conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
- 115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
- 33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
-
- conn.queuedReplies = [][]byte{
- // Pub Key Response
- append([]byte{byte(1 + len(testPubKey)), 1, 0, 4, 1}, testPubKey...),
-
- // OK
- {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 3
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReplyPrefix := []byte{
- // 1. Packet: Pub Key Request
- 1, 0, 0, 3, 1,
-
- // 2. Packet: Encrypted Password
- 0, 1, 0, 5, // [changing bytes]
- }
- if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchSHA256PasswordRSAWithKey(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = "secret"
- mc.cfg.pubKey = testPubKeyRSA
-
- // auth switch request
- conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
- 115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
- 33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
-
- conn.queuedReplies = [][]byte{
- // OK
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 2
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReplyPrefix := []byte{
- // 1. Packet: Encrypted Password
- 0, 1, 0, 3, // [changing bytes]
- }
- if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchSHA256PasswordSecure(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = "secret"
-
- // Hack to make the caching_sha2_password plugin believe that the connection
- // is secure
- mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
-
- // auth switch request
- conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
- 115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
- 33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
-
- conn.queuedReplies = [][]byte{
- // OK
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 2
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReplyPrefix := []byte{
- // 1. Packet: Cleartext Password
- 7, 0, 0, 3, 115, 101, 99, 114, 101, 116, 0,
- }
- if !bytes.Equal(conn.written, expectedReplyPrefix) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/benchmark_test.go b/vendor/github.com/go-sql-driver/mysql/benchmark_test.go
deleted file mode 100644
index 3e25a3b..0000000
--- a/vendor/github.com/go-sql-driver/mysql/benchmark_test.go
+++ /dev/null
@@ -1,373 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "context"
- "database/sql"
- "database/sql/driver"
- "fmt"
- "math"
- "runtime"
- "strings"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-)
-
-type TB testing.B
-
-func (tb *TB) check(err error) {
- if err != nil {
- tb.Fatal(err)
- }
-}
-
-func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB {
- tb.check(err)
- return db
-}
-
-func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows {
- tb.check(err)
- return rows
-}
-
-func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt {
- tb.check(err)
- return stmt
-}
-
-func initDB(b *testing.B, queries ...string) *sql.DB {
- tb := (*TB)(b)
- db := tb.checkDB(sql.Open("mysql", dsn))
- for _, query := range queries {
- if _, err := db.Exec(query); err != nil {
- b.Fatalf("error on %q: %v", query, err)
- }
- }
- return db
-}
-
-const concurrencyLevel = 10
-
-func BenchmarkQuery(b *testing.B) {
- tb := (*TB)(b)
- b.StopTimer()
- b.ReportAllocs()
- db := initDB(b,
- "DROP TABLE IF EXISTS foo",
- "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
- `INSERT INTO foo VALUES (1, "one")`,
- `INSERT INTO foo VALUES (2, "two")`,
- )
- db.SetMaxIdleConns(concurrencyLevel)
- defer db.Close()
-
- stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?"))
- defer stmt.Close()
-
- remain := int64(b.N)
- var wg sync.WaitGroup
- wg.Add(concurrencyLevel)
- defer wg.Wait()
- b.StartTimer()
-
- for i := 0; i < concurrencyLevel; i++ {
- go func() {
- for {
- if atomic.AddInt64(&remain, -1) < 0 {
- wg.Done()
- return
- }
-
- var got string
- tb.check(stmt.QueryRow(1).Scan(&got))
- if got != "one" {
- b.Errorf("query = %q; want one", got)
- wg.Done()
- return
- }
- }
- }()
- }
-}
-
-func BenchmarkExec(b *testing.B) {
- tb := (*TB)(b)
- b.StopTimer()
- b.ReportAllocs()
- db := tb.checkDB(sql.Open("mysql", dsn))
- db.SetMaxIdleConns(concurrencyLevel)
- defer db.Close()
-
- stmt := tb.checkStmt(db.Prepare("DO 1"))
- defer stmt.Close()
-
- remain := int64(b.N)
- var wg sync.WaitGroup
- wg.Add(concurrencyLevel)
- defer wg.Wait()
- b.StartTimer()
-
- for i := 0; i < concurrencyLevel; i++ {
- go func() {
- for {
- if atomic.AddInt64(&remain, -1) < 0 {
- wg.Done()
- return
- }
-
- if _, err := stmt.Exec(); err != nil {
- b.Fatal(err.Error())
- }
- }
- }()
- }
-}
-
-// data, but no db writes
-var roundtripSample []byte
-
-func initRoundtripBenchmarks() ([]byte, int, int) {
- if roundtripSample == nil {
- roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024))
- }
- return roundtripSample, 16, len(roundtripSample)
-}
-
-func BenchmarkRoundtripTxt(b *testing.B) {
- b.StopTimer()
- sample, min, max := initRoundtripBenchmarks()
- sampleString := string(sample)
- b.ReportAllocs()
- tb := (*TB)(b)
- db := tb.checkDB(sql.Open("mysql", dsn))
- defer db.Close()
- b.StartTimer()
- var result string
- for i := 0; i < b.N; i++ {
- length := min + i
- if length > max {
- length = max
- }
- test := sampleString[0:length]
- rows := tb.checkRows(db.Query(`SELECT "` + test + `"`))
- if !rows.Next() {
- rows.Close()
- b.Fatalf("crashed")
- }
- err := rows.Scan(&result)
- if err != nil {
- rows.Close()
- b.Fatalf("crashed")
- }
- if result != test {
- rows.Close()
- b.Errorf("mismatch")
- }
- rows.Close()
- }
-}
-
-func BenchmarkRoundtripBin(b *testing.B) {
- b.StopTimer()
- sample, min, max := initRoundtripBenchmarks()
- b.ReportAllocs()
- tb := (*TB)(b)
- db := tb.checkDB(sql.Open("mysql", dsn))
- defer db.Close()
- stmt := tb.checkStmt(db.Prepare("SELECT ?"))
- defer stmt.Close()
- b.StartTimer()
- var result sql.RawBytes
- for i := 0; i < b.N; i++ {
- length := min + i
- if length > max {
- length = max
- }
- test := sample[0:length]
- rows := tb.checkRows(stmt.Query(test))
- if !rows.Next() {
- rows.Close()
- b.Fatalf("crashed")
- }
- err := rows.Scan(&result)
- if err != nil {
- rows.Close()
- b.Fatalf("crashed")
- }
- if !bytes.Equal(result, test) {
- rows.Close()
- b.Errorf("mismatch")
- }
- rows.Close()
- }
-}
-
-func BenchmarkInterpolation(b *testing.B) {
- mc := &mysqlConn{
- cfg: &Config{
- InterpolateParams: true,
- Loc: time.UTC,
- },
- maxAllowedPacket: maxPacketSize,
- maxWriteSize: maxPacketSize - 1,
- buf: newBuffer(nil),
- }
-
- args := []driver.Value{
- int64(42424242),
- float64(math.Pi),
- false,
- time.Unix(1423411542, 807015000),
- []byte("bytes containing special chars ' \" \a \x00"),
- "string containing special chars ' \" \a \x00",
- }
- q := "SELECT ?, ?, ?, ?, ?, ?"
-
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := mc.interpolateParams(q, args)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func benchmarkQueryContext(b *testing.B, db *sql.DB, p int) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
-
- tb := (*TB)(b)
- stmt := tb.checkStmt(db.PrepareContext(ctx, "SELECT val FROM foo WHERE id=?"))
- defer stmt.Close()
-
- b.SetParallelism(p)
- b.ReportAllocs()
- b.ResetTimer()
- b.RunParallel(func(pb *testing.PB) {
- var got string
- for pb.Next() {
- tb.check(stmt.QueryRow(1).Scan(&got))
- if got != "one" {
- b.Fatalf("query = %q; want one", got)
- }
- }
- })
-}
-
-func BenchmarkQueryContext(b *testing.B) {
- db := initDB(b,
- "DROP TABLE IF EXISTS foo",
- "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
- `INSERT INTO foo VALUES (1, "one")`,
- `INSERT INTO foo VALUES (2, "two")`,
- )
- defer db.Close()
- for _, p := range []int{1, 2, 3, 4} {
- b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
- benchmarkQueryContext(b, db, p)
- })
- }
-}
-
-func benchmarkExecContext(b *testing.B, db *sql.DB, p int) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
-
- tb := (*TB)(b)
- stmt := tb.checkStmt(db.PrepareContext(ctx, "DO 1"))
- defer stmt.Close()
-
- b.SetParallelism(p)
- b.ReportAllocs()
- b.ResetTimer()
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- if _, err := stmt.ExecContext(ctx); err != nil {
- b.Fatal(err)
- }
- }
- })
-}
-
-func BenchmarkExecContext(b *testing.B) {
- db := initDB(b,
- "DROP TABLE IF EXISTS foo",
- "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
- `INSERT INTO foo VALUES (1, "one")`,
- `INSERT INTO foo VALUES (2, "two")`,
- )
- defer db.Close()
- for _, p := range []int{1, 2, 3, 4} {
- b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
- benchmarkQueryContext(b, db, p)
- })
- }
-}
-
-// BenchmarkQueryRawBytes benchmarks fetching 100 blobs using sql.RawBytes.
-// "size=" means size of each blobs.
-func BenchmarkQueryRawBytes(b *testing.B) {
- var sizes []int = []int{100, 1000, 2000, 4000, 8000, 12000, 16000, 32000, 64000, 256000}
- db := initDB(b,
- "DROP TABLE IF EXISTS bench_rawbytes",
- "CREATE TABLE bench_rawbytes (id INT PRIMARY KEY, val LONGBLOB)",
- )
- defer db.Close()
-
- blob := make([]byte, sizes[len(sizes)-1])
- for i := range blob {
- blob[i] = 42
- }
- for i := 0; i < 100; i++ {
- _, err := db.Exec("INSERT INTO bench_rawbytes VALUES (?, ?)", i, blob)
- if err != nil {
- b.Fatal(err)
- }
- }
-
- for _, s := range sizes {
- b.Run(fmt.Sprintf("size=%v", s), func(b *testing.B) {
- db.SetMaxIdleConns(0)
- db.SetMaxIdleConns(1)
- b.ReportAllocs()
- b.ResetTimer()
-
- for j := 0; j < b.N; j++ {
- rows, err := db.Query("SELECT LEFT(val, ?) as v FROM bench_rawbytes", s)
- if err != nil {
- b.Fatal(err)
- }
- nrows := 0
- for rows.Next() {
- var buf sql.RawBytes
- err := rows.Scan(&buf)
- if err != nil {
- b.Fatal(err)
- }
- if len(buf) != s {
- b.Fatalf("size mismatch: expected %v, got %v", s, len(buf))
- }
- nrows++
- }
- rows.Close()
- if nrows != 100 {
- b.Fatalf("numbers of rows mismatch: expected %v, got %v", 100, nrows)
- }
- }
- })
- }
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
index 8d2b556..326a9f7 100644
--- a/vendor/github.com/go-sql-driver/mysql/collations.go
+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -247,7 +247,7 @@ var collations = map[string]byte{
"utf8mb4_0900_ai_ci": 255,
}
-// A blacklist of collations which is unsafe to interpolate parameters.
+// A denylist of collations which is unsafe to interpolate parameters.
// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes.
var unsafeCollations = map[string]bool{
"big5_chinese_ci": true,
diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck_test.go b/vendor/github.com/go-sql-driver/mysql/conncheck_test.go
deleted file mode 100644
index 5399551..0000000
--- a/vendor/github.com/go-sql-driver/mysql/conncheck_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos
-
-package mysql
-
-import (
- "testing"
- "time"
-)
-
-func TestStaleConnectionChecks(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("SET @@SESSION.wait_timeout = 2")
-
- if err := dbt.db.Ping(); err != nil {
- dbt.Fatal(err)
- }
-
- // wait for MySQL to close our connection
- time.Sleep(3 * time.Second)
-
- tx, err := dbt.db.Begin()
- if err != nil {
- dbt.Fatal(err)
- }
-
- if err := tx.Rollback(); err != nil {
- dbt.Fatal(err)
- }
- })
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
index b07cd76..835f897 100644
--- a/vendor/github.com/go-sql-driver/mysql/connection.go
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -47,9 +47,10 @@ type mysqlConn struct {
// Handles parameters set in DSN after the connection is established
func (mc *mysqlConn) handleParams() (err error) {
+ var cmdSet strings.Builder
for param, val := range mc.cfg.Params {
switch param {
- // Charset
+ // Charset: character_set_connection, character_set_client, character_set_results
case "charset":
charsets := strings.Split(val, ",")
for i := range charsets {
@@ -63,12 +64,25 @@ func (mc *mysqlConn) handleParams() (err error) {
return
}
- // System Vars
+ // Other system vars accumulated in a single SET command
default:
- err = mc.exec("SET " + param + "=" + val + "")
- if err != nil {
- return
+ if cmdSet.Len() == 0 {
+ // Heuristic: 29 chars for each other key=value to reduce reallocations
+ cmdSet.Grow(4 + len(param) + 1 + len(val) + 30*(len(mc.cfg.Params)-1))
+ cmdSet.WriteString("SET ")
+ } else {
+ cmdSet.WriteByte(',')
}
+ cmdSet.WriteString(param)
+ cmdSet.WriteByte('=')
+ cmdSet.WriteString(val)
+ }
+ }
+
+ if cmdSet.Len() > 0 {
+ err = mc.exec(cmdSet.String())
+ if err != nil {
+ return
}
}
@@ -231,44 +245,10 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
if v.IsZero() {
buf = append(buf, "'0000-00-00'"...)
} else {
- v := v.In(mc.cfg.Loc)
- v = v.Add(time.Nanosecond * 500) // To round under microsecond
- year := v.Year()
- year100 := year / 100
- year1 := year % 100
- month := v.Month()
- day := v.Day()
- hour := v.Hour()
- minute := v.Minute()
- second := v.Second()
- micro := v.Nanosecond() / 1000
-
- buf = append(buf, []byte{
- '\'',
- digits10[year100], digits01[year100],
- digits10[year1], digits01[year1],
- '-',
- digits10[month], digits01[month],
- '-',
- digits10[day], digits01[day],
- ' ',
- digits10[hour], digits01[hour],
- ':',
- digits10[minute], digits01[minute],
- ':',
- digits10[second], digits01[second],
- }...)
-
- if micro != 0 {
- micro10000 := micro / 10000
- micro100 := micro / 100 % 100
- micro1 := micro % 100
- buf = append(buf, []byte{
- '.',
- digits10[micro10000], digits01[micro10000],
- digits10[micro100], digits01[micro100],
- digits10[micro1], digits01[micro1],
- }...)
+ buf = append(buf, '\'')
+ buf, err = appendDateTime(buf, v.In(mc.cfg.Loc))
+ if err != nil {
+ return "", err
}
buf = append(buf, '\'')
}
@@ -489,6 +469,10 @@ func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
// BeginTx implements driver.ConnBeginTx interface
func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+ if mc.closed.IsSet() {
+ return nil, driver.ErrBadConn
+ }
+
if err := mc.watchCancel(ctx); err != nil {
return nil, err
}
@@ -658,3 +642,9 @@ func (mc *mysqlConn) ResetSession(ctx context.Context) error {
mc.reset = true
return nil
}
+
+// IsValid implements driver.Validator interface
+// (From Go 1.15)
+func (mc *mysqlConn) IsValid() bool {
+ return !mc.closed.IsSet()
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection_test.go b/vendor/github.com/go-sql-driver/mysql/connection_test.go
deleted file mode 100644
index a6d6773..0000000
--- a/vendor/github.com/go-sql-driver/mysql/connection_test.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "context"
- "database/sql/driver"
- "encoding/json"
- "errors"
- "net"
- "testing"
-)
-
-func TestInterpolateParams(t *testing.T) {
- mc := &mysqlConn{
- buf: newBuffer(nil),
- maxAllowedPacket: maxPacketSize,
- cfg: &Config{
- InterpolateParams: true,
- },
- }
-
- q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42), "gopher"})
- if err != nil {
- t.Errorf("Expected err=nil, got %#v", err)
- return
- }
- expected := `SELECT 42+'gopher'`
- if q != expected {
- t.Errorf("Expected: %q\nGot: %q", expected, q)
- }
-}
-
-func TestInterpolateParamsJSONRawMessage(t *testing.T) {
- mc := &mysqlConn{
- buf: newBuffer(nil),
- maxAllowedPacket: maxPacketSize,
- cfg: &Config{
- InterpolateParams: true,
- },
- }
-
- buf, err := json.Marshal(struct {
- Value int `json:"value"`
- }{Value: 42})
- if err != nil {
- t.Errorf("Expected err=nil, got %#v", err)
- return
- }
- q, err := mc.interpolateParams("SELECT ?", []driver.Value{json.RawMessage(buf)})
- if err != nil {
- t.Errorf("Expected err=nil, got %#v", err)
- return
- }
- expected := `SELECT '{\"value\":42}'`
- if q != expected {
- t.Errorf("Expected: %q\nGot: %q", expected, q)
- }
-}
-
-func TestInterpolateParamsTooManyPlaceholders(t *testing.T) {
- mc := &mysqlConn{
- buf: newBuffer(nil),
- maxAllowedPacket: maxPacketSize,
- cfg: &Config{
- InterpolateParams: true,
- },
- }
-
- q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42)})
- if err != driver.ErrSkip {
- t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
- }
-}
-
-// We don't support placeholder in string literal for now.
-// https://github.com/go-sql-driver/mysql/pull/490
-func TestInterpolateParamsPlaceholderInString(t *testing.T) {
- mc := &mysqlConn{
- buf: newBuffer(nil),
- maxAllowedPacket: maxPacketSize,
- cfg: &Config{
- InterpolateParams: true,
- },
- }
-
- q, err := mc.interpolateParams("SELECT 'abc?xyz',?", []driver.Value{int64(42)})
- // When InterpolateParams support string literal, this should return `"SELECT 'abc?xyz', 42`
- if err != driver.ErrSkip {
- t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
- }
-}
-
-func TestInterpolateParamsUint64(t *testing.T) {
- mc := &mysqlConn{
- buf: newBuffer(nil),
- maxAllowedPacket: maxPacketSize,
- cfg: &Config{
- InterpolateParams: true,
- },
- }
-
- q, err := mc.interpolateParams("SELECT ?", []driver.Value{uint64(42)})
- if err != nil {
- t.Errorf("Expected err=nil, got err=%#v, q=%#v", err, q)
- }
- if q != "SELECT 42" {
- t.Errorf("Expected uint64 interpolation to work, got q=%#v", q)
- }
-}
-
-func TestCheckNamedValue(t *testing.T) {
- value := driver.NamedValue{Value: ^uint64(0)}
- x := &mysqlConn{}
- err := x.CheckNamedValue(&value)
-
- if err != nil {
- t.Fatal("uint64 high-bit not convertible", err)
- }
-
- if value.Value != ^uint64(0) {
- t.Fatalf("uint64 high-bit converted, got %#v %T", value.Value, value.Value)
- }
-}
-
-// TestCleanCancel tests passed context is cancelled at start.
-// No packet should be sent. Connection should keep current status.
-func TestCleanCancel(t *testing.T) {
- mc := &mysqlConn{
- closech: make(chan struct{}),
- }
- mc.startWatcher()
- defer mc.cleanup()
-
- ctx, cancel := context.WithCancel(context.Background())
- cancel()
-
- for i := 0; i < 3; i++ { // Repeat same behavior
- err := mc.Ping(ctx)
- if err != context.Canceled {
- t.Errorf("expected context.Canceled, got %#v", err)
- }
-
- if mc.closed.IsSet() {
- t.Error("expected mc is not closed, closed actually")
- }
-
- if mc.watching {
- t.Error("expected watching is false, but true")
- }
- }
-}
-
-func TestPingMarkBadConnection(t *testing.T) {
- nc := badConnection{err: errors.New("boom")}
- ms := &mysqlConn{
- netConn: nc,
- buf: newBuffer(nc),
- maxAllowedPacket: defaultMaxAllowedPacket,
- }
-
- err := ms.Ping(context.Background())
-
- if err != driver.ErrBadConn {
- t.Errorf("expected driver.ErrBadConn, got %#v", err)
- }
-}
-
-func TestPingErrInvalidConn(t *testing.T) {
- nc := badConnection{err: errors.New("failed to write"), n: 10}
- ms := &mysqlConn{
- netConn: nc,
- buf: newBuffer(nc),
- maxAllowedPacket: defaultMaxAllowedPacket,
- closech: make(chan struct{}),
- }
-
- err := ms.Ping(context.Background())
-
- if err != ErrInvalidConn {
- t.Errorf("expected ErrInvalidConn, got %#v", err)
- }
-}
-
-type badConnection struct {
- n int
- err error
- net.Conn
-}
-
-func (bc badConnection) Write(b []byte) (n int, err error) {
- return bc.n, bc.err
-}
-
-func (bc badConnection) Close() error {
- return nil
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/connector_test.go b/vendor/github.com/go-sql-driver/mysql/connector_test.go
deleted file mode 100644
index 976903c..0000000
--- a/vendor/github.com/go-sql-driver/mysql/connector_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package mysql
-
-import (
- "context"
- "net"
- "testing"
- "time"
-)
-
-func TestConnectorReturnsTimeout(t *testing.T) {
- connector := &connector{&Config{
- Net: "tcp",
- Addr: "1.1.1.1:1234",
- Timeout: 10 * time.Millisecond,
- }}
-
- _, err := connector.Connect(context.Background())
- if err == nil {
- t.Fatal("error expected")
- }
-
- if nerr, ok := err.(*net.OpError); ok {
- expected := "dial tcp 1.1.1.1:1234: i/o timeout"
- if nerr.Error() != expected {
- t.Fatalf("expected %q, got %q", expected, nerr.Error())
- }
- } else {
- t.Fatalf("expected %T, got %T", nerr, err)
- }
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/driver_test.go b/vendor/github.com/go-sql-driver/mysql/driver_test.go
deleted file mode 100644
index ace083d..0000000
--- a/vendor/github.com/go-sql-driver/mysql/driver_test.go
+++ /dev/null
@@ -1,3165 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "context"
- "crypto/tls"
- "database/sql"
- "database/sql/driver"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "math"
- "net"
- "net/url"
- "os"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-)
-
-// Ensure that all the driver interfaces are implemented
-var (
- _ driver.Rows = &binaryRows{}
- _ driver.Rows = &textRows{}
-)
-
-var (
- user string
- pass string
- prot string
- addr string
- dbname string
- dsn string
- netAddr string
- available bool
-)
-
-var (
- tDate = time.Date(2012, 6, 14, 0, 0, 0, 0, time.UTC)
- sDate = "2012-06-14"
- tDateTime = time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)
- sDateTime = "2011-11-20 21:27:37"
- tDate0 = time.Time{}
- sDate0 = "0000-00-00"
- sDateTime0 = "0000-00-00 00:00:00"
-)
-
-// See https://github.com/go-sql-driver/mysql/wiki/Testing
-func init() {
- // get environment variables
- env := func(key, defaultValue string) string {
- if value := os.Getenv(key); value != "" {
- return value
- }
- return defaultValue
- }
- user = env("MYSQL_TEST_USER", "root")
- pass = env("MYSQL_TEST_PASS", "")
- prot = env("MYSQL_TEST_PROT", "tcp")
- addr = env("MYSQL_TEST_ADDR", "localhost:3306")
- dbname = env("MYSQL_TEST_DBNAME", "gotest")
- netAddr = fmt.Sprintf("%s(%s)", prot, addr)
- dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s", user, pass, netAddr, dbname)
- c, err := net.Dial(prot, addr)
- if err == nil {
- available = true
- c.Close()
- }
-}
-
-type DBTest struct {
- *testing.T
- db *sql.DB
-}
-
-type netErrorMock struct {
- temporary bool
- timeout bool
-}
-
-func (e netErrorMock) Temporary() bool {
- return e.temporary
-}
-
-func (e netErrorMock) Timeout() bool {
- return e.timeout
-}
-
-func (e netErrorMock) Error() string {
- return fmt.Sprintf("mock net error. Temporary: %v, Timeout %v", e.temporary, e.timeout)
-}
-
-func runTestsWithMultiStatement(t *testing.T, dsn string, tests ...func(dbt *DBTest)) {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- dsn += "&multiStatements=true"
- var db *sql.DB
- if _, err := ParseDSN(dsn); err != errInvalidDSNUnsafeCollation {
- db, err = sql.Open("mysql", dsn)
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db.Close()
- }
-
- dbt := &DBTest{t, db}
- for _, test := range tests {
- test(dbt)
- dbt.db.Exec("DROP TABLE IF EXISTS test")
- }
-}
-
-func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- db, err := sql.Open("mysql", dsn)
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db.Close()
-
- db.Exec("DROP TABLE IF EXISTS test")
-
- dsn2 := dsn + "&interpolateParams=true"
- var db2 *sql.DB
- if _, err := ParseDSN(dsn2); err != errInvalidDSNUnsafeCollation {
- db2, err = sql.Open("mysql", dsn2)
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db2.Close()
- }
-
- dsn3 := dsn + "&multiStatements=true"
- var db3 *sql.DB
- if _, err := ParseDSN(dsn3); err != errInvalidDSNUnsafeCollation {
- db3, err = sql.Open("mysql", dsn3)
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db3.Close()
- }
-
- dbt := &DBTest{t, db}
- dbt2 := &DBTest{t, db2}
- dbt3 := &DBTest{t, db3}
- for _, test := range tests {
- test(dbt)
- dbt.db.Exec("DROP TABLE IF EXISTS test")
- if db2 != nil {
- test(dbt2)
- dbt2.db.Exec("DROP TABLE IF EXISTS test")
- }
- if db3 != nil {
- test(dbt3)
- dbt3.db.Exec("DROP TABLE IF EXISTS test")
- }
- }
-}
-
-func (dbt *DBTest) fail(method, query string, err error) {
- if len(query) > 300 {
- query = "[query too large to print]"
- }
- dbt.Fatalf("error on %s %s: %s", method, query, err.Error())
-}
-
-func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) {
- res, err := dbt.db.Exec(query, args...)
- if err != nil {
- dbt.fail("exec", query, err)
- }
- return res
-}
-
-func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) {
- rows, err := dbt.db.Query(query, args...)
- if err != nil {
- dbt.fail("query", query, err)
- }
- return rows
-}
-
-func maybeSkip(t *testing.T, err error, skipErrno uint16) {
- mySQLErr, ok := err.(*MySQLError)
- if !ok {
- return
- }
-
- if mySQLErr.Number == skipErrno {
- t.Skipf("skipping test for error: %v", err)
- }
-}
-
-func TestEmptyQuery(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- // just a comment, no query
- rows := dbt.mustQuery("--")
- defer rows.Close()
- // will hang before #255
- if rows.Next() {
- dbt.Errorf("next on rows must be false")
- }
- })
-}
-
-func TestCRUD(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- // Create Table
- dbt.mustExec("CREATE TABLE test (value BOOL)")
-
- // Test for unexpected data
- var out bool
- rows := dbt.mustQuery("SELECT * FROM test")
- if rows.Next() {
- dbt.Error("unexpected data in empty table")
- }
- rows.Close()
-
- // Create Data
- res := dbt.mustExec("INSERT INTO test VALUES (1)")
- count, err := res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 1 {
- dbt.Fatalf("expected 1 affected row, got %d", count)
- }
-
- id, err := res.LastInsertId()
- if err != nil {
- dbt.Fatalf("res.LastInsertId() returned error: %s", err.Error())
- }
- if id != 0 {
- dbt.Fatalf("expected InsertId 0, got %d", id)
- }
-
- // Read
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if true != out {
- dbt.Errorf("true != %t", out)
- }
-
- if rows.Next() {
- dbt.Error("unexpected data")
- }
- } else {
- dbt.Error("no data")
- }
- rows.Close()
-
- // Update
- res = dbt.mustExec("UPDATE test SET value = ? WHERE value = ?", false, true)
- count, err = res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 1 {
- dbt.Fatalf("expected 1 affected row, got %d", count)
- }
-
- // Check Update
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if false != out {
- dbt.Errorf("false != %t", out)
- }
-
- if rows.Next() {
- dbt.Error("unexpected data")
- }
- } else {
- dbt.Error("no data")
- }
- rows.Close()
-
- // Delete
- res = dbt.mustExec("DELETE FROM test WHERE value = ?", false)
- count, err = res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 1 {
- dbt.Fatalf("expected 1 affected row, got %d", count)
- }
-
- // Check for unexpected rows
- res = dbt.mustExec("DELETE FROM test")
- count, err = res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 0 {
- dbt.Fatalf("expected 0 affected row, got %d", count)
- }
- })
-}
-
-func TestMultiQuery(t *testing.T) {
- runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
- // Create Table
- dbt.mustExec("CREATE TABLE `test` (`id` int(11) NOT NULL, `value` int(11) NOT NULL) ")
-
- // Create Data
- res := dbt.mustExec("INSERT INTO test VALUES (1, 1)")
- count, err := res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 1 {
- dbt.Fatalf("expected 1 affected row, got %d", count)
- }
-
- // Update
- res = dbt.mustExec("UPDATE test SET value = 3 WHERE id = 1; UPDATE test SET value = 4 WHERE id = 1; UPDATE test SET value = 5 WHERE id = 1;")
- count, err = res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 1 {
- dbt.Fatalf("expected 1 affected row, got %d", count)
- }
-
- // Read
- var out int
- rows := dbt.mustQuery("SELECT value FROM test WHERE id=1;")
- if rows.Next() {
- rows.Scan(&out)
- if 5 != out {
- dbt.Errorf("5 != %d", out)
- }
-
- if rows.Next() {
- dbt.Error("unexpected data")
- }
- } else {
- dbt.Error("no data")
- }
- rows.Close()
-
- })
-}
-
-func TestInt(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"}
- in := int64(42)
- var out int64
- var rows *sql.Rows
-
- // SIGNED
- for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ")")
-
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
-
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if in != out {
- dbt.Errorf("%s: %d != %d", v, in, out)
- }
- } else {
- dbt.Errorf("%s: no data", v)
- }
- rows.Close()
-
- dbt.mustExec("DROP TABLE IF EXISTS test")
- }
-
- // UNSIGNED ZEROFILL
- for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + " ZEROFILL)")
-
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
-
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if in != out {
- dbt.Errorf("%s ZEROFILL: %d != %d", v, in, out)
- }
- } else {
- dbt.Errorf("%s ZEROFILL: no data", v)
- }
- rows.Close()
-
- dbt.mustExec("DROP TABLE IF EXISTS test")
- }
- })
-}
-
-func TestFloat32(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- types := [2]string{"FLOAT", "DOUBLE"}
- in := float32(42.23)
- var out float32
- var rows *sql.Rows
- for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ")")
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if in != out {
- dbt.Errorf("%s: %g != %g", v, in, out)
- }
- } else {
- dbt.Errorf("%s: no data", v)
- }
- rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
- }
- })
-}
-
-func TestFloat64(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- types := [2]string{"FLOAT", "DOUBLE"}
- var expected float64 = 42.23
- var out float64
- var rows *sql.Rows
- for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ")")
- dbt.mustExec("INSERT INTO test VALUES (42.23)")
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if expected != out {
- dbt.Errorf("%s: %g != %g", v, expected, out)
- }
- } else {
- dbt.Errorf("%s: no data", v)
- }
- rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
- }
- })
-}
-
-func TestFloat64Placeholder(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- types := [2]string{"FLOAT", "DOUBLE"}
- var expected float64 = 42.23
- var out float64
- var rows *sql.Rows
- for _, v := range types {
- dbt.mustExec("CREATE TABLE test (id int, value " + v + ")")
- dbt.mustExec("INSERT INTO test VALUES (1, 42.23)")
- rows = dbt.mustQuery("SELECT value FROM test WHERE id = ?", 1)
- if rows.Next() {
- rows.Scan(&out)
- if expected != out {
- dbt.Errorf("%s: %g != %g", v, expected, out)
- }
- } else {
- dbt.Errorf("%s: no data", v)
- }
- rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
- }
- })
-}
-
-func TestString(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"}
- in := "κόσμε üöäßñóùéàâÿœ'îë Árvíztűrő いろはにほへとちりぬるを イロハニホヘト דג סקרן чащах น่าฟังเอย"
- var out string
- var rows *sql.Rows
-
- for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8")
-
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
-
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if in != out {
- dbt.Errorf("%s: %s != %s", v, in, out)
- }
- } else {
- dbt.Errorf("%s: no data", v)
- }
- rows.Close()
-
- dbt.mustExec("DROP TABLE IF EXISTS test")
- }
-
- // BLOB
- dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8")
-
- id := 2
- in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " +
- "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " +
- "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " +
- "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. " +
- "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " +
- "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " +
- "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " +
- "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet."
- dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in)
-
- err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out)
- if err != nil {
- dbt.Fatalf("Error on BLOB-Query: %s", err.Error())
- } else if out != in {
- dbt.Errorf("BLOB: %s != %s", in, out)
- }
- })
-}
-
-func TestRawBytes(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- v1 := []byte("aaa")
- v2 := []byte("bbb")
- rows := dbt.mustQuery("SELECT ?, ?", v1, v2)
- defer rows.Close()
- if rows.Next() {
- var o1, o2 sql.RawBytes
- if err := rows.Scan(&o1, &o2); err != nil {
- dbt.Errorf("Got error: %v", err)
- }
- if !bytes.Equal(v1, o1) {
- dbt.Errorf("expected %v, got %v", v1, o1)
- }
- if !bytes.Equal(v2, o2) {
- dbt.Errorf("expected %v, got %v", v2, o2)
- }
- // https://github.com/go-sql-driver/mysql/issues/765
- // Appending to RawBytes shouldn't overwrite next RawBytes.
- o1 = append(o1, "xyzzy"...)
- if !bytes.Equal(v2, o2) {
- dbt.Errorf("expected %v, got %v", v2, o2)
- }
- } else {
- dbt.Errorf("no data")
- }
- })
-}
-
-type testValuer struct {
- value string
-}
-
-func (tv testValuer) Value() (driver.Value, error) {
- return tv.value, nil
-}
-
-func TestValuer(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- in := testValuer{"a_value"}
- var out string
- var rows *sql.Rows
-
- dbt.mustExec("CREATE TABLE test (value VARCHAR(255)) CHARACTER SET utf8")
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if in.value != out {
- dbt.Errorf("Valuer: %v != %s", in, out)
- }
- } else {
- dbt.Errorf("Valuer: no data")
- }
- rows.Close()
-
- dbt.mustExec("DROP TABLE IF EXISTS test")
- })
-}
-
-type testValuerWithValidation struct {
- value string
-}
-
-func (tv testValuerWithValidation) Value() (driver.Value, error) {
- if len(tv.value) == 0 {
- return nil, fmt.Errorf("Invalid string valuer. Value must not be empty")
- }
-
- return tv.value, nil
-}
-
-func TestValuerWithValidation(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- in := testValuerWithValidation{"a_value"}
- var out string
- var rows *sql.Rows
-
- dbt.mustExec("CREATE TABLE testValuer (value VARCHAR(255)) CHARACTER SET utf8")
- dbt.mustExec("INSERT INTO testValuer VALUES (?)", in)
-
- rows = dbt.mustQuery("SELECT value FROM testValuer")
- defer rows.Close()
-
- if rows.Next() {
- rows.Scan(&out)
- if in.value != out {
- dbt.Errorf("Valuer: %v != %s", in, out)
- }
- } else {
- dbt.Errorf("Valuer: no data")
- }
-
- if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", testValuerWithValidation{""}); err == nil {
- dbt.Errorf("Failed to check valuer error")
- }
-
- if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", nil); err != nil {
- dbt.Errorf("Failed to check nil")
- }
-
- if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", map[string]bool{}); err == nil {
- dbt.Errorf("Failed to check not valuer")
- }
-
- dbt.mustExec("DROP TABLE IF EXISTS testValuer")
- })
-}
-
-type timeTests struct {
- dbtype string
- tlayout string
- tests []timeTest
-}
-
-type timeTest struct {
- s string // leading "!": do not use t as value in queries
- t time.Time
-}
-
-type timeMode byte
-
-func (t timeMode) String() string {
- switch t {
- case binaryString:
- return "binary:string"
- case binaryTime:
- return "binary:time.Time"
- case textString:
- return "text:string"
- }
- panic("unsupported timeMode")
-}
-
-func (t timeMode) Binary() bool {
- switch t {
- case binaryString, binaryTime:
- return true
- }
- return false
-}
-
-const (
- binaryString timeMode = iota
- binaryTime
- textString
-)
-
-func (t timeTest) genQuery(dbtype string, mode timeMode) string {
- var inner string
- if mode.Binary() {
- inner = "?"
- } else {
- inner = `"%s"`
- }
- return `SELECT cast(` + inner + ` as ` + dbtype + `)`
-}
-
-func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) {
- var rows *sql.Rows
- query := t.genQuery(dbtype, mode)
- switch mode {
- case binaryString:
- rows = dbt.mustQuery(query, t.s)
- case binaryTime:
- rows = dbt.mustQuery(query, t.t)
- case textString:
- query = fmt.Sprintf(query, t.s)
- rows = dbt.mustQuery(query)
- default:
- panic("unsupported mode")
- }
- defer rows.Close()
- var err error
- if !rows.Next() {
- err = rows.Err()
- if err == nil {
- err = fmt.Errorf("no data")
- }
- dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
- return
- }
- var dst interface{}
- err = rows.Scan(&dst)
- if err != nil {
- dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
- return
- }
- switch val := dst.(type) {
- case []uint8:
- str := string(val)
- if str == t.s {
- return
- }
- if mode.Binary() && dbtype == "DATETIME" && len(str) == 26 && str[:19] == t.s {
- // a fix mainly for TravisCI:
- // accept full microsecond resolution in result for DATETIME columns
- // where the binary protocol was used
- return
- }
- dbt.Errorf("%s [%s] to string: expected %q, got %q",
- dbtype, mode,
- t.s, str,
- )
- case time.Time:
- if val == t.t {
- return
- }
- dbt.Errorf("%s [%s] to string: expected %q, got %q",
- dbtype, mode,
- t.s, val.Format(tlayout),
- )
- default:
- fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t})
- dbt.Errorf("%s [%s]: unhandled type %T (is '%v')",
- dbtype, mode,
- val, val,
- )
- }
-}
-
-func TestDateTime(t *testing.T) {
- afterTime := func(t time.Time, d string) time.Time {
- dur, err := time.ParseDuration(d)
- if err != nil {
- panic(err)
- }
- return t.Add(dur)
- }
- // NOTE: MySQL rounds DATETIME(x) up - but that's not included in the tests
- format := "2006-01-02 15:04:05.999999"
- t0 := time.Time{}
- tstr0 := "0000-00-00 00:00:00.000000"
- testcases := []timeTests{
- {"DATE", format[:10], []timeTest{
- {t: time.Date(2011, 11, 20, 0, 0, 0, 0, time.UTC)},
- {t: t0, s: tstr0[:10]},
- }},
- {"DATETIME", format[:19], []timeTest{
- {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)},
- {t: t0, s: tstr0[:19]},
- }},
- {"DATETIME(0)", format[:21], []timeTest{
- {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)},
- {t: t0, s: tstr0[:19]},
- }},
- {"DATETIME(1)", format[:21], []timeTest{
- {t: time.Date(2011, 11, 20, 21, 27, 37, 100000000, time.UTC)},
- {t: t0, s: tstr0[:21]},
- }},
- {"DATETIME(6)", format, []timeTest{
- {t: time.Date(2011, 11, 20, 21, 27, 37, 123456000, time.UTC)},
- {t: t0, s: tstr0},
- }},
- {"TIME", format[11:19], []timeTest{
- {t: afterTime(t0, "12345s")},
- {s: "!-12:34:56"},
- {s: "!-838:59:59"},
- {s: "!838:59:59"},
- {t: t0, s: tstr0[11:19]},
- }},
- {"TIME(0)", format[11:19], []timeTest{
- {t: afterTime(t0, "12345s")},
- {s: "!-12:34:56"},
- {s: "!-838:59:59"},
- {s: "!838:59:59"},
- {t: t0, s: tstr0[11:19]},
- }},
- {"TIME(1)", format[11:21], []timeTest{
- {t: afterTime(t0, "12345600ms")},
- {s: "!-12:34:56.7"},
- {s: "!-838:59:58.9"},
- {s: "!838:59:58.9"},
- {t: t0, s: tstr0[11:21]},
- }},
- {"TIME(6)", format[11:], []timeTest{
- {t: afterTime(t0, "1234567890123000ns")},
- {s: "!-12:34:56.789012"},
- {s: "!-838:59:58.999999"},
- {s: "!838:59:58.999999"},
- {t: t0, s: tstr0[11:]},
- }},
- }
- dsns := []string{
- dsn + "&parseTime=true",
- dsn + "&parseTime=false",
- }
- for _, testdsn := range dsns {
- runTests(t, testdsn, func(dbt *DBTest) {
- microsecsSupported := false
- zeroDateSupported := false
- var rows *sql.Rows
- var err error
- rows, err = dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`)
- if err == nil {
- rows.Scan(µsecsSupported)
- rows.Close()
- }
- rows, err = dbt.db.Query(`SELECT cast("0000-00-00" as DATE) = "0000-00-00"`)
- if err == nil {
- rows.Scan(&zeroDateSupported)
- rows.Close()
- }
- for _, setups := range testcases {
- if t := setups.dbtype; !microsecsSupported && t[len(t)-1:] == ")" {
- // skip fractional second tests if unsupported by server
- continue
- }
- for _, setup := range setups.tests {
- allowBinTime := true
- if setup.s == "" {
- // fill time string wherever Go can reliable produce it
- setup.s = setup.t.Format(setups.tlayout)
- } else if setup.s[0] == '!' {
- // skip tests using setup.t as source in queries
- allowBinTime = false
- // fix setup.s - remove the "!"
- setup.s = setup.s[1:]
- }
- if !zeroDateSupported && setup.s == tstr0[:len(setup.s)] {
- // skip disallowed 0000-00-00 date
- continue
- }
- setup.run(dbt, setups.dbtype, setups.tlayout, textString)
- setup.run(dbt, setups.dbtype, setups.tlayout, binaryString)
- if allowBinTime {
- setup.run(dbt, setups.dbtype, setups.tlayout, binaryTime)
- }
- }
- }
- })
- }
-}
-
-func TestTimestampMicros(t *testing.T) {
- format := "2006-01-02 15:04:05.999999"
- f0 := format[:19]
- f1 := format[:21]
- f6 := format[:26]
- runTests(t, dsn, func(dbt *DBTest) {
- // check if microseconds are supported.
- // Do not use timestamp(x) for that check - before 5.5.6, x would mean display width
- // and not precision.
- // Se last paragraph at http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html
- microsecsSupported := false
- if rows, err := dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`); err == nil {
- rows.Scan(µsecsSupported)
- rows.Close()
- }
- if !microsecsSupported {
- // skip test
- return
- }
- _, err := dbt.db.Exec(`
- CREATE TABLE test (
- value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `',
- value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `',
- value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `'
- )`,
- )
- if err != nil {
- dbt.Error(err)
- }
- defer dbt.mustExec("DROP TABLE IF EXISTS test")
- dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6)
- var res0, res1, res6 string
- rows := dbt.mustQuery("SELECT * FROM test")
- defer rows.Close()
- if !rows.Next() {
- dbt.Errorf("test contained no selectable values")
- }
- err = rows.Scan(&res0, &res1, &res6)
- if err != nil {
- dbt.Error(err)
- }
- if res0 != f0 {
- dbt.Errorf("expected %q, got %q", f0, res0)
- }
- if res1 != f1 {
- dbt.Errorf("expected %q, got %q", f1, res1)
- }
- if res6 != f6 {
- dbt.Errorf("expected %q, got %q", f6, res6)
- }
- })
-}
-
-func TestNULL(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- nullStmt, err := dbt.db.Prepare("SELECT NULL")
- if err != nil {
- dbt.Fatal(err)
- }
- defer nullStmt.Close()
-
- nonNullStmt, err := dbt.db.Prepare("SELECT 1")
- if err != nil {
- dbt.Fatal(err)
- }
- defer nonNullStmt.Close()
-
- // NullBool
- var nb sql.NullBool
- // Invalid
- if err = nullStmt.QueryRow().Scan(&nb); err != nil {
- dbt.Fatal(err)
- }
- if nb.Valid {
- dbt.Error("valid NullBool which should be invalid")
- }
- // Valid
- if err = nonNullStmt.QueryRow().Scan(&nb); err != nil {
- dbt.Fatal(err)
- }
- if !nb.Valid {
- dbt.Error("invalid NullBool which should be valid")
- } else if nb.Bool != true {
- dbt.Errorf("Unexpected NullBool value: %t (should be true)", nb.Bool)
- }
-
- // NullFloat64
- var nf sql.NullFloat64
- // Invalid
- if err = nullStmt.QueryRow().Scan(&nf); err != nil {
- dbt.Fatal(err)
- }
- if nf.Valid {
- dbt.Error("valid NullFloat64 which should be invalid")
- }
- // Valid
- if err = nonNullStmt.QueryRow().Scan(&nf); err != nil {
- dbt.Fatal(err)
- }
- if !nf.Valid {
- dbt.Error("invalid NullFloat64 which should be valid")
- } else if nf.Float64 != float64(1) {
- dbt.Errorf("unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64)
- }
-
- // NullInt64
- var ni sql.NullInt64
- // Invalid
- if err = nullStmt.QueryRow().Scan(&ni); err != nil {
- dbt.Fatal(err)
- }
- if ni.Valid {
- dbt.Error("valid NullInt64 which should be invalid")
- }
- // Valid
- if err = nonNullStmt.QueryRow().Scan(&ni); err != nil {
- dbt.Fatal(err)
- }
- if !ni.Valid {
- dbt.Error("invalid NullInt64 which should be valid")
- } else if ni.Int64 != int64(1) {
- dbt.Errorf("unexpected NullInt64 value: %d (should be 1)", ni.Int64)
- }
-
- // NullString
- var ns sql.NullString
- // Invalid
- if err = nullStmt.QueryRow().Scan(&ns); err != nil {
- dbt.Fatal(err)
- }
- if ns.Valid {
- dbt.Error("valid NullString which should be invalid")
- }
- // Valid
- if err = nonNullStmt.QueryRow().Scan(&ns); err != nil {
- dbt.Fatal(err)
- }
- if !ns.Valid {
- dbt.Error("invalid NullString which should be valid")
- } else if ns.String != `1` {
- dbt.Error("unexpected NullString value:" + ns.String + " (should be `1`)")
- }
-
- // nil-bytes
- var b []byte
- // Read nil
- if err = nullStmt.QueryRow().Scan(&b); err != nil {
- dbt.Fatal(err)
- }
- if b != nil {
- dbt.Error("non-nil []byte which should be nil")
- }
- // Read non-nil
- if err = nonNullStmt.QueryRow().Scan(&b); err != nil {
- dbt.Fatal(err)
- }
- if b == nil {
- dbt.Error("nil []byte which should be non-nil")
- }
- // Insert nil
- b = nil
- success := false
- if err = dbt.db.QueryRow("SELECT ? IS NULL", b).Scan(&success); err != nil {
- dbt.Fatal(err)
- }
- if !success {
- dbt.Error("inserting []byte(nil) as NULL failed")
- }
- // Check input==output with input==nil
- b = nil
- if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil {
- dbt.Fatal(err)
- }
- if b != nil {
- dbt.Error("non-nil echo from nil input")
- }
- // Check input==output with input!=nil
- b = []byte("")
- if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil {
- dbt.Fatal(err)
- }
- if b == nil {
- dbt.Error("nil echo from non-nil input")
- }
-
- // Insert NULL
- dbt.mustExec("CREATE TABLE test (dummmy1 int, value int, dummy2 int)")
-
- dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2)
-
- var out interface{}
- rows := dbt.mustQuery("SELECT * FROM test")
- defer rows.Close()
- if rows.Next() {
- rows.Scan(&out)
- if out != nil {
- dbt.Errorf("%v != nil", out)
- }
- } else {
- dbt.Error("no data")
- }
- })
-}
-
-func TestUint64(t *testing.T) {
- const (
- u0 = uint64(0)
- uall = ^u0
- uhigh = uall >> 1
- utop = ^uhigh
- s0 = int64(0)
- sall = ^s0
- shigh = int64(uhigh)
- stop = ^shigh
- )
- runTests(t, dsn, func(dbt *DBTest) {
- stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? ,?, ?, ?, ?, ?`)
- if err != nil {
- dbt.Fatal(err)
- }
- defer stmt.Close()
- row := stmt.QueryRow(
- u0, uhigh, utop, uall,
- s0, shigh, stop, sall,
- )
-
- var ua, ub, uc, ud uint64
- var sa, sb, sc, sd int64
-
- err = row.Scan(&ua, &ub, &uc, &ud, &sa, &sb, &sc, &sd)
- if err != nil {
- dbt.Fatal(err)
- }
- switch {
- case ua != u0,
- ub != uhigh,
- uc != utop,
- ud != uall,
- sa != s0,
- sb != shigh,
- sc != stop,
- sd != sall:
- dbt.Fatal("unexpected result value")
- }
- })
-}
-
-func TestLongData(t *testing.T) {
- runTests(t, dsn+"&maxAllowedPacket=0", func(dbt *DBTest) {
- var maxAllowedPacketSize int
- err := dbt.db.QueryRow("select @@max_allowed_packet").Scan(&maxAllowedPacketSize)
- if err != nil {
- dbt.Fatal(err)
- }
- maxAllowedPacketSize--
-
- // don't get too ambitious
- if maxAllowedPacketSize > 1<<25 {
- maxAllowedPacketSize = 1 << 25
- }
-
- dbt.mustExec("CREATE TABLE test (value LONGBLOB)")
-
- in := strings.Repeat(`a`, maxAllowedPacketSize+1)
- var out string
- var rows *sql.Rows
-
- // Long text data
- const nonDataQueryLen = 28 // length query w/o value
- inS := in[:maxAllowedPacketSize-nonDataQueryLen]
- dbt.mustExec("INSERT INTO test VALUES('" + inS + "')")
- rows = dbt.mustQuery("SELECT value FROM test")
- defer rows.Close()
- if rows.Next() {
- rows.Scan(&out)
- if inS != out {
- dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out))
- }
- if rows.Next() {
- dbt.Error("LONGBLOB: unexpexted row")
- }
- } else {
- dbt.Fatalf("LONGBLOB: no data")
- }
-
- // Empty table
- dbt.mustExec("TRUNCATE TABLE test")
-
- // Long binary data
- dbt.mustExec("INSERT INTO test VALUES(?)", in)
- rows = dbt.mustQuery("SELECT value FROM test WHERE 1=?", 1)
- defer rows.Close()
- if rows.Next() {
- rows.Scan(&out)
- if in != out {
- dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out))
- }
- if rows.Next() {
- dbt.Error("LONGBLOB: unexpexted row")
- }
- } else {
- if err = rows.Err(); err != nil {
- dbt.Fatalf("LONGBLOB: no data (err: %s)", err.Error())
- } else {
- dbt.Fatal("LONGBLOB: no data (err: )")
- }
- }
- })
-}
-
-func TestLoadData(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- verifyLoadDataResult := func() {
- rows, err := dbt.db.Query("SELECT * FROM test")
- if err != nil {
- dbt.Fatal(err.Error())
- }
-
- i := 0
- values := [4]string{
- "a string",
- "a string containing a \t",
- "a string containing a \n",
- "a string containing both \t\n",
- }
-
- var id int
- var value string
-
- for rows.Next() {
- i++
- err = rows.Scan(&id, &value)
- if err != nil {
- dbt.Fatal(err.Error())
- }
- if i != id {
- dbt.Fatalf("%d != %d", i, id)
- }
- if values[i-1] != value {
- dbt.Fatalf("%q != %q", values[i-1], value)
- }
- }
- err = rows.Err()
- if err != nil {
- dbt.Fatal(err.Error())
- }
-
- if i != 4 {
- dbt.Fatalf("rows count mismatch. Got %d, want 4", i)
- }
- }
-
- dbt.db.Exec("DROP TABLE IF EXISTS test")
- dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8")
-
- // Local File
- file, err := ioutil.TempFile("", "gotest")
- defer os.Remove(file.Name())
- if err != nil {
- dbt.Fatal(err)
- }
- RegisterLocalFile(file.Name())
-
- // Try first with empty file
- dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
- var count int
- err = dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&count)
- if err != nil {
- dbt.Fatal(err.Error())
- }
- if count != 0 {
- dbt.Fatalf("unexpected row count: got %d, want 0", count)
- }
-
- // Then fille File with data and try to load it
- file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n")
- file.Close()
- dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
- verifyLoadDataResult()
-
- // Try with non-existing file
- _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test")
- if err == nil {
- dbt.Fatal("load non-existent file didn't fail")
- } else if err.Error() != "local file 'doesnotexist' is not registered" {
- dbt.Fatal(err.Error())
- }
-
- // Empty table
- dbt.mustExec("TRUNCATE TABLE test")
-
- // Reader
- RegisterReaderHandler("test", func() io.Reader {
- file, err = os.Open(file.Name())
- if err != nil {
- dbt.Fatal(err)
- }
- return file
- })
- dbt.mustExec("LOAD DATA LOCAL INFILE 'Reader::test' INTO TABLE test")
- verifyLoadDataResult()
- // negative test
- _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test")
- if err == nil {
- dbt.Fatal("load non-existent Reader didn't fail")
- } else if err.Error() != "Reader 'doesnotexist' is not registered" {
- dbt.Fatal(err.Error())
- }
- })
-}
-
-func TestFoundRows(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)")
- dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
-
- res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
- count, err := res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 2 {
- dbt.Fatalf("Expected 2 affected rows, got %d", count)
- }
- res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
- count, err = res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 2 {
- dbt.Fatalf("Expected 2 affected rows, got %d", count)
- }
- })
- runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)")
- dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
-
- res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
- count, err := res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 2 {
- dbt.Fatalf("Expected 2 matched rows, got %d", count)
- }
- res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
- count, err = res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 3 {
- dbt.Fatalf("Expected 3 matched rows, got %d", count)
- }
- })
-}
-
-func TestTLS(t *testing.T) {
- tlsTestReq := func(dbt *DBTest) {
- if err := dbt.db.Ping(); err != nil {
- if err == ErrNoTLS {
- dbt.Skip("server does not support TLS")
- } else {
- dbt.Fatalf("error on Ping: %s", err.Error())
- }
- }
-
- rows := dbt.mustQuery("SHOW STATUS LIKE 'Ssl_cipher'")
- defer rows.Close()
-
- var variable, value *sql.RawBytes
- for rows.Next() {
- if err := rows.Scan(&variable, &value); err != nil {
- dbt.Fatal(err.Error())
- }
-
- if (*value == nil) || (len(*value) == 0) {
- dbt.Fatalf("no Cipher")
- } else {
- dbt.Logf("Cipher: %s", *value)
- }
- }
- }
- tlsTestOpt := func(dbt *DBTest) {
- if err := dbt.db.Ping(); err != nil {
- dbt.Fatalf("error on Ping: %s", err.Error())
- }
- }
-
- runTests(t, dsn+"&tls=preferred", tlsTestOpt)
- runTests(t, dsn+"&tls=skip-verify", tlsTestReq)
-
- // Verify that registering / using a custom cfg works
- RegisterTLSConfig("custom-skip-verify", &tls.Config{
- InsecureSkipVerify: true,
- })
- runTests(t, dsn+"&tls=custom-skip-verify", tlsTestReq)
-}
-
-func TestReuseClosedConnection(t *testing.T) {
- // this test does not use sql.database, it uses the driver directly
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- md := &MySQLDriver{}
- conn, err := md.Open(dsn)
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- stmt, err := conn.Prepare("DO 1")
- if err != nil {
- t.Fatalf("error preparing statement: %s", err.Error())
- }
- _, err = stmt.Exec(nil)
- if err != nil {
- t.Fatalf("error executing statement: %s", err.Error())
- }
- err = conn.Close()
- if err != nil {
- t.Fatalf("error closing connection: %s", err.Error())
- }
-
- defer func() {
- if err := recover(); err != nil {
- t.Errorf("panic after reusing a closed connection: %v", err)
- }
- }()
- _, err = stmt.Exec(nil)
- if err != nil && err != driver.ErrBadConn {
- t.Errorf("unexpected error '%s', expected '%s'",
- err.Error(), driver.ErrBadConn.Error())
- }
-}
-
-func TestCharset(t *testing.T) {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- mustSetCharset := func(charsetParam, expected string) {
- runTests(t, dsn+"&"+charsetParam, func(dbt *DBTest) {
- rows := dbt.mustQuery("SELECT @@character_set_connection")
- defer rows.Close()
-
- if !rows.Next() {
- dbt.Fatalf("error getting connection charset: %s", rows.Err())
- }
-
- var got string
- rows.Scan(&got)
-
- if got != expected {
- dbt.Fatalf("expected connection charset %s but got %s", expected, got)
- }
- })
- }
-
- // non utf8 test
- mustSetCharset("charset=ascii", "ascii")
-
- // when the first charset is invalid, use the second
- mustSetCharset("charset=none,utf8", "utf8")
-
- // when the first charset is valid, use it
- mustSetCharset("charset=ascii,utf8", "ascii")
- mustSetCharset("charset=utf8,ascii", "utf8")
-}
-
-func TestFailingCharset(t *testing.T) {
- runTests(t, dsn+"&charset=none", func(dbt *DBTest) {
- // run query to really establish connection...
- _, err := dbt.db.Exec("SELECT 1")
- if err == nil {
- dbt.db.Close()
- t.Fatalf("connection must not succeed without a valid charset")
- }
- })
-}
-
-func TestCollation(t *testing.T) {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- defaultCollation := "utf8mb4_general_ci"
- testCollations := []string{
- "", // do not set
- defaultCollation, // driver default
- "latin1_general_ci",
- "binary",
- "utf8_unicode_ci",
- "cp1257_bin",
- }
-
- for _, collation := range testCollations {
- var expected, tdsn string
- if collation != "" {
- tdsn = dsn + "&collation=" + collation
- expected = collation
- } else {
- tdsn = dsn
- expected = defaultCollation
- }
-
- runTests(t, tdsn, func(dbt *DBTest) {
- var got string
- if err := dbt.db.QueryRow("SELECT @@collation_connection").Scan(&got); err != nil {
- dbt.Fatal(err)
- }
-
- if got != expected {
- dbt.Fatalf("expected connection collation %s but got %s", expected, got)
- }
- })
- }
-}
-
-func TestColumnsWithAlias(t *testing.T) {
- runTests(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest) {
- rows := dbt.mustQuery("SELECT 1 AS A")
- defer rows.Close()
- cols, _ := rows.Columns()
- if len(cols) != 1 {
- t.Fatalf("expected 1 column, got %d", len(cols))
- }
- if cols[0] != "A" {
- t.Fatalf("expected column name \"A\", got \"%s\"", cols[0])
- }
-
- rows = dbt.mustQuery("SELECT * FROM (SELECT 1 AS one) AS A")
- defer rows.Close()
- cols, _ = rows.Columns()
- if len(cols) != 1 {
- t.Fatalf("expected 1 column, got %d", len(cols))
- }
- if cols[0] != "A.one" {
- t.Fatalf("expected column name \"A.one\", got \"%s\"", cols[0])
- }
- })
-}
-
-func TestRawBytesResultExceedsBuffer(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- // defaultBufSize from buffer.go
- expected := strings.Repeat("abc", defaultBufSize)
-
- rows := dbt.mustQuery("SELECT '" + expected + "'")
- defer rows.Close()
- if !rows.Next() {
- dbt.Error("expected result, got none")
- }
- var result sql.RawBytes
- rows.Scan(&result)
- if expected != string(result) {
- dbt.Error("result did not match expected value")
- }
- })
-}
-
-func TestTimezoneConversion(t *testing.T) {
- zones := []string{"UTC", "US/Central", "US/Pacific", "Local"}
-
- // Regression test for timezone handling
- tzTest := func(dbt *DBTest) {
- // Create table
- dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)")
-
- // Insert local time into database (should be converted)
- usCentral, _ := time.LoadLocation("US/Central")
- reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(usCentral)
- dbt.mustExec("INSERT INTO test VALUE (?)", reftime)
-
- // Retrieve time from DB
- rows := dbt.mustQuery("SELECT ts FROM test")
- defer rows.Close()
- if !rows.Next() {
- dbt.Fatal("did not get any rows out")
- }
-
- var dbTime time.Time
- err := rows.Scan(&dbTime)
- if err != nil {
- dbt.Fatal("Err", err)
- }
-
- // Check that dates match
- if reftime.Unix() != dbTime.Unix() {
- dbt.Errorf("times do not match.\n")
- dbt.Errorf(" Now(%v)=%v\n", usCentral, reftime)
- dbt.Errorf(" Now(UTC)=%v\n", dbTime)
- }
- }
-
- for _, tz := range zones {
- runTests(t, dsn+"&parseTime=true&loc="+url.QueryEscape(tz), tzTest)
- }
-}
-
-// Special cases
-
-func TestRowsClose(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- rows, err := dbt.db.Query("SELECT 1")
- if err != nil {
- dbt.Fatal(err)
- }
-
- err = rows.Close()
- if err != nil {
- dbt.Fatal(err)
- }
-
- if rows.Next() {
- dbt.Fatal("unexpected row after rows.Close()")
- }
-
- err = rows.Err()
- if err != nil {
- dbt.Fatal(err)
- }
- })
-}
-
-// dangling statements
-// http://code.google.com/p/go/issues/detail?id=3865
-func TestCloseStmtBeforeRows(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- stmt, err := dbt.db.Prepare("SELECT 1")
- if err != nil {
- dbt.Fatal(err)
- }
-
- rows, err := stmt.Query()
- if err != nil {
- stmt.Close()
- dbt.Fatal(err)
- }
- defer rows.Close()
-
- err = stmt.Close()
- if err != nil {
- dbt.Fatal(err)
- }
-
- if !rows.Next() {
- dbt.Fatal("getting row failed")
- } else {
- err = rows.Err()
- if err != nil {
- dbt.Fatal(err)
- }
-
- var out bool
- err = rows.Scan(&out)
- if err != nil {
- dbt.Fatalf("error on rows.Scan(): %s", err.Error())
- }
- if out != true {
- dbt.Errorf("true != %t", out)
- }
- }
- })
-}
-
-// It is valid to have multiple Rows for the same Stmt
-// http://code.google.com/p/go/issues/detail?id=3734
-func TestStmtMultiRows(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0")
- if err != nil {
- dbt.Fatal(err)
- }
-
- rows1, err := stmt.Query()
- if err != nil {
- stmt.Close()
- dbt.Fatal(err)
- }
- defer rows1.Close()
-
- rows2, err := stmt.Query()
- if err != nil {
- stmt.Close()
- dbt.Fatal(err)
- }
- defer rows2.Close()
-
- var out bool
-
- // 1
- if !rows1.Next() {
- dbt.Fatal("first rows1.Next failed")
- } else {
- err = rows1.Err()
- if err != nil {
- dbt.Fatal(err)
- }
-
- err = rows1.Scan(&out)
- if err != nil {
- dbt.Fatalf("error on rows.Scan(): %s", err.Error())
- }
- if out != true {
- dbt.Errorf("true != %t", out)
- }
- }
-
- if !rows2.Next() {
- dbt.Fatal("first rows2.Next failed")
- } else {
- err = rows2.Err()
- if err != nil {
- dbt.Fatal(err)
- }
-
- err = rows2.Scan(&out)
- if err != nil {
- dbt.Fatalf("error on rows.Scan(): %s", err.Error())
- }
- if out != true {
- dbt.Errorf("true != %t", out)
- }
- }
-
- // 2
- if !rows1.Next() {
- dbt.Fatal("second rows1.Next failed")
- } else {
- err = rows1.Err()
- if err != nil {
- dbt.Fatal(err)
- }
-
- err = rows1.Scan(&out)
- if err != nil {
- dbt.Fatalf("error on rows.Scan(): %s", err.Error())
- }
- if out != false {
- dbt.Errorf("false != %t", out)
- }
-
- if rows1.Next() {
- dbt.Fatal("unexpected row on rows1")
- }
- err = rows1.Close()
- if err != nil {
- dbt.Fatal(err)
- }
- }
-
- if !rows2.Next() {
- dbt.Fatal("second rows2.Next failed")
- } else {
- err = rows2.Err()
- if err != nil {
- dbt.Fatal(err)
- }
-
- err = rows2.Scan(&out)
- if err != nil {
- dbt.Fatalf("error on rows.Scan(): %s", err.Error())
- }
- if out != false {
- dbt.Errorf("false != %t", out)
- }
-
- if rows2.Next() {
- dbt.Fatal("unexpected row on rows2")
- }
- err = rows2.Close()
- if err != nil {
- dbt.Fatal(err)
- }
- }
- })
-}
-
-// Regression test for
-// * more than 32 NULL parameters (issue 209)
-// * more parameters than fit into the buffer (issue 201)
-// * parameters * 64 > max_allowed_packet (issue 734)
-func TestPreparedManyCols(t *testing.T) {
- numParams := 65535
- runTests(t, dsn, func(dbt *DBTest) {
- query := "SELECT ?" + strings.Repeat(",?", numParams-1)
- stmt, err := dbt.db.Prepare(query)
- if err != nil {
- dbt.Fatal(err)
- }
- defer stmt.Close()
-
- // create more parameters than fit into the buffer
- // which will take nil-values
- params := make([]interface{}, numParams)
- rows, err := stmt.Query(params...)
- if err != nil {
- dbt.Fatal(err)
- }
- rows.Close()
-
- // Create 0byte string which we can't send via STMT_LONG_DATA.
- for i := 0; i < numParams; i++ {
- params[i] = ""
- }
- rows, err = stmt.Query(params...)
- if err != nil {
- dbt.Fatal(err)
- }
- rows.Close()
- })
-}
-
-func TestConcurrent(t *testing.T) {
- if enabled, _ := readBool(os.Getenv("MYSQL_TEST_CONCURRENT")); !enabled {
- t.Skip("MYSQL_TEST_CONCURRENT env var not set")
- }
-
- runTests(t, dsn, func(dbt *DBTest) {
- var max int
- err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max)
- if err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- dbt.Logf("testing up to %d concurrent connections \r\n", max)
-
- var remaining, succeeded int32 = int32(max), 0
-
- var wg sync.WaitGroup
- wg.Add(max)
-
- var fatalError string
- var once sync.Once
- fatalf := func(s string, vals ...interface{}) {
- once.Do(func() {
- fatalError = fmt.Sprintf(s, vals...)
- })
- }
-
- for i := 0; i < max; i++ {
- go func(id int) {
- defer wg.Done()
-
- tx, err := dbt.db.Begin()
- atomic.AddInt32(&remaining, -1)
-
- if err != nil {
- if err.Error() != "Error 1040: Too many connections" {
- fatalf("error on conn %d: %s", id, err.Error())
- }
- return
- }
-
- // keep the connection busy until all connections are open
- for remaining > 0 {
- if _, err = tx.Exec("DO 1"); err != nil {
- fatalf("error on conn %d: %s", id, err.Error())
- return
- }
- }
-
- if err = tx.Commit(); err != nil {
- fatalf("error on conn %d: %s", id, err.Error())
- return
- }
-
- // everything went fine with this connection
- atomic.AddInt32(&succeeded, 1)
- }(i)
- }
-
- // wait until all conections are open
- wg.Wait()
-
- if fatalError != "" {
- dbt.Fatal(fatalError)
- }
-
- dbt.Logf("reached %d concurrent connections\r\n", succeeded)
- })
-}
-
-func testDialError(t *testing.T, dialErr error, expectErr error) {
- RegisterDialContext("mydial", func(ctx context.Context, addr string) (net.Conn, error) {
- return nil, dialErr
- })
-
- db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db.Close()
-
- _, err = db.Exec("DO 1")
- if err != expectErr {
- t.Fatalf("was expecting %s. Got: %s", dialErr, err)
- }
-}
-
-func TestDialUnknownError(t *testing.T) {
- testErr := fmt.Errorf("test")
- testDialError(t, testErr, testErr)
-}
-
-func TestDialNonRetryableNetErr(t *testing.T) {
- testErr := netErrorMock{}
- testDialError(t, testErr, testErr)
-}
-
-func TestDialTemporaryNetErr(t *testing.T) {
- testErr := netErrorMock{temporary: true}
- testDialError(t, testErr, testErr)
-}
-
-// Tests custom dial functions
-func TestCustomDial(t *testing.T) {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- // our custom dial function which justs wraps net.Dial here
- RegisterDialContext("mydial", func(ctx context.Context, addr string) (net.Conn, error) {
- var d net.Dialer
- return d.DialContext(ctx, prot, addr)
- })
-
- db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db.Close()
-
- if _, err = db.Exec("DO 1"); err != nil {
- t.Fatalf("connection failed: %s", err.Error())
- }
-}
-
-func TestSQLInjection(t *testing.T) {
- createTest := func(arg string) func(dbt *DBTest) {
- return func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- dbt.mustExec("INSERT INTO test VALUES (?)", 1)
-
- var v int
- // NULL can't be equal to anything, the idea here is to inject query so it returns row
- // This test verifies that escapeQuotes and escapeBackslash are working properly
- err := dbt.db.QueryRow("SELECT v FROM test WHERE NULL = ?", arg).Scan(&v)
- if err == sql.ErrNoRows {
- return // success, sql injection failed
- } else if err == nil {
- dbt.Errorf("sql injection successful with arg: %s", arg)
- } else {
- dbt.Errorf("error running query with arg: %s; err: %s", arg, err.Error())
- }
- }
- }
-
- dsns := []string{
- dsn,
- dsn + "&sql_mode='NO_BACKSLASH_ESCAPES'",
- }
- for _, testdsn := range dsns {
- runTests(t, testdsn, createTest("1 OR 1=1"))
- runTests(t, testdsn, createTest("' OR '1'='1"))
- }
-}
-
-// Test if inserted data is correctly retrieved after being escaped
-func TestInsertRetrieveEscapedData(t *testing.T) {
- testData := func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v VARCHAR(255))")
-
- // All sequences that are escaped by escapeQuotes and escapeBackslash
- v := "foo \x00\n\r\x1a\"'\\"
- dbt.mustExec("INSERT INTO test VALUES (?)", v)
-
- var out string
- err := dbt.db.QueryRow("SELECT v FROM test").Scan(&out)
- if err != nil {
- dbt.Fatalf("%s", err.Error())
- }
-
- if out != v {
- dbt.Errorf("%q != %q", out, v)
- }
- }
-
- dsns := []string{
- dsn,
- dsn + "&sql_mode='NO_BACKSLASH_ESCAPES'",
- }
- for _, testdsn := range dsns {
- runTests(t, testdsn, testData)
- }
-}
-
-func TestUnixSocketAuthFail(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- // Save the current logger so we can restore it.
- oldLogger := errLog
-
- // Set a new logger so we can capture its output.
- buffer := bytes.NewBuffer(make([]byte, 0, 64))
- newLogger := log.New(buffer, "prefix: ", 0)
- SetLogger(newLogger)
-
- // Restore the logger.
- defer SetLogger(oldLogger)
-
- // Make a new DSN that uses the MySQL socket file and a bad password, which
- // we can make by simply appending any character to the real password.
- badPass := pass + "x"
- socket := ""
- if prot == "unix" {
- socket = addr
- } else {
- // Get socket file from MySQL.
- err := dbt.db.QueryRow("SELECT @@socket").Scan(&socket)
- if err != nil {
- t.Fatalf("error on SELECT @@socket: %s", err.Error())
- }
- }
- t.Logf("socket: %s", socket)
- badDSN := fmt.Sprintf("%s:%s@unix(%s)/%s?timeout=30s", user, badPass, socket, dbname)
- db, err := sql.Open("mysql", badDSN)
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db.Close()
-
- // Connect to MySQL for real. This will cause an auth failure.
- err = db.Ping()
- if err == nil {
- t.Error("expected Ping() to return an error")
- }
-
- // The driver should not log anything.
- if actual := buffer.String(); actual != "" {
- t.Errorf("expected no output, got %q", actual)
- }
- })
-}
-
-// See Issue #422
-func TestInterruptBySignal(t *testing.T) {
- runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
- dbt.mustExec(`
- DROP PROCEDURE IF EXISTS test_signal;
- CREATE PROCEDURE test_signal(ret INT)
- BEGIN
- SELECT ret;
- SIGNAL SQLSTATE
- '45001'
- SET
- MESSAGE_TEXT = "an error",
- MYSQL_ERRNO = 45001;
- END
- `)
- defer dbt.mustExec("DROP PROCEDURE test_signal")
-
- var val int
-
- // text protocol
- rows, err := dbt.db.Query("CALL test_signal(42)")
- if err != nil {
- dbt.Fatalf("error on text query: %s", err.Error())
- }
- for rows.Next() {
- if err := rows.Scan(&val); err != nil {
- dbt.Error(err)
- } else if val != 42 {
- dbt.Errorf("expected val to be 42")
- }
- }
- rows.Close()
-
- // binary protocol
- rows, err = dbt.db.Query("CALL test_signal(?)", 42)
- if err != nil {
- dbt.Fatalf("error on binary query: %s", err.Error())
- }
- for rows.Next() {
- if err := rows.Scan(&val); err != nil {
- dbt.Error(err)
- } else if val != 42 {
- dbt.Errorf("expected val to be 42")
- }
- }
- rows.Close()
- })
-}
-
-func TestColumnsReusesSlice(t *testing.T) {
- rows := mysqlRows{
- rs: resultSet{
- columns: []mysqlField{
- {
- tableName: "test",
- name: "A",
- },
- {
- tableName: "test",
- name: "B",
- },
- },
- },
- }
-
- allocs := testing.AllocsPerRun(1, func() {
- cols := rows.Columns()
-
- if len(cols) != 2 {
- t.Fatalf("expected 2 columns, got %d", len(cols))
- }
- })
-
- if allocs != 0 {
- t.Fatalf("expected 0 allocations, got %d", int(allocs))
- }
-
- if rows.rs.columnNames == nil {
- t.Fatalf("expected columnNames to be set, got nil")
- }
-}
-
-func TestRejectReadOnly(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- // Create Table
- dbt.mustExec("CREATE TABLE test (value BOOL)")
- // Set the session to read-only. We didn't set the `rejectReadOnly`
- // option, so any writes after this should fail.
- _, err := dbt.db.Exec("SET SESSION TRANSACTION READ ONLY")
- // Error 1193: Unknown system variable 'TRANSACTION' => skip test,
- // MySQL server version is too old
- maybeSkip(t, err, 1193)
- if _, err := dbt.db.Exec("DROP TABLE test"); err == nil {
- t.Fatalf("writing to DB in read-only session without " +
- "rejectReadOnly did not error")
- }
- // Set the session back to read-write so runTests() can properly clean
- // up the table `test`.
- dbt.mustExec("SET SESSION TRANSACTION READ WRITE")
- })
-
- // Enable the `rejectReadOnly` option.
- runTests(t, dsn+"&rejectReadOnly=true", func(dbt *DBTest) {
- // Create Table
- dbt.mustExec("CREATE TABLE test (value BOOL)")
- // Set the session to read only. Any writes after this should error on
- // a driver.ErrBadConn, and cause `database/sql` to initiate a new
- // connection.
- dbt.mustExec("SET SESSION TRANSACTION READ ONLY")
- // This would error, but `database/sql` should automatically retry on a
- // new connection which is not read-only, and eventually succeed.
- dbt.mustExec("DROP TABLE test")
- })
-}
-
-func TestPing(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- if err := dbt.db.Ping(); err != nil {
- dbt.fail("Ping", "Ping", err)
- }
- })
-}
-
-// See Issue #799
-func TestEmptyPassword(t *testing.T) {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- dsn := fmt.Sprintf("%s:%s@%s/%s?timeout=30s", user, "", netAddr, dbname)
- db, err := sql.Open("mysql", dsn)
- if err == nil {
- defer db.Close()
- err = db.Ping()
- }
-
- if pass == "" {
- if err != nil {
- t.Fatal(err.Error())
- }
- } else {
- if err == nil {
- t.Fatal("expected authentication error")
- }
- if !strings.HasPrefix(err.Error(), "Error 1045") {
- t.Fatal(err.Error())
- }
- }
-}
-
-// static interface implementation checks of mysqlConn
-var (
- _ driver.ConnBeginTx = &mysqlConn{}
- _ driver.ConnPrepareContext = &mysqlConn{}
- _ driver.ExecerContext = &mysqlConn{}
- _ driver.Pinger = &mysqlConn{}
- _ driver.QueryerContext = &mysqlConn{}
-)
-
-// static interface implementation checks of mysqlStmt
-var (
- _ driver.StmtExecContext = &mysqlStmt{}
- _ driver.StmtQueryContext = &mysqlStmt{}
-)
-
-// Ensure that all the driver interfaces are implemented
-var (
- // _ driver.RowsColumnTypeLength = &binaryRows{}
- // _ driver.RowsColumnTypeLength = &textRows{}
- _ driver.RowsColumnTypeDatabaseTypeName = &binaryRows{}
- _ driver.RowsColumnTypeDatabaseTypeName = &textRows{}
- _ driver.RowsColumnTypeNullable = &binaryRows{}
- _ driver.RowsColumnTypeNullable = &textRows{}
- _ driver.RowsColumnTypePrecisionScale = &binaryRows{}
- _ driver.RowsColumnTypePrecisionScale = &textRows{}
- _ driver.RowsColumnTypeScanType = &binaryRows{}
- _ driver.RowsColumnTypeScanType = &textRows{}
- _ driver.RowsNextResultSet = &binaryRows{}
- _ driver.RowsNextResultSet = &textRows{}
-)
-
-func TestMultiResultSet(t *testing.T) {
- type result struct {
- values [][]int
- columns []string
- }
-
- // checkRows is a helper test function to validate rows containing 3 result
- // sets with specific values and columns. The basic query would look like this:
- //
- // SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
- // SELECT 0 UNION SELECT 1;
- // SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
- //
- // to distinguish test cases the first string argument is put in front of
- // every error or fatal message.
- checkRows := func(desc string, rows *sql.Rows, dbt *DBTest) {
- expected := []result{
- {
- values: [][]int{{1, 2}, {3, 4}},
- columns: []string{"col1", "col2"},
- },
- {
- values: [][]int{{1, 2, 3}, {4, 5, 6}},
- columns: []string{"col1", "col2", "col3"},
- },
- }
-
- var res1 result
- for rows.Next() {
- var res [2]int
- if err := rows.Scan(&res[0], &res[1]); err != nil {
- dbt.Fatal(err)
- }
- res1.values = append(res1.values, res[:])
- }
-
- cols, err := rows.Columns()
- if err != nil {
- dbt.Fatal(desc, err)
- }
- res1.columns = cols
-
- if !reflect.DeepEqual(expected[0], res1) {
- dbt.Error(desc, "want =", expected[0], "got =", res1)
- }
-
- if !rows.NextResultSet() {
- dbt.Fatal(desc, "expected next result set")
- }
-
- // ignoring one result set
-
- if !rows.NextResultSet() {
- dbt.Fatal(desc, "expected next result set")
- }
-
- var res2 result
- cols, err = rows.Columns()
- if err != nil {
- dbt.Fatal(desc, err)
- }
- res2.columns = cols
-
- for rows.Next() {
- var res [3]int
- if err := rows.Scan(&res[0], &res[1], &res[2]); err != nil {
- dbt.Fatal(desc, err)
- }
- res2.values = append(res2.values, res[:])
- }
-
- if !reflect.DeepEqual(expected[1], res2) {
- dbt.Error(desc, "want =", expected[1], "got =", res2)
- }
-
- if rows.NextResultSet() {
- dbt.Error(desc, "unexpected next result set")
- }
-
- if err := rows.Err(); err != nil {
- dbt.Error(desc, err)
- }
- }
-
- runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
- rows := dbt.mustQuery(`DO 1;
- SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
- DO 1;
- SELECT 0 UNION SELECT 1;
- SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;`)
- defer rows.Close()
- checkRows("query: ", rows, dbt)
- })
-
- runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
- queries := []string{
- `
- DROP PROCEDURE IF EXISTS test_mrss;
- CREATE PROCEDURE test_mrss()
- BEGIN
- DO 1;
- SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
- DO 1;
- SELECT 0 UNION SELECT 1;
- SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
- END
- `,
- `
- DROP PROCEDURE IF EXISTS test_mrss;
- CREATE PROCEDURE test_mrss()
- BEGIN
- SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
- SELECT 0 UNION SELECT 1;
- SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
- END
- `,
- }
-
- defer dbt.mustExec("DROP PROCEDURE IF EXISTS test_mrss")
-
- for i, query := range queries {
- dbt.mustExec(query)
-
- stmt, err := dbt.db.Prepare("CALL test_mrss()")
- if err != nil {
- dbt.Fatalf("%v (i=%d)", err, i)
- }
- defer stmt.Close()
-
- for j := 0; j < 2; j++ {
- rows, err := stmt.Query()
- if err != nil {
- dbt.Fatalf("%v (i=%d) (j=%d)", err, i, j)
- }
- checkRows(fmt.Sprintf("prepared stmt query (i=%d) (j=%d): ", i, j), rows, dbt)
- }
- }
- })
-}
-
-func TestMultiResultSetNoSelect(t *testing.T) {
- runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
- rows := dbt.mustQuery("DO 1; DO 2;")
- defer rows.Close()
-
- if rows.Next() {
- dbt.Error("unexpected row")
- }
-
- if rows.NextResultSet() {
- dbt.Error("unexpected next result set")
- }
-
- if err := rows.Err(); err != nil {
- dbt.Error("expected nil; got ", err)
- }
- })
-}
-
-// tests if rows are set in a proper state if some results were ignored before
-// calling rows.NextResultSet.
-func TestSkipResults(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- rows := dbt.mustQuery("SELECT 1, 2")
- defer rows.Close()
-
- if !rows.Next() {
- dbt.Error("expected row")
- }
-
- if rows.NextResultSet() {
- dbt.Error("unexpected next result set")
- }
-
- if err := rows.Err(); err != nil {
- dbt.Error("expected nil; got ", err)
- }
- })
-}
-
-func TestPingContext(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- ctx, cancel := context.WithCancel(context.Background())
- cancel()
- if err := dbt.db.PingContext(ctx); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- })
-}
-
-func TestContextCancelExec(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- ctx, cancel := context.WithCancel(context.Background())
-
- // Delay execution for just a bit until db.ExecContext has begun.
- defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
-
- // This query will be canceled.
- startTime := time.Now()
- if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- if d := time.Since(startTime); d > 500*time.Millisecond {
- dbt.Errorf("too long execution time: %s", d)
- }
-
- // Wait for the INSERT query to be done.
- time.Sleep(time.Second)
-
- // Check how many times the query is executed.
- var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- if v != 1 { // TODO: need to kill the query, and v should be 0.
- dbt.Skipf("[WARN] expected val to be 1, got %d", v)
- }
-
- // Context is already canceled, so error should come before execution.
- if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (1)"); err == nil {
- dbt.Error("expected error")
- } else if err.Error() != "context canceled" {
- dbt.Fatalf("unexpected error: %s", err)
- }
-
- // The second insert query will fail, so the table has no changes.
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- if v != 1 {
- dbt.Skipf("[WARN] expected val to be 1, got %d", v)
- }
- })
-}
-
-func TestContextCancelQuery(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- ctx, cancel := context.WithCancel(context.Background())
-
- // Delay execution for just a bit until db.ExecContext has begun.
- defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
-
- // This query will be canceled.
- startTime := time.Now()
- if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- if d := time.Since(startTime); d > 500*time.Millisecond {
- dbt.Errorf("too long execution time: %s", d)
- }
-
- // Wait for the INSERT query to be done.
- time.Sleep(time.Second)
-
- // Check how many times the query is executed.
- var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- if v != 1 { // TODO: need to kill the query, and v should be 0.
- dbt.Skipf("[WARN] expected val to be 1, got %d", v)
- }
-
- // Context is already canceled, so error should come before execution.
- if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (1)"); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
-
- // The second insert query will fail, so the table has no changes.
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- if v != 1 {
- dbt.Skipf("[WARN] expected val to be 1, got %d", v)
- }
- })
-}
-
-func TestContextCancelQueryRow(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- dbt.mustExec("INSERT INTO test VALUES (1), (2), (3)")
- ctx, cancel := context.WithCancel(context.Background())
-
- rows, err := dbt.db.QueryContext(ctx, "SELECT v FROM test")
- if err != nil {
- dbt.Fatalf("%s", err.Error())
- }
-
- // the first row will be succeed.
- var v int
- if !rows.Next() {
- dbt.Fatalf("unexpected end")
- }
- if err := rows.Scan(&v); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
-
- cancel()
- // make sure the driver receives the cancel request.
- time.Sleep(100 * time.Millisecond)
-
- if rows.Next() {
- dbt.Errorf("expected end, but not")
- }
- if err := rows.Err(); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- })
-}
-
-func TestContextCancelPrepare(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- ctx, cancel := context.WithCancel(context.Background())
- cancel()
- if _, err := dbt.db.PrepareContext(ctx, "SELECT 1"); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- })
-}
-
-func TestContextCancelStmtExec(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- ctx, cancel := context.WithCancel(context.Background())
- stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
- if err != nil {
- dbt.Fatalf("unexpected error: %v", err)
- }
-
- // Delay execution for just a bit until db.ExecContext has begun.
- defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
-
- // This query will be canceled.
- startTime := time.Now()
- if _, err := stmt.ExecContext(ctx); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- if d := time.Since(startTime); d > 500*time.Millisecond {
- dbt.Errorf("too long execution time: %s", d)
- }
-
- // Wait for the INSERT query to be done.
- time.Sleep(time.Second)
-
- // Check how many times the query is executed.
- var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- if v != 1 { // TODO: need to kill the query, and v should be 0.
- dbt.Skipf("[WARN] expected val to be 1, got %d", v)
- }
- })
-}
-
-func TestContextCancelStmtQuery(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- ctx, cancel := context.WithCancel(context.Background())
- stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
- if err != nil {
- dbt.Fatalf("unexpected error: %v", err)
- }
-
- // Delay execution for just a bit until db.ExecContext has begun.
- defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
-
- // This query will be canceled.
- startTime := time.Now()
- if _, err := stmt.QueryContext(ctx); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- if d := time.Since(startTime); d > 500*time.Millisecond {
- dbt.Errorf("too long execution time: %s", d)
- }
-
- // Wait for the INSERT query has done.
- time.Sleep(time.Second)
-
- // Check how many times the query is executed.
- var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- if v != 1 { // TODO: need to kill the query, and v should be 0.
- dbt.Skipf("[WARN] expected val to be 1, got %d", v)
- }
- })
-}
-
-func TestContextCancelBegin(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- ctx, cancel := context.WithCancel(context.Background())
- tx, err := dbt.db.BeginTx(ctx, nil)
- if err != nil {
- dbt.Fatal(err)
- }
-
- // Delay execution for just a bit until db.ExecContext has begun.
- defer time.AfterFunc(100*time.Millisecond, cancel).Stop()
-
- // This query will be canceled.
- startTime := time.Now()
- if _, err := tx.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- if d := time.Since(startTime); d > 500*time.Millisecond {
- dbt.Errorf("too long execution time: %s", d)
- }
-
- // Transaction is canceled, so expect an error.
- switch err := tx.Commit(); err {
- case sql.ErrTxDone:
- // because the transaction has already been rollbacked.
- // the database/sql package watches ctx
- // and rollbacks when ctx is canceled.
- case context.Canceled:
- // the database/sql package rollbacks on another goroutine,
- // so the transaction may not be rollbacked depending on goroutine scheduling.
- default:
- dbt.Errorf("expected sql.ErrTxDone or context.Canceled, got %v", err)
- }
-
- // Context is canceled, so cannot begin a transaction.
- if _, err := dbt.db.BeginTx(ctx, nil); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- })
-}
-
-func TestContextBeginIsolationLevel(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- tx1, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
- Isolation: sql.LevelRepeatableRead,
- })
- if err != nil {
- dbt.Fatal(err)
- }
-
- tx2, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
- Isolation: sql.LevelReadCommitted,
- })
- if err != nil {
- dbt.Fatal(err)
- }
-
- _, err = tx1.ExecContext(ctx, "INSERT INTO test VALUES (1)")
- if err != nil {
- dbt.Fatal(err)
- }
-
- var v int
- row := tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
- if err := row.Scan(&v); err != nil {
- dbt.Fatal(err)
- }
- // Because writer transaction wasn't commited yet, it should be available
- if v != 0 {
- dbt.Errorf("expected val to be 0, got %d", v)
- }
-
- err = tx1.Commit()
- if err != nil {
- dbt.Fatal(err)
- }
-
- row = tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
- if err := row.Scan(&v); err != nil {
- dbt.Fatal(err)
- }
- // Data written by writer transaction is already commited, it should be selectable
- if v != 1 {
- dbt.Errorf("expected val to be 1, got %d", v)
- }
- tx2.Commit()
- })
-}
-
-func TestContextBeginReadOnly(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- tx, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
- ReadOnly: true,
- })
- if _, ok := err.(*MySQLError); ok {
- dbt.Skip("It seems that your MySQL does not support READ ONLY transactions")
- return
- } else if err != nil {
- dbt.Fatal(err)
- }
-
- // INSERT queries fail in a READ ONLY transaction.
- _, err = tx.ExecContext(ctx, "INSERT INTO test VALUES (1)")
- if _, ok := err.(*MySQLError); !ok {
- dbt.Errorf("expected MySQLError, got %v", err)
- }
-
- // SELECT queries can be executed.
- var v int
- row := tx.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
- if err := row.Scan(&v); err != nil {
- dbt.Fatal(err)
- }
- if v != 0 {
- dbt.Errorf("expected val to be 0, got %d", v)
- }
-
- if err := tx.Commit(); err != nil {
- dbt.Fatal(err)
- }
- })
-}
-
-func TestRowsColumnTypes(t *testing.T) {
- niNULL := sql.NullInt64{Int64: 0, Valid: false}
- ni0 := sql.NullInt64{Int64: 0, Valid: true}
- ni1 := sql.NullInt64{Int64: 1, Valid: true}
- ni42 := sql.NullInt64{Int64: 42, Valid: true}
- nfNULL := sql.NullFloat64{Float64: 0.0, Valid: false}
- nf0 := sql.NullFloat64{Float64: 0.0, Valid: true}
- nf1337 := sql.NullFloat64{Float64: 13.37, Valid: true}
- nt0 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC), Valid: true}
- nt1 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 100000000, time.UTC), Valid: true}
- nt2 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 110000000, time.UTC), Valid: true}
- nt6 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 111111000, time.UTC), Valid: true}
- nd1 := NullTime{Time: time.Date(2006, 01, 02, 0, 0, 0, 0, time.UTC), Valid: true}
- nd2 := NullTime{Time: time.Date(2006, 03, 04, 0, 0, 0, 0, time.UTC), Valid: true}
- ndNULL := NullTime{Time: time.Time{}, Valid: false}
- rbNULL := sql.RawBytes(nil)
- rb0 := sql.RawBytes("0")
- rb42 := sql.RawBytes("42")
- rbTest := sql.RawBytes("Test")
- rb0pad4 := sql.RawBytes("0\x00\x00\x00") // BINARY right-pads values with 0x00
- rbx0 := sql.RawBytes("\x00")
- rbx42 := sql.RawBytes("\x42")
-
- var columns = []struct {
- name string
- fieldType string // type used when creating table schema
- databaseTypeName string // actual type used by MySQL
- scanType reflect.Type
- nullable bool
- precision int64 // 0 if not ok
- scale int64
- valuesIn [3]string
- valuesOut [3]interface{}
- }{
- {"bit8null", "BIT(8)", "BIT", scanTypeRawBytes, true, 0, 0, [3]string{"0x0", "NULL", "0x42"}, [3]interface{}{rbx0, rbNULL, rbx42}},
- {"boolnull", "BOOL", "TINYINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "true", "0"}, [3]interface{}{niNULL, ni1, ni0}},
- {"bool", "BOOL NOT NULL", "TINYINT", scanTypeInt8, false, 0, 0, [3]string{"1", "0", "FALSE"}, [3]interface{}{int8(1), int8(0), int8(0)}},
- {"intnull", "INTEGER", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
- {"smallint", "SMALLINT NOT NULL", "SMALLINT", scanTypeInt16, false, 0, 0, [3]string{"0", "-32768", "32767"}, [3]interface{}{int16(0), int16(-32768), int16(32767)}},
- {"smallintnull", "SMALLINT", "SMALLINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
- {"int3null", "INT(3)", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
- {"int7", "INT(7) NOT NULL", "INT", scanTypeInt32, false, 0, 0, [3]string{"0", "-1337", "42"}, [3]interface{}{int32(0), int32(-1337), int32(42)}},
- {"mediumintnull", "MEDIUMINT", "MEDIUMINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "42", "NULL"}, [3]interface{}{ni0, ni42, niNULL}},
- {"bigint", "BIGINT NOT NULL", "BIGINT", scanTypeInt64, false, 0, 0, [3]string{"0", "65535", "-42"}, [3]interface{}{int64(0), int64(65535), int64(-42)}},
- {"bigintnull", "BIGINT", "BIGINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "1", "42"}, [3]interface{}{niNULL, ni1, ni42}},
- {"tinyuint", "TINYINT UNSIGNED NOT NULL", "TINYINT", scanTypeUint8, false, 0, 0, [3]string{"0", "255", "42"}, [3]interface{}{uint8(0), uint8(255), uint8(42)}},
- {"smalluint", "SMALLINT UNSIGNED NOT NULL", "SMALLINT", scanTypeUint16, false, 0, 0, [3]string{"0", "65535", "42"}, [3]interface{}{uint16(0), uint16(65535), uint16(42)}},
- {"biguint", "BIGINT UNSIGNED NOT NULL", "BIGINT", scanTypeUint64, false, 0, 0, [3]string{"0", "65535", "42"}, [3]interface{}{uint64(0), uint64(65535), uint64(42)}},
- {"uint13", "INT(13) UNSIGNED NOT NULL", "INT", scanTypeUint32, false, 0, 0, [3]string{"0", "1337", "42"}, [3]interface{}{uint32(0), uint32(1337), uint32(42)}},
- {"float", "FLOAT NOT NULL", "FLOAT", scanTypeFloat32, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]interface{}{float32(0), float32(42), float32(13.37)}},
- {"floatnull", "FLOAT", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
- {"float74null", "FLOAT(7,4)", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, 4, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
- {"double", "DOUBLE NOT NULL", "DOUBLE", scanTypeFloat64, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]interface{}{float64(0), float64(42), float64(13.37)}},
- {"doublenull", "DOUBLE", "DOUBLE", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
- {"decimal1", "DECIMAL(10,6) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 10, 6, [3]string{"0", "13.37", "1234.123456"}, [3]interface{}{sql.RawBytes("0.000000"), sql.RawBytes("13.370000"), sql.RawBytes("1234.123456")}},
- {"decimal1null", "DECIMAL(10,6)", "DECIMAL", scanTypeRawBytes, true, 10, 6, [3]string{"0", "NULL", "1234.123456"}, [3]interface{}{sql.RawBytes("0.000000"), rbNULL, sql.RawBytes("1234.123456")}},
- {"decimal2", "DECIMAL(8,4) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 8, 4, [3]string{"0", "13.37", "1234.123456"}, [3]interface{}{sql.RawBytes("0.0000"), sql.RawBytes("13.3700"), sql.RawBytes("1234.1235")}},
- {"decimal2null", "DECIMAL(8,4)", "DECIMAL", scanTypeRawBytes, true, 8, 4, [3]string{"0", "NULL", "1234.123456"}, [3]interface{}{sql.RawBytes("0.0000"), rbNULL, sql.RawBytes("1234.1235")}},
- {"decimal3", "DECIMAL(5,0) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 5, 0, [3]string{"0", "13.37", "-12345.123456"}, [3]interface{}{rb0, sql.RawBytes("13"), sql.RawBytes("-12345")}},
- {"decimal3null", "DECIMAL(5,0)", "DECIMAL", scanTypeRawBytes, true, 5, 0, [3]string{"0", "NULL", "-12345.123456"}, [3]interface{}{rb0, rbNULL, sql.RawBytes("-12345")}},
- {"char25null", "CHAR(25)", "CHAR", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"varchar42", "VARCHAR(42) NOT NULL", "VARCHAR", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"binary4null", "BINARY(4)", "BINARY", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0pad4, rbNULL, rbTest}},
- {"varbinary42", "VARBINARY(42) NOT NULL", "VARBINARY", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"tinyblobnull", "TINYBLOB", "BLOB", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"tinytextnull", "TINYTEXT", "TEXT", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"blobnull", "BLOB", "BLOB", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"textnull", "TEXT", "TEXT", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"mediumblob", "MEDIUMBLOB NOT NULL", "BLOB", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"mediumtext", "MEDIUMTEXT NOT NULL", "TEXT", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"longblob", "LONGBLOB NOT NULL", "BLOB", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"longtext", "LONGTEXT NOT NULL", "TEXT", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"datetime", "DATETIME", "DATETIME", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt0, nt0}},
- {"datetime2", "DATETIME(2)", "DATETIME", scanTypeNullTime, true, 2, 2, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt1, nt2}},
- {"datetime6", "DATETIME(6)", "DATETIME", scanTypeNullTime, true, 6, 6, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt1, nt6}},
- {"date", "DATE", "DATE", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02'", "NULL", "'2006-03-04'"}, [3]interface{}{nd1, ndNULL, nd2}},
- {"year", "YEAR NOT NULL", "YEAR", scanTypeUint16, false, 0, 0, [3]string{"2006", "2000", "1994"}, [3]interface{}{uint16(2006), uint16(2000), uint16(1994)}},
- }
-
- schema := ""
- values1 := ""
- values2 := ""
- values3 := ""
- for _, column := range columns {
- schema += fmt.Sprintf("`%s` %s, ", column.name, column.fieldType)
- values1 += column.valuesIn[0] + ", "
- values2 += column.valuesIn[1] + ", "
- values3 += column.valuesIn[2] + ", "
- }
- schema = schema[:len(schema)-2]
- values1 = values1[:len(values1)-2]
- values2 = values2[:len(values2)-2]
- values3 = values3[:len(values3)-2]
-
- dsns := []string{
- dsn + "&parseTime=true",
- dsn + "&parseTime=false",
- }
- for _, testdsn := range dsns {
- runTests(t, testdsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (" + schema + ")")
- dbt.mustExec("INSERT INTO test VALUES (" + values1 + "), (" + values2 + "), (" + values3 + ")")
-
- rows, err := dbt.db.Query("SELECT * FROM test")
- if err != nil {
- t.Fatalf("Query: %v", err)
- }
-
- tt, err := rows.ColumnTypes()
- if err != nil {
- t.Fatalf("ColumnTypes: %v", err)
- }
-
- if len(tt) != len(columns) {
- t.Fatalf("unexpected number of columns: expected %d, got %d", len(columns), len(tt))
- }
-
- types := make([]reflect.Type, len(tt))
- for i, tp := range tt {
- column := columns[i]
-
- // Name
- name := tp.Name()
- if name != column.name {
- t.Errorf("column name mismatch %s != %s", name, column.name)
- continue
- }
-
- // DatabaseTypeName
- databaseTypeName := tp.DatabaseTypeName()
- if databaseTypeName != column.databaseTypeName {
- t.Errorf("databasetypename name mismatch for column %q: %s != %s", name, databaseTypeName, column.databaseTypeName)
- continue
- }
-
- // ScanType
- scanType := tp.ScanType()
- if scanType != column.scanType {
- if scanType == nil {
- t.Errorf("scantype is null for column %q", name)
- } else {
- t.Errorf("scantype mismatch for column %q: %s != %s", name, scanType.Name(), column.scanType.Name())
- }
- continue
- }
- types[i] = scanType
-
- // Nullable
- nullable, ok := tp.Nullable()
- if !ok {
- t.Errorf("nullable not ok %q", name)
- continue
- }
- if nullable != column.nullable {
- t.Errorf("nullable mismatch for column %q: %t != %t", name, nullable, column.nullable)
- }
-
- // Length
- // length, ok := tp.Length()
- // if length != column.length {
- // if !ok {
- // t.Errorf("length not ok for column %q", name)
- // } else {
- // t.Errorf("length mismatch for column %q: %d != %d", name, length, column.length)
- // }
- // continue
- // }
-
- // Precision and Scale
- precision, scale, ok := tp.DecimalSize()
- if precision != column.precision {
- if !ok {
- t.Errorf("precision not ok for column %q", name)
- } else {
- t.Errorf("precision mismatch for column %q: %d != %d", name, precision, column.precision)
- }
- continue
- }
- if scale != column.scale {
- if !ok {
- t.Errorf("scale not ok for column %q", name)
- } else {
- t.Errorf("scale mismatch for column %q: %d != %d", name, scale, column.scale)
- }
- continue
- }
- }
-
- values := make([]interface{}, len(tt))
- for i := range values {
- values[i] = reflect.New(types[i]).Interface()
- }
- i := 0
- for rows.Next() {
- err = rows.Scan(values...)
- if err != nil {
- t.Fatalf("failed to scan values in %v", err)
- }
- for j := range values {
- value := reflect.ValueOf(values[j]).Elem().Interface()
- if !reflect.DeepEqual(value, columns[j].valuesOut[i]) {
- if columns[j].scanType == scanTypeRawBytes {
- t.Errorf("row %d, column %d: %v != %v", i, j, string(value.(sql.RawBytes)), string(columns[j].valuesOut[i].(sql.RawBytes)))
- } else {
- t.Errorf("row %d, column %d: %v != %v", i, j, value, columns[j].valuesOut[i])
- }
- }
- }
- i++
- }
- if i != 3 {
- t.Errorf("expected 3 rows, got %d", i)
- }
-
- if err := rows.Close(); err != nil {
- t.Errorf("error closing rows: %s", err)
- }
- })
- }
-}
-
-func TestValuerWithValueReceiverGivenNilValue(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (value VARCHAR(255))")
- dbt.db.Exec("INSERT INTO test VALUES (?)", (*testValuer)(nil))
- // This test will panic on the INSERT if ConvertValue() does not check for typed nil before calling Value()
- })
-}
-
-// TestRawBytesAreNotModified checks for a race condition that arises when a query context
-// is canceled while a user is calling rows.Scan. This is a more stringent test than the one
-// proposed in https://github.com/golang/go/issues/23519. Here we're explicitly using
-// `sql.RawBytes` to check the contents of our internal buffers are not modified after an implicit
-// call to `Rows.Close`, so Context cancellation should **not** invalidate the backing buffers.
-func TestRawBytesAreNotModified(t *testing.T) {
- const blob = "abcdefghijklmnop"
- const contextRaceIterations = 20
- const blobSize = defaultBufSize * 3 / 4 // Second row overwrites first row.
- const insertRows = 4
-
- var sqlBlobs = [2]string{
- strings.Repeat(blob, blobSize/len(blob)),
- strings.Repeat(strings.ToUpper(blob), blobSize/len(blob)),
- }
-
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8")
- for i := 0; i < insertRows; i++ {
- dbt.mustExec("INSERT INTO test VALUES (?, ?)", i+1, sqlBlobs[i&1])
- }
-
- for i := 0; i < contextRaceIterations; i++ {
- func() {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- rows, err := dbt.db.QueryContext(ctx, `SELECT id, value FROM test`)
- if err != nil {
- t.Fatal(err)
- }
-
- var b int
- var raw sql.RawBytes
- for rows.Next() {
- if err := rows.Scan(&b, &raw); err != nil {
- t.Fatal(err)
- }
-
- before := string(raw)
- // Ensure cancelling the query does not corrupt the contents of `raw`
- cancel()
- time.Sleep(time.Microsecond * 100)
- after := string(raw)
-
- if before != after {
- t.Fatalf("the backing storage for sql.RawBytes has been modified (i=%v)", i)
- }
- }
- rows.Close()
- }()
- }
- })
-}
-
-var _ driver.DriverContext = &MySQLDriver{}
-
-type dialCtxKey struct{}
-
-func TestConnectorObeysDialTimeouts(t *testing.T) {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- RegisterDialContext("dialctxtest", func(ctx context.Context, addr string) (net.Conn, error) {
- var d net.Dialer
- if !ctx.Value(dialCtxKey{}).(bool) {
- return nil, fmt.Errorf("test error: query context is not propagated to our dialer")
- }
- return d.DialContext(ctx, prot, addr)
- })
-
- db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@dialctxtest(%s)/%s?timeout=30s", user, pass, addr, dbname))
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db.Close()
-
- ctx := context.WithValue(context.Background(), dialCtxKey{}, true)
-
- _, err = db.ExecContext(ctx, "DO 1")
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func configForTests(t *testing.T) *Config {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- mycnf := NewConfig()
- mycnf.User = user
- mycnf.Passwd = pass
- mycnf.Addr = addr
- mycnf.Net = prot
- mycnf.DBName = dbname
- return mycnf
-}
-
-func TestNewConnector(t *testing.T) {
- mycnf := configForTests(t)
- conn, err := NewConnector(mycnf)
- if err != nil {
- t.Fatal(err)
- }
-
- db := sql.OpenDB(conn)
- defer db.Close()
-
- if err := db.Ping(); err != nil {
- t.Fatal(err)
- }
-}
-
-type slowConnection struct {
- net.Conn
- slowdown time.Duration
-}
-
-func (sc *slowConnection) Read(b []byte) (int, error) {
- time.Sleep(sc.slowdown)
- return sc.Conn.Read(b)
-}
-
-type connectorHijack struct {
- driver.Connector
- connErr error
-}
-
-func (cw *connectorHijack) Connect(ctx context.Context) (driver.Conn, error) {
- var conn driver.Conn
- conn, cw.connErr = cw.Connector.Connect(ctx)
- return conn, cw.connErr
-}
-
-func TestConnectorTimeoutsDuringOpen(t *testing.T) {
- RegisterDialContext("slowconn", func(ctx context.Context, addr string) (net.Conn, error) {
- var d net.Dialer
- conn, err := d.DialContext(ctx, prot, addr)
- if err != nil {
- return nil, err
- }
- return &slowConnection{Conn: conn, slowdown: 100 * time.Millisecond}, nil
- })
-
- mycnf := configForTests(t)
- mycnf.Net = "slowconn"
-
- conn, err := NewConnector(mycnf)
- if err != nil {
- t.Fatal(err)
- }
-
- hijack := &connectorHijack{Connector: conn}
-
- db := sql.OpenDB(hijack)
- defer db.Close()
-
- ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
- defer cancel()
-
- _, err = db.ExecContext(ctx, "DO 1")
- if err != context.DeadlineExceeded {
- t.Fatalf("ExecContext should have timed out")
- }
- if hijack.connErr != context.DeadlineExceeded {
- t.Fatalf("(*Connector).Connect should have timed out")
- }
-}
-
-// A connection which can only be closed.
-type dummyConnection struct {
- net.Conn
- closed bool
-}
-
-func (d *dummyConnection) Close() error {
- d.closed = true
- return nil
-}
-
-func TestConnectorTimeoutsWatchCancel(t *testing.T) {
- var (
- cancel func() // Used to cancel the context just after connecting.
- created *dummyConnection // The created connection.
- )
-
- RegisterDialContext("TestConnectorTimeoutsWatchCancel", func(ctx context.Context, addr string) (net.Conn, error) {
- // Canceling at this time triggers the watchCancel error branch in Connect().
- cancel()
- created = &dummyConnection{}
- return created, nil
- })
-
- mycnf := NewConfig()
- mycnf.User = "root"
- mycnf.Addr = "foo"
- mycnf.Net = "TestConnectorTimeoutsWatchCancel"
-
- conn, err := NewConnector(mycnf)
- if err != nil {
- t.Fatal(err)
- }
-
- db := sql.OpenDB(conn)
- defer db.Close()
-
- var ctx context.Context
- ctx, cancel = context.WithCancel(context.Background())
- defer cancel()
-
- if _, err := db.Conn(ctx); err != context.Canceled {
- t.Errorf("got %v, want context.Canceled", err)
- }
-
- if created == nil {
- t.Fatal("no connection created")
- }
- if !created.closed {
- t.Errorf("connection not closed")
- }
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
index 75c8c24..93f3548 100644
--- a/vendor/github.com/go-sql-driver/mysql/dsn.go
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -375,7 +375,7 @@ func parseDSNParams(cfg *Config, params string) (err error) {
// cfg params
switch value := param[1]; param[0] {
- // Disable INFILE whitelist / enable all files
+ // Disable INFILE allowlist / enable all files
case "allowAllFiles":
var isBool bool
cfg.AllowAllFiles, isBool = readBool(value)
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn_test.go b/vendor/github.com/go-sql-driver/mysql/dsn_test.go
deleted file mode 100644
index 89815b3..0000000
--- a/vendor/github.com/go-sql-driver/mysql/dsn_test.go
+++ /dev/null
@@ -1,415 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "crypto/tls"
- "fmt"
- "net/url"
- "reflect"
- "testing"
- "time"
-)
-
-var testDSNs = []struct {
- in string
- out *Config
-}{{
- "username:password@protocol(address)/dbname?param=value",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
-}, {
- "username:password@protocol(address)/dbname?param=value&columnsWithAlias=true",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, ColumnsWithAlias: true},
-}, {
- "username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, ColumnsWithAlias: true, MultiStatements: true},
-}, {
- "user@unix(/path/to/socket)/dbname?charset=utf8",
- &Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
-}, {
- "user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, TLSConfig: "true"},
-}, {
- "user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true, TLSConfig: "skip-verify"},
-}, {
- "user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci&maxAllowedPacket=16777216&tls=false&allowCleartextPasswords=true&parseTime=true&rejectReadOnly=true",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, TLSConfig: "false", AllowCleartextPasswords: true, AllowNativePasswords: true, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, CheckConnLiveness: true, ClientFoundRows: true, MaxAllowedPacket: 16777216, ParseTime: true, RejectReadOnly: true},
-}, {
- "user:password@/dbname?allowNativePasswords=false&checkConnLiveness=false&maxAllowedPacket=0",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: 0, AllowNativePasswords: false, CheckConnLiveness: false},
-}, {
- "user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local",
- &Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.Local, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
-}, {
- "/dbname",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
-}, {
- "@/",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
-}, {
- "/",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
-}, {
- "",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
-}, {
- "user:p@/ssword@/",
- &Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
-}, {
- "unix/?arg=%2Fsome%2Fpath.ext",
- &Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
-}, {
- "tcp(127.0.0.1)/dbname",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
-}, {
- "tcp(de:ad:be:ef::ca:fe)/dbname",
- &Config{Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, CheckConnLiveness: true},
-},
-}
-
-func TestDSNParser(t *testing.T) {
- for i, tst := range testDSNs {
- cfg, err := ParseDSN(tst.in)
- if err != nil {
- t.Error(err.Error())
- }
-
- // pointer not static
- cfg.tls = nil
-
- if !reflect.DeepEqual(cfg, tst.out) {
- t.Errorf("%d. ParseDSN(%q) mismatch:\ngot %+v\nwant %+v", i, tst.in, cfg, tst.out)
- }
- }
-}
-
-func TestDSNParserInvalid(t *testing.T) {
- var invalidDSNs = []string{
- "@net(addr/", // no closing brace
- "@tcp(/", // no closing brace
- "tcp(/", // no closing brace
- "(/", // no closing brace
- "net(addr)//", // unescaped
- "User:pass@tcp(1.2.3.4:3306)", // no trailing slash
- "net()/", // unknown default addr
- //"/dbname?arg=/some/unescaped/path",
- }
-
- for i, tst := range invalidDSNs {
- if _, err := ParseDSN(tst); err == nil {
- t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst)
- }
- }
-}
-
-func TestDSNReformat(t *testing.T) {
- for i, tst := range testDSNs {
- dsn1 := tst.in
- cfg1, err := ParseDSN(dsn1)
- if err != nil {
- t.Error(err.Error())
- continue
- }
- cfg1.tls = nil // pointer not static
- res1 := fmt.Sprintf("%+v", cfg1)
-
- dsn2 := cfg1.FormatDSN()
- cfg2, err := ParseDSN(dsn2)
- if err != nil {
- t.Error(err.Error())
- continue
- }
- cfg2.tls = nil // pointer not static
- res2 := fmt.Sprintf("%+v", cfg2)
-
- if res1 != res2 {
- t.Errorf("%d. %q does not match %q", i, res2, res1)
- }
- }
-}
-
-func TestDSNServerPubKey(t *testing.T) {
- baseDSN := "User:password@tcp(localhost:5555)/dbname?serverPubKey="
-
- RegisterServerPubKey("testKey", testPubKeyRSA)
- defer DeregisterServerPubKey("testKey")
-
- tst := baseDSN + "testKey"
- cfg, err := ParseDSN(tst)
- if err != nil {
- t.Error(err.Error())
- }
-
- if cfg.ServerPubKey != "testKey" {
- t.Errorf("unexpected cfg.ServerPubKey value: %v", cfg.ServerPubKey)
- }
- if cfg.pubKey != testPubKeyRSA {
- t.Error("pub key pointer doesn't match")
- }
-
- // Key is missing
- tst = baseDSN + "invalid_name"
- cfg, err = ParseDSN(tst)
- if err == nil {
- t.Errorf("invalid name in DSN (%s) but did not error. Got config: %#v", tst, cfg)
- }
-}
-
-func TestDSNServerPubKeyQueryEscape(t *testing.T) {
- const name = "&%!:"
- dsn := "User:password@tcp(localhost:5555)/dbname?serverPubKey=" + url.QueryEscape(name)
-
- RegisterServerPubKey(name, testPubKeyRSA)
- defer DeregisterServerPubKey(name)
-
- cfg, err := ParseDSN(dsn)
- if err != nil {
- t.Error(err.Error())
- }
-
- if cfg.pubKey != testPubKeyRSA {
- t.Error("pub key pointer doesn't match")
- }
-}
-
-func TestDSNWithCustomTLS(t *testing.T) {
- baseDSN := "User:password@tcp(localhost:5555)/dbname?tls="
- tlsCfg := tls.Config{}
-
- RegisterTLSConfig("utils_test", &tlsCfg)
- defer DeregisterTLSConfig("utils_test")
-
- // Custom TLS is missing
- tst := baseDSN + "invalid_tls"
- cfg, err := ParseDSN(tst)
- if err == nil {
- t.Errorf("invalid custom TLS in DSN (%s) but did not error. Got config: %#v", tst, cfg)
- }
-
- tst = baseDSN + "utils_test"
-
- // Custom TLS with a server name
- name := "foohost"
- tlsCfg.ServerName = name
- cfg, err = ParseDSN(tst)
-
- if err != nil {
- t.Error(err.Error())
- } else if cfg.tls.ServerName != name {
- t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst)
- }
-
- // Custom TLS without a server name
- name = "localhost"
- tlsCfg.ServerName = ""
- cfg, err = ParseDSN(tst)
-
- if err != nil {
- t.Error(err.Error())
- } else if cfg.tls.ServerName != name {
- t.Errorf("did not get the correct ServerName (%s) parsing DSN (%s).", name, tst)
- } else if tlsCfg.ServerName != "" {
- t.Errorf("tlsCfg was mutated ServerName (%s) should be empty parsing DSN (%s).", name, tst)
- }
-}
-
-func TestDSNTLSConfig(t *testing.T) {
- expectedServerName := "example.com"
- dsn := "tcp(example.com:1234)/?tls=true"
-
- cfg, err := ParseDSN(dsn)
- if err != nil {
- t.Error(err.Error())
- }
- if cfg.tls == nil {
- t.Error("cfg.tls should not be nil")
- }
- if cfg.tls.ServerName != expectedServerName {
- t.Errorf("cfg.tls.ServerName should be %q, got %q (host with port)", expectedServerName, cfg.tls.ServerName)
- }
-
- dsn = "tcp(example.com)/?tls=true"
- cfg, err = ParseDSN(dsn)
- if err != nil {
- t.Error(err.Error())
- }
- if cfg.tls == nil {
- t.Error("cfg.tls should not be nil")
- }
- if cfg.tls.ServerName != expectedServerName {
- t.Errorf("cfg.tls.ServerName should be %q, got %q (host without port)", expectedServerName, cfg.tls.ServerName)
- }
-}
-
-func TestDSNWithCustomTLSQueryEscape(t *testing.T) {
- const configKey = "&%!:"
- dsn := "User:password@tcp(localhost:5555)/dbname?tls=" + url.QueryEscape(configKey)
- name := "foohost"
- tlsCfg := tls.Config{ServerName: name}
-
- RegisterTLSConfig(configKey, &tlsCfg)
- defer DeregisterTLSConfig(configKey)
-
- cfg, err := ParseDSN(dsn)
-
- if err != nil {
- t.Error(err.Error())
- } else if cfg.tls.ServerName != name {
- t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, dsn)
- }
-}
-
-func TestDSNUnsafeCollation(t *testing.T) {
- _, err := ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true")
- if err != errInvalidDSNUnsafeCollation {
- t.Errorf("expected %v, got %v", errInvalidDSNUnsafeCollation, err)
- }
-
- _, err = ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false")
- if err != nil {
- t.Errorf("expected %v, got %v", nil, err)
- }
-
- _, err = ParseDSN("/dbname?collation=gbk_chinese_ci")
- if err != nil {
- t.Errorf("expected %v, got %v", nil, err)
- }
-
- _, err = ParseDSN("/dbname?collation=ascii_bin&interpolateParams=true")
- if err != nil {
- t.Errorf("expected %v, got %v", nil, err)
- }
-
- _, err = ParseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true")
- if err != nil {
- t.Errorf("expected %v, got %v", nil, err)
- }
-
- _, err = ParseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true")
- if err != nil {
- t.Errorf("expected %v, got %v", nil, err)
- }
-
- _, err = ParseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true")
- if err != nil {
- t.Errorf("expected %v, got %v", nil, err)
- }
-}
-
-func TestParamsAreSorted(t *testing.T) {
- expected := "/dbname?interpolateParams=true&foobar=baz&quux=loo"
- cfg := NewConfig()
- cfg.DBName = "dbname"
- cfg.InterpolateParams = true
- cfg.Params = map[string]string{
- "quux": "loo",
- "foobar": "baz",
- }
- actual := cfg.FormatDSN()
- if actual != expected {
- t.Errorf("generic Config.Params were not sorted: want %#v, got %#v", expected, actual)
- }
-}
-
-func TestCloneConfig(t *testing.T) {
- RegisterServerPubKey("testKey", testPubKeyRSA)
- defer DeregisterServerPubKey("testKey")
-
- expectedServerName := "example.com"
- dsn := "tcp(example.com:1234)/?tls=true&foobar=baz&serverPubKey=testKey"
- cfg, err := ParseDSN(dsn)
- if err != nil {
- t.Fatal(err.Error())
- }
-
- cfg2 := cfg.Clone()
- if cfg == cfg2 {
- t.Errorf("Config.Clone did not create a separate config struct")
- }
-
- if cfg2.tls.ServerName != expectedServerName {
- t.Errorf("cfg.tls.ServerName should be %q, got %q (host with port)", expectedServerName, cfg.tls.ServerName)
- }
-
- cfg2.tls.ServerName = "example2.com"
- if cfg.tls.ServerName == cfg2.tls.ServerName {
- t.Errorf("changed cfg.tls.Server name should not propagate to original Config")
- }
-
- if _, ok := cfg2.Params["foobar"]; !ok {
- t.Errorf("cloned Config is missing custom params")
- }
-
- delete(cfg2.Params, "foobar")
-
- if _, ok := cfg.Params["foobar"]; !ok {
- t.Errorf("custom params in cloned Config should not propagate to original Config")
- }
-
- if !reflect.DeepEqual(cfg.pubKey, cfg2.pubKey) {
- t.Errorf("public key in Config should be identical")
- }
-}
-
-func TestNormalizeTLSConfig(t *testing.T) {
- tt := []struct {
- tlsConfig string
- want *tls.Config
- }{
- {"", nil},
- {"false", nil},
- {"true", &tls.Config{ServerName: "myserver"}},
- {"skip-verify", &tls.Config{InsecureSkipVerify: true}},
- {"preferred", &tls.Config{InsecureSkipVerify: true}},
- {"test_tls_config", &tls.Config{ServerName: "myServerName"}},
- }
-
- RegisterTLSConfig("test_tls_config", &tls.Config{ServerName: "myServerName"})
- defer func() { DeregisterTLSConfig("test_tls_config") }()
-
- for _, tc := range tt {
- t.Run(tc.tlsConfig, func(t *testing.T) {
- cfg := &Config{
- Addr: "myserver:3306",
- TLSConfig: tc.tlsConfig,
- }
-
- cfg.normalize()
-
- if cfg.tls == nil {
- if tc.want != nil {
- t.Fatal("wanted a tls config but got nil instead")
- }
- return
- }
-
- if cfg.tls.ServerName != tc.want.ServerName {
- t.Errorf("tls.ServerName doesn't match (want: '%s', got: '%s')",
- tc.want.ServerName, cfg.tls.ServerName)
- }
- if cfg.tls.InsecureSkipVerify != tc.want.InsecureSkipVerify {
- t.Errorf("tls.InsecureSkipVerify doesn't match (want: %T, got :%T)",
- tc.want.InsecureSkipVerify, cfg.tls.InsecureSkipVerify)
- }
- })
- }
-}
-
-func BenchmarkParseDSN(b *testing.B) {
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- for _, tst := range testDSNs {
- if _, err := ParseDSN(tst.in); err != nil {
- b.Error(err.Error())
- }
- }
- }
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/errors_test.go b/vendor/github.com/go-sql-driver/mysql/errors_test.go
deleted file mode 100644
index 96f9126..0000000
--- a/vendor/github.com/go-sql-driver/mysql/errors_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "log"
- "testing"
-)
-
-func TestErrorsSetLogger(t *testing.T) {
- previous := errLog
- defer func() {
- errLog = previous
- }()
-
- // set up logger
- const expected = "prefix: test\n"
- buffer := bytes.NewBuffer(make([]byte, 0, 64))
- logger := log.New(buffer, "prefix: ", 0)
-
- // print
- SetLogger(logger)
- errLog.Print("test")
-
- // check result
- if actual := buffer.String(); actual != expected {
- t.Errorf("expected %q, got %q", expected, actual)
- }
-}
-
-func TestErrorsStrictIgnoreNotes(t *testing.T) {
- runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) {
- dbt.mustExec("DROP TABLE IF EXISTS does_not_exist")
- })
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
index e1e2ece..ed6c7a3 100644
--- a/vendor/github.com/go-sql-driver/mysql/fields.go
+++ b/vendor/github.com/go-sql-driver/mysql/fields.go
@@ -106,7 +106,7 @@ var (
scanTypeInt64 = reflect.TypeOf(int64(0))
scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
- scanTypeNullTime = reflect.TypeOf(NullTime{})
+ scanTypeNullTime = reflect.TypeOf(nullTime{})
scanTypeUint8 = reflect.TypeOf(uint8(0))
scanTypeUint16 = reflect.TypeOf(uint16(0))
scanTypeUint32 = reflect.TypeOf(uint32(0))
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go b/vendor/github.com/go-sql-driver/mysql/fuzz.go
similarity index 55%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
rename to vendor/github.com/go-sql-driver/mysql/fuzz.go
index fd01f64..fa75adf 100644
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
+++ b/vendor/github.com/go-sql-driver/mysql/fuzz.go
@@ -1,17 +1,24 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
//
-// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
+// Copyright 2020 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
-// +build windows appengine
+// +build gofuzz
package mysql
-import "net"
+import (
+ "database/sql"
+)
-func connCheck(c net.Conn) error {
- return nil
+func Fuzz(data []byte) int {
+ db, err := sql.Open("mysql", string(data))
+ if err != nil {
+ return 0
+ }
+ db.Close()
+ return 1
}
diff --git a/vendor/github.com/go-sql-driver/mysql/go.mod b/vendor/github.com/go-sql-driver/mysql/go.mod
deleted file mode 100644
index fffbf6a..0000000
--- a/vendor/github.com/go-sql-driver/mysql/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/go-sql-driver/mysql
-
-go 1.10
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
index 273cb0b..60effdf 100644
--- a/vendor/github.com/go-sql-driver/mysql/infile.go
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -23,7 +23,7 @@ var (
readerRegisterLock sync.RWMutex
)
-// RegisterLocalFile adds the given file to the file whitelist,
+// RegisterLocalFile adds the given file to the file allowlist,
// so that it can be used by "LOAD DATA LOCAL INFILE ".
// Alternatively you can allow the use of all local files with
// the DSN parameter 'allowAllFiles=true'
@@ -45,7 +45,7 @@ func RegisterLocalFile(filePath string) {
fileRegisterLock.Unlock()
}
-// DeregisterLocalFile removes the given filepath from the whitelist.
+// DeregisterLocalFile removes the given filepath from the allowlist.
func DeregisterLocalFile(filePath string) {
fileRegisterLock.Lock()
delete(fileRegister, strings.Trim(filePath, `"`))
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go
index afa8a89..651723a 100644
--- a/vendor/github.com/go-sql-driver/mysql/nulltime.go
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime.go
@@ -28,11 +28,11 @@ func (nt *NullTime) Scan(value interface{}) (err error) {
nt.Time, nt.Valid = v, true
return
case []byte:
- nt.Time, err = parseDateTime(string(v), time.UTC)
+ nt.Time, err = parseDateTime(v, time.UTC)
nt.Valid = (err == nil)
return
case string:
- nt.Time, err = parseDateTime(v, time.UTC)
+ nt.Time, err = parseDateTime([]byte(v), time.UTC)
nt.Valid = (err == nil)
return
}
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go b/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go
index c392594..453b4b3 100644
--- a/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go
@@ -28,4 +28,13 @@ import (
// }
//
// This NullTime implementation is not driver-specific
+//
+// Deprecated: NullTime doesn't honor the loc DSN parameter.
+// NullTime.Scan interprets a time as UTC, not the loc DSN parameter.
+// Use sql.NullTime instead.
type NullTime sql.NullTime
+
+// for internal use.
+// the mysql package uses sql.NullTime if it is available.
+// if not, the package uses mysql.NullTime.
+type nullTime = sql.NullTime // sql.NullTime is available
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go b/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go
index 86d159d..9f7ae27 100644
--- a/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go
@@ -32,3 +32,8 @@ type NullTime struct {
Time time.Time
Valid bool // Valid is true if Time is not NULL
}
+
+// for internal use.
+// the mysql package uses sql.NullTime if it is available.
+// if not, the package uses mysql.NullTime.
+type nullTime = NullTime // sql.NullTime is not available
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_test.go b/vendor/github.com/go-sql-driver/mysql/nulltime_test.go
deleted file mode 100644
index a14ec06..0000000
--- a/vendor/github.com/go-sql-driver/mysql/nulltime_test.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "database/sql"
- "database/sql/driver"
- "testing"
- "time"
-)
-
-var (
- // Check implementation of interfaces
- _ driver.Valuer = NullTime{}
- _ sql.Scanner = (*NullTime)(nil)
-)
-
-func TestScanNullTime(t *testing.T) {
- var scanTests = []struct {
- in interface{}
- error bool
- valid bool
- time time.Time
- }{
- {tDate, false, true, tDate},
- {sDate, false, true, tDate},
- {[]byte(sDate), false, true, tDate},
- {tDateTime, false, true, tDateTime},
- {sDateTime, false, true, tDateTime},
- {[]byte(sDateTime), false, true, tDateTime},
- {tDate0, false, true, tDate0},
- {sDate0, false, true, tDate0},
- {[]byte(sDate0), false, true, tDate0},
- {sDateTime0, false, true, tDate0},
- {[]byte(sDateTime0), false, true, tDate0},
- {"", true, false, tDate0},
- {"1234", true, false, tDate0},
- {0, true, false, tDate0},
- }
-
- var nt = NullTime{}
- var err error
-
- for _, tst := range scanTests {
- err = nt.Scan(tst.in)
- if (err != nil) != tst.error {
- t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil))
- }
- if nt.Valid != tst.valid {
- t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid)
- }
- if nt.Time != tst.time {
- t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time)
- }
- }
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
index 82ad7a2..6664e5a 100644
--- a/vendor/github.com/go-sql-driver/mysql/packets.go
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -13,6 +13,7 @@ import (
"crypto/tls"
"database/sql/driver"
"encoding/binary"
+ "encoding/json"
"errors"
"fmt"
"io"
@@ -348,6 +349,12 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
return errors.New("unknown collation")
}
+ // Filler [23 bytes] (all 0x00)
+ pos := 13
+ for ; pos < 13+23; pos++ {
+ data[pos] = 0
+ }
+
// SSL Connection Request Packet
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
if mc.cfg.tls != nil {
@@ -366,12 +373,6 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
mc.buf.nc = tlsConn
}
- // Filler [23 bytes] (all 0x00)
- pos := 13
- for ; pos < 13+23; pos++ {
- data[pos] = 0
- }
-
// User [null terminated string]
if len(mc.cfg.User) > 0 {
pos += copy(data[pos:], mc.cfg.User)
@@ -777,7 +778,7 @@ func (rows *textRows) readRow(dest []driver.Value) error {
case fieldTypeTimestamp, fieldTypeDateTime,
fieldTypeDate, fieldTypeNewDate:
dest[i], err = parseDateTime(
- string(dest[i].([]byte)),
+ dest[i].([]byte),
mc.cfg.Loc,
)
if err == nil {
@@ -1003,6 +1004,9 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
continue
}
+ if v, ok := arg.(json.RawMessage); ok {
+ arg = []byte(v)
+ }
// cache types and values
switch v := arg.(type) {
case int64:
@@ -1112,7 +1116,10 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
if v.IsZero() {
b = append(b, "0000-00-00"...)
} else {
- b = v.In(mc.cfg.Loc).AppendFormat(b, timeFormat)
+ b, err = appendDateTime(b, v.In(mc.cfg.Loc))
+ if err != nil {
+ return err
+ }
}
paramValues = appendLengthEncodedInteger(paramValues,
diff --git a/vendor/github.com/go-sql-driver/mysql/packets_test.go b/vendor/github.com/go-sql-driver/mysql/packets_test.go
deleted file mode 100644
index b61e4db..0000000
--- a/vendor/github.com/go-sql-driver/mysql/packets_test.go
+++ /dev/null
@@ -1,336 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "errors"
- "net"
- "testing"
- "time"
-)
-
-var (
- errConnClosed = errors.New("connection is closed")
- errConnTooManyReads = errors.New("too many reads")
- errConnTooManyWrites = errors.New("too many writes")
-)
-
-// struct to mock a net.Conn for testing purposes
-type mockConn struct {
- laddr net.Addr
- raddr net.Addr
- data []byte
- written []byte
- queuedReplies [][]byte
- closed bool
- read int
- reads int
- writes int
- maxReads int
- maxWrites int
-}
-
-func (m *mockConn) Read(b []byte) (n int, err error) {
- if m.closed {
- return 0, errConnClosed
- }
-
- m.reads++
- if m.maxReads > 0 && m.reads > m.maxReads {
- return 0, errConnTooManyReads
- }
-
- n = copy(b, m.data)
- m.read += n
- m.data = m.data[n:]
- return
-}
-func (m *mockConn) Write(b []byte) (n int, err error) {
- if m.closed {
- return 0, errConnClosed
- }
-
- m.writes++
- if m.maxWrites > 0 && m.writes > m.maxWrites {
- return 0, errConnTooManyWrites
- }
-
- n = len(b)
- m.written = append(m.written, b...)
-
- if n > 0 && len(m.queuedReplies) > 0 {
- m.data = m.queuedReplies[0]
- m.queuedReplies = m.queuedReplies[1:]
- }
- return
-}
-func (m *mockConn) Close() error {
- m.closed = true
- return nil
-}
-func (m *mockConn) LocalAddr() net.Addr {
- return m.laddr
-}
-func (m *mockConn) RemoteAddr() net.Addr {
- return m.raddr
-}
-func (m *mockConn) SetDeadline(t time.Time) error {
- return nil
-}
-func (m *mockConn) SetReadDeadline(t time.Time) error {
- return nil
-}
-func (m *mockConn) SetWriteDeadline(t time.Time) error {
- return nil
-}
-
-// make sure mockConn implements the net.Conn interface
-var _ net.Conn = new(mockConn)
-
-func newRWMockConn(sequence uint8) (*mockConn, *mysqlConn) {
- conn := new(mockConn)
- mc := &mysqlConn{
- buf: newBuffer(conn),
- cfg: NewConfig(),
- netConn: conn,
- closech: make(chan struct{}),
- maxAllowedPacket: defaultMaxAllowedPacket,
- sequence: sequence,
- }
- return conn, mc
-}
-
-func TestReadPacketSingleByte(t *testing.T) {
- conn := new(mockConn)
- mc := &mysqlConn{
- buf: newBuffer(conn),
- }
-
- conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
- conn.maxReads = 1
- packet, err := mc.readPacket()
- if err != nil {
- t.Fatal(err)
- }
- if len(packet) != 1 {
- t.Fatalf("unexpected packet length: expected %d, got %d", 1, len(packet))
- }
- if packet[0] != 0xff {
- t.Fatalf("unexpected packet content: expected %x, got %x", 0xff, packet[0])
- }
-}
-
-func TestReadPacketWrongSequenceID(t *testing.T) {
- conn := new(mockConn)
- mc := &mysqlConn{
- buf: newBuffer(conn),
- }
-
- // too low sequence id
- conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
- conn.maxReads = 1
- mc.sequence = 1
- _, err := mc.readPacket()
- if err != ErrPktSync {
- t.Errorf("expected ErrPktSync, got %v", err)
- }
-
- // reset
- conn.reads = 0
- mc.sequence = 0
- mc.buf = newBuffer(conn)
-
- // too high sequence id
- conn.data = []byte{0x01, 0x00, 0x00, 0x42, 0xff}
- _, err = mc.readPacket()
- if err != ErrPktSyncMul {
- t.Errorf("expected ErrPktSyncMul, got %v", err)
- }
-}
-
-func TestReadPacketSplit(t *testing.T) {
- conn := new(mockConn)
- mc := &mysqlConn{
- buf: newBuffer(conn),
- }
-
- data := make([]byte, maxPacketSize*2+4*3)
- const pkt2ofs = maxPacketSize + 4
- const pkt3ofs = 2 * (maxPacketSize + 4)
-
- // case 1: payload has length maxPacketSize
- data = data[:pkt2ofs+4]
-
- // 1st packet has maxPacketSize length and sequence id 0
- // ff ff ff 00 ...
- data[0] = 0xff
- data[1] = 0xff
- data[2] = 0xff
-
- // mark the payload start and end of 1st packet so that we can check if the
- // content was correctly appended
- data[4] = 0x11
- data[maxPacketSize+3] = 0x22
-
- // 2nd packet has payload length 0 and squence id 1
- // 00 00 00 01
- data[pkt2ofs+3] = 0x01
-
- conn.data = data
- conn.maxReads = 3
- packet, err := mc.readPacket()
- if err != nil {
- t.Fatal(err)
- }
- if len(packet) != maxPacketSize {
- t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize, len(packet))
- }
- if packet[0] != 0x11 {
- t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
- }
- if packet[maxPacketSize-1] != 0x22 {
- t.Fatalf("unexpected payload end: expected %x, got %x", 0x22, packet[maxPacketSize-1])
- }
-
- // case 2: payload has length which is a multiple of maxPacketSize
- data = data[:cap(data)]
-
- // 2nd packet now has maxPacketSize length
- data[pkt2ofs] = 0xff
- data[pkt2ofs+1] = 0xff
- data[pkt2ofs+2] = 0xff
-
- // mark the payload start and end of the 2nd packet
- data[pkt2ofs+4] = 0x33
- data[pkt2ofs+maxPacketSize+3] = 0x44
-
- // 3rd packet has payload length 0 and squence id 2
- // 00 00 00 02
- data[pkt3ofs+3] = 0x02
-
- conn.data = data
- conn.reads = 0
- conn.maxReads = 5
- mc.sequence = 0
- packet, err = mc.readPacket()
- if err != nil {
- t.Fatal(err)
- }
- if len(packet) != 2*maxPacketSize {
- t.Fatalf("unexpected packet length: expected %d, got %d", 2*maxPacketSize, len(packet))
- }
- if packet[0] != 0x11 {
- t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
- }
- if packet[2*maxPacketSize-1] != 0x44 {
- t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[2*maxPacketSize-1])
- }
-
- // case 3: payload has a length larger maxPacketSize, which is not an exact
- // multiple of it
- data = data[:pkt2ofs+4+42]
- data[pkt2ofs] = 0x2a
- data[pkt2ofs+1] = 0x00
- data[pkt2ofs+2] = 0x00
- data[pkt2ofs+4+41] = 0x44
-
- conn.data = data
- conn.reads = 0
- conn.maxReads = 4
- mc.sequence = 0
- packet, err = mc.readPacket()
- if err != nil {
- t.Fatal(err)
- }
- if len(packet) != maxPacketSize+42 {
- t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize+42, len(packet))
- }
- if packet[0] != 0x11 {
- t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
- }
- if packet[maxPacketSize+41] != 0x44 {
- t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[maxPacketSize+41])
- }
-}
-
-func TestReadPacketFail(t *testing.T) {
- conn := new(mockConn)
- mc := &mysqlConn{
- buf: newBuffer(conn),
- closech: make(chan struct{}),
- }
-
- // illegal empty (stand-alone) packet
- conn.data = []byte{0x00, 0x00, 0x00, 0x00}
- conn.maxReads = 1
- _, err := mc.readPacket()
- if err != ErrInvalidConn {
- t.Errorf("expected ErrInvalidConn, got %v", err)
- }
-
- // reset
- conn.reads = 0
- mc.sequence = 0
- mc.buf = newBuffer(conn)
-
- // fail to read header
- conn.closed = true
- _, err = mc.readPacket()
- if err != ErrInvalidConn {
- t.Errorf("expected ErrInvalidConn, got %v", err)
- }
-
- // reset
- conn.closed = false
- conn.reads = 0
- mc.sequence = 0
- mc.buf = newBuffer(conn)
-
- // fail to read body
- conn.maxReads = 1
- _, err = mc.readPacket()
- if err != ErrInvalidConn {
- t.Errorf("expected ErrInvalidConn, got %v", err)
- }
-}
-
-// https://github.com/go-sql-driver/mysql/pull/801
-// not-NUL terminated plugin_name in init packet
-func TestRegression801(t *testing.T) {
- conn := new(mockConn)
- mc := &mysqlConn{
- buf: newBuffer(conn),
- cfg: new(Config),
- sequence: 42,
- closech: make(chan struct{}),
- }
-
- conn.data = []byte{72, 0, 0, 42, 10, 53, 46, 53, 46, 56, 0, 165, 0, 0, 0,
- 60, 70, 63, 58, 68, 104, 34, 97, 0, 223, 247, 33, 2, 0, 15, 128, 21, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 98, 120, 114, 47, 85, 75, 109, 99, 51, 77,
- 50, 64, 0, 109, 121, 115, 113, 108, 95, 110, 97, 116, 105, 118, 101, 95,
- 112, 97, 115, 115, 119, 111, 114, 100}
- conn.maxReads = 1
-
- authData, pluginName, err := mc.readHandshakePacket()
- if err != nil {
- t.Fatalf("got error: %v", err)
- }
-
- if pluginName != "mysql_native_password" {
- t.Errorf("expected plugin name 'mysql_native_password', got '%s'", pluginName)
- }
-
- expectedAuthData := []byte{60, 70, 63, 58, 68, 104, 34, 97, 98, 120, 114,
- 47, 85, 75, 109, 99, 51, 77, 50, 64}
- if !bytes.Equal(authData, expectedAuthData) {
- t.Errorf("expected authData '%v', got '%v'", expectedAuthData, authData)
- }
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
index f7e3709..18a3ae4 100644
--- a/vendor/github.com/go-sql-driver/mysql/statement.go
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -10,6 +10,7 @@ package mysql
import (
"database/sql/driver"
+ "encoding/json"
"fmt"
"io"
"reflect"
@@ -43,6 +44,11 @@ func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
return converter{}
}
+func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) {
+ nv.Value, err = converter{}.ConvertValue(nv.Value)
+ return
+}
+
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
if stmt.mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
@@ -129,6 +135,8 @@ func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
return rows, err
}
+var jsonType = reflect.TypeOf(json.RawMessage{})
+
type converter struct{}
// ConvertValue mirrors the reference/default converter in database/sql/driver
@@ -146,12 +154,17 @@ func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
if err != nil {
return nil, err
}
- if !driver.IsValue(sv) {
- return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
+ if driver.IsValue(sv) {
+ return sv, nil
}
- return sv, nil
+ // A value returend from the Valuer interface can be "a type handled by
+ // a database driver's NamedValueChecker interface" so we should accept
+ // uint64 here as well.
+ if u, ok := sv.(uint64); ok {
+ return u, nil
+ }
+ return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
}
-
rv := reflect.ValueOf(v)
switch rv.Kind() {
case reflect.Ptr:
@@ -170,11 +183,14 @@ func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
case reflect.Bool:
return rv.Bool(), nil
case reflect.Slice:
- ek := rv.Type().Elem().Kind()
- if ek == reflect.Uint8 {
+ switch t := rv.Type(); {
+ case t == jsonType:
+ return v, nil
+ case t.Elem().Kind() == reflect.Uint8:
return rv.Bytes(), nil
+ default:
+ return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, t.Elem().Kind())
}
- return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek)
case reflect.String:
return rv.String(), nil
}
diff --git a/vendor/github.com/go-sql-driver/mysql/statement_test.go b/vendor/github.com/go-sql-driver/mysql/statement_test.go
deleted file mode 100644
index 4b9914f..0000000
--- a/vendor/github.com/go-sql-driver/mysql/statement_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "testing"
-)
-
-func TestConvertDerivedString(t *testing.T) {
- type derived string
-
- output, err := converter{}.ConvertValue(derived("value"))
- if err != nil {
- t.Fatal("Derived string type not convertible", err)
- }
-
- if output != "value" {
- t.Fatalf("Derived string type not converted, got %#v %T", output, output)
- }
-}
-
-func TestConvertDerivedByteSlice(t *testing.T) {
- type derived []uint8
-
- output, err := converter{}.ConvertValue(derived("value"))
- if err != nil {
- t.Fatal("Byte slice not convertible", err)
- }
-
- if bytes.Compare(output.([]byte), []byte("value")) != 0 {
- t.Fatalf("Byte slice not converted, got %#v %T", output, output)
- }
-}
-
-func TestConvertDerivedUnsupportedSlice(t *testing.T) {
- type derived []int
-
- _, err := converter{}.ConvertValue(derived{1})
- if err == nil || err.Error() != "unsupported type mysql.derived, a slice of int" {
- t.Fatal("Unexpected error", err)
- }
-}
-
-func TestConvertDerivedBool(t *testing.T) {
- type derived bool
-
- output, err := converter{}.ConvertValue(derived(true))
- if err != nil {
- t.Fatal("Derived bool type not convertible", err)
- }
-
- if output != true {
- t.Fatalf("Derived bool type not converted, got %#v %T", output, output)
- }
-}
-
-func TestConvertPointer(t *testing.T) {
- str := "value"
-
- output, err := converter{}.ConvertValue(&str)
- if err != nil {
- t.Fatal("Pointer type not convertible", err)
- }
-
- if output != "value" {
- t.Fatalf("Pointer type not converted, got %#v %T", output, output)
- }
-}
-
-func TestConvertSignedIntegers(t *testing.T) {
- values := []interface{}{
- int8(-42),
- int16(-42),
- int32(-42),
- int64(-42),
- int(-42),
- }
-
- for _, value := range values {
- output, err := converter{}.ConvertValue(value)
- if err != nil {
- t.Fatalf("%T type not convertible %s", value, err)
- }
-
- if output != int64(-42) {
- t.Fatalf("%T type not converted, got %#v %T", value, output, output)
- }
- }
-}
-
-func TestConvertUnsignedIntegers(t *testing.T) {
- values := []interface{}{
- uint8(42),
- uint16(42),
- uint32(42),
- uint64(42),
- uint(42),
- }
-
- for _, value := range values {
- output, err := converter{}.ConvertValue(value)
- if err != nil {
- t.Fatalf("%T type not convertible %s", value, err)
- }
-
- if output != uint64(42) {
- t.Fatalf("%T type not converted, got %#v %T", value, output, output)
- }
- }
-
- output, err := converter{}.ConvertValue(^uint64(0))
- if err != nil {
- t.Fatal("uint64 high-bit not convertible", err)
- }
-
- if output != ^uint64(0) {
- t.Fatalf("uint64 high-bit converted, got %#v %T", output, output)
- }
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
index 9552e80..d6545f5 100644
--- a/vendor/github.com/go-sql-driver/mysql/utils.go
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -106,27 +106,136 @@ func readBool(input string) (value bool, valid bool) {
* Time related utils *
******************************************************************************/
-func parseDateTime(str string, loc *time.Location) (t time.Time, err error) {
- base := "0000-00-00 00:00:00.0000000"
- switch len(str) {
+func parseDateTime(b []byte, loc *time.Location) (time.Time, error) {
+ const base = "0000-00-00 00:00:00.000000"
+ switch len(b) {
case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
- if str == base[:len(str)] {
- return
+ if string(b) == base[:len(b)] {
+ return time.Time{}, nil
}
- t, err = time.Parse(timeFormat[:len(str)], str)
+
+ year, err := parseByteYear(b)
+ if err != nil {
+ return time.Time{}, err
+ }
+ if year <= 0 {
+ year = 1
+ }
+
+ if b[4] != '-' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[4])
+ }
+
+ m, err := parseByte2Digits(b[5], b[6])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if m <= 0 {
+ m = 1
+ }
+ month := time.Month(m)
+
+ if b[7] != '-' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[7])
+ }
+
+ day, err := parseByte2Digits(b[8], b[9])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if day <= 0 {
+ day = 1
+ }
+ if len(b) == 10 {
+ return time.Date(year, month, day, 0, 0, 0, 0, loc), nil
+ }
+
+ if b[10] != ' ' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[10])
+ }
+
+ hour, err := parseByte2Digits(b[11], b[12])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if b[13] != ':' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[13])
+ }
+
+ min, err := parseByte2Digits(b[14], b[15])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if b[16] != ':' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[16])
+ }
+
+ sec, err := parseByte2Digits(b[17], b[18])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if len(b) == 19 {
+ return time.Date(year, month, day, hour, min, sec, 0, loc), nil
+ }
+
+ if b[19] != '.' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[19])
+ }
+ nsec, err := parseByteNanoSec(b[20:])
+ if err != nil {
+ return time.Time{}, err
+ }
+ return time.Date(year, month, day, hour, min, sec, nsec, loc), nil
default:
- err = fmt.Errorf("invalid time string: %s", str)
- return
+ return time.Time{}, fmt.Errorf("invalid time bytes: %s", b)
}
+}
- // Adjust location
- if err == nil && loc != time.UTC {
- y, mo, d := t.Date()
- h, mi, s := t.Clock()
- t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil
+func parseByteYear(b []byte) (int, error) {
+ year, n := 0, 1000
+ for i := 0; i < 4; i++ {
+ v, err := bToi(b[i])
+ if err != nil {
+ return 0, err
+ }
+ year += v * n
+ n = n / 10
}
+ return year, nil
+}
- return
+func parseByte2Digits(b1, b2 byte) (int, error) {
+ d1, err := bToi(b1)
+ if err != nil {
+ return 0, err
+ }
+ d2, err := bToi(b2)
+ if err != nil {
+ return 0, err
+ }
+ return d1*10 + d2, nil
+}
+
+func parseByteNanoSec(b []byte) (int, error) {
+ ns, digit := 0, 100000 // max is 6-digits
+ for i := 0; i < len(b); i++ {
+ v, err := bToi(b[i])
+ if err != nil {
+ return 0, err
+ }
+ ns += v * digit
+ digit /= 10
+ }
+ // nanoseconds has 10-digits. (needs to scale digits)
+ // 10 - 6 = 4, so we have to multiple 1000.
+ return ns * 1000, nil
+}
+
+func bToi(b byte) (int, error) {
+ if b < '0' || b > '9' {
+ return 0, errors.New("not [0-9]")
+ }
+ return int(b - '0'), nil
}
func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
@@ -167,6 +276,64 @@ func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Va
return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
}
+func appendDateTime(buf []byte, t time.Time) ([]byte, error) {
+ year, month, day := t.Date()
+ hour, min, sec := t.Clock()
+ nsec := t.Nanosecond()
+
+ if year < 1 || year > 9999 {
+ return buf, errors.New("year is not in the range [1, 9999]: " + strconv.Itoa(year)) // use errors.New instead of fmt.Errorf to avoid year escape to heap
+ }
+ year100 := year / 100
+ year1 := year % 100
+
+ var localBuf [len("2006-01-02T15:04:05.999999999")]byte // does not escape
+ localBuf[0], localBuf[1], localBuf[2], localBuf[3] = digits10[year100], digits01[year100], digits10[year1], digits01[year1]
+ localBuf[4] = '-'
+ localBuf[5], localBuf[6] = digits10[month], digits01[month]
+ localBuf[7] = '-'
+ localBuf[8], localBuf[9] = digits10[day], digits01[day]
+
+ if hour == 0 && min == 0 && sec == 0 && nsec == 0 {
+ return append(buf, localBuf[:10]...), nil
+ }
+
+ localBuf[10] = ' '
+ localBuf[11], localBuf[12] = digits10[hour], digits01[hour]
+ localBuf[13] = ':'
+ localBuf[14], localBuf[15] = digits10[min], digits01[min]
+ localBuf[16] = ':'
+ localBuf[17], localBuf[18] = digits10[sec], digits01[sec]
+
+ if nsec == 0 {
+ return append(buf, localBuf[:19]...), nil
+ }
+ nsec100000000 := nsec / 100000000
+ nsec1000000 := (nsec / 1000000) % 100
+ nsec10000 := (nsec / 10000) % 100
+ nsec100 := (nsec / 100) % 100
+ nsec1 := nsec % 100
+ localBuf[19] = '.'
+
+ // milli second
+ localBuf[20], localBuf[21], localBuf[22] =
+ digits01[nsec100000000], digits10[nsec1000000], digits01[nsec1000000]
+ // micro second
+ localBuf[23], localBuf[24], localBuf[25] =
+ digits10[nsec10000], digits01[nsec10000], digits10[nsec100]
+ // nano second
+ localBuf[26], localBuf[27], localBuf[28] =
+ digits01[nsec100], digits10[nsec1], digits01[nsec1]
+
+ // trim trailing zeros
+ n := len(localBuf)
+ for n > 0 && localBuf[n-1] == '0' {
+ n--
+ }
+
+ return append(buf, localBuf[:n]...), nil
+}
+
// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
// if the DATE or DATETIME has the zero value.
// It must never be changed.
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_test.go b/vendor/github.com/go-sql-driver/mysql/utils_test.go
deleted file mode 100644
index 10a60c2..0000000
--- a/vendor/github.com/go-sql-driver/mysql/utils_test.go
+++ /dev/null
@@ -1,293 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "database/sql"
- "database/sql/driver"
- "encoding/binary"
- "testing"
-)
-
-func TestLengthEncodedInteger(t *testing.T) {
- var integerTests = []struct {
- num uint64
- encoded []byte
- }{
- {0x0000000000000000, []byte{0x00}},
- {0x0000000000000012, []byte{0x12}},
- {0x00000000000000fa, []byte{0xfa}},
- {0x0000000000000100, []byte{0xfc, 0x00, 0x01}},
- {0x0000000000001234, []byte{0xfc, 0x34, 0x12}},
- {0x000000000000ffff, []byte{0xfc, 0xff, 0xff}},
- {0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}},
- {0x0000000000123456, []byte{0xfd, 0x56, 0x34, 0x12}},
- {0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}},
- {0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}},
- {0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}},
- {0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
- }
-
- for _, tst := range integerTests {
- num, isNull, numLen := readLengthEncodedInteger(tst.encoded)
- if isNull {
- t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num)
- }
- if num != tst.num {
- t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num)
- }
- if numLen != len(tst.encoded) {
- t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen)
- }
- encoded := appendLengthEncodedInteger(nil, num)
- if !bytes.Equal(encoded, tst.encoded) {
- t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded)
- }
- }
-}
-
-func TestFormatBinaryDateTime(t *testing.T) {
- rawDate := [11]byte{}
- binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years
- rawDate[2] = 12 // months
- rawDate[3] = 30 // days
- rawDate[4] = 15 // hours
- rawDate[5] = 46 // minutes
- rawDate[6] = 23 // seconds
- binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds
- expect := func(expected string, inlen, outlen uint8) {
- actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen)
- bytes, ok := actual.([]byte)
- if !ok {
- t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
- }
- if string(bytes) != expected {
- t.Errorf(
- "expected %q, got %q for length in %d, out %d",
- expected, actual, inlen, outlen,
- )
- }
- }
- expect("0000-00-00", 0, 10)
- expect("0000-00-00 00:00:00", 0, 19)
- expect("1978-12-30", 4, 10)
- expect("1978-12-30 15:46:23", 7, 19)
- expect("1978-12-30 15:46:23.987654", 11, 26)
-}
-
-func TestFormatBinaryTime(t *testing.T) {
- expect := func(expected string, src []byte, outlen uint8) {
- actual, _ := formatBinaryTime(src, outlen)
- bytes, ok := actual.([]byte)
- if !ok {
- t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
- }
- if string(bytes) != expected {
- t.Errorf(
- "expected %q, got %q for src=%q and outlen=%d",
- expected, actual, src, outlen)
- }
- }
-
- // binary format:
- // sign (0: positive, 1: negative), days(4), hours, minutes, seconds, micro(4)
-
- // Zeros
- expect("00:00:00", []byte{}, 8)
- expect("00:00:00.0", []byte{}, 10)
- expect("00:00:00.000000", []byte{}, 15)
-
- // Without micro(4)
- expect("12:34:56", []byte{0, 0, 0, 0, 0, 12, 34, 56}, 8)
- expect("-12:34:56", []byte{1, 0, 0, 0, 0, 12, 34, 56}, 8)
- expect("12:34:56.00", []byte{0, 0, 0, 0, 0, 12, 34, 56}, 11)
- expect("24:34:56", []byte{0, 1, 0, 0, 0, 0, 34, 56}, 8)
- expect("-99:34:56", []byte{1, 4, 0, 0, 0, 3, 34, 56}, 8)
- expect("103079215103:34:56", []byte{0, 255, 255, 255, 255, 23, 34, 56}, 8)
-
- // With micro(4)
- expect("12:34:56.00", []byte{0, 0, 0, 0, 0, 12, 34, 56, 99, 0, 0, 0}, 11)
- expect("12:34:56.000099", []byte{0, 0, 0, 0, 0, 12, 34, 56, 99, 0, 0, 0}, 15)
-}
-
-func TestEscapeBackslash(t *testing.T) {
- expect := func(expected, value string) {
- actual := string(escapeBytesBackslash([]byte{}, []byte(value)))
- if actual != expected {
- t.Errorf(
- "expected %s, got %s",
- expected, actual,
- )
- }
-
- actual = string(escapeStringBackslash([]byte{}, value))
- if actual != expected {
- t.Errorf(
- "expected %s, got %s",
- expected, actual,
- )
- }
- }
-
- expect("foo\\0bar", "foo\x00bar")
- expect("foo\\nbar", "foo\nbar")
- expect("foo\\rbar", "foo\rbar")
- expect("foo\\Zbar", "foo\x1abar")
- expect("foo\\\"bar", "foo\"bar")
- expect("foo\\\\bar", "foo\\bar")
- expect("foo\\'bar", "foo'bar")
-}
-
-func TestEscapeQuotes(t *testing.T) {
- expect := func(expected, value string) {
- actual := string(escapeBytesQuotes([]byte{}, []byte(value)))
- if actual != expected {
- t.Errorf(
- "expected %s, got %s",
- expected, actual,
- )
- }
-
- actual = string(escapeStringQuotes([]byte{}, value))
- if actual != expected {
- t.Errorf(
- "expected %s, got %s",
- expected, actual,
- )
- }
- }
-
- expect("foo\x00bar", "foo\x00bar") // not affected
- expect("foo\nbar", "foo\nbar") // not affected
- expect("foo\rbar", "foo\rbar") // not affected
- expect("foo\x1abar", "foo\x1abar") // not affected
- expect("foo''bar", "foo'bar") // affected
- expect("foo\"bar", "foo\"bar") // not affected
-}
-
-func TestAtomicBool(t *testing.T) {
- var ab atomicBool
- if ab.IsSet() {
- t.Fatal("Expected value to be false")
- }
-
- ab.Set(true)
- if ab.value != 1 {
- t.Fatal("Set(true) did not set value to 1")
- }
- if !ab.IsSet() {
- t.Fatal("Expected value to be true")
- }
-
- ab.Set(true)
- if !ab.IsSet() {
- t.Fatal("Expected value to be true")
- }
-
- ab.Set(false)
- if ab.value != 0 {
- t.Fatal("Set(false) did not set value to 0")
- }
- if ab.IsSet() {
- t.Fatal("Expected value to be false")
- }
-
- ab.Set(false)
- if ab.IsSet() {
- t.Fatal("Expected value to be false")
- }
- if ab.TrySet(false) {
- t.Fatal("Expected TrySet(false) to fail")
- }
- if !ab.TrySet(true) {
- t.Fatal("Expected TrySet(true) to succeed")
- }
- if !ab.IsSet() {
- t.Fatal("Expected value to be true")
- }
-
- ab.Set(true)
- if !ab.IsSet() {
- t.Fatal("Expected value to be true")
- }
- if ab.TrySet(true) {
- t.Fatal("Expected TrySet(true) to fail")
- }
- if !ab.TrySet(false) {
- t.Fatal("Expected TrySet(false) to succeed")
- }
- if ab.IsSet() {
- t.Fatal("Expected value to be false")
- }
-
- ab._noCopy.Lock() // we've "tested" it ¯\_(ツ)_/¯
-}
-
-func TestAtomicError(t *testing.T) {
- var ae atomicError
- if ae.Value() != nil {
- t.Fatal("Expected value to be nil")
- }
-
- ae.Set(ErrMalformPkt)
- if v := ae.Value(); v != ErrMalformPkt {
- if v == nil {
- t.Fatal("Value is still nil")
- }
- t.Fatal("Error did not match")
- }
- ae.Set(ErrPktSync)
- if ae.Value() == ErrMalformPkt {
- t.Fatal("Error still matches old error")
- }
- if v := ae.Value(); v != ErrPktSync {
- t.Fatal("Error did not match")
- }
-}
-
-func TestIsolationLevelMapping(t *testing.T) {
- data := []struct {
- level driver.IsolationLevel
- expected string
- }{
- {
- level: driver.IsolationLevel(sql.LevelReadCommitted),
- expected: "READ COMMITTED",
- },
- {
- level: driver.IsolationLevel(sql.LevelRepeatableRead),
- expected: "REPEATABLE READ",
- },
- {
- level: driver.IsolationLevel(sql.LevelReadUncommitted),
- expected: "READ UNCOMMITTED",
- },
- {
- level: driver.IsolationLevel(sql.LevelSerializable),
- expected: "SERIALIZABLE",
- },
- }
-
- for i, td := range data {
- if actual, err := mapIsolationLevel(td.level); actual != td.expected || err != nil {
- t.Fatal(i, td.expected, actual, err)
- }
- }
-
- // check unsupported mapping
- expectedErr := "mysql: unsupported isolation level: 7"
- actual, err := mapIsolationLevel(driver.IsolationLevel(sql.LevelLinearizable))
- if actual != "" || err == nil {
- t.Fatal("Expected error on unsupported isolation level")
- }
- if err.Error() != expectedErr {
- t.Fatalf("Expected error to be %q, got %q", expectedErr, err)
- }
-}
diff --git a/vendor/github.com/jmoiron/sqlx/LICENSE b/vendor/github.com/jmoiron/sqlx/LICENSE
deleted file mode 100644
index 0d31edf..0000000
--- a/vendor/github.com/jmoiron/sqlx/LICENSE
+++ /dev/null
@@ -1,23 +0,0 @@
- Copyright (c) 2013, Jason Moiron
-
- Permission is hereby granted, free of charge, to any person
- obtaining a copy of this software and associated documentation
- files (the "Software"), to deal in the Software without
- restriction, including without limitation the rights to use,
- copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the
- Software is furnished to do so, subject to the following
- conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- OTHER DEALINGS IN THE SOFTWARE.
-
diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md
deleted file mode 100644
index 4e3eb6d..0000000
--- a/vendor/github.com/jmoiron/sqlx/README.md
+++ /dev/null
@@ -1,258 +0,0 @@
-#sqlx
-
-[![Build Status](https://drone.io/github.com/jmoiron/sqlx/status.png)](https://drone.io/github.com/jmoiron/sqlx/latest) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
-
-sqlx is a library which provides a set of extensions on go's standard
-`database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`,
-et al. all leave the underlying interfaces untouched, so that their interfaces
-are a superset on the standard ones. This makes it relatively painless to
-integrate existing codebases using database/sql with sqlx.
-
-Major additional concepts are:
-
-* Marshal rows into structs (with embedded struct support), maps, and slices
-* Named parameter support including prepared statements
-* `Get` and `Select` to go quickly from query to struct/slice
-* `LoadFile` for executing statements from a file
-
-There is now some [fairly comprehensive documentation](http://jmoiron.github.io/sqlx/) for sqlx.
-You can also read the usage below for a quick sample on how sqlx works, or check out the [API
-documentation on godoc](http://godoc.org/github.com/jmoiron/sqlx).
-
-## Recent Changes
-
-The ability to use basic types as Select and Get destinations was added. This
-is only valid when there is one column in the result set, and both functions
-return an error if this isn't the case. This allows for much simpler patterns
-of access for single column results:
-
-```go
-var count int
-err := db.Get(&count, "SELECT count(*) FROM person;")
-
-var names []string
-err := db.Select(&names, "SELECT name FROM person;")
-```
-
-See the note on Scannability at the bottom of this README for some more info.
-
-### Backwards Compatibility
-
-There is no Go1-like promise of absolute stability, but I take the issue
-seriously and will maintain the library in a compatible state unless vital
-bugs prevent me from doing so. Since [#59](https://github.com/jmoiron/sqlx/issues/59) and [#60](https://github.com/jmoiron/sqlx/issues/60) necessitated
-breaking behavior, a wider API cleanup was done at the time of fixing.
-
-## install
-
- go get github.com/jmoiron/sqlx
-
-## issues
-
-Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of
-`Columns()` can have duplicate names on queries like:
-
-```sql
-SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id;
-```
-
-making a struct or map destination ambiguous. Use `AS` in your queries
-to give rows distinct names, `rows.Scan` to scan them manually, or
-`SliceScan` to get a slice of results.
-
-## usage
-
-Below is an example which shows some common use cases for sqlx. Check
-[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more
-usage.
-
-
-```go
-package main
-
-import (
- _ "github.com/lib/pq"
- "database/sql"
- "github.com/jmoiron/sqlx"
- "log"
-)
-
-var schema = `
-CREATE TABLE person (
- first_name text,
- last_name text,
- email text
-);
-
-CREATE TABLE place (
- country text,
- city text NULL,
- telcode integer
-)`
-
-type Person struct {
- FirstName string `db:"first_name"`
- LastName string `db:"last_name"`
- Email string
-}
-
-type Place struct {
- Country string
- City sql.NullString
- TelCode int
-}
-
-func main() {
- // this connects & tries a simple 'SELECT 1', panics on error
- // use sqlx.Open() for sql.Open() semantics
- db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
- if err != nil {
- log.Fatalln(err)
- }
-
- // exec the schema or fail; multi-statement Exec behavior varies between
- // database drivers; pq will exec them all, sqlite3 won't, ymmv
- db.MustExec(schema)
-
- tx := db.MustBegin()
- tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net")
- tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net")
- tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1")
- tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852")
- tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65")
- // Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person
- tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"})
- tx.Commit()
-
- // Query the database, storing results in a []Person (wrapped in []interface{})
- people := []Person{}
- db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")
- jason, john := people[0], people[1]
-
- fmt.Printf("%#v\n%#v", jason, john)
- // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
- // Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"}
-
- // You can also get a single result, a la QueryRow
- jason = Person{}
- err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason")
- fmt.Printf("%#v\n", jason)
- // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
-
- // if you have null fields and use SELECT *, you must use sql.Null* in your struct
- places := []Place{}
- err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC")
- if err != nil {
- fmt.Println(err)
- return
- }
- usa, singsing, honkers := places[0], places[1], places[2]
-
- fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers)
- // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
- // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
- // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
-
- // Loop through rows using only one struct
- place := Place{}
- rows, err := db.Queryx("SELECT * FROM place")
- for rows.Next() {
- err := rows.StructScan(&place)
- if err != nil {
- log.Fatalln(err)
- }
- fmt.Printf("%#v\n", place)
- }
- // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
- // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
- // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
-
- // Named queries, using `:name` as the bindvar. Automatic bindvar support
- // which takes into account the dbtype based on the driverName on sqlx.Open/Connect
- _, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`,
- map[string]interface{}{
- "first": "Bin",
- "last": "Smuth",
- "email": "bensmith@allblacks.nz",
- })
-
- // Selects Mr. Smith from the database
- rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"})
-
- // Named queries can also use structs. Their bind names follow the same rules
- // as the name -> db mapping, so struct fields are lowercased and the `db` tag
- // is taken into consideration.
- rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason)
-}
-```
-
-## Scannability
-
-Get and Select are able to take base types, so the following is now possible:
-
-```go
-var name string
-db.Get(&name, "SELECT first_name FROM person WHERE id=$1", 10)
-
-var ids []int64
-db.Select(&ids, "SELECT id FROM person LIMIT 20;")
-```
-
-This can get complicated with destination types which are structs, like `sql.NullString`. Because of this, straightforward rules for *scannability* had to be developed. Iff something is "Scannable", then it is used directly in `rows.Scan`; if it's not, then the standard sqlx struct rules apply.
-
-Something is scannable if any of the following are true:
-
-* It is not a struct, ie. `reflect.ValueOf(v).Kind() != reflect.Struct`
-* It implements the `sql.Scanner` interface
-* It has no exported fields (eg. `time.Time`)
-
-## embedded structs
-
-Scan targets obey Go attribute rules directly, including nested embedded structs. Older versions of sqlx would attempt to also descend into non-embedded structs, but this is no longer supported.
-
-Go makes *accessing* '[ambiguous selectors](http://play.golang.org/p/MGRxdjLaUc)' a compile time error, defining structs with ambiguous selectors is legal. Sqlx will decide which field to use on a struct based on a breadth first search of the struct and any structs it embeds, as specified by the order of the fields as accessible by `reflect`, which generally means in source-order. This means that sqlx chooses the outer-most, top-most matching name for targets, even when the selector might technically be ambiguous.
-
-## scan safety
-
-By default, scanning into structs requires the structs to have fields for all of the
-columns in the query. This was done for a few reasons:
-
-* A mistake in naming during development could lead you to believe that data is
- being written to a field when actually it can't be found and it is being dropped
-* This behavior mirrors the behavior of the Go compiler with respect to unused
- variables
-* Selecting more data than you need is wasteful (more data on the wire, more time
- marshalling, etc)
-
-Unlike Marshallers in the stdlib, the programmer scanning an sql result into a struct
-will generally have a full understanding of what the underlying data model is *and*
-full control over the SQL statement.
-
-Despite this, there are use cases where it's convenient to be able to ignore unknown
-columns. In most of these cases, you might be better off with `ScanSlice`, but where
-you want to still use structs, there is now the `Unsafe` method. Its usage is most
-simply shown in an example:
-
-```go
- db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
- if err != nil {
- log.Fatal(err)
- }
-
- type Person {
- Name string
- }
- var p Person
-
- // This fails, because there is no destination for location in Person
- err = db.Get(&p, "SELECT name, location FROM person LIMIT 1")
-
- udb := db.Unsafe()
-
- // This succeeds and just sets `Name` in the p struct
- err = udb.Get(&p, "SELECT name, location FROM person LIMIT 1")
-```
-
-The `Unsafe` method is implemented on `Tx`, `DB`, and `Stmt`. When you use an unsafe
-`Tx` or `DB` to create a new `Tx` or `Stmt`, those inherit its lack of safety.
-
diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go
deleted file mode 100644
index 2f1ec2b..0000000
--- a/vendor/github.com/jmoiron/sqlx/bind.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package sqlx
-
-import (
- "bytes"
- "strconv"
-)
-
-// Bindvar types supported by Rebind, BindMap and BindStruct.
-const (
- UNKNOWN = iota
- QUESTION
- DOLLAR
- NAMED
-)
-
-// BindType returns the bindtype for a given database given a drivername.
-func BindType(driverName string) int {
- switch driverName {
- case "postgres", "pgx":
- return DOLLAR
- case "mysql":
- return QUESTION
- case "sqlite3":
- return QUESTION
- case "oci8":
- return NAMED
- }
- return UNKNOWN
-}
-
-// FIXME: this should be able to be tolerant of escaped ?'s in queries without
-// losing much speed, and should be to avoid confusion.
-
-// FIXME: this is now produces the wrong results for oracle's NAMED bindtype
-
-// Rebind a query from the default bindtype (QUESTION) to the target bindtype.
-func Rebind(bindType int, query string) string {
- if bindType != DOLLAR {
- return query
- }
-
- qb := []byte(query)
- // Add space enough for 10 params before we have to allocate
- rqb := make([]byte, 0, len(qb)+10)
- j := 1
- for _, b := range qb {
- if b == '?' {
- rqb = append(rqb, '$')
- for _, b := range strconv.Itoa(j) {
- rqb = append(rqb, byte(b))
- }
- j++
- } else {
- rqb = append(rqb, b)
- }
- }
- return string(rqb)
-}
-
-// Experimental implementation of Rebind which uses a bytes.Buffer. The code is
-// much simpler and should be more resistant to odd unicode, but it is twice as
-// slow. Kept here for benchmarking purposes and to possibly replace Rebind if
-// problems arise with its somewhat naive handling of unicode.
-
-func rebindBuff(bindType int, query string) string {
- if bindType != DOLLAR {
- return query
- }
-
- b := make([]byte, 0, len(query))
- rqb := bytes.NewBuffer(b)
- j := 1
- for _, r := range query {
- if r == '?' {
- rqb.WriteRune('$')
- rqb.WriteString(strconv.Itoa(j))
- j++
- } else {
- rqb.WriteRune(r)
- }
- }
-
- return rqb.String()
-}
diff --git a/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/jmoiron/sqlx/doc.go
deleted file mode 100644
index e2b4e60..0000000
--- a/vendor/github.com/jmoiron/sqlx/doc.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Package sqlx provides general purpose extensions to database/sql.
-//
-// It is intended to seamlessly wrap database/sql and provide convenience
-// methods which are useful in the development of database driven applications.
-// None of the underlying database/sql methods are changed. Instead all extended
-// behavior is implemented through new methods defined on wrapper types.
-//
-// Additions include scanning into structs, named query support, rebinding
-// queries for different drivers, convenient shorthands for common error handling
-// and more.
-//
-package sqlx
diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go
deleted file mode 100644
index d753518..0000000
--- a/vendor/github.com/jmoiron/sqlx/named.go
+++ /dev/null
@@ -1,321 +0,0 @@
-package sqlx
-
-// Named Query Support
-//
-// * BindMap - bind query bindvars to map/struct args
-// * NamedExec, NamedQuery - named query w/ struct or map
-// * NamedStmt - a pre-compiled named query which is a prepared statement
-//
-// Internal Interfaces:
-//
-// * compileNamedQuery - rebind a named query, returning a query and list of names
-// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist
-//
-import (
- "database/sql"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "unicode"
-
- "github.com/jmoiron/sqlx/reflectx"
-)
-
-// NamedStmt is a prepared statement that executes named queries. Prepare it
-// how you would execute a NamedQuery, but pass in a struct or map when executing.
-type NamedStmt struct {
- Params []string
- QueryString string
- Stmt *Stmt
-}
-
-// Close closes the named statement.
-func (n *NamedStmt) Close() error {
- return n.Stmt.Close()
-}
-
-// Exec executes a named statement using the struct passed.
-func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return *new(sql.Result), err
- }
- return n.Stmt.Exec(args...)
-}
-
-// Query executes a named statement using the struct argument, returning rows.
-func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return nil, err
- }
- return n.Stmt.Query(args...)
-}
-
-// QueryRow executes a named statement against the database. Because sqlx cannot
-// create a *sql.Row with an error condition pre-set for binding errors, sqlx
-// returns a *sqlx.Row instead.
-func (n *NamedStmt) QueryRow(arg interface{}) *Row {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return &Row{err: err}
- }
- return n.Stmt.QueryRowx(args...)
-}
-
-// MustExec execs a NamedStmt, panicing on error
-func (n *NamedStmt) MustExec(arg interface{}) sql.Result {
- res, err := n.Exec(arg)
- if err != nil {
- panic(err)
- }
- return res
-}
-
-// Queryx using this NamedStmt
-func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {
- r, err := n.Query(arg)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, Mapper: n.Stmt.Mapper}, err
-}
-
-// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is
-// an alias for QueryRow.
-func (n *NamedStmt) QueryRowx(arg interface{}) *Row {
- return n.QueryRow(arg)
-}
-
-// Select using this NamedStmt
-func (n *NamedStmt) Select(dest interface{}, arg interface{}) error {
- rows, err := n.Query(arg)
- if err != nil {
- return err
- }
- // if something happens here, we want to make sure the rows are Closed
- defer rows.Close()
- return scanAll(rows, dest, false)
-}
-
-// Get using this NamedStmt
-func (n *NamedStmt) Get(dest interface{}, arg interface{}) error {
- r := n.QueryRowx(arg)
- return r.scanAny(dest, false)
-}
-
-// A union interface of preparer and binder, required to be able to prepare
-// named statements (as the bindtype must be determined).
-type namedPreparer interface {
- Preparer
- binder
-}
-
-func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {
- bindType := BindType(p.DriverName())
- q, args, err := compileNamedQuery([]byte(query), bindType)
- if err != nil {
- return nil, err
- }
- stmt, err := Preparex(p, q)
- if err != nil {
- return nil, err
- }
- return &NamedStmt{
- QueryString: q,
- Params: args,
- Stmt: stmt,
- }, nil
-}
-
-func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
- if maparg, ok := arg.(map[string]interface{}); ok {
- return bindMapArgs(names, maparg)
- }
- return bindArgs(names, arg, m)
-}
-
-// private interface to generate a list of interfaces from a given struct
-// type, given a list of names to pull out of the struct. Used by public
-// BindStruct interface.
-func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
- arglist := make([]interface{}, 0, len(names))
-
- // grab the indirected value of arg
- v := reflect.ValueOf(arg)
- for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {
- v = v.Elem()
- }
-
- fields := m.TraversalsByName(v.Type(), names)
- for i, t := range fields {
- if len(t) == 0 {
- return arglist, fmt.Errorf("could not find name %s in %#v", names[i], arg)
- }
- val := reflectx.FieldByIndexesReadOnly(v, t)
- arglist = append(arglist, val.Interface())
- }
-
- return arglist, nil
-}
-
-// like bindArgs, but for maps.
-func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {
- arglist := make([]interface{}, 0, len(names))
-
- for _, name := range names {
- val, ok := arg[name]
- if !ok {
- return arglist, fmt.Errorf("could not find name %s in %#v", name, arg)
- }
- arglist = append(arglist, val)
- }
- return arglist, nil
-}
-
-// bindStruct binds a named parameter query with fields from a struct argument.
-// The rules for binding field names to parameter names follow the same
-// conventions as for StructScan, including obeying the `db` struct tags.
-func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
- bound, names, err := compileNamedQuery([]byte(query), bindType)
- if err != nil {
- return "", []interface{}{}, err
- }
-
- arglist, err := bindArgs(names, arg, m)
- if err != nil {
- return "", []interface{}{}, err
- }
-
- return bound, arglist, nil
-}
-
-// bindMap binds a named parameter query with a map of arguments.
-func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
- bound, names, err := compileNamedQuery([]byte(query), bindType)
- if err != nil {
- return "", []interface{}{}, err
- }
-
- arglist, err := bindMapArgs(names, args)
- return bound, arglist, err
-}
-
-// -- Compilation of Named Queries
-
-// Allow digits and letters in bind params; additionally runes are
-// checked against underscores, meaning that bind params can have be
-// alphanumeric with underscores. Mind the difference between unicode
-// digits and numbers, where '5' is a digit but '五' is not.
-var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
-
-// FIXME: this function isn't safe for unicode named params, as a failing test
-// can testify. This is not a regression but a failure of the original code
-// as well. It should be modified to range over runes in a string rather than
-// bytes, even though this is less convenient and slower. Hopefully the
-// addition of the prepared NamedStmt (which will only do this once) will make
-// up for the slightly slower ad-hoc NamedExec/NamedQuery.
-
-// compile a NamedQuery into an unbound query (using the '?' bindvar) and
-// a list of names.
-func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {
- names = make([]string, 0, 10)
- rebound := make([]byte, 0, len(qs))
-
- inName := false
- last := len(qs) - 1
- currentVar := 1
- name := make([]byte, 0, 10)
-
- for i, b := range qs {
- // a ':' while we're in a name is an error
- if b == ':' {
- // if this is the second ':' in a '::' escape sequence, append a ':'
- if inName && i > 0 && qs[i-1] == ':' {
- rebound = append(rebound, ':')
- inName = false
- continue
- } else if inName {
- err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i))
- return query, names, err
- }
- inName = true
- name = []byte{}
- // if we're in a name, and this is an allowed character, continue
- } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_') && i != last {
- // append the byte to the name if we are in a name and not on the last byte
- name = append(name, b)
- // if we're in a name and it's not an allowed character, the name is done
- } else if inName {
- inName = false
- // if this is the final byte of the string and it is part of the name, then
- // make sure to add it to the name
- if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {
- name = append(name, b)
- }
- // add the string representation to the names list
- names = append(names, string(name))
- // add a proper bindvar for the bindType
- switch bindType {
- // oracle only supports named type bind vars even for positional
- case NAMED:
- rebound = append(rebound, ':')
- rebound = append(rebound, name...)
- case QUESTION, UNKNOWN:
- rebound = append(rebound, '?')
- case DOLLAR:
- rebound = append(rebound, '$')
- for _, b := range strconv.Itoa(currentVar) {
- rebound = append(rebound, byte(b))
- }
- currentVar++
- }
- // add this byte to string unless it was not part of the name
- if i != last {
- rebound = append(rebound, b)
- } else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {
- rebound = append(rebound, b)
- }
- } else {
- // this is a normal byte and should just go onto the rebound query
- rebound = append(rebound, b)
- }
- }
-
- return string(rebound), names, err
-}
-
-// Bind binds a struct or a map to a query with named parameters.
-func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {
- return bindNamedMapper(bindType, query, arg, mapper())
-}
-
-func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
- if maparg, ok := arg.(map[string]interface{}); ok {
- return bindMap(bindType, query, maparg)
- }
- return bindStruct(bindType, query, arg, m)
-}
-
-// NamedQuery binds a named query and then runs Query on the result using the
-// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
-// map[string]interface{} types.
-func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {
- q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
- if err != nil {
- return nil, err
- }
- return e.Queryx(q, args...)
-}
-
-// NamedExec uses BindStruct to get a query executable by the driver and
-// then runs Exec on the result. Returns an error from the binding
-// or the query excution itself.
-func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {
- q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
- if err != nil {
- return nil, err
- }
- return e.Exec(q, args...)
-}
diff --git a/vendor/github.com/jmoiron/sqlx/named_test.go b/vendor/github.com/jmoiron/sqlx/named_test.go
deleted file mode 100644
index d3459a8..0000000
--- a/vendor/github.com/jmoiron/sqlx/named_test.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package sqlx
-
-import (
- "database/sql"
- "testing"
-)
-
-func TestCompileQuery(t *testing.T) {
- table := []struct {
- Q, R, D, N string
- V []string
- }{
- // basic test for named parameters, invalid char ',' terminating
- {
- Q: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`,
- R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
- D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
- N: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`,
- V: []string{"name", "age", "first", "last"},
- },
- // This query tests a named parameter ending the string as well as numbers
- {
- Q: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`,
- R: `SELECT * FROM a WHERE first_name=? AND last_name=?`,
- D: `SELECT * FROM a WHERE first_name=$1 AND last_name=$2`,
- N: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`,
- V: []string{"name1", "name2"},
- },
- {
- Q: `SELECT "::foo" FROM a WHERE first_name=:name1 AND last_name=:name2`,
- R: `SELECT ":foo" FROM a WHERE first_name=? AND last_name=?`,
- D: `SELECT ":foo" FROM a WHERE first_name=$1 AND last_name=$2`,
- N: `SELECT ":foo" FROM a WHERE first_name=:name1 AND last_name=:name2`,
- V: []string{"name1", "name2"},
- },
- {
- Q: `SELECT 'a::b::c' || first_name, '::::ABC::_::' FROM person WHERE first_name=:first_name AND last_name=:last_name`,
- R: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=? AND last_name=?`,
- D: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=$1 AND last_name=$2`,
- N: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=:first_name AND last_name=:last_name`,
- V: []string{"first_name", "last_name"},
- },
- /* This unicode awareness test sadly fails, because of our byte-wise worldview.
- * We could certainly iterate by Rune instead, though it's a great deal slower,
- * it's probably the RightWay(tm)
- {
- Q: `INSERT INTO foo (a,b,c,d) VALUES (:あ, :b, :キコ, :名前)`,
- R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
- D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
- N: []string{"name", "age", "first", "last"},
- },
- */
- }
-
- for _, test := range table {
- qr, names, err := compileNamedQuery([]byte(test.Q), QUESTION)
- if err != nil {
- t.Error(err)
- }
- if qr != test.R {
- t.Errorf("expected %s, got %s", test.R, qr)
- }
- if len(names) != len(test.V) {
- t.Errorf("expected %#v, got %#v", test.V, names)
- } else {
- for i, name := range names {
- if name != test.V[i] {
- t.Errorf("expected %dth name to be %s, got %s", i+1, test.V[i], name)
- }
- }
- }
- qd, _, _ := compileNamedQuery([]byte(test.Q), DOLLAR)
- if qd != test.D {
- t.Errorf("\nexpected: `%s`\ngot: `%s`", test.D, qd)
- }
-
- qq, _, _ := compileNamedQuery([]byte(test.Q), NAMED)
- if qq != test.N {
- t.Errorf("\nexpected: `%s`\ngot: `%s`\n(len: %d vs %d)", test.N, qq, len(test.N), len(qq))
- }
- }
-}
-
-type Test struct {
- t *testing.T
-}
-
-func (t Test) Error(err error, msg ...interface{}) {
- if err != nil {
- if len(msg) == 0 {
- t.t.Error(err)
- } else {
- t.t.Error(msg...)
- }
- }
-}
-
-func (t Test) Errorf(err error, format string, args ...interface{}) {
- if err != nil {
- t.t.Errorf(format, args...)
- }
-}
-
-func TestNamedQueries(t *testing.T) {
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
- test := Test{t}
- var ns *NamedStmt
- var err error
-
- // Check that invalid preparations fail
- ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first:name")
- if err == nil {
- t.Error("Expected an error with invalid prepared statement.")
- }
-
- ns, err = db.PrepareNamed("invalid sql")
- if err == nil {
- t.Error("Expected an error with invalid prepared statement.")
- }
-
- // Check closing works as anticipated
- ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first_name")
- test.Error(err)
- err = ns.Close()
- test.Error(err)
-
- ns, err = db.PrepareNamed(`
- SELECT first_name, last_name, email
- FROM person WHERE first_name=:first_name AND email=:email`)
- test.Error(err)
-
- // test Queryx w/ uses Query
- p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"}
-
- rows, err := ns.Queryx(p)
- test.Error(err)
- for rows.Next() {
- var p2 Person
- rows.StructScan(&p2)
- if p.FirstName != p2.FirstName {
- t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName)
- }
- if p.LastName != p2.LastName {
- t.Errorf("got %s, expected %s", p.LastName, p2.LastName)
- }
- if p.Email != p2.Email {
- t.Errorf("got %s, expected %s", p.Email, p2.Email)
- }
- }
-
- // test Select
- people := make([]Person, 0, 5)
- err = ns.Select(&people, p)
- test.Error(err)
-
- if len(people) != 1 {
- t.Errorf("got %d results, expected %d", len(people), 1)
- }
- if p.FirstName != people[0].FirstName {
- t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName)
- }
- if p.LastName != people[0].LastName {
- t.Errorf("got %s, expected %s", p.LastName, people[0].LastName)
- }
- if p.Email != people[0].Email {
- t.Errorf("got %s, expected %s", p.Email, people[0].Email)
- }
-
- // test Exec
- ns, err = db.PrepareNamed(`
- INSERT INTO person (first_name, last_name, email)
- VALUES (:first_name, :last_name, :email)`)
- test.Error(err)
-
- js := Person{
- FirstName: "Julien",
- LastName: "Savea",
- Email: "jsavea@ab.co.nz",
- }
- _, err = ns.Exec(js)
- test.Error(err)
-
- // Make sure we can pull him out again
- p2 := Person{}
- db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email)
- if p2.Email != js.Email {
- t.Errorf("expected %s, got %s", js.Email, p2.Email)
- }
-
- // test Txn NamedStmts
- tx := db.MustBegin()
- txns := tx.NamedStmt(ns)
-
- // We're going to add Steven in this txn
- sl := Person{
- FirstName: "Steven",
- LastName: "Luatua",
- Email: "sluatua@ab.co.nz",
- }
-
- _, err = txns.Exec(sl)
- test.Error(err)
- // then rollback...
- tx.Rollback()
- // looking for Steven after a rollback should fail
- err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
- if err != sql.ErrNoRows {
- t.Errorf("expected no rows error, got %v", err)
- }
-
- // now do the same, but commit
- tx = db.MustBegin()
- txns = tx.NamedStmt(ns)
- _, err = txns.Exec(sl)
- test.Error(err)
- tx.Commit()
-
- // looking for Steven after a Commit should succeed
- err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
- test.Error(err)
- if p2.Email != sl.Email {
- t.Errorf("expected %s, got %s", sl.Email, p2.Email)
- }
-
- })
-}
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/README.md b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
deleted file mode 100644
index 76f1b5d..0000000
--- a/vendor/github.com/jmoiron/sqlx/reflectx/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# reflectx
-
-The sqlx package has special reflect needs. In particular, it needs to:
-
-* be able to map a name to a field
-* understand embedded structs
-* understand mapping names to fields by a particular tag
-* user specified name -> field mapping functions
-
-These behaviors mimic the behaviors by the standard library marshallers and also the
-behavior of standard Go accessors.
-
-The first two are amply taken care of by `Reflect.Value.FieldByName`, and the third is
-addressed by `Reflect.Value.FieldByNameFunc`, but these don't quite understand struct
-tags in the ways that are vital to most marshalers, and they are slow.
-
-This reflectx package extends reflect to achieve these goals.
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
deleted file mode 100644
index 847b760..0000000
--- a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
+++ /dev/null
@@ -1,250 +0,0 @@
-// Package reflect implements extensions to the standard reflect lib suitable
-// for implementing marshaling and unmarshaling packages. The main Mapper type
-// allows for Go-compatible named atribute access, including accessing embedded
-// struct attributes and the ability to use functions and struct tags to
-// customize field names.
-//
-package reflectx
-
-import "sync"
-
-import (
- "reflect"
- "runtime"
-)
-
-type fieldMap map[string][]int
-
-// Mapper is a general purpose mapper of names to struct fields. A Mapper
-// behaves like most marshallers, optionally obeying a field tag for name
-// mapping and a function to provide a basic mapping of fields to names.
-type Mapper struct {
- cache map[reflect.Type]fieldMap
- tagName string
- mapFunc func(string) string
- mutex sync.Mutex
-}
-
-// NewMapper returns a new mapper which optionally obeys the field tag given
-// by tagName. If tagName is the empty string, it is ignored.
-func NewMapper(tagName string) *Mapper {
- return &Mapper{
- cache: make(map[reflect.Type]fieldMap),
- tagName: tagName,
- }
-}
-
-// NewMapperFunc returns a new mapper which optionally obeys a field tag and
-// a struct field name mapper func given by f. Tags will take precedence, but
-// for any other field, the mapped name will be f(field.Name)
-func NewMapperFunc(tagName string, f func(string) string) *Mapper {
- return &Mapper{
- cache: make(map[reflect.Type]fieldMap),
- tagName: tagName,
- mapFunc: f,
- }
-}
-
-// TypeMap returns a mapping of field strings to int slices representing
-// the traversal down the struct to reach the field.
-func (m *Mapper) TypeMap(t reflect.Type) fieldMap {
- m.mutex.Lock()
- mapping, ok := m.cache[t]
- if !ok {
- mapping = getMapping(t, m.tagName, m.mapFunc)
- m.cache[t] = mapping
- }
- m.mutex.Unlock()
- return mapping
-}
-
-// FieldMap returns the mapper's mapping of field names to reflect values. Panics
-// if v's Kind is not Struct, or v is not Indirectable to a struct kind.
-func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value {
- v = reflect.Indirect(v)
- mustBe(v, reflect.Struct)
-
- r := map[string]reflect.Value{}
- nm := m.TypeMap(v.Type())
- for tagName, indexes := range nm {
- r[tagName] = FieldByIndexes(v, indexes)
- }
- return r
-}
-
-// FieldByName returns a field by the its mapped name as a reflect.Value.
-// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind.
-// Returns zero Value if the name is not found.
-func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value {
- v = reflect.Indirect(v)
- mustBe(v, reflect.Struct)
-
- nm := m.TypeMap(v.Type())
- traversal, ok := nm[name]
- if !ok {
- return *new(reflect.Value)
- }
- return FieldByIndexes(v, traversal)
-}
-
-// FieldsByName returns a slice of values corresponding to the slice of names
-// for the value. Panics if v's Kind is not Struct or v is not Indirectable
-// to a struct Kind. Returns zero Value for each name not found.
-func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
- v = reflect.Indirect(v)
- mustBe(v, reflect.Struct)
-
- nm := m.TypeMap(v.Type())
-
- vals := make([]reflect.Value, 0, len(names))
- for _, name := range names {
- traversal, ok := nm[name]
- if !ok {
- vals = append(vals, *new(reflect.Value))
- } else {
- vals = append(vals, FieldByIndexes(v, traversal))
- }
- }
- return vals
-}
-
-// Traversals by name returns a slice of int slices which represent the struct
-// traversals for each mapped name. Panics if t is not a struct or Indirectable
-// to a struct. Returns empty int slice for each name not found.
-func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
- t = Deref(t)
- mustBe(t, reflect.Struct)
- nm := m.TypeMap(t)
-
- r := make([][]int, 0, len(names))
- for _, name := range names {
- traversal, ok := nm[name]
- if !ok {
- r = append(r, []int{})
- } else {
- r = append(r, traversal)
- }
- }
- return r
-}
-
-// FieldByIndexes returns a value for a particular struct traversal.
-func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
- for _, i := range indexes {
- v = reflect.Indirect(v).Field(i)
- // if this is a pointer, it's possible it is nil
- if v.Kind() == reflect.Ptr && v.IsNil() {
- alloc := reflect.New(Deref(v.Type()))
- v.Set(alloc)
- }
- }
- return v
-}
-
-// FieldByIndexesReadOnly returns a value for a particular struct traversal,
-// but is not concerned with allocating nil pointers because the value is
-// going to be used for reading and not setting.
-func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
- for _, i := range indexes {
- v = reflect.Indirect(v).Field(i)
- }
- return v
-}
-
-// Deref is Indirect for reflect.Types
-func Deref(t reflect.Type) reflect.Type {
- if t.Kind() == reflect.Ptr {
- t = t.Elem()
- }
- return t
-}
-
-// -- helpers & utilities --
-
-type Kinder interface {
- Kind() reflect.Kind
-}
-
-// mustBe checks a value against a kind, panicing with a reflect.ValueError
-// if the kind isn't that which is required.
-func mustBe(v Kinder, expected reflect.Kind) {
- k := v.Kind()
- if k != expected {
- panic(&reflect.ValueError{Method: methodName(), Kind: k})
- }
-}
-
-// methodName is returns the caller of the function calling methodName
-func methodName() string {
- pc, _, _, _ := runtime.Caller(2)
- f := runtime.FuncForPC(pc)
- if f == nil {
- return "unknown method"
- }
- return f.Name()
-}
-
-type typeQueue struct {
- t reflect.Type
- p []int
-}
-
-// A copying append that creates a new slice each time.
-func apnd(is []int, i int) []int {
- x := make([]int, len(is)+1)
- for p, n := range is {
- x[p] = n
- }
- x[len(x)-1] = i
- return x
-}
-
-// getMapping returns a mapping for the t type, using the tagName and the mapFunc
-// to determine the canonical names of fields.
-func getMapping(t reflect.Type, tagName string, mapFunc func(string) string) fieldMap {
- queue := []typeQueue{}
- queue = append(queue, typeQueue{Deref(t), []int{}})
- m := fieldMap{}
- for len(queue) != 0 {
- // pop the first item off of the queue
- tq := queue[0]
- queue = queue[1:]
- // iterate through all of its fields
- for fieldPos := 0; fieldPos < tq.t.NumField(); fieldPos++ {
- f := tq.t.Field(fieldPos)
-
- name := f.Tag.Get(tagName)
- if len(name) == 0 {
- if mapFunc != nil {
- name = mapFunc(f.Name)
- } else {
- name = f.Name
- }
- }
-
- // if the name is "-", disabled via a tag, skip it
- if name == "-" {
- continue
- }
-
- // skip unexported fields
- if len(f.PkgPath) != 0 {
- continue
- }
-
- // bfs search of anonymous embedded structs
- if f.Anonymous {
- queue = append(queue, typeQueue{Deref(f.Type), apnd(tq.p, fieldPos)})
- continue
- }
-
- // if the name is shadowed by an earlier identical name in the search, skip it
- if _, ok := m[name]; ok {
- continue
- }
- // add it to the map at the current position
- m[name] = apnd(tq.p, fieldPos)
- }
- }
- return m
-}
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go
deleted file mode 100644
index e0a40f6..0000000
--- a/vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package reflectx
-
-import (
- "reflect"
- "strings"
- "testing"
-)
-
-func ival(v reflect.Value) int {
- return v.Interface().(int)
-}
-
-func TestBasic(t *testing.T) {
- type Foo struct {
- A int
- B int
- C int
- }
-
- f := Foo{1, 2, 3}
- fv := reflect.ValueOf(f)
- m := NewMapper("")
-
- v := m.FieldByName(fv, "A")
- if ival(v) != f.A {
- t.Errorf("Expecting %d, got %d", ival(v), f.A)
- }
- v = m.FieldByName(fv, "B")
- if ival(v) != f.B {
- t.Errorf("Expecting %d, got %d", f.B, ival(v))
- }
- v = m.FieldByName(fv, "C")
- if ival(v) != f.C {
- t.Errorf("Expecting %d, got %d", f.C, ival(v))
- }
-}
-
-func TestEmbedded(t *testing.T) {
- type Foo struct {
- A int
- }
-
- type Bar struct {
- Foo
- B int
- }
-
- type Baz struct {
- A int
- Bar
- }
-
- m := NewMapper("")
-
- z := Baz{}
- z.A = 1
- z.B = 2
- z.Bar.Foo.A = 3
- zv := reflect.ValueOf(z)
-
- v := m.FieldByName(zv, "A")
- if ival(v) != z.A {
- t.Errorf("Expecting %d, got %d", ival(v), z.A)
- }
- v = m.FieldByName(zv, "B")
- if ival(v) != z.B {
- t.Errorf("Expecting %d, got %d", ival(v), z.B)
- }
-}
-
-func TestMapping(t *testing.T) {
- type Person struct {
- ID int
- Name string
- WearsGlasses bool `db:"wears_glasses"`
- }
-
- m := NewMapperFunc("db", strings.ToLower)
- p := Person{1, "Jason", true}
- mapping := m.TypeMap(reflect.TypeOf(p))
-
- for _, key := range []string{"id", "name", "wears_glasses"} {
- if _, ok := mapping[key]; !ok {
- t.Errorf("Expecting to find key %s in mapping but did not.", key)
- }
- }
-
- type SportsPerson struct {
- Weight int
- Age int
- Person
- }
- s := SportsPerson{Weight: 100, Age: 30, Person: p}
- mapping = m.TypeMap(reflect.TypeOf(s))
- for _, key := range []string{"id", "name", "wears_glasses", "weight", "age"} {
- if _, ok := mapping[key]; !ok {
- t.Errorf("Expecting to find key %s in mapping but did not.", key)
- }
-
- }
-
- type RugbyPlayer struct {
- Position int
- IsIntense bool `db:"is_intense"`
- IsAllBlack bool `db:"-"`
- SportsPerson
- }
- r := RugbyPlayer{12, true, false, s}
- mapping = m.TypeMap(reflect.TypeOf(r))
- for _, key := range []string{"id", "name", "wears_glasses", "weight", "age", "position", "is_intense"} {
- if _, ok := mapping[key]; !ok {
- t.Errorf("Expecting to find key %s in mapping but did not.", key)
- }
- }
-
- if _, ok := mapping["isallblack"]; ok {
- t.Errorf("Expecting to ignore `IsAllBlack` field")
- }
-
- type EmbeddedLiteral struct {
- Embedded struct {
- Person string
- Position int
- }
- IsIntense bool
- }
-
- e := EmbeddedLiteral{}
- mapping = m.TypeMap(reflect.TypeOf(e))
- //fmt.Printf("Mapping: %#v\n", mapping)
-
- //f := FieldByIndexes(reflect.ValueOf(e), mapping["isintense"])
- //fmt.Println(f, f.Interface())
-
- //tbn := m.TraversalsByName(reflect.TypeOf(e), []string{"isintense"})
- //fmt.Printf("%#v\n", tbn)
-
-}
-
-type E1 struct {
- A int
-}
-type E2 struct {
- E1
- B int
-}
-type E3 struct {
- E2
- C int
-}
-type E4 struct {
- E3
- D int
-}
-
-func BenchmarkFieldNameL1(b *testing.B) {
- e4 := E4{D: 1}
- for i := 0; i < b.N; i++ {
- v := reflect.ValueOf(e4)
- f := v.FieldByName("D")
- if f.Interface().(int) != 1 {
- b.Fatal("Wrong value.")
- }
- }
-}
-
-func BenchmarkFieldNameL4(b *testing.B) {
- e4 := E4{}
- e4.A = 1
- for i := 0; i < b.N; i++ {
- v := reflect.ValueOf(e4)
- f := v.FieldByName("A")
- if f.Interface().(int) != 1 {
- b.Fatal("Wrong value.")
- }
- }
-}
-
-func BenchmarkFieldPosL1(b *testing.B) {
- e4 := E4{D: 1}
- for i := 0; i < b.N; i++ {
- v := reflect.ValueOf(e4)
- f := v.Field(1)
- if f.Interface().(int) != 1 {
- b.Fatal("Wrong value.")
- }
- }
-}
-
-func BenchmarkFieldPosL4(b *testing.B) {
- e4 := E4{}
- e4.A = 1
- for i := 0; i < b.N; i++ {
- v := reflect.ValueOf(e4)
- f := v.Field(0)
- f = f.Field(0)
- f = f.Field(0)
- f = f.Field(0)
- if f.Interface().(int) != 1 {
- b.Fatal("Wrong value.")
- }
- }
-}
-
-func BenchmarkFieldByIndexL4(b *testing.B) {
- e4 := E4{}
- e4.A = 1
- idx := []int{0, 0, 0, 0}
- for i := 0; i < b.N; i++ {
- v := reflect.ValueOf(e4)
- f := FieldByIndexes(v, idx)
- if f.Interface().(int) != 1 {
- b.Fatal("Wrong value.")
- }
- }
-}
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go
deleted file mode 100644
index d195705..0000000
--- a/vendor/github.com/jmoiron/sqlx/sqlx.go
+++ /dev/null
@@ -1,986 +0,0 @@
-package sqlx
-
-import (
- "database/sql"
- "database/sql/driver"
- "errors"
- "fmt"
-
- "io/ioutil"
- "path/filepath"
- "reflect"
- "strings"
-
- "github.com/jmoiron/sqlx/reflectx"
-)
-
-// Although the NameMapper is convenient, in practice it should not
-// be relied on except for application code. If you are writing a library
-// that uses sqlx, you should be aware that the name mappings you expect
-// can be overridded by your user's application.
-
-// NameMapper is used to map column names to struct field names. By default,
-// it uses strings.ToLower to lowercase struct field names. It can be set
-// to whatever you want, but it is encouraged to be set before sqlx is used
-// as name-to-field mappings are cached after first use on a type.
-var NameMapper = strings.ToLower
-var origMapper = reflect.ValueOf(NameMapper)
-
-// Rather than creating on init, this is created when necessary so that
-// importers have time to customize the NameMapper.
-var mpr *reflectx.Mapper
-
-// mapper returns a valid mapper using the configured NameMapper func.
-func mapper() *reflectx.Mapper {
- if mpr == nil {
- mpr = reflectx.NewMapperFunc("db", NameMapper)
- } else if origMapper != reflect.ValueOf(NameMapper) {
- // if NameMapper has changed, create a new mapper
- mpr = reflectx.NewMapperFunc("db", NameMapper)
- origMapper = reflect.ValueOf(NameMapper)
- }
- return mpr
-}
-
-// isScannable takes the reflect.Type and the actual dest value and returns
-// whether or not it's Scannable. Something is scannable if:
-// * it is not a struct
-// * it implements sql.Scanner
-// * it has no exported fields
-func isScannable(t reflect.Type) bool {
- if reflect.PtrTo(t).Implements(_scannerInterface) {
- return true
- }
- if t.Kind() != reflect.Struct {
- return true
- }
-
- // it's not important that we use the right mapper for this particular object,
- // we're only concerned on how many exported fields this struct has
- m := mapper()
- if len(m.TypeMap(t)) == 0 {
- return true
- }
- return false
-}
-
-// ColScanner is an interface used by MapScan and SliceScan
-type ColScanner interface {
- Columns() ([]string, error)
- Scan(dest ...interface{}) error
- Err() error
-}
-
-// Queryer is an interface used by Get and Select
-type Queryer interface {
- Query(query string, args ...interface{}) (*sql.Rows, error)
- Queryx(query string, args ...interface{}) (*Rows, error)
- QueryRowx(query string, args ...interface{}) *Row
-}
-
-// Execer is an interface used by MustExec and LoadFile
-type Execer interface {
- Exec(query string, args ...interface{}) (sql.Result, error)
-}
-
-// Binder is an interface for something which can bind queries (Tx, DB)
-type binder interface {
- DriverName() string
- Rebind(string) string
- BindNamed(string, interface{}) (string, []interface{}, error)
-}
-
-// Ext is a union interface which can bind, query, and exec, used by
-// NamedQuery and NamedExec.
-type Ext interface {
- binder
- Queryer
- Execer
-}
-
-// Preparer is an interface used by Preparex.
-type Preparer interface {
- Prepare(query string) (*sql.Stmt, error)
-}
-
-// determine if any of our extensions are unsafe
-func isUnsafe(i interface{}) bool {
- switch i.(type) {
- case Row:
- return i.(Row).unsafe
- case *Row:
- return i.(*Row).unsafe
- case Rows:
- return i.(Rows).unsafe
- case *Rows:
- return i.(*Rows).unsafe
- case Stmt:
- return i.(Stmt).unsafe
- case qStmt:
- return i.(qStmt).Stmt.unsafe
- case *qStmt:
- return i.(*qStmt).Stmt.unsafe
- case DB:
- return i.(DB).unsafe
- case *DB:
- return i.(*DB).unsafe
- case Tx:
- return i.(Tx).unsafe
- case *Tx:
- return i.(*Tx).unsafe
- case sql.Rows, *sql.Rows:
- return false
- default:
- return false
- }
-}
-
-func mapperFor(i interface{}) *reflectx.Mapper {
- switch i.(type) {
- case DB:
- return i.(DB).Mapper
- case *DB:
- return i.(*DB).Mapper
- case Tx:
- return i.(Tx).Mapper
- case *Tx:
- return i.(*Tx).Mapper
- default:
- return mapper()
- }
-}
-
-var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
-var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
-
-// Row is a reimplementation of sql.Row in order to gain access to the underlying
-// sql.Rows.Columns() data, necessary for StructScan.
-type Row struct {
- err error
- unsafe bool
- rows *sql.Rows
- Mapper *reflectx.Mapper
-}
-
-// Scan is a fixed implementation of sql.Row.Scan, which does not discard the
-// underlying error from the internal rows object if it exists.
-func (r *Row) Scan(dest ...interface{}) error {
- if r.err != nil {
- return r.err
- }
-
- // TODO(bradfitz): for now we need to defensively clone all
- // []byte that the driver returned (not permitting
- // *RawBytes in Rows.Scan), since we're about to close
- // the Rows in our defer, when we return from this function.
- // the contract with the driver.Next(...) interface is that it
- // can return slices into read-only temporary memory that's
- // only valid until the next Scan/Close. But the TODO is that
- // for a lot of drivers, this copy will be unnecessary. We
- // should provide an optional interface for drivers to
- // implement to say, "don't worry, the []bytes that I return
- // from Next will not be modified again." (for instance, if
- // they were obtained from the network anyway) But for now we
- // don't care.
- defer r.rows.Close()
- for _, dp := range dest {
- if _, ok := dp.(*sql.RawBytes); ok {
- return errors.New("sql: RawBytes isn't allowed on Row.Scan")
- }
- }
-
- if !r.rows.Next() {
- if err := r.rows.Err(); err != nil {
- return err
- }
- return sql.ErrNoRows
- }
- err := r.rows.Scan(dest...)
- if err != nil {
- return err
- }
- // Make sure the query can be processed to completion with no errors.
- if err := r.rows.Close(); err != nil {
- return err
- }
- return nil
-}
-
-// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually
-// returned by Row.Scan()
-func (r *Row) Columns() ([]string, error) {
- if r.err != nil {
- return []string{}, r.err
- }
- return r.rows.Columns()
-}
-
-// Err returns the error encountered while scanning.
-func (r *Row) Err() error {
- return r.err
-}
-
-// DB is a wrapper around sql.DB which keeps track of the driverName upon Open,
-// used mostly to automatically bind named queries using the right bindvars.
-type DB struct {
- *sql.DB
- driverName string
- unsafe bool
- Mapper *reflectx.Mapper
-}
-
-// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The
-// driverName of the original database is required for named query support.
-func NewDb(db *sql.DB, driverName string) *DB {
- return &DB{DB: db, driverName: driverName, Mapper: mapper()}
-}
-
-// DriverName returns the driverName passed to the Open function for this DB.
-func (db *DB) DriverName() string {
- return db.driverName
-}
-
-// Open is the same as sql.Open, but returns an *sqlx.DB instead.
-func Open(driverName, dataSourceName string) (*DB, error) {
- db, err := sql.Open(driverName, dataSourceName)
- if err != nil {
- return nil, err
- }
- return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err
-}
-
-// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error.
-func MustOpen(driverName, dataSourceName string) *DB {
- db, err := Open(driverName, dataSourceName)
- if err != nil {
- panic(err)
- }
- return db
-}
-
-// MapperFunc sets a new mapper for this db using the default sqlx struct tag
-// and the provided mapper function.
-func (db *DB) MapperFunc(mf func(string) string) {
- db.Mapper = reflectx.NewMapperFunc("db", mf)
-}
-
-// Rebind transforms a query from QUESTION to the DB driver's bindvar type.
-func (db *DB) Rebind(query string) string {
- return Rebind(BindType(db.driverName), query)
-}
-
-// Unsafe returns a version of DB which will silently succeed to scan when
-// columns in the SQL result have no fields in the destination struct.
-// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its
-// safety behavior.
-func (db *DB) Unsafe() *DB {
- return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper}
-}
-
-// BindNamed binds a query using the DB driver's bindvar type.
-func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
- return BindNamed(BindType(db.driverName), query, arg)
-}
-
-// NamedQuery using this DB.
-func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) {
- return NamedQuery(db, query, arg)
-}
-
-// NamedExec using this DB.
-func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) {
- return NamedExec(db, query, arg)
-}
-
-// Select using this DB.
-func (db *DB) Select(dest interface{}, query string, args ...interface{}) error {
- return Select(db, dest, query, args...)
-}
-
-// Get using this DB.
-func (db *DB) Get(dest interface{}, query string, args ...interface{}) error {
- return Get(db, dest, query, args...)
-}
-
-// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead
-// of an *sql.Tx.
-func (db *DB) MustBegin() *Tx {
- tx, err := db.Beginx()
- if err != nil {
- panic(err)
- }
- return tx
-}
-
-// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx.
-func (db *DB) Beginx() (*Tx, error) {
- tx, err := db.DB.Begin()
- if err != nil {
- return nil, err
- }
- return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
-}
-
-// Queryx queries the database and returns an *sqlx.Rows.
-func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) {
- r, err := db.DB.Query(query, args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
-}
-
-// QueryRowx queries the database and returns an *sqlx.Row.
-func (db *DB) QueryRowx(query string, args ...interface{}) *Row {
- rows, err := db.DB.Query(query, args...)
- return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
-}
-
-// MustExec (panic) runs MustExec using this database.
-func (db *DB) MustExec(query string, args ...interface{}) sql.Result {
- return MustExec(db, query, args...)
-}
-
-// Preparex returns an sqlx.Stmt instead of a sql.Stmt
-func (db *DB) Preparex(query string) (*Stmt, error) {
- return Preparex(db, query)
-}
-
-// PrepareNamed returns an sqlx.NamedStmt
-func (db *DB) PrepareNamed(query string) (*NamedStmt, error) {
- return prepareNamed(db, query)
-}
-
-// Tx is an sqlx wrapper around sql.Tx with extra functionality
-type Tx struct {
- *sql.Tx
- driverName string
- unsafe bool
- Mapper *reflectx.Mapper
-}
-
-// DriverName returns the driverName used by the DB which began this transaction.
-func (tx *Tx) DriverName() string {
- return tx.driverName
-}
-
-// Rebind a query within a transaction's bindvar type.
-func (tx *Tx) Rebind(query string) string {
- return Rebind(BindType(tx.driverName), query)
-}
-
-// Unsafe returns a version of Tx which will silently succeed to scan when
-// columns in the SQL result have no fields in the destination struct.
-func (tx *Tx) Unsafe() *Tx {
- return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper}
-}
-
-// BindNamed binds a query within a transaction's bindvar type.
-func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
- return BindNamed(BindType(tx.driverName), query, arg)
-}
-
-// NamedQuery within a transaction.
-func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) {
- return NamedQuery(tx, query, arg)
-}
-
-// NamedExec a named query within a transaction.
-func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) {
- return NamedExec(tx, query, arg)
-}
-
-// Select within a transaction.
-func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error {
- return Select(tx, dest, query, args...)
-}
-
-// Queryx within a transaction.
-func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) {
- r, err := tx.Tx.Query(query, args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
-}
-
-// QueryRowx within a transaction.
-func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row {
- rows, err := tx.Tx.Query(query, args...)
- return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
-}
-
-// Get within a transaction.
-func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error {
- return Get(tx, dest, query, args...)
-}
-
-// MustExec runs MustExec within a transaction.
-func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result {
- return MustExec(tx, query, args...)
-}
-
-// Preparex a statement within a transaction.
-func (tx *Tx) Preparex(query string) (*Stmt, error) {
- return Preparex(tx, query)
-}
-
-// Stmtx returns a version of the prepared statement which runs within a transaction. Provided
-// stmt can be either *sql.Stmt or *sqlx.Stmt.
-func (tx *Tx) Stmtx(stmt interface{}) *Stmt {
- var st sql.Stmt
- var s *sql.Stmt
- switch stmt.(type) {
- case sql.Stmt:
- st = stmt.(sql.Stmt)
- s = &st
- case Stmt:
- s = stmt.(Stmt).Stmt
- case *Stmt:
- s = stmt.(*Stmt).Stmt
- case *sql.Stmt:
- s = stmt.(*sql.Stmt)
- }
- return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper}
-}
-
-// NamedStmt returns a version of the prepared statement which runs within a transaction.
-func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt {
- return &NamedStmt{
- QueryString: stmt.QueryString,
- Params: stmt.Params,
- Stmt: tx.Stmtx(stmt.Stmt),
- }
-}
-
-// PrepareNamed returns an sqlx.NamedStmt
-func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) {
- return prepareNamed(tx, query)
-}
-
-// Stmt is an sqlx wrapper around sql.Stmt with extra functionality
-type Stmt struct {
- *sql.Stmt
- unsafe bool
- Mapper *reflectx.Mapper
-}
-
-// Unsafe returns a version of Stmt which will silently succeed to scan when
-// columns in the SQL result have no fields in the destination struct.
-func (s *Stmt) Unsafe() *Stmt {
- return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper}
-}
-
-// Select using the prepared statement.
-func (s *Stmt) Select(dest interface{}, args ...interface{}) error {
- return Select(&qStmt{*s}, dest, "", args...)
-}
-
-// Get using the prepared statement.
-func (s *Stmt) Get(dest interface{}, args ...interface{}) error {
- return Get(&qStmt{*s}, dest, "", args...)
-}
-
-// MustExec (panic) using this statement. Note that the query portion of the error
-// output will be blank, as Stmt does not expose its query.
-func (s *Stmt) MustExec(args ...interface{}) sql.Result {
- return MustExec(&qStmt{*s}, "", args...)
-}
-
-// QueryRowx using this statement.
-func (s *Stmt) QueryRowx(args ...interface{}) *Row {
- qs := &qStmt{*s}
- return qs.QueryRowx("", args...)
-}
-
-// Queryx using this statement.
-func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) {
- qs := &qStmt{*s}
- return qs.Queryx("", args...)
-}
-
-// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by
-// implementing those interfaces and ignoring the `query` argument.
-type qStmt struct{ Stmt }
-
-func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) {
- return q.Stmt.Query(args...)
-}
-
-func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) {
- r, err := q.Stmt.Query(args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
-}
-
-func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row {
- rows, err := q.Stmt.Query(args...)
- return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
-}
-
-func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) {
- return q.Stmt.Exec(args...)
-}
-
-// Rows is a wrapper around sql.Rows which caches costly reflect operations
-// during a looped StructScan
-type Rows struct {
- *sql.Rows
- unsafe bool
- Mapper *reflectx.Mapper
- // these fields cache memory use for a rows during iteration w/ structScan
- started bool
- fields [][]int
- values []interface{}
-}
-
-// SliceScan using this Rows.
-func (r *Rows) SliceScan() ([]interface{}, error) {
- return SliceScan(r)
-}
-
-// MapScan using this Rows.
-func (r *Rows) MapScan(dest map[string]interface{}) error {
- return MapScan(r, dest)
-}
-
-// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct.
-// Use this and iterate over Rows manually when the memory load of Select() might be
-// prohibitive. *Rows.StructScan caches the reflect work of matching up column
-// positions to fields to avoid that overhead per scan, which means it is not safe
-// to run StructScan on the same Rows instance with different struct types.
-func (r *Rows) StructScan(dest interface{}) error {
- v := reflect.ValueOf(dest)
-
- if v.Kind() != reflect.Ptr {
- return errors.New("must pass a pointer, not a value, to StructScan destination")
- }
-
- v = reflect.Indirect(v)
-
- if !r.started {
- columns, err := r.Columns()
- if err != nil {
- return err
- }
- m := r.Mapper
-
- r.fields = m.TraversalsByName(v.Type(), columns)
- // if we are not unsafe and are missing fields, return an error
- if f, err := missingFields(r.fields); err != nil && !r.unsafe {
- return fmt.Errorf("missing destination name %s", columns[f])
- }
- r.values = make([]interface{}, len(columns))
- r.started = true
- }
-
- err := fieldsByTraversal(v, r.fields, r.values, true)
- if err != nil {
- return err
- }
- // scan into the struct field pointers and append to our results
- err = r.Scan(r.values...)
- if err != nil {
- return err
- }
- return r.Err()
-}
-
-// Connect to a database and verify with a ping.
-func Connect(driverName, dataSourceName string) (*DB, error) {
- db, err := Open(driverName, dataSourceName)
- if err != nil {
- return db, err
- }
- err = db.Ping()
- return db, err
-}
-
-// MustConnect connects to a database and panics on error.
-func MustConnect(driverName, dataSourceName string) *DB {
- db, err := Connect(driverName, dataSourceName)
- if err != nil {
- panic(err)
- }
- return db
-}
-
-// Preparex prepares a statement.
-func Preparex(p Preparer, query string) (*Stmt, error) {
- s, err := p.Prepare(query)
- if err != nil {
- return nil, err
- }
- return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
-}
-
-// Select executes a query using the provided Queryer, and StructScans each row
-// into dest, which must be a slice. If the slice elements are scannable, then
-// the result set must have only one column. Otherwise, StructScan is used.
-// The *sql.Rows are closed automatically.
-func Select(q Queryer, dest interface{}, query string, args ...interface{}) error {
- rows, err := q.Queryx(query, args...)
- if err != nil {
- return err
- }
- // if something happens here, we want to make sure the rows are Closed
- defer rows.Close()
- return scanAll(rows, dest, false)
-}
-
-// Get does a QueryRow using the provided Queryer, and scans the resulting row
-// to dest. If dest is scannable, the result must only have one column. Otherwise,
-// StructScan is used. Get will return sql.ErrNoRows like row.Scan would.
-func Get(q Queryer, dest interface{}, query string, args ...interface{}) error {
- r := q.QueryRowx(query, args...)
- return r.scanAny(dest, false)
-}
-
-// LoadFile exec's every statement in a file (as a single call to Exec).
-// LoadFile may return a nil *sql.Result if errors are encountered locating or
-// reading the file at path. LoadFile reads the entire file into memory, so it
-// is not suitable for loading large data dumps, but can be useful for initializing
-// schemas or loading indexes.
-//
-// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
-// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
-// this by requiring something with DriverName() and then attempting to split the
-// queries will be difficult to get right, and its current driver-specific behavior
-// is deemed at least not complex in its incorrectness.
-func LoadFile(e Execer, path string) (*sql.Result, error) {
- realpath, err := filepath.Abs(path)
- if err != nil {
- return nil, err
- }
- contents, err := ioutil.ReadFile(realpath)
- if err != nil {
- return nil, err
- }
- res, err := e.Exec(string(contents))
- return &res, err
-}
-
-// MustExec execs the query using e and panics if there was an error.
-func MustExec(e Execer, query string, args ...interface{}) sql.Result {
- res, err := e.Exec(query, args...)
- if err != nil {
- panic(err)
- }
- return res
-}
-
-// SliceScan using this Rows.
-func (r *Row) SliceScan() ([]interface{}, error) {
- return SliceScan(r)
-}
-
-// MapScan using this Rows.
-func (r *Row) MapScan(dest map[string]interface{}) error {
- return MapScan(r, dest)
-}
-
-func (r *Row) scanAny(dest interface{}, structOnly bool) error {
- if r.err != nil {
- return r.err
- }
- defer r.rows.Close()
-
- v := reflect.ValueOf(dest)
- if v.Kind() != reflect.Ptr {
- return errors.New("must pass a pointer, not a value, to StructScan destination")
- }
- if v.IsNil() {
- return errors.New("nil pointer passed to StructScan destination")
- }
-
- base := reflectx.Deref(v.Type())
- scannable := isScannable(base)
-
- if structOnly && scannable {
- return structOnlyError(base)
- }
-
- columns, err := r.Columns()
- if err != nil {
- return err
- }
-
- if scannable && len(columns) > 1 {
- return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns))
- }
-
- if scannable {
- return r.Scan(dest)
- }
-
- m := r.Mapper
-
- fields := m.TraversalsByName(v.Type(), columns)
- // if we are not unsafe and are missing fields, return an error
- if f, err := missingFields(fields); err != nil && !r.unsafe {
- return fmt.Errorf("missing destination name %s", columns[f])
- }
- values := make([]interface{}, len(columns))
-
- err = fieldsByTraversal(v, fields, values, true)
- if err != nil {
- return err
- }
- // scan into the struct field pointers and append to our results
- return r.Scan(values...)
-}
-
-// StructScan a single Row into dest.
-func (r *Row) StructScan(dest interface{}) error {
- return r.scanAny(dest, true)
-}
-
-// SliceScan a row, returning a []interface{} with values similar to MapScan.
-// This function is primarly intended for use where the number of columns
-// is not known. Because you can pass an []interface{} directly to Scan,
-// it's recommended that you do that as it will not have to allocate new
-// slices per row.
-func SliceScan(r ColScanner) ([]interface{}, error) {
- // ignore r.started, since we needn't use reflect for anything.
- columns, err := r.Columns()
- if err != nil {
- return []interface{}{}, err
- }
-
- values := make([]interface{}, len(columns))
- for i := range values {
- values[i] = new(interface{})
- }
-
- err = r.Scan(values...)
-
- if err != nil {
- return values, err
- }
-
- for i := range columns {
- values[i] = *(values[i].(*interface{}))
- }
-
- return values, r.Err()
-}
-
-// MapScan scans a single Row into the dest map[string]interface{}.
-// Use this to get results for SQL that might not be under your control
-// (for instance, if you're building an interface for an SQL server that
-// executes SQL from input). Please do not use this as a primary interface!
-// This will modify the map sent to it in place, so reuse the same map with
-// care. Columns which occur more than once in the result will overwrite
-// eachother!
-func MapScan(r ColScanner, dest map[string]interface{}) error {
- // ignore r.started, since we needn't use reflect for anything.
- columns, err := r.Columns()
- if err != nil {
- return err
- }
-
- values := make([]interface{}, len(columns))
- for i := range values {
- values[i] = new(interface{})
- }
-
- err = r.Scan(values...)
- if err != nil {
- return err
- }
-
- for i, column := range columns {
- dest[column] = *(values[i].(*interface{}))
- }
-
- return r.Err()
-}
-
-type rowsi interface {
- Close() error
- Columns() ([]string, error)
- Err() error
- Next() bool
- Scan(...interface{}) error
-}
-
-// structOnlyError returns an error appropriate for type when a non-scannable
-// struct is expected but something else is given
-func structOnlyError(t reflect.Type) error {
- isStruct := t.Kind() == reflect.Struct
- isScanner := reflect.PtrTo(t).Implements(_scannerInterface)
- if !isStruct {
- return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind())
- }
- if isScanner {
- return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name())
- }
- return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name())
-}
-
-// scanAll scans all rows into a destination, which must be a slice of any
-// type. If the destination slice type is a Struct, then StructScan will be
-// used on each row. If the destination is some other kind of base type, then
-// each row must only have one column which can scan into that type. This
-// allows you to do something like:
-//
-// rows, _ := db.Query("select id from people;")
-// var ids []int
-// scanAll(rows, &ids, false)
-//
-// and ids will be a list of the id results. I realize that this is a desirable
-// interface to expose to users, but for now it will only be exposed via changes
-// to `Get` and `Select`. The reason that this has been implemented like this is
-// this is the only way to not duplicate reflect work in the new API while
-// maintaining backwards compatibility.
-func scanAll(rows rowsi, dest interface{}, structOnly bool) error {
- var v, vp reflect.Value
-
- value := reflect.ValueOf(dest)
-
- // json.Unmarshal returns errors for these
- if value.Kind() != reflect.Ptr {
- return errors.New("must pass a pointer, not a value, to StructScan destination")
- }
- if value.IsNil() {
- return errors.New("nil pointer passed to StructScan destination")
- }
- direct := reflect.Indirect(value)
-
- slice, err := baseType(value.Type(), reflect.Slice)
- if err != nil {
- return err
- }
-
- isPtr := slice.Elem().Kind() == reflect.Ptr
- base := reflectx.Deref(slice.Elem())
- scannable := isScannable(base)
-
- if structOnly && scannable {
- return structOnlyError(base)
- }
-
- columns, err := rows.Columns()
- if err != nil {
- return err
- }
-
- // if it's a base type make sure it only has 1 column; if not return an error
- if scannable && len(columns) > 1 {
- return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns))
- }
-
- if !scannable {
- var values []interface{}
- var m *reflectx.Mapper
-
- switch rows.(type) {
- case *Rows:
- m = rows.(*Rows).Mapper
- default:
- m = mapper()
- }
-
- fields := m.TraversalsByName(base, columns)
- // if we are not unsafe and are missing fields, return an error
- if f, err := missingFields(fields); err != nil && !isUnsafe(rows) {
- return fmt.Errorf("missing destination name %s", columns[f])
- }
- values = make([]interface{}, len(columns))
-
- for rows.Next() {
- // create a new struct type (which returns PtrTo) and indirect it
- vp = reflect.New(base)
- v = reflect.Indirect(vp)
-
- err = fieldsByTraversal(v, fields, values, true)
-
- // scan into the struct field pointers and append to our results
- err = rows.Scan(values...)
- if err != nil {
- return err
- }
-
- if isPtr {
- direct.Set(reflect.Append(direct, vp))
- } else {
- direct.Set(reflect.Append(direct, v))
- }
- }
- } else {
- for rows.Next() {
- vp = reflect.New(base)
- err = rows.Scan(vp.Interface())
- // append
- if isPtr {
- direct.Set(reflect.Append(direct, vp))
- } else {
- direct.Set(reflect.Append(direct, reflect.Indirect(vp)))
- }
- }
- }
-
- return rows.Err()
-}
-
-// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately
-// it doesn't really feel like it's named properly. There is an incongruency
-// between this and the way that StructScan (which might better be ScanStruct
-// anyway) works on a rows object.
-
-// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice.
-// StructScan will scan in the entire rows result, so if you need do not want to
-// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan.
-// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default.
-func StructScan(rows rowsi, dest interface{}) error {
- return scanAll(rows, dest, true)
-
-}
-
-// reflect helpers
-
-func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) {
- t = reflectx.Deref(t)
- if t.Kind() != expected {
- return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind())
- }
- return t, nil
-}
-
-// fieldsByName fills a values interface with fields from the passed value based
-// on the traversals in int. If ptrs is true, return addresses instead of values.
-// We write this instead of using FieldsByName to save allocations and map lookups
-// when iterating over many rows. Empty traversals will get an interface pointer.
-// Because of the necessity of requesting ptrs or values, it's considered a bit too
-// specialized for inclusion in reflectx itself.
-func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error {
- v = reflect.Indirect(v)
- if v.Kind() != reflect.Struct {
- return errors.New("argument not a struct")
- }
-
- for i, traversal := range traversals {
- if len(traversal) == 0 {
- values[i] = new(interface{})
- continue
- }
- f := reflectx.FieldByIndexes(v, traversal)
- if ptrs {
- values[i] = f.Addr().Interface()
- } else {
- values[i] = f.Interface()
- }
- }
- return nil
-}
-
-func missingFields(transversals [][]int) (field int, err error) {
- for i, t := range transversals {
- if len(t) == 0 {
- return i, errors.New("missing field")
- }
- }
- return 0, nil
-}
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_test.go b/vendor/github.com/jmoiron/sqlx/sqlx_test.go
deleted file mode 100644
index bc13485..0000000
--- a/vendor/github.com/jmoiron/sqlx/sqlx_test.go
+++ /dev/null
@@ -1,1311 +0,0 @@
-// The following environment variables, if set, will be used:
-//
-// * SQLX_SQLITE_DSN
-// * SQLX_POSTGRES_DSN
-// * SQLX_MYSQL_DSN
-//
-// Set any of these variables to 'skip' to skip them. Note that for MySQL,
-// the string '?parseTime=True' will be appended to the DSN if it's not there
-// already.
-//
-package sqlx
-
-import (
- "database/sql"
- "fmt"
- "log"
- "os"
- "strings"
- "testing"
- "time"
-
- _ "github.com/go-sql-driver/mysql"
- "github.com/jmoiron/sqlx/reflectx"
- _ "github.com/lib/pq"
- _ "github.com/mattn/go-sqlite3"
-)
-
-/* compile time checks that Db, Tx, Stmt (qStmt) implement expected interfaces */
-var _, _ Ext = &DB{}, &Tx{}
-var _, _ ColScanner = &Row{}, &Rows{}
-var _ Queryer = &qStmt{}
-var _ Execer = &qStmt{}
-
-var TestPostgres = true
-var TestSqlite = true
-var TestMysql = true
-
-var sldb *DB
-var pgdb *DB
-var mysqldb *DB
-var active = []*DB{}
-
-func init() {
- ConnectAll()
-}
-
-func ConnectAll() {
- var err error
-
- pgdsn := os.Getenv("SQLX_POSTGRES_DSN")
- mydsn := os.Getenv("SQLX_MYSQL_DSN")
- sqdsn := os.Getenv("SQLX_SQLITE_DSN")
-
- TestPostgres = pgdsn != "skip"
- TestMysql = mydsn != "skip"
- TestSqlite = sqdsn != "skip"
-
- if TestPostgres {
- pgdb, err = Connect("postgres", pgdsn)
- if err != nil {
- fmt.Printf("Disabling PG tests:\n %v\n", err)
- TestPostgres = false
- }
- } else {
- fmt.Println("Disabling Postgres tests.")
- }
-
- if TestMysql {
- mysqldb, err = Connect("mysql", mydsn)
- if err != nil {
- fmt.Printf("Disabling MySQL tests:\n %v", err)
- TestMysql = false
- }
- } else {
- fmt.Println("Disabling MySQL tests.")
- }
-
- if TestSqlite {
- sldb, err = Connect("sqlite3", sqdsn)
- if err != nil {
- fmt.Printf("Disabling SQLite:\n %v", err)
- TestSqlite = false
- }
- } else {
- fmt.Println("Disabling SQLite tests.")
- }
-}
-
-type Schema struct {
- create string
- drop string
-}
-
-func (s Schema) Postgres() (string, string) {
- return s.create, s.drop
-}
-
-func (s Schema) MySQL() (string, string) {
- return strings.Replace(s.create, `"`, "`", -1), s.drop
-}
-
-func (s Schema) Sqlite3() (string, string) {
- return strings.Replace(s.create, `now()`, `CURRENT_TIMESTAMP`, -1), s.drop
-}
-
-var defaultSchema = Schema{
- create: `
-CREATE TABLE person (
- first_name text,
- last_name text,
- email text,
- added_at timestamp default now()
-);
-
-CREATE TABLE place (
- country text,
- city text NULL,
- telcode integer
-);
-
-CREATE TABLE capplace (
- "COUNTRY" text,
- "CITY" text NULL,
- "TELCODE" integer
-);
-
-CREATE TABLE nullperson (
- first_name text NULL,
- last_name text NULL,
- email text NULL
-);
-`,
- drop: `
-drop table person;
-drop table place;
-drop table capplace;
-drop table nullperson;
-`,
-}
-
-type Person struct {
- FirstName string `db:"first_name"`
- LastName string `db:"last_name"`
- Email string
- AddedAt time.Time `db:"added_at"`
-}
-
-type Person2 struct {
- FirstName sql.NullString `db:"first_name"`
- LastName sql.NullString `db:"last_name"`
- Email sql.NullString
-}
-
-type Place struct {
- Country string
- City sql.NullString
- TelCode int
-}
-
-type PlacePtr struct {
- Country string
- City *string
- TelCode int
-}
-
-type PersonPlace struct {
- Person
- Place
-}
-
-type PersonPlacePtr struct {
- *Person
- *Place
-}
-
-type EmbedConflict struct {
- FirstName string `db:"first_name"`
- Person
-}
-
-type Loop1 struct {
- Person
-}
-
-type Loop2 struct {
- Loop1
-}
-
-type Loop3 struct {
- Loop2
-}
-
-type SliceMember struct {
- Country string
- City sql.NullString
- TelCode int
- People []Person `db:"-"`
- Addresses []Place `db:"-"`
-}
-
-// Note that because of field map caching, we need a new type here
-// if we've used Place already soemwhere in sqlx
-type CPlace Place
-
-func MultiExec(e Execer, query string) {
- stmts := strings.Split(query, ";\n")
- if len(strings.Trim(stmts[len(stmts)-1], " \n\t\r")) == 0 {
- stmts = stmts[:len(stmts)-1]
- }
- for _, s := range stmts {
- _, err := e.Exec(s)
- if err != nil {
- fmt.Println(err, s)
- }
- }
-}
-
-func RunWithSchema(schema Schema, t *testing.T, test func(db *DB, t *testing.T)) {
- runner := func(db *DB, t *testing.T, create, drop string) {
- defer func() {
- MultiExec(db, drop)
- }()
-
- MultiExec(db, create)
- test(db, t)
- }
-
- if TestPostgres {
- create, drop := schema.Postgres()
- runner(pgdb, t, create, drop)
- }
- if TestSqlite {
- create, drop := schema.Sqlite3()
- runner(sldb, t, create, drop)
- }
- if TestMysql {
- create, drop := schema.MySQL()
- runner(mysqldb, t, create, drop)
- }
-}
-
-func loadDefaultFixture(db *DB, t *testing.T) {
- tx := db.MustBegin()
- tx.MustExec(tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "Jason", "Moiron", "jmoiron@jmoiron.net")
- tx.MustExec(tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "John", "Doe", "johndoeDNE@gmail.net")
- tx.MustExec(tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1")
- tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852")
- tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65")
- if db.DriverName() == "mysql" {
- tx.MustExec(tx.Rebind("INSERT INTO capplace (`COUNTRY`, `TELCODE`) VALUES (?, ?)"), "Sarf Efrica", "27")
- } else {
- tx.MustExec(tx.Rebind("INSERT INTO capplace (\"COUNTRY\", \"TELCODE\") VALUES (?, ?)"), "Sarf Efrica", "27")
- }
- tx.Commit()
-}
-
-// Test a new backwards compatible feature, that missing scan destinations
-// will silently scan into sql.RawText rather than failing/panicing
-func TestMissingNames(t *testing.T) {
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
- type PersonPlus struct {
- FirstName string `db:"first_name"`
- LastName string `db:"last_name"`
- Email string
- //AddedAt time.Time `db:"added_at"`
- }
-
- // test Select first
- pps := []PersonPlus{}
- // pps lacks added_at destination
- err := db.Select(&pps, "SELECT * FROM person")
- if err == nil {
- t.Error("Expected missing name from Select to fail, but it did not.")
- }
-
- // test Get
- pp := PersonPlus{}
- err = db.Get(&pp, "SELECT * FROM person LIMIT 1")
- if err == nil {
- t.Error("Expected missing name Get to fail, but it did not.")
- }
-
- // test naked StructScan
- pps = []PersonPlus{}
- rows, err := db.Query("SELECT * FROM person LIMIT 1")
- if err != nil {
- t.Fatal(err)
- }
- rows.Next()
- err = StructScan(rows, &pps)
- if err == nil {
- t.Error("Expected missing name in StructScan to fail, but it did not.")
- }
- rows.Close()
-
- // now try various things with unsafe set.
- db = db.Unsafe()
- pps = []PersonPlus{}
- err = db.Select(&pps, "SELECT * FROM person")
- if err != nil {
- t.Error(err)
- }
-
- // test Get
- pp = PersonPlus{}
- err = db.Get(&pp, "SELECT * FROM person LIMIT 1")
- if err != nil {
- t.Error(err)
- }
-
- // test naked StructScan
- pps = []PersonPlus{}
- rowsx, err := db.Queryx("SELECT * FROM person LIMIT 1")
- if err != nil {
- t.Fatal(err)
- }
- rowsx.Next()
- err = StructScan(rowsx, &pps)
- if err != nil {
- t.Error(err)
- }
- rowsx.Close()
-
- })
-}
-
-func TestEmbeddedStructs(t *testing.T) {
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
- peopleAndPlaces := []PersonPlace{}
- err := db.Select(
- &peopleAndPlaces,
- `SELECT person.*, place.* FROM
- person natural join place`)
- if err != nil {
- t.Fatal(err)
- }
- for _, pp := range peopleAndPlaces {
- if len(pp.Person.FirstName) == 0 {
- t.Errorf("Expected non zero lengthed first name.")
- }
- if len(pp.Place.Country) == 0 {
- t.Errorf("Expected non zero lengthed country.")
- }
- }
-
- // test embedded structs with StructScan
- rows, err := db.Queryx(
- `SELECT person.*, place.* FROM
- person natural join place`)
- if err != nil {
- t.Error(err)
- }
-
- perp := PersonPlace{}
- rows.Next()
- err = rows.StructScan(&perp)
- if err != nil {
- t.Error(err)
- }
-
- if len(perp.Person.FirstName) == 0 {
- t.Errorf("Expected non zero lengthed first name.")
- }
- if len(perp.Place.Country) == 0 {
- t.Errorf("Expected non zero lengthed country.")
- }
-
- rows.Close()
-
- // test the same for embedded pointer structs
- peopleAndPlacesPtrs := []PersonPlacePtr{}
- err = db.Select(
- &peopleAndPlacesPtrs,
- `SELECT person.*, place.* FROM
- person natural join place`)
- if err != nil {
- t.Fatal(err)
- }
- for _, pp := range peopleAndPlacesPtrs {
- if len(pp.Person.FirstName) == 0 {
- t.Errorf("Expected non zero lengthed first name.")
- }
- if len(pp.Place.Country) == 0 {
- t.Errorf("Expected non zero lengthed country.")
- }
- }
-
- // test "deep nesting"
- l3s := []Loop3{}
- err = db.Select(&l3s, `select * from person`)
- if err != nil {
- t.Fatal(err)
- }
- for _, l3 := range l3s {
- if len(l3.Loop2.Loop1.Person.FirstName) == 0 {
- t.Errorf("Expected non zero lengthed first name.")
- }
- }
-
- // test "embed conflicts"
- ec := []EmbedConflict{}
- err = db.Select(&ec, `select * from person`)
- // I'm torn between erroring here or having some kind of working behavior
- // in order to allow for more flexibility in destination structs
- if err != nil {
- t.Errorf("Was not expecting an error on embed conflicts.")
- }
- })
-}
-
-func TestSelectSliceMapTime(t *testing.T) {
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
- rows, err := db.Queryx("SELECT * FROM person")
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- _, err := rows.SliceScan()
- if err != nil {
- t.Error(err)
- }
- }
-
- rows, err = db.Queryx("SELECT * FROM person")
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- m := map[string]interface{}{}
- err := rows.MapScan(m)
- if err != nil {
- t.Error(err)
- }
- }
-
- })
-}
-
-func TestNilReceiver(t *testing.T) {
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
- var p *Person
- err := db.Get(p, "SELECT * FROM person LIMIT 1")
- if err == nil {
- t.Error("Expected error when getting into nil struct ptr.")
- }
- var pp *[]Person
- err = db.Select(pp, "SELECT * FROM person")
- if err == nil {
- t.Error("Expected an error when selecting into nil slice ptr.")
- }
- })
-}
-
-func TestNamedQuery(t *testing.T) {
- var schema = Schema{
- create: `
- CREATE TABLE person (
- first_name text NULL,
- last_name text NULL,
- email text NULL
- );
- CREATE TABLE jsperson (
- "FIRST" text NULL,
- last_name text NULL,
- "EMAIL" text NULL
- );`,
- drop: `
- drop table person;
- drop table jsperson;
- `,
- }
-
- RunWithSchema(schema, t, func(db *DB, t *testing.T) {
- type Person struct {
- FirstName sql.NullString `db:"first_name"`
- LastName sql.NullString `db:"last_name"`
- Email sql.NullString
- }
-
- p := Person{
- FirstName: sql.NullString{"ben", true},
- LastName: sql.NullString{"doe", true},
- Email: sql.NullString{"ben@doe.com", true},
- }
-
- q1 := `INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)`
- _, err := db.NamedExec(q1, p)
- if err != nil {
- log.Fatal(err)
- }
-
- p2 := &Person{}
- rows, err := db.NamedQuery("SELECT * FROM person WHERE first_name=:first_name", p)
- if err != nil {
- log.Fatal(err)
- }
- for rows.Next() {
- err = rows.StructScan(p2)
- if err != nil {
- t.Error(err)
- }
- if p2.FirstName.String != "ben" {
- t.Error("Expected first name of `ben`, got " + p2.FirstName.String)
- }
- if p2.LastName.String != "doe" {
- t.Error("Expected first name of `doe`, got " + p2.LastName.String)
- }
- }
-
- // these are tests for #73; they verify that named queries work if you've
- // changed the db mapper. This code checks both NamedQuery "ad-hoc" style
- // queries and NamedStmt queries, which use different code paths internally.
- old := *db.Mapper
-
- type JsonPerson struct {
- FirstName sql.NullString `json:"FIRST"`
- LastName sql.NullString `json:"last_name"`
- Email sql.NullString
- }
-
- jp := JsonPerson{
- FirstName: sql.NullString{"ben", true},
- LastName: sql.NullString{"smith", true},
- Email: sql.NullString{"ben@smith.com", true},
- }
-
- db.Mapper = reflectx.NewMapperFunc("json", strings.ToUpper)
-
- // prepare queries for case sensitivity to test our ToUpper function.
- // postgres and sqlite accept "", but mysql uses ``; since Go's multi-line
- // strings are `` we use "" by default and swap out for MySQL
- pdb := func(s string, db *DB) string {
- if db.DriverName() == "mysql" {
- return strings.Replace(s, `"`, "`", -1)
- }
- return s
- }
-
- q1 = `INSERT INTO jsperson ("FIRST", last_name, "EMAIL") VALUES (:FIRST, :last_name, :EMAIL)`
- _, err = db.NamedExec(pdb(q1, db), jp)
- if err != nil {
- t.Fatal(err, db.DriverName())
- }
-
- // Checks that a person pulled out of the db matches the one we put in
- check := func(t *testing.T, rows *Rows) {
- jp = JsonPerson{}
- for rows.Next() {
- err = rows.StructScan(&jp)
- if err != nil {
- t.Error(err)
- }
- if jp.FirstName.String != "ben" {
- t.Errorf("Expected first name of `ben`, got `%s` (%s) ", jp.FirstName.String, db.DriverName())
- }
- if jp.LastName.String != "smith" {
- t.Errorf("Expected LastName of `smith`, got `%s` (%s)", jp.LastName.String, db.DriverName())
- }
- if jp.Email.String != "ben@smith.com" {
- t.Errorf("Expected first name of `doe`, got `%s` (%s)", jp.Email.String, db.DriverName())
- }
- }
- }
-
- ns, err := db.PrepareNamed(pdb(`
- SELECT * FROM jsperson
- WHERE
- "FIRST"=:FIRST AND
- last_name=:last_name AND
- "EMAIL"=:EMAIL
- `, db))
-
- if err != nil {
- t.Fatal(err)
- }
- rows, err = ns.Queryx(jp)
- if err != nil {
- t.Fatal(err)
- }
-
- check(t, rows)
-
- // Check exactly the same thing, but with db.NamedQuery, which does not go
- // through the PrepareNamed/NamedStmt path.
- rows, err = db.NamedQuery(pdb(`
- SELECT * FROM jsperson
- WHERE
- "FIRST"=:FIRST AND
- last_name=:last_name AND
- "EMAIL"=:EMAIL
- `, db), jp)
- if err != nil {
- t.Fatal(err)
- }
-
- check(t, rows)
-
- db.Mapper = &old
-
- })
-}
-
-func TestNilInserts(t *testing.T) {
- var schema = Schema{
- create: `
- CREATE TABLE tt (
- id integer,
- value text NULL DEFAULT NULL
- );`,
- drop: "drop table tt;",
- }
-
- RunWithSchema(schema, t, func(db *DB, t *testing.T) {
- type TT struct {
- Id int
- Value *string
- }
- var v, v2 TT
- r := db.Rebind
-
- db.MustExec(r(`INSERT INTO tt (id) VALUES (1)`))
- db.Get(&v, r(`SELECT * FROM tt`))
- if v.Id != 1 {
- t.Errorf("Expecting id of 1, got %v", v.Id)
- }
- if v.Value != nil {
- t.Errorf("Expecting NULL to map to nil, got %s", v.Value)
- }
-
- v.Id = 2
- // NOTE: this incidentally uncovered a bug which was that named queries with
- // pointer destinations would not work if the passed value here was not addressable,
- // as reflectx.FieldByIndexes attempts to allocate nil pointer receivers for
- // writing. This was fixed by creating & using the reflectx.FieldByIndexesReadOnly
- // function. This next line is important as it provides the only coverage for this.
- db.NamedExec(`INSERT INTO tt (id, value) VALUES (:id, :value)`, v)
-
- db.Get(&v2, r(`SELECT * FROM tt WHERE id=2`))
- if v.Id != v2.Id {
- t.Errorf("%v != %v", v.Id, v2.Id)
- }
- if v2.Value != nil {
- t.Errorf("Expecting NULL to map to nil, got %s", v.Value)
- }
- })
-}
-
-func TestScanError(t *testing.T) {
- var schema = Schema{
- create: `
- CREATE TABLE kv (
- k text,
- v integer
- );`,
- drop: `drop table kv;`,
- }
-
- RunWithSchema(schema, t, func(db *DB, t *testing.T) {
- type WrongTypes struct {
- K int
- V string
- }
- _, err := db.Exec(db.Rebind("INSERT INTO kv (k, v) VALUES (?, ?)"), "hi", 1)
- if err != nil {
- t.Error(err)
- }
-
- rows, err := db.Queryx("SELECT * FROM kv")
- if err != nil {
- t.Error(err)
- }
- for rows.Next() {
- var wt WrongTypes
- err := rows.StructScan(&wt)
- if err == nil {
- t.Errorf("%s: Scanning wrong types into keys should have errored.", db.DriverName())
- }
- }
- })
-}
-
-// FIXME: this function is kinda big but it slows things down to be constantly
-// loading and reloading the schema..
-
-func TestUsage(t *testing.T) {
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
- slicemembers := []SliceMember{}
- err := db.Select(&slicemembers, "SELECT * FROM place ORDER BY telcode ASC")
- if err != nil {
- t.Fatal(err)
- }
-
- people := []Person{}
-
- err = db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")
- if err != nil {
- t.Fatal(err)
- }
-
- jason, john := people[0], people[1]
- if jason.FirstName != "Jason" {
- t.Errorf("Expecting FirstName of Jason, got %s", jason.FirstName)
- }
- if jason.LastName != "Moiron" {
- t.Errorf("Expecting LastName of Moiron, got %s", jason.LastName)
- }
- if jason.Email != "jmoiron@jmoiron.net" {
- t.Errorf("Expecting Email of jmoiron@jmoiron.net, got %s", jason.Email)
- }
- if john.FirstName != "John" || john.LastName != "Doe" || john.Email != "johndoeDNE@gmail.net" {
- t.Errorf("John Doe's person record not what expected: Got %v\n", john)
- }
-
- jason = Person{}
- err = db.Get(&jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Jason")
-
- if err != nil {
- t.Fatal(err)
- }
- if jason.FirstName != "Jason" {
- t.Errorf("Expecting to get back Jason, but got %v\n", jason.FirstName)
- }
-
- err = db.Get(&jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Foobar")
- if err == nil {
- t.Errorf("Expecting an error, got nil\n")
- }
- if err != sql.ErrNoRows {
- t.Errorf("Expected sql.ErrNoRows, got %v\n", err)
- }
-
- // The following tests check statement reuse, which was actually a problem
- // due to copying being done when creating Stmt's which was eventually removed
- stmt1, err := db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?"))
- if err != nil {
- t.Fatal(err)
- }
- jason = Person{}
-
- row := stmt1.QueryRowx("DoesNotExist")
- row.Scan(&jason)
- row = stmt1.QueryRowx("DoesNotExist")
- row.Scan(&jason)
-
- err = stmt1.Get(&jason, "DoesNotExist User")
- if err == nil {
- t.Error("Expected an error")
- }
- err = stmt1.Get(&jason, "DoesNotExist User 2")
-
- stmt2, err := db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?"))
- if err != nil {
- t.Fatal(err)
- }
- jason = Person{}
- tx, err := db.Beginx()
- if err != nil {
- t.Fatal(err)
- }
- tstmt2 := tx.Stmtx(stmt2)
- row2 := tstmt2.QueryRowx("Jason")
- err = row2.StructScan(&jason)
- if err != nil {
- t.Error(err)
- }
- tx.Commit()
-
- places := []*Place{}
- err = db.Select(&places, "SELECT telcode FROM place ORDER BY telcode ASC")
- usa, singsing, honkers := places[0], places[1], places[2]
-
- if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 {
- t.Errorf("Expected integer telcodes to work, got %#v", places)
- }
-
- placesptr := []PlacePtr{}
- err = db.Select(&placesptr, "SELECT * FROM place ORDER BY telcode ASC")
- if err != nil {
- t.Error(err)
- }
- //fmt.Printf("%#v\n%#v\n%#v\n", placesptr[0], placesptr[1], placesptr[2])
-
- // if you have null fields and use SELECT *, you must use sql.Null* in your struct
- // this test also verifies that you can use either a []Struct{} or a []*Struct{}
- places2 := []Place{}
- err = db.Select(&places2, "SELECT * FROM place ORDER BY telcode ASC")
- usa, singsing, honkers = &places2[0], &places2[1], &places2[2]
-
- // this should return a type error that &p is not a pointer to a struct slice
- p := Place{}
- err = db.Select(&p, "SELECT * FROM place ORDER BY telcode ASC")
- if err == nil {
- t.Errorf("Expected an error, argument to select should be a pointer to a struct slice")
- }
-
- // this should be an error
- pl := []Place{}
- err = db.Select(pl, "SELECT * FROM place ORDER BY telcode ASC")
- if err == nil {
- t.Errorf("Expected an error, argument to select should be a pointer to a struct slice, not a slice.")
- }
-
- if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 {
- t.Errorf("Expected integer telcodes to work, got %#v", places)
- }
-
- stmt, err := db.Preparex(db.Rebind("SELECT country, telcode FROM place WHERE telcode > ? ORDER BY telcode ASC"))
- if err != nil {
- t.Error(err)
- }
-
- places = []*Place{}
- err = stmt.Select(&places, 10)
- if len(places) != 2 {
- t.Error("Expected 2 places, got 0.")
- }
- if err != nil {
- t.Fatal(err)
- }
- singsing, honkers = places[0], places[1]
- if singsing.TelCode != 65 || honkers.TelCode != 852 {
- t.Errorf("Expected the right telcodes, got %#v", places)
- }
-
- rows, err := db.Queryx("SELECT * FROM place")
- if err != nil {
- t.Fatal(err)
- }
- place := Place{}
- for rows.Next() {
- err = rows.StructScan(&place)
- if err != nil {
- t.Fatal(err)
- }
- }
-
- rows, err = db.Queryx("SELECT * FROM place")
- if err != nil {
- t.Fatal(err)
- }
- m := map[string]interface{}{}
- for rows.Next() {
- err = rows.MapScan(m)
- if err != nil {
- t.Fatal(err)
- }
- _, ok := m["country"]
- if !ok {
- t.Errorf("Expected key `country` in map but could not find it (%#v)\n", m)
- }
- }
-
- rows, err = db.Queryx("SELECT * FROM place")
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- s, err := rows.SliceScan()
- if err != nil {
- t.Error(err)
- }
- if len(s) != 3 {
- t.Errorf("Expected 3 columns in result, got %d\n", len(s))
- }
- }
-
- // test advanced querying
- // test that NamedExec works with a map as well as a struct
- _, err = db.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first, :last, :email)", map[string]interface{}{
- "first": "Bin",
- "last": "Smuth",
- "email": "bensmith@allblacks.nz",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // ensure that if the named param happens right at the end it still works
- // ensure that NamedQuery works with a map[string]interface{}
- rows, err = db.NamedQuery("SELECT * FROM person WHERE first_name=:first", map[string]interface{}{"first": "Bin"})
- if err != nil {
- t.Fatal(err)
- }
-
- ben := &Person{}
- for rows.Next() {
- err = rows.StructScan(ben)
- if err != nil {
- t.Fatal(err)
- }
- if ben.FirstName != "Bin" {
- t.Fatal("Expected first name of `Bin`, got " + ben.FirstName)
- }
- if ben.LastName != "Smuth" {
- t.Fatal("Expected first name of `Smuth`, got " + ben.LastName)
- }
- }
-
- ben.FirstName = "Ben"
- ben.LastName = "Smith"
- ben.Email = "binsmuth@allblacks.nz"
-
- // Insert via a named query using the struct
- _, err = db.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", ben)
-
- if err != nil {
- t.Fatal(err)
- }
-
- rows, err = db.NamedQuery("SELECT * FROM person WHERE first_name=:first_name", ben)
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- err = rows.StructScan(ben)
- if err != nil {
- t.Fatal(err)
- }
- if ben.FirstName != "Ben" {
- t.Fatal("Expected first name of `Ben`, got " + ben.FirstName)
- }
- if ben.LastName != "Smith" {
- t.Fatal("Expected first name of `Smith`, got " + ben.LastName)
- }
- }
- // ensure that Get does not panic on emppty result set
- person := &Person{}
- err = db.Get(person, "SELECT * FROM person WHERE first_name=$1", "does-not-exist")
- if err == nil {
- t.Fatal("Should have got an error for Get on non-existant row.")
- }
-
- // lets test prepared statements some more
-
- stmt, err = db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?"))
- if err != nil {
- t.Fatal(err)
- }
- rows, err = stmt.Queryx("Ben")
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- err = rows.StructScan(ben)
- if err != nil {
- t.Fatal(err)
- }
- if ben.FirstName != "Ben" {
- t.Fatal("Expected first name of `Ben`, got " + ben.FirstName)
- }
- if ben.LastName != "Smith" {
- t.Fatal("Expected first name of `Smith`, got " + ben.LastName)
- }
- }
-
- john = Person{}
- stmt, err = db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?"))
- if err != nil {
- t.Error(err)
- }
- err = stmt.Get(&john, "John")
- if err != nil {
- t.Error(err)
- }
-
- // test name mapping
- // THIS USED TO WORK BUT WILL NO LONGER WORK.
- db.MapperFunc(strings.ToUpper)
- rsa := CPlace{}
- err = db.Get(&rsa, "SELECT * FROM capplace;")
- if err != nil {
- t.Error(err, "in db:", db.DriverName())
- }
- db.MapperFunc(strings.ToLower)
-
- // create a copy and change the mapper, then verify the copy behaves
- // differently from the original.
- dbCopy := NewDb(db.DB, db.DriverName())
- dbCopy.MapperFunc(strings.ToUpper)
- err = dbCopy.Get(&rsa, "SELECT * FROM capplace;")
- if err != nil {
- fmt.Println(db.DriverName())
- t.Error(err)
- }
-
- err = db.Get(&rsa, "SELECT * FROM cappplace;")
- if err == nil {
- t.Error("Expected no error, got ", err)
- }
-
- // test base type slices
- var sdest []string
- rows, err = db.Queryx("SELECT email FROM person ORDER BY email ASC;")
- if err != nil {
- t.Error(err)
- }
- err = scanAll(rows, &sdest, false)
- if err != nil {
- t.Error(err)
- }
-
- // test Get with base types
- var count int
- err = db.Get(&count, "SELECT count(*) FROM person;")
- if err != nil {
- t.Error(err)
- }
- if count != len(sdest) {
- t.Errorf("Expected %d == %d (count(*) vs len(SELECT ..)", count, len(sdest))
- }
-
- // test Get and Select with time.Time, #84
- var addedAt time.Time
- err = db.Get(&addedAt, "SELECT added_at FROM person LIMIT 1;")
- if err != nil {
- t.Error(err)
- }
-
- var addedAts []time.Time
- err = db.Select(&addedAts, "SELECT added_at FROM person;")
- if err != nil {
- t.Error(err)
- }
-
- // test it on a double pointer
- var pcount *int
- err = db.Get(&pcount, "SELECT count(*) FROM person;")
- if err != nil {
- t.Error(err)
- }
- if *pcount != count {
- t.Error("expected %d = %d", *pcount, count)
- }
-
- // test Select...
- sdest = []string{}
- err = db.Select(&sdest, "SELECT first_name FROM person ORDER BY first_name ASC;")
- if err != nil {
- t.Error(err)
- }
- expected := []string{"Ben", "Bin", "Jason", "John"}
- for i, got := range sdest {
- if got != expected[i] {
- t.Errorf("Expected %d result to be %s, but got %s", i, expected[i], got)
- }
- }
-
- var nsdest []sql.NullString
- err = db.Select(&nsdest, "SELECT city FROM place ORDER BY city ASC")
- if err != nil {
- t.Error(err)
- }
- for _, val := range nsdest {
- if val.Valid && val.String != "New York" {
- t.Errorf("expected single valid result to be `New York`, but got %s", val.String)
- }
- }
- })
-}
-
-type Product struct {
- ProductID int
-}
-
-// tests that sqlx will not panic when the wrong driver is passed because
-// of an automatic nil dereference in sqlx.Open(), which was fixed.
-func TestDoNotPanicOnConnect(t *testing.T) {
- _, err := Connect("bogus", "hehe")
- if err == nil {
- t.Errorf("Should return error when using bogus driverName")
- }
-}
-func TestRebind(t *testing.T) {
- q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
- q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)`
-
- s1 := Rebind(DOLLAR, q1)
- s2 := Rebind(DOLLAR, q2)
-
- if s1 != `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)` {
- t.Errorf("q1 failed")
- }
-
- if s2 != `INSERT INTO foo (a, b, c) VALUES ($1, $2, "foo"), ("Hi", $3, $4)` {
- t.Errorf("q2 failed")
- }
-}
-
-func TestBindMap(t *testing.T) {
- // Test that it works..
- q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)`
- am := map[string]interface{}{
- "name": "Jason Moiron",
- "age": 30,
- "first": "Jason",
- "last": "Moiron",
- }
-
- bq, args, _ := bindMap(QUESTION, q1, am)
- expect := `INSERT INTO foo (a, b, c, d) VALUES (?, ?, ?, ?)`
- if bq != expect {
- t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect)
- }
-
- if args[0].(string) != "Jason Moiron" {
- t.Errorf("Expected `Jason Moiron`, got %v\n", args[0])
- }
-
- if args[1].(int) != 30 {
- t.Errorf("Expected 30, got %v\n", args[1])
- }
-
- if args[2].(string) != "Jason" {
- t.Errorf("Expected Jason, got %v\n", args[2])
- }
-
- if args[3].(string) != "Moiron" {
- t.Errorf("Expected Moiron, got %v\n", args[3])
- }
-}
-
-func TestBindStruct(t *testing.T) {
- var err error
-
- q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)`
-
- type tt struct {
- Name string
- Age int
- First string
- Last string
- }
-
- type tt2 struct {
- Field1 string `db:"field_1"`
- Field2 string `db:"field_2"`
- }
-
- type tt3 struct {
- tt2
- Name string
- }
-
- am := tt{"Jason Moiron", 30, "Jason", "Moiron"}
-
- bq, args, _ := bindStruct(QUESTION, q1, am, mapper())
- expect := `INSERT INTO foo (a, b, c, d) VALUES (?, ?, ?, ?)`
- if bq != expect {
- t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect)
- }
-
- if args[0].(string) != "Jason Moiron" {
- t.Errorf("Expected `Jason Moiron`, got %v\n", args[0])
- }
-
- if args[1].(int) != 30 {
- t.Errorf("Expected 30, got %v\n", args[1])
- }
-
- if args[2].(string) != "Jason" {
- t.Errorf("Expected Jason, got %v\n", args[2])
- }
-
- if args[3].(string) != "Moiron" {
- t.Errorf("Expected Moiron, got %v\n", args[3])
- }
-
- am2 := tt2{"Hello", "World"}
- bq, args, _ = bindStruct(QUESTION, "INSERT INTO foo (a, b) VALUES (:field_2, :field_1)", am2, mapper())
- expect = `INSERT INTO foo (a, b) VALUES (?, ?)`
- if bq != expect {
- t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect)
- }
-
- if args[0].(string) != "World" {
- t.Errorf("Expected 'World', got %s\n", args[0].(string))
- }
- if args[1].(string) != "Hello" {
- t.Errorf("Expected 'Hello', got %s\n", args[1].(string))
- }
-
- am3 := tt3{Name: "Hello!"}
- am3.Field1 = "Hello"
- am3.Field2 = "World"
-
- bq, args, err = bindStruct(QUESTION, "INSERT INTO foo (a, b, c) VALUES (:name, :field_1, :field_2)", am3, mapper())
-
- if err != nil {
- t.Fatal(err)
- }
-
- expect = `INSERT INTO foo (a, b, c) VALUES (?, ?, ?)`
- if bq != expect {
- t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect)
- }
-
- if args[0].(string) != "Hello!" {
- t.Errorf("Expected 'Hello!', got %s\n", args[0].(string))
- }
- if args[1].(string) != "Hello" {
- t.Errorf("Expected 'Hello', got %s\n", args[1].(string))
- }
- if args[2].(string) != "World" {
- t.Errorf("Expected 'World', got %s\n", args[0].(string))
- }
-}
-
-func TestEmbeddedLiterals(t *testing.T) {
- var schema = Schema{
- create: `
- CREATE TABLE x (
- k text
- );`,
- drop: `drop table x;`,
- }
-
- RunWithSchema(schema, t, func(db *DB, t *testing.T) {
- type t1 struct {
- K *string
- }
- type t2 struct {
- Inline struct {
- F string
- }
- K *string
- }
-
- db.MustExec(db.Rebind("INSERT INTO x (k) VALUES (?), (?), (?);"), "one", "two", "three")
-
- target := t1{}
- err := db.Get(&target, db.Rebind("SELECT * FROM x WHERE k=?"), "one")
- if err != nil {
- t.Error(err)
- }
- if *target.K != "one" {
- t.Error("Expected target.K to be `one`, got ", target.K)
- }
-
- target2 := t2{}
- err = db.Get(&target2, db.Rebind("SELECT * FROM x WHERE k=?"), "one")
- if err != nil {
- t.Error(err)
- }
- if *target2.K != "one" {
- t.Errorf("Expected target2.K to be `one`, got `%v`", target2.K)
- }
-
- })
-}
-
-func BenchmarkBindStruct(b *testing.B) {
- b.StopTimer()
- q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)`
- type t struct {
- Name string
- Age int
- First string
- Last string
- }
- am := t{"Jason Moiron", 30, "Jason", "Moiron"}
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- bindStruct(DOLLAR, q1, am, mapper())
- //bindMap(QUESTION, q1, am)
- }
-}
-
-func BenchmarkBindMap(b *testing.B) {
- b.StopTimer()
- q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)`
- am := map[string]interface{}{
- "name": "Jason Moiron",
- "age": 30,
- "first": "Jason",
- "last": "Moiron",
- }
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- bindMap(DOLLAR, q1, am)
- //bindMap(QUESTION, q1, am)
- }
-}
-
-func BenchmarkRebind(b *testing.B) {
- b.StopTimer()
- q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`
- q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)`
- b.StartTimer()
-
- for i := 0; i < b.N; i++ {
- Rebind(DOLLAR, q1)
- Rebind(DOLLAR, q2)
- }
-}
-
-func BenchmarkRebindBuffer(b *testing.B) {
- b.StopTimer()
- q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`
- q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)`
- b.StartTimer()
-
- for i := 0; i < b.N; i++ {
- rebindBuff(DOLLAR, q1)
- rebindBuff(DOLLAR, q2)
- }
-}
diff --git a/vendor/github.com/jmoiron/sqlx/types/README.md b/vendor/github.com/jmoiron/sqlx/types/README.md
deleted file mode 100644
index 713abe5..0000000
--- a/vendor/github.com/jmoiron/sqlx/types/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# types
-
-The types package provides some useful types which implement the `sql.Scanner`
-and `driver.Valuer` interfaces, suitable for use as scan and value targets with
-database/sql.
diff --git a/vendor/github.com/jmoiron/sqlx/types/types.go b/vendor/github.com/jmoiron/sqlx/types/types.go
deleted file mode 100644
index f1700b6..0000000
--- a/vendor/github.com/jmoiron/sqlx/types/types.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package types
-
-import (
- "bytes"
- "compress/gzip"
- "database/sql/driver"
- "encoding/json"
- "errors"
-
- "io/ioutil"
-)
-
-type GzippedText []byte
-
-func (g GzippedText) Value() (driver.Value, error) {
- b := make([]byte, 0, len(g))
- buf := bytes.NewBuffer(b)
- w := gzip.NewWriter(buf)
- w.Write(g)
- w.Close()
- return buf.Bytes(), nil
-
-}
-
-func (g *GzippedText) Scan(src interface{}) error {
- var source []byte
- switch src.(type) {
- case string:
- source = []byte(src.(string))
- case []byte:
- source = src.([]byte)
- default:
- return errors.New("Incompatible type for GzippedText")
- }
- reader, err := gzip.NewReader(bytes.NewReader(source))
- defer reader.Close()
- b, err := ioutil.ReadAll(reader)
- if err != nil {
- return err
- }
- *g = GzippedText(b)
- return nil
-}
-
-// JsonText is a json.RawMessage, which is a []byte underneath.
-// Value() validates the json format in the source, and returns an error if
-// the json is not valid. Scan does no validation. JsonText additionally
-// implements `Unmarshal`, which unmarshals the json within to an interface{}
-type JsonText json.RawMessage
-
-// Returns the *j as the JSON encoding of j.
-func (j *JsonText) MarshalJSON() ([]byte, error) {
- return *j, nil
-}
-
-// UnmarshalJSON sets *j to a copy of data
-func (j *JsonText) UnmarshalJSON(data []byte) error {
- if j == nil {
- return errors.New("JsonText: UnmarshalJSON on nil pointer")
- }
- *j = append((*j)[0:0], data...)
- return nil
-
-}
-
-// Value returns j as a value. This does a validating unmarshal into another
-// RawMessage. If j is invalid json, it returns an error.
-func (j JsonText) Value() (driver.Value, error) {
- var m json.RawMessage
- var err = j.Unmarshal(&m)
- if err != nil {
- return []byte{}, err
- }
- return []byte(j), nil
-}
-
-// Scan stores the src in *j. No validation is done.
-func (j *JsonText) Scan(src interface{}) error {
- var source []byte
- switch src.(type) {
- case string:
- source = []byte(src.(string))
- case []byte:
- source = src.([]byte)
- default:
- return errors.New("Incompatible type for JsonText")
- }
- *j = JsonText(append((*j)[0:0], source...))
- return nil
-}
-
-// Unmarshal unmarshal's the json in j to v, as in json.Unmarshal.
-func (j *JsonText) Unmarshal(v interface{}) error {
- return json.Unmarshal([]byte(*j), v)
-}
diff --git a/vendor/github.com/jmoiron/sqlx/types/types_test.go b/vendor/github.com/jmoiron/sqlx/types/types_test.go
deleted file mode 100644
index e5c9e1a..0000000
--- a/vendor/github.com/jmoiron/sqlx/types/types_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package types
-
-import "testing"
-
-func TestGzipText(t *testing.T) {
- g := GzippedText("Hello, world")
- v, err := g.Value()
- if err != nil {
- t.Errorf("Was not expecting an error")
- }
- err = (&g).Scan(v)
- if err != nil {
- t.Errorf("Was not expecting an error")
- }
- if string(g) != "Hello, world" {
- t.Errorf("Was expecting the string we sent in (Hello World), got %s", string(g))
- }
-}
-
-func TestJsonText(t *testing.T) {
- j := JsonText(`{"foo": 1, "bar": 2}`)
- v, err := j.Value()
- if err != nil {
- t.Errorf("Was not expecting an error")
- }
- err = (&j).Scan(v)
- if err != nil {
- t.Errorf("Was not expecting an error")
- }
- m := map[string]interface{}{}
- j.Unmarshal(&m)
-
- if m["foo"].(float64) != 1 || m["bar"].(float64) != 2 {
- t.Errorf("Expected valid json but got some garbage instead? %#v", m)
- }
-
- j = JsonText(`{"foo": 1, invalid, false}`)
- v, err = j.Value()
- if err == nil {
- t.Errorf("Was expecting invalid json to fail!")
- }
-}
diff --git a/vendor/github.com/juju/errors/LICENSE b/vendor/github.com/juju/errors/LICENSE
deleted file mode 100644
index ade9307..0000000
--- a/vendor/github.com/juju/errors/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-All files in this repository are licensed as follows. If you contribute
-to this repository, it is assumed that you license your contribution
-under the same license unless you state otherwise.
-
-All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
-
-This software is licensed under the LGPLv3, included below.
-
-As a special exception to the GNU Lesser General Public License version 3
-("LGPL3"), the copyright holders of this Library give you permission to
-convey to a third party a Combined Work that links statically or dynamically
-to this Library without providing any Minimal Corresponding Source or
-Minimal Application Code as set out in 4d or providing the installation
-information set out in section 4e, provided that you comply with the other
-provisions of LGPL3 and provided that you meet, for the Application the
-terms and conditions of the license(s) which apply to the Application.
-
-Except as stated in this special exception, the provisions of LGPL3 will
-continue to comply in full to this Library. If you modify this Library, you
-may apply this exception to your version of this Library, but you are not
-obliged to do so. If you do not wish to do so, delete this exception
-statement from your version. This exception does not (and cannot) modify any
-license terms which apply to the Application, with which you must still
-comply.
-
-
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/vendor/github.com/juju/errors/Makefile b/vendor/github.com/juju/errors/Makefile
deleted file mode 100644
index ab7c2e6..0000000
--- a/vendor/github.com/juju/errors/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-default: check
-
-check:
- go test && go test -compiler gccgo
-
-docs:
- godoc2md github.com/juju/errors > README.md
- sed -i 's|\[godoc-link-here\]|[![GoDoc](https://godoc.org/github.com/juju/errors?status.svg)](https://godoc.org/github.com/juju/errors)|' README.md
-
-
-.PHONY: default check docs
diff --git a/vendor/github.com/juju/errors/README.md b/vendor/github.com/juju/errors/README.md
deleted file mode 100644
index ee24891..0000000
--- a/vendor/github.com/juju/errors/README.md
+++ /dev/null
@@ -1,536 +0,0 @@
-
-# errors
- import "github.com/juju/errors"
-
-[![GoDoc](https://godoc.org/github.com/juju/errors?status.svg)](https://godoc.org/github.com/juju/errors)
-
-The juju/errors provides an easy way to annotate errors without losing the
-orginal error context.
-
-The exported `New` and `Errorf` functions are designed to replace the
-`errors.New` and `fmt.Errorf` functions respectively. The same underlying
-error is there, but the package also records the location at which the error
-was created.
-
-A primary use case for this library is to add extra context any time an
-error is returned from a function.
-
-
- if err := SomeFunc(); err != nil {
- return err
- }
-
-This instead becomes:
-
-
- if err := SomeFunc(); err != nil {
- return errors.Trace(err)
- }
-
-which just records the file and line number of the Trace call, or
-
-
- if err := SomeFunc(); err != nil {
- return errors.Annotate(err, "more context")
- }
-
-which also adds an annotation to the error.
-
-When you want to check to see if an error is of a particular type, a helper
-function is normally exported by the package that returned the error, like the
-`os` package does. The underlying cause of the error is available using the
-`Cause` function.
-
-
- os.IsNotExist(errors.Cause(err))
-
-The result of the `Error()` call on an annotated error is the annotations joined
-with colons, then the result of the `Error()` method for the underlying error
-that was the cause.
-
-
- err := errors.Errorf("original")
- err = errors.Annotatef(err, "context")
- err = errors.Annotatef(err, "more context")
- err.Error() -> "more context: context: original"
-
-Obviously recording the file, line and functions is not very useful if you
-cannot get them back out again.
-
-
- errors.ErrorStack(err)
-
-will return something like:
-
-
- first error
- github.com/juju/errors/annotation_test.go:193:
- github.com/juju/errors/annotation_test.go:194: annotation
- github.com/juju/errors/annotation_test.go:195:
- github.com/juju/errors/annotation_test.go:196: more context
- github.com/juju/errors/annotation_test.go:197:
-
-The first error was generated by an external system, so there was no location
-associated. The second, fourth, and last lines were generated with Trace calls,
-and the other two through Annotate.
-
-Sometimes when responding to an error you want to return a more specific error
-for the situation.
-
-
- if err := FindField(field); err != nil {
- return errors.Wrap(err, errors.NotFoundf(field))
- }
-
-This returns an error where the complete error stack is still available, and
-`errors.Cause()` will return the `NotFound` error.
-
-
-
-
-
-
-## func AlreadyExistsf
-``` go
-func AlreadyExistsf(format string, args ...interface{}) error
-```
-AlreadyExistsf returns an error which satisfies IsAlreadyExists().
-
-
-## func Annotate
-``` go
-func Annotate(other error, message string) error
-```
-Annotate is used to add extra context to an existing error. The location of
-the Annotate call is recorded with the annotations. The file, line and
-function are also recorded.
-
-For example:
-
-
- if err := SomeFunc(); err != nil {
- return errors.Annotate(err, "failed to frombulate")
- }
-
-
-## func Annotatef
-``` go
-func Annotatef(other error, format string, args ...interface{}) error
-```
-Annotatef is used to add extra context to an existing error. The location of
-the Annotate call is recorded with the annotations. The file, line and
-function are also recorded.
-
-For example:
-
-
- if err := SomeFunc(); err != nil {
- return errors.Annotatef(err, "failed to frombulate the %s", arg)
- }
-
-
-## func Cause
-``` go
-func Cause(err error) error
-```
-Cause returns the cause of the given error. This will be either the
-original error, or the result of a Wrap or Mask call.
-
-Cause is the usual way to diagnose errors that may have been wrapped by
-the other errors functions.
-
-
-## func DeferredAnnotatef
-``` go
-func DeferredAnnotatef(err *error, format string, args ...interface{})
-```
-DeferredAnnotatef annotates the given error (when it is not nil) with the given
-format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef
-does nothing. This method is used in a defer statement in order to annotate any
-resulting error with the same message.
-
-For example:
-
-
- defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg)
-
-
-## func Details
-``` go
-func Details(err error) string
-```
-Details returns information about the stack of errors wrapped by err, in
-the format:
-
-
- [{filename:99: error one} {otherfile:55: cause of error one}]
-
-This is a terse alternative to ErrorStack as it returns a single line.
-
-
-## func ErrorStack
-``` go
-func ErrorStack(err error) string
-```
-ErrorStack returns a string representation of the annotated error. If the
-error passed as the parameter is not an annotated error, the result is
-simply the result of the Error() method on that error.
-
-If the error is an annotated error, a multi-line string is returned where
-each line represents one entry in the annotation stack. The full filename
-from the call stack is used in the output.
-
-
- first error
- github.com/juju/errors/annotation_test.go:193:
- github.com/juju/errors/annotation_test.go:194: annotation
- github.com/juju/errors/annotation_test.go:195:
- github.com/juju/errors/annotation_test.go:196: more context
- github.com/juju/errors/annotation_test.go:197:
-
-
-## func Errorf
-``` go
-func Errorf(format string, args ...interface{}) error
-```
-Errorf creates a new annotated error and records the location that the
-error is created. This should be a drop in replacement for fmt.Errorf.
-
-For example:
-
-
- return errors.Errorf("validation failed: %s", message)
-
-
-## func IsAlreadyExists
-``` go
-func IsAlreadyExists(err error) bool
-```
-IsAlreadyExists reports whether the error was created with
-AlreadyExistsf() or NewAlreadyExists().
-
-
-## func IsNotFound
-``` go
-func IsNotFound(err error) bool
-```
-IsNotFound reports whether err was created with NotFoundf() or
-NewNotFound().
-
-
-## func IsNotImplemented
-``` go
-func IsNotImplemented(err error) bool
-```
-IsNotImplemented reports whether err was created with
-NotImplementedf() or NewNotImplemented().
-
-
-## func IsNotSupported
-``` go
-func IsNotSupported(err error) bool
-```
-IsNotSupported reports whether the error was created with
-NotSupportedf() or NewNotSupported().
-
-
-## func IsNotValid
-``` go
-func IsNotValid(err error) bool
-```
-IsNotValid reports whether the error was created with NotValidf() or
-NewNotValid().
-
-
-## func IsUnauthorized
-``` go
-func IsUnauthorized(err error) bool
-```
-IsUnauthorized reports whether err was created with Unauthorizedf() or
-NewUnauthorized().
-
-
-## func Mask
-``` go
-func Mask(other error) error
-```
-Mask hides the underlying error type, and records the location of the masking.
-
-
-## func Maskf
-``` go
-func Maskf(other error, format string, args ...interface{}) error
-```
-Mask masks the given error with the given format string and arguments (like
-fmt.Sprintf), returning a new error that maintains the error stack, but
-hides the underlying error type. The error string still contains the full
-annotations. If you want to hide the annotations, call Wrap.
-
-
-## func New
-``` go
-func New(message string) error
-```
-New is a drop in replacement for the standard libary errors module that records
-the location that the error is created.
-
-For example:
-
-
- return errors.New("validation failed")
-
-
-## func NewAlreadyExists
-``` go
-func NewAlreadyExists(err error, msg string) error
-```
-NewAlreadyExists returns an error which wraps err and satisfies
-IsAlreadyExists().
-
-
-## func NewNotFound
-``` go
-func NewNotFound(err error, msg string) error
-```
-NewNotFound returns an error which wraps err that satisfies
-IsNotFound().
-
-
-## func NewNotImplemented
-``` go
-func NewNotImplemented(err error, msg string) error
-```
-NewNotImplemented returns an error which wraps err and satisfies
-IsNotImplemented().
-
-
-## func NewNotSupported
-``` go
-func NewNotSupported(err error, msg string) error
-```
-NewNotSupported returns an error which wraps err and satisfies
-IsNotSupported().
-
-
-## func NewNotValid
-``` go
-func NewNotValid(err error, msg string) error
-```
-NewNotValid returns an error which wraps err and satisfies IsNotValid().
-
-
-## func NewUnauthorized
-``` go
-func NewUnauthorized(err error, msg string) error
-```
-NewUnauthorized returns an error which wraps err and satisfies
-IsUnauthorized().
-
-
-## func NotFoundf
-``` go
-func NotFoundf(format string, args ...interface{}) error
-```
-NotFoundf returns an error which satisfies IsNotFound().
-
-
-## func NotImplementedf
-``` go
-func NotImplementedf(format string, args ...interface{}) error
-```
-NotImplementedf returns an error which satisfies IsNotImplemented().
-
-
-## func NotSupportedf
-``` go
-func NotSupportedf(format string, args ...interface{}) error
-```
-NotSupportedf returns an error which satisfies IsNotSupported().
-
-
-## func NotValidf
-``` go
-func NotValidf(format string, args ...interface{}) error
-```
-NotValidf returns an error which satisfies IsNotValid().
-
-
-## func Trace
-``` go
-func Trace(other error) error
-```
-Trace adds the location of the Trace call to the stack. The Cause of the
-resulting error is the same as the error parameter. If the other error is
-nil, the result will be nil.
-
-For example:
-
-
- if err := SomeFunc(); err != nil {
- return errors.Trace(err)
- }
-
-
-## func Unauthorizedf
-``` go
-func Unauthorizedf(format string, args ...interface{}) error
-```
-Unauthorizedf returns an error which satisfies IsUnauthorized().
-
-
-## func Wrap
-``` go
-func Wrap(other, newDescriptive error) error
-```
-Wrap changes the Cause of the error. The location of the Wrap call is also
-stored in the error stack.
-
-For example:
-
-
- if err := SomeFunc(); err != nil {
- newErr := &packageError{"more context", private_value}
- return errors.Wrap(err, newErr)
- }
-
-
-## func Wrapf
-``` go
-func Wrapf(other, newDescriptive error, format string, args ...interface{}) error
-```
-Wrapf changes the Cause of the error, and adds an annotation. The location
-of the Wrap call is also stored in the error stack.
-
-For example:
-
-
- if err := SomeFunc(); err != nil {
- return errors.Wrapf(err, simpleErrorType, "invalid value %q", value)
- }
-
-
-
-## type Err
-``` go
-type Err struct {
- // contains filtered or unexported fields
-}
-```
-Err holds a description of an error along with information about
-where the error was created.
-
-It may be embedded in custom error types to add extra information that
-this errors package can understand.
-
-
-
-
-
-
-
-
-
-### func NewErr
-``` go
-func NewErr(format string, args ...interface{}) Err
-```
-NewErr is used to return an Err for the purpose of embedding in other
-structures. The location is not specified, and needs to be set with a call
-to SetLocation.
-
-For example:
-
-
- type FooError struct {
- errors.Err
- code int
- }
-
- func NewFooError(code int) error {
- err := &FooError{errors.NewErr("foo"), code}
- err.SetLocation(1)
- return err
- }
-
-
-
-
-### func (\*Err) Cause
-``` go
-func (e *Err) Cause() error
-```
-The Cause of an error is the most recent error in the error stack that
-meets one of these criteria: the original error that was raised; the new
-error that was passed into the Wrap function; the most recently masked
-error; or nil if the error itself is considered the Cause. Normally this
-method is not invoked directly, but instead through the Cause stand alone
-function.
-
-
-
-### func (\*Err) Error
-``` go
-func (e *Err) Error() string
-```
-Error implements error.Error.
-
-
-
-### func (\*Err) Location
-``` go
-func (e *Err) Location() (filename string, line int)
-```
-Location is the file and line of where the error was most recently
-created or annotated.
-
-
-
-### func (\*Err) Message
-``` go
-func (e *Err) Message() string
-```
-Message returns the message stored with the most recent location. This is
-the empty string if the most recent call was Trace, or the message stored
-with Annotate or Mask.
-
-
-
-### func (\*Err) SetLocation
-``` go
-func (e *Err) SetLocation(callDepth int)
-```
-SetLocation records the source location of the error at callDepth stack
-frames above the call.
-
-
-
-### func (\*Err) StackTrace
-``` go
-func (e *Err) StackTrace() []string
-```
-StackTrace returns one string for each location recorded in the stack of
-errors. The first value is the originating error, with a line for each
-other annotation or tracing of the error.
-
-
-
-### func (\*Err) Underlying
-``` go
-func (e *Err) Underlying() error
-```
-Underlying returns the previous error in the error stack, if any. A client
-should not ever really call this method. It is used to build the error
-stack and should not be introspected by client calls. Or more
-specifically, clients should not depend on anything but the `Cause` of an
-error.
-
-
-
-
-
-
-
-
-
-- - -
-Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
diff --git a/vendor/github.com/juju/errors/doc.go b/vendor/github.com/juju/errors/doc.go
deleted file mode 100644
index 35b119a..0000000
--- a/vendor/github.com/juju/errors/doc.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2013, 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-/*
-[godoc-link-here]
-
-The juju/errors provides an easy way to annotate errors without losing the
-orginal error context.
-
-The exported `New` and `Errorf` functions are designed to replace the
-`errors.New` and `fmt.Errorf` functions respectively. The same underlying
-error is there, but the package also records the location at which the error
-was created.
-
-A primary use case for this library is to add extra context any time an
-error is returned from a function.
-
- if err := SomeFunc(); err != nil {
- return err
- }
-
-This instead becomes:
-
- if err := SomeFunc(); err != nil {
- return errors.Trace(err)
- }
-
-which just records the file and line number of the Trace call, or
-
- if err := SomeFunc(); err != nil {
- return errors.Annotate(err, "more context")
- }
-
-which also adds an annotation to the error.
-
-When you want to check to see if an error is of a particular type, a helper
-function is normally exported by the package that returned the error, like the
-`os` package does. The underlying cause of the error is available using the
-`Cause` function.
-
- os.IsNotExist(errors.Cause(err))
-
-The result of the `Error()` call on an annotated error is the annotations joined
-with colons, then the result of the `Error()` method for the underlying error
-that was the cause.
-
- err := errors.Errorf("original")
- err = errors.Annotatef(err, "context")
- err = errors.Annotatef(err, "more context")
- err.Error() -> "more context: context: original"
-
-Obviously recording the file, line and functions is not very useful if you
-cannot get them back out again.
-
- errors.ErrorStack(err)
-
-will return something like:
-
- first error
- github.com/juju/errors/annotation_test.go:193:
- github.com/juju/errors/annotation_test.go:194: annotation
- github.com/juju/errors/annotation_test.go:195:
- github.com/juju/errors/annotation_test.go:196: more context
- github.com/juju/errors/annotation_test.go:197:
-
-The first error was generated by an external system, so there was no location
-associated. The second, fourth, and last lines were generated with Trace calls,
-and the other two through Annotate.
-
-Sometimes when responding to an error you want to return a more specific error
-for the situation.
-
- if err := FindField(field); err != nil {
- return errors.Wrap(err, errors.NotFoundf(field))
- }
-
-This returns an error where the complete error stack is still available, and
-`errors.Cause()` will return the `NotFound` error.
-
-*/
-package errors
diff --git a/vendor/github.com/juju/errors/error.go b/vendor/github.com/juju/errors/error.go
deleted file mode 100644
index 4799acb..0000000
--- a/vendor/github.com/juju/errors/error.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors
-
-import (
- "fmt"
- "reflect"
- "runtime"
-)
-
-// Err holds a description of an error along with information about
-// where the error was created.
-//
-// It may be embedded in custom error types to add extra information that
-// this errors package can understand.
-type Err struct {
- // message holds an annotation of the error.
- message string
-
- // cause holds the cause of the error as returned
- // by the Cause method.
- cause error
-
- // previous holds the previous error in the error stack, if any.
- previous error
-
- // file and line hold the source code location where the error was
- // created.
- file string
- line int
-}
-
-// NewErr is used to return an Err for the purpose of embedding in other
-// structures. The location is not specified, and needs to be set with a call
-// to SetLocation.
-//
-// For example:
-// type FooError struct {
-// errors.Err
-// code int
-// }
-//
-// func NewFooError(code int) error {
-// err := &FooError{errors.NewErr("foo"), code}
-// err.SetLocation(1)
-// return err
-// }
-func NewErr(format string, args ...interface{}) Err {
- return Err{
- message: fmt.Sprintf(format, args...),
- }
-}
-
-// Location is the file and line of where the error was most recently
-// created or annotated.
-func (e *Err) Location() (filename string, line int) {
- return e.file, e.line
-}
-
-// Underlying returns the previous error in the error stack, if any. A client
-// should not ever really call this method. It is used to build the error
-// stack and should not be introspected by client calls. Or more
-// specifically, clients should not depend on anything but the `Cause` of an
-// error.
-func (e *Err) Underlying() error {
- return e.previous
-}
-
-// The Cause of an error is the most recent error in the error stack that
-// meets one of these criteria: the original error that was raised; the new
-// error that was passed into the Wrap function; the most recently masked
-// error; or nil if the error itself is considered the Cause. Normally this
-// method is not invoked directly, but instead through the Cause stand alone
-// function.
-func (e *Err) Cause() error {
- return e.cause
-}
-
-// Message returns the message stored with the most recent location. This is
-// the empty string if the most recent call was Trace, or the message stored
-// with Annotate or Mask.
-func (e *Err) Message() string {
- return e.message
-}
-
-// Error implements error.Error.
-func (e *Err) Error() string {
- // We want to walk up the stack of errors showing the annotations
- // as long as the cause is the same.
- err := e.previous
- if !sameError(Cause(err), e.cause) && e.cause != nil {
- err = e.cause
- }
- switch {
- case err == nil:
- return e.message
- case e.message == "":
- return err.Error()
- }
- return fmt.Sprintf("%s: %v", e.message, err)
-}
-
-// SetLocation records the source location of the error at callDepth stack
-// frames above the call.
-func (e *Err) SetLocation(callDepth int) {
- _, file, line, _ := runtime.Caller(callDepth + 1)
- e.file = trimGoPath(file)
- e.line = line
-}
-
-// StackTrace returns one string for each location recorded in the stack of
-// errors. The first value is the originating error, with a line for each
-// other annotation or tracing of the error.
-func (e *Err) StackTrace() []string {
- return errorStack(e)
-}
-
-// Ideally we'd have a way to check identity, but deep equals will do.
-func sameError(e1, e2 error) bool {
- return reflect.DeepEqual(e1, e2)
-}
diff --git a/vendor/github.com/juju/errors/error_test.go b/vendor/github.com/juju/errors/error_test.go
deleted file mode 100644
index ac1d2b4..0000000
--- a/vendor/github.com/juju/errors/error_test.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors_test
-
-import (
- "fmt"
- "runtime"
-
- jc "github.com/juju/testing/checkers"
- gc "gopkg.in/check.v1"
-
- "github.com/juju/errors"
-)
-
-type errorsSuite struct{}
-
-var _ = gc.Suite(&errorsSuite{})
-
-var someErr = errors.New("some error") //err varSomeErr
-
-func (*errorsSuite) TestErrorString(c *gc.C) {
- for i, test := range []struct {
- message string
- generator func() error
- expected string
- }{
- {
- message: "uncomparable errors",
- generator: func() error {
- err := errors.Annotatef(newNonComparableError("uncomparable"), "annotation")
- return errors.Annotatef(err, "another")
- },
- expected: "another: annotation: uncomparable",
- }, {
- message: "Errorf",
- generator: func() error {
- return errors.Errorf("first error")
- },
- expected: "first error",
- }, {
- message: "annotated error",
- generator: func() error {
- err := errors.Errorf("first error")
- return errors.Annotatef(err, "annotation")
- },
- expected: "annotation: first error",
- }, {
- message: "test annotation format",
- generator: func() error {
- err := errors.Errorf("first %s", "error")
- return errors.Annotatef(err, "%s", "annotation")
- },
- expected: "annotation: first error",
- }, {
- message: "wrapped error",
- generator: func() error {
- err := newError("first error")
- return errors.Wrap(err, newError("detailed error"))
- },
- expected: "detailed error",
- }, {
- message: "wrapped annotated error",
- generator: func() error {
- err := errors.Errorf("first error")
- err = errors.Annotatef(err, "annotated")
- return errors.Wrap(err, fmt.Errorf("detailed error"))
- },
- expected: "detailed error",
- }, {
- message: "annotated wrapped error",
- generator: func() error {
- err := errors.Errorf("first error")
- err = errors.Wrap(err, fmt.Errorf("detailed error"))
- return errors.Annotatef(err, "annotated")
- },
- expected: "annotated: detailed error",
- }, {
- message: "traced, and annotated",
- generator: func() error {
- err := errors.New("first error")
- err = errors.Trace(err)
- err = errors.Annotate(err, "some context")
- err = errors.Trace(err)
- err = errors.Annotate(err, "more context")
- return errors.Trace(err)
- },
- expected: "more context: some context: first error",
- }, {
- message: "traced, and annotated, masked and annotated",
- generator: func() error {
- err := errors.New("first error")
- err = errors.Trace(err)
- err = errors.Annotate(err, "some context")
- err = errors.Maskf(err, "masked")
- err = errors.Annotate(err, "more context")
- return errors.Trace(err)
- },
- expected: "more context: masked: some context: first error",
- },
- } {
- c.Logf("%v: %s", i, test.message)
- err := test.generator()
- ok := c.Check(err.Error(), gc.Equals, test.expected)
- if !ok {
- c.Logf("%#v", test.generator())
- }
- }
-}
-
-type embed struct {
- errors.Err
-}
-
-func newEmbed(format string, args ...interface{}) *embed {
- err := &embed{errors.NewErr(format, args...)}
- err.SetLocation(1)
- return err
-}
-
-func (*errorsSuite) TestNewErr(c *gc.C) {
- if runtime.Compiler == "gccgo" {
- c.Skip("gccgo can't determine the location")
- }
- err := newEmbed("testing %d", 42) //err embedErr
- c.Assert(err.Error(), gc.Equals, "testing 42")
- c.Assert(errors.Cause(err), gc.Equals, err)
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["embedErr"].String())
-}
-
-var _ error = (*embed)(nil)
-
-// This is an uncomparable error type, as it is a struct that supports the
-// error interface (as opposed to a pointer type).
-type error_ struct {
- info string
- slice []string
-}
-
-// Create a non-comparable error
-func newNonComparableError(message string) error {
- return error_{info: message}
-}
-
-func (e error_) Error() string {
- return e.info
-}
-
-func newError(message string) error {
- return testError{message}
-}
-
-// The testError is a value type error for ease of seeing results
-// when the test fails.
-type testError struct {
- message string
-}
-
-func (e testError) Error() string {
- return e.message
-}
diff --git a/vendor/github.com/juju/errors/errortypes.go b/vendor/github.com/juju/errors/errortypes.go
deleted file mode 100644
index 10b3b19..0000000
--- a/vendor/github.com/juju/errors/errortypes.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors
-
-import (
- "fmt"
-)
-
-// wrap is a helper to construct an *wrapper.
-func wrap(err error, format, suffix string, args ...interface{}) Err {
- newErr := Err{
- message: fmt.Sprintf(format+suffix, args...),
- previous: err,
- }
- newErr.SetLocation(2)
- return newErr
-}
-
-// notFound represents an error when something has not been found.
-type notFound struct {
- Err
-}
-
-// NotFoundf returns an error which satisfies IsNotFound().
-func NotFoundf(format string, args ...interface{}) error {
- return ¬Found{wrap(nil, format, " not found", args...)}
-}
-
-// NewNotFound returns an error which wraps err that satisfies
-// IsNotFound().
-func NewNotFound(err error, msg string) error {
- return ¬Found{wrap(err, msg, "")}
-}
-
-// IsNotFound reports whether err was created with NotFoundf() or
-// NewNotFound().
-func IsNotFound(err error) bool {
- err = Cause(err)
- _, ok := err.(*notFound)
- return ok
-}
-
-// userNotFound represents an error when an inexistent user is looked up.
-type userNotFound struct {
- Err
-}
-
-// UserNotFoundf returns an error which satisfies IsUserNotFound().
-func UserNotFoundf(format string, args ...interface{}) error {
- return &userNotFound{wrap(nil, format, " user not found", args...)}
-}
-
-// NewUserNotFound returns an error which wraps err and satisfies
-// IsUserNotFound().
-func NewUserNotFound(err error, msg string) error {
- return &userNotFound{wrap(err, msg, "")}
-}
-
-// IsUserNotFound reports whether err was created with UserNotFoundf() or
-// NewUserNotFound().
-func IsUserNotFound(err error) bool {
- err = Cause(err)
- _, ok := err.(*userNotFound)
- return ok
-}
-
-// unauthorized represents an error when an operation is unauthorized.
-type unauthorized struct {
- Err
-}
-
-// Unauthorizedf returns an error which satisfies IsUnauthorized().
-func Unauthorizedf(format string, args ...interface{}) error {
- return &unauthorized{wrap(nil, format, "", args...)}
-}
-
-// NewUnauthorized returns an error which wraps err and satisfies
-// IsUnauthorized().
-func NewUnauthorized(err error, msg string) error {
- return &unauthorized{wrap(err, msg, "")}
-}
-
-// IsUnauthorized reports whether err was created with Unauthorizedf() or
-// NewUnauthorized().
-func IsUnauthorized(err error) bool {
- err = Cause(err)
- _, ok := err.(*unauthorized)
- return ok
-}
-
-// notImplemented represents an error when something is not
-// implemented.
-type notImplemented struct {
- Err
-}
-
-// NotImplementedf returns an error which satisfies IsNotImplemented().
-func NotImplementedf(format string, args ...interface{}) error {
- return ¬Implemented{wrap(nil, format, " not implemented", args...)}
-}
-
-// NewNotImplemented returns an error which wraps err and satisfies
-// IsNotImplemented().
-func NewNotImplemented(err error, msg string) error {
- return ¬Implemented{wrap(err, msg, "")}
-}
-
-// IsNotImplemented reports whether err was created with
-// NotImplementedf() or NewNotImplemented().
-func IsNotImplemented(err error) bool {
- err = Cause(err)
- _, ok := err.(*notImplemented)
- return ok
-}
-
-// alreadyExists represents and error when something already exists.
-type alreadyExists struct {
- Err
-}
-
-// AlreadyExistsf returns an error which satisfies IsAlreadyExists().
-func AlreadyExistsf(format string, args ...interface{}) error {
- return &alreadyExists{wrap(nil, format, " already exists", args...)}
-}
-
-// NewAlreadyExists returns an error which wraps err and satisfies
-// IsAlreadyExists().
-func NewAlreadyExists(err error, msg string) error {
- return &alreadyExists{wrap(err, msg, "")}
-}
-
-// IsAlreadyExists reports whether the error was created with
-// AlreadyExistsf() or NewAlreadyExists().
-func IsAlreadyExists(err error) bool {
- err = Cause(err)
- _, ok := err.(*alreadyExists)
- return ok
-}
-
-// notSupported represents an error when something is not supported.
-type notSupported struct {
- Err
-}
-
-// NotSupportedf returns an error which satisfies IsNotSupported().
-func NotSupportedf(format string, args ...interface{}) error {
- return ¬Supported{wrap(nil, format, " not supported", args...)}
-}
-
-// NewNotSupported returns an error which wraps err and satisfies
-// IsNotSupported().
-func NewNotSupported(err error, msg string) error {
- return ¬Supported{wrap(err, msg, "")}
-}
-
-// IsNotSupported reports whether the error was created with
-// NotSupportedf() or NewNotSupported().
-func IsNotSupported(err error) bool {
- err = Cause(err)
- _, ok := err.(*notSupported)
- return ok
-}
-
-// notValid represents an error when something is not valid.
-type notValid struct {
- Err
-}
-
-// NotValidf returns an error which satisfies IsNotValid().
-func NotValidf(format string, args ...interface{}) error {
- return ¬Valid{wrap(nil, format, " not valid", args...)}
-}
-
-// NewNotValid returns an error which wraps err and satisfies IsNotValid().
-func NewNotValid(err error, msg string) error {
- return ¬Valid{wrap(err, msg, "")}
-}
-
-// IsNotValid reports whether the error was created with NotValidf() or
-// NewNotValid().
-func IsNotValid(err error) bool {
- err = Cause(err)
- _, ok := err.(*notValid)
- return ok
-}
-
-// notProvisioned represents an error when something is not yet provisioned.
-type notProvisioned struct {
- Err
-}
-
-// NotProvisionedf returns an error which satisfies IsNotProvisioned().
-func NotProvisionedf(format string, args ...interface{}) error {
- return ¬Provisioned{wrap(nil, format, " not provisioned", args...)}
-}
-
-// NewNotProvisioned returns an error which wraps err that satisfies
-// IsNotProvisioned().
-func NewNotProvisioned(err error, msg string) error {
- return ¬Provisioned{wrap(err, msg, "")}
-}
-
-// IsNotProvisioned reports whether err was created with NotProvisionedf() or
-// NewNotProvisioned().
-func IsNotProvisioned(err error) bool {
- err = Cause(err)
- _, ok := err.(*notProvisioned)
- return ok
-}
-
-// notAssigned represents an error when something is not yet assigned to
-// something else.
-type notAssigned struct {
- Err
-}
-
-// NotAssignedf returns an error which satisfies IsNotAssigned().
-func NotAssignedf(format string, args ...interface{}) error {
- return ¬Assigned{wrap(nil, format, " not assigned", args...)}
-}
-
-// NewNotAssigned returns an error which wraps err that satisfies
-// IsNotAssigned().
-func NewNotAssigned(err error, msg string) error {
- return ¬Assigned{wrap(err, msg, "")}
-}
-
-// IsNotAssigned reports whether err was created with NotAssignedf() or
-// NewNotAssigned().
-func IsNotAssigned(err error) bool {
- err = Cause(err)
- _, ok := err.(*notAssigned)
- return ok
-}
-
-// badRequest represents an error when a request has bad parameters.
-type badRequest struct {
- Err
-}
-
-// BadRequestf returns an error which satisfies IsBadRequest().
-func BadRequestf(format string, args ...interface{}) error {
- return &badRequest{wrap(nil, format, "", args...)}
-}
-
-// NewBadRequest returns an error which wraps err that satisfies
-// IsBadRequest().
-func NewBadRequest(err error, msg string) error {
- return &badRequest{wrap(err, msg, "")}
-}
-
-// IsBadRequest reports whether err was created with BadRequestf() or
-// NewBadRequest().
-func IsBadRequest(err error) bool {
- err = Cause(err)
- _, ok := err.(*badRequest)
- return ok
-}
-
-// methodNotAllowed represents an error when an HTTP request
-// is made with an inappropriate method.
-type methodNotAllowed struct {
- Err
-}
-
-// MethodNotAllowedf returns an error which satisfies IsMethodNotAllowed().
-func MethodNotAllowedf(format string, args ...interface{}) error {
- return &methodNotAllowed{wrap(nil, format, "", args...)}
-}
-
-// NewMethodNotAllowed returns an error which wraps err that satisfies
-// IsMethodNotAllowed().
-func NewMethodNotAllowed(err error, msg string) error {
- return &methodNotAllowed{wrap(err, msg, "")}
-}
-
-// IsMethodNotAllowed reports whether err was created with MethodNotAllowedf() or
-// NewMethodNotAllowed().
-func IsMethodNotAllowed(err error) bool {
- err = Cause(err)
- _, ok := err.(*methodNotAllowed)
- return ok
-}
diff --git a/vendor/github.com/juju/errors/errortypes_test.go b/vendor/github.com/juju/errors/errortypes_test.go
deleted file mode 100644
index 7cc2da9..0000000
--- a/vendor/github.com/juju/errors/errortypes_test.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2013, 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors_test
-
-import (
- stderrors "errors"
- "fmt"
- "reflect"
- "runtime"
-
- "github.com/juju/errors"
- jc "github.com/juju/testing/checkers"
- gc "gopkg.in/check.v1"
-)
-
-// errorInfo holds information about a single error type: a satisfier
-// function, wrapping and variable arguments constructors and message
-// suffix.
-type errorInfo struct {
- satisfier func(error) bool
- argsConstructor func(string, ...interface{}) error
- wrapConstructor func(error, string) error
- suffix string
-}
-
-// allErrors holds information for all defined errors. When adding new
-// errors, add them here as well to include them in tests.
-var allErrors = []*errorInfo{
- &errorInfo{errors.IsNotFound, errors.NotFoundf, errors.NewNotFound, " not found"},
- &errorInfo{errors.IsUserNotFound, errors.UserNotFoundf, errors.NewUserNotFound, " user not found"},
- &errorInfo{errors.IsUnauthorized, errors.Unauthorizedf, errors.NewUnauthorized, ""},
- &errorInfo{errors.IsNotImplemented, errors.NotImplementedf, errors.NewNotImplemented, " not implemented"},
- &errorInfo{errors.IsAlreadyExists, errors.AlreadyExistsf, errors.NewAlreadyExists, " already exists"},
- &errorInfo{errors.IsNotSupported, errors.NotSupportedf, errors.NewNotSupported, " not supported"},
- &errorInfo{errors.IsNotValid, errors.NotValidf, errors.NewNotValid, " not valid"},
- &errorInfo{errors.IsNotProvisioned, errors.NotProvisionedf, errors.NewNotProvisioned, " not provisioned"},
- &errorInfo{errors.IsNotAssigned, errors.NotAssignedf, errors.NewNotAssigned, " not assigned"},
- &errorInfo{errors.IsMethodNotAllowed, errors.MethodNotAllowedf, errors.NewMethodNotAllowed, ""},
- &errorInfo{errors.IsBadRequest, errors.BadRequestf, errors.NewBadRequest, ""},
-}
-
-type errorTypeSuite struct{}
-
-var _ = gc.Suite(&errorTypeSuite{})
-
-func (t *errorInfo) satisfierName() string {
- value := reflect.ValueOf(t.satisfier)
- f := runtime.FuncForPC(value.Pointer())
- return f.Name()
-}
-
-func (t *errorInfo) equal(t0 *errorInfo) bool {
- if t0 == nil {
- return false
- }
- return t.satisfierName() == t0.satisfierName()
-}
-
-type errorTest struct {
- err error
- message string
- errInfo *errorInfo
-}
-
-func deferredAnnotatef(err error, format string, args ...interface{}) error {
- errors.DeferredAnnotatef(&err, format, args...)
- return err
-}
-
-func mustSatisfy(c *gc.C, err error, errInfo *errorInfo) {
- if errInfo != nil {
- msg := fmt.Sprintf("%#v must satisfy %v", err, errInfo.satisfierName())
- c.Check(err, jc.Satisfies, errInfo.satisfier, gc.Commentf(msg))
- }
-}
-
-func mustNotSatisfy(c *gc.C, err error, errInfo *errorInfo) {
- if errInfo != nil {
- msg := fmt.Sprintf("%#v must not satisfy %v", err, errInfo.satisfierName())
- c.Check(err, gc.Not(jc.Satisfies), errInfo.satisfier, gc.Commentf(msg))
- }
-}
-
-func checkErrorMatches(c *gc.C, err error, message string, errInfo *errorInfo) {
- if message == "" {
- c.Check(err, gc.IsNil)
- c.Check(errInfo, gc.IsNil)
- } else {
- c.Check(err, gc.ErrorMatches, message)
- }
-}
-
-func runErrorTests(c *gc.C, errorTests []errorTest, checkMustSatisfy bool) {
- for i, t := range errorTests {
- c.Logf("test %d: %T: %v", i, t.err, t.err)
- checkErrorMatches(c, t.err, t.message, t.errInfo)
- if checkMustSatisfy {
- mustSatisfy(c, t.err, t.errInfo)
- }
-
- // Check all other satisfiers to make sure none match.
- for _, otherErrInfo := range allErrors {
- if checkMustSatisfy && otherErrInfo.equal(t.errInfo) {
- continue
- }
- mustNotSatisfy(c, t.err, otherErrInfo)
- }
- }
-}
-
-func (*errorTypeSuite) TestDeferredAnnotatef(c *gc.C) {
- // Ensure DeferredAnnotatef annotates the errors.
- errorTests := []errorTest{}
- for _, errInfo := range allErrors {
- errorTests = append(errorTests, []errorTest{{
- deferredAnnotatef(nil, "comment"),
- "",
- nil,
- }, {
- deferredAnnotatef(stderrors.New("blast"), "comment"),
- "comment: blast",
- nil,
- }, {
- deferredAnnotatef(errInfo.argsConstructor("foo %d", 42), "comment %d", 69),
- "comment 69: foo 42" + errInfo.suffix,
- errInfo,
- }, {
- deferredAnnotatef(errInfo.argsConstructor(""), "comment"),
- "comment: " + errInfo.suffix,
- errInfo,
- }, {
- deferredAnnotatef(errInfo.wrapConstructor(stderrors.New("pow!"), "woo"), "comment"),
- "comment: woo: pow!",
- errInfo,
- }}...)
- }
-
- runErrorTests(c, errorTests, true)
-}
-
-func (*errorTypeSuite) TestAllErrors(c *gc.C) {
- errorTests := []errorTest{}
- for _, errInfo := range allErrors {
- errorTests = append(errorTests, []errorTest{{
- nil,
- "",
- nil,
- }, {
- errInfo.argsConstructor("foo %d", 42),
- "foo 42" + errInfo.suffix,
- errInfo,
- }, {
- errInfo.argsConstructor(""),
- errInfo.suffix,
- errInfo,
- }, {
- errInfo.wrapConstructor(stderrors.New("pow!"), "prefix"),
- "prefix: pow!",
- errInfo,
- }, {
- errInfo.wrapConstructor(stderrors.New("pow!"), ""),
- "pow!",
- errInfo,
- }, {
- errInfo.wrapConstructor(nil, "prefix"),
- "prefix",
- errInfo,
- }}...)
- }
-
- runErrorTests(c, errorTests, true)
-}
diff --git a/vendor/github.com/juju/errors/example_test.go b/vendor/github.com/juju/errors/example_test.go
deleted file mode 100644
index 2a79cf4..0000000
--- a/vendor/github.com/juju/errors/example_test.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2013, 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors_test
-
-import (
- "fmt"
-
- "github.com/juju/errors"
-)
-
-func ExampleTrace() {
- var err1 error = fmt.Errorf("something wicked this way comes")
- var err2 error = nil
-
- // Tracing a non nil error will return an error
- fmt.Println(errors.Trace(err1))
- // Tracing nil will return nil
- fmt.Println(errors.Trace(err2))
-
- // Output: something wicked this way comes
- //
-}
diff --git a/vendor/github.com/juju/errors/export_test.go b/vendor/github.com/juju/errors/export_test.go
deleted file mode 100644
index db57ec8..0000000
--- a/vendor/github.com/juju/errors/export_test.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2013, 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors
-
-// Since variables are declared before the init block, in order to get the goPath
-// we need to return it rather than just reference it.
-func GoPath() string {
- return goPath
-}
-
-var TrimGoPath = trimGoPath
diff --git a/vendor/github.com/juju/errors/functions.go b/vendor/github.com/juju/errors/functions.go
deleted file mode 100644
index 994208d..0000000
--- a/vendor/github.com/juju/errors/functions.go
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors
-
-import (
- "fmt"
- "strings"
-)
-
-// New is a drop in replacement for the standard libary errors module that records
-// the location that the error is created.
-//
-// For example:
-// return errors.New("validation failed")
-//
-func New(message string) error {
- err := &Err{message: message}
- err.SetLocation(1)
- return err
-}
-
-// Errorf creates a new annotated error and records the location that the
-// error is created. This should be a drop in replacement for fmt.Errorf.
-//
-// For example:
-// return errors.Errorf("validation failed: %s", message)
-//
-func Errorf(format string, args ...interface{}) error {
- err := &Err{message: fmt.Sprintf(format, args...)}
- err.SetLocation(1)
- return err
-}
-
-// Trace adds the location of the Trace call to the stack. The Cause of the
-// resulting error is the same as the error parameter. If the other error is
-// nil, the result will be nil.
-//
-// For example:
-// if err := SomeFunc(); err != nil {
-// return errors.Trace(err)
-// }
-//
-func Trace(other error) error {
- if other == nil {
- return nil
- }
- err := &Err{previous: other, cause: Cause(other)}
- err.SetLocation(1)
- return err
-}
-
-// Annotate is used to add extra context to an existing error. The location of
-// the Annotate call is recorded with the annotations. The file, line and
-// function are also recorded.
-//
-// For example:
-// if err := SomeFunc(); err != nil {
-// return errors.Annotate(err, "failed to frombulate")
-// }
-//
-func Annotate(other error, message string) error {
- if other == nil {
- return nil
- }
- err := &Err{
- previous: other,
- cause: Cause(other),
- message: message,
- }
- err.SetLocation(1)
- return err
-}
-
-// Annotatef is used to add extra context to an existing error. The location of
-// the Annotate call is recorded with the annotations. The file, line and
-// function are also recorded.
-//
-// For example:
-// if err := SomeFunc(); err != nil {
-// return errors.Annotatef(err, "failed to frombulate the %s", arg)
-// }
-//
-func Annotatef(other error, format string, args ...interface{}) error {
- if other == nil {
- return nil
- }
- err := &Err{
- previous: other,
- cause: Cause(other),
- message: fmt.Sprintf(format, args...),
- }
- err.SetLocation(1)
- return err
-}
-
-// DeferredAnnotatef annotates the given error (when it is not nil) with the given
-// format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef
-// does nothing. This method is used in a defer statement in order to annotate any
-// resulting error with the same message.
-//
-// For example:
-//
-// defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg)
-//
-func DeferredAnnotatef(err *error, format string, args ...interface{}) {
- if *err == nil {
- return
- }
- newErr := &Err{
- message: fmt.Sprintf(format, args...),
- cause: Cause(*err),
- previous: *err,
- }
- newErr.SetLocation(1)
- *err = newErr
-}
-
-// Wrap changes the Cause of the error. The location of the Wrap call is also
-// stored in the error stack.
-//
-// For example:
-// if err := SomeFunc(); err != nil {
-// newErr := &packageError{"more context", private_value}
-// return errors.Wrap(err, newErr)
-// }
-//
-func Wrap(other, newDescriptive error) error {
- err := &Err{
- previous: other,
- cause: newDescriptive,
- }
- err.SetLocation(1)
- return err
-}
-
-// Wrapf changes the Cause of the error, and adds an annotation. The location
-// of the Wrap call is also stored in the error stack.
-//
-// For example:
-// if err := SomeFunc(); err != nil {
-// return errors.Wrapf(err, simpleErrorType, "invalid value %q", value)
-// }
-//
-func Wrapf(other, newDescriptive error, format string, args ...interface{}) error {
- err := &Err{
- message: fmt.Sprintf(format, args...),
- previous: other,
- cause: newDescriptive,
- }
- err.SetLocation(1)
- return err
-}
-
-// Mask masks the given error with the given format string and arguments (like
-// fmt.Sprintf), returning a new error that maintains the error stack, but
-// hides the underlying error type. The error string still contains the full
-// annotations. If you want to hide the annotations, call Wrap.
-func Maskf(other error, format string, args ...interface{}) error {
- if other == nil {
- return nil
- }
- err := &Err{
- message: fmt.Sprintf(format, args...),
- previous: other,
- }
- err.SetLocation(1)
- return err
-}
-
-// Mask hides the underlying error type, and records the location of the masking.
-func Mask(other error) error {
- if other == nil {
- return nil
- }
- err := &Err{
- previous: other,
- }
- err.SetLocation(1)
- return err
-}
-
-// Cause returns the cause of the given error. This will be either the
-// original error, or the result of a Wrap or Mask call.
-//
-// Cause is the usual way to diagnose errors that may have been wrapped by
-// the other errors functions.
-func Cause(err error) error {
- var diag error
- if err, ok := err.(causer); ok {
- diag = err.Cause()
- }
- if diag != nil {
- return diag
- }
- return err
-}
-
-type causer interface {
- Cause() error
-}
-
-type wrapper interface {
- // Message returns the top level error message,
- // not including the message from the Previous
- // error.
- Message() string
-
- // Underlying returns the Previous error, or nil
- // if there is none.
- Underlying() error
-}
-
-type locationer interface {
- Location() (string, int)
-}
-
-var (
- _ wrapper = (*Err)(nil)
- _ locationer = (*Err)(nil)
- _ causer = (*Err)(nil)
-)
-
-// Details returns information about the stack of errors wrapped by err, in
-// the format:
-//
-// [{filename:99: error one} {otherfile:55: cause of error one}]
-//
-// This is a terse alternative to ErrorStack as it returns a single line.
-func Details(err error) string {
- if err == nil {
- return "[]"
- }
- var s []byte
- s = append(s, '[')
- for {
- s = append(s, '{')
- if err, ok := err.(locationer); ok {
- file, line := err.Location()
- if file != "" {
- s = append(s, fmt.Sprintf("%s:%d", file, line)...)
- s = append(s, ": "...)
- }
- }
- if cerr, ok := err.(wrapper); ok {
- s = append(s, cerr.Message()...)
- err = cerr.Underlying()
- } else {
- s = append(s, err.Error()...)
- err = nil
- }
- s = append(s, '}')
- if err == nil {
- break
- }
- s = append(s, ' ')
- }
- s = append(s, ']')
- return string(s)
-}
-
-// ErrorStack returns a string representation of the annotated error. If the
-// error passed as the parameter is not an annotated error, the result is
-// simply the result of the Error() method on that error.
-//
-// If the error is an annotated error, a multi-line string is returned where
-// each line represents one entry in the annotation stack. The full filename
-// from the call stack is used in the output.
-//
-// first error
-// github.com/juju/errors/annotation_test.go:193:
-// github.com/juju/errors/annotation_test.go:194: annotation
-// github.com/juju/errors/annotation_test.go:195:
-// github.com/juju/errors/annotation_test.go:196: more context
-// github.com/juju/errors/annotation_test.go:197:
-func ErrorStack(err error) string {
- return strings.Join(errorStack(err), "\n")
-}
-
-func errorStack(err error) []string {
- if err == nil {
- return nil
- }
-
- // We want the first error first
- var lines []string
- for {
- var buff []byte
- if err, ok := err.(locationer); ok {
- file, line := err.Location()
- // Strip off the leading GOPATH/src path elements.
- file = trimGoPath(file)
- if file != "" {
- buff = append(buff, fmt.Sprintf("%s:%d", file, line)...)
- buff = append(buff, ": "...)
- }
- }
- if cerr, ok := err.(wrapper); ok {
- message := cerr.Message()
- buff = append(buff, message...)
- // If there is a cause for this error, and it is different to the cause
- // of the underlying error, then output the error string in the stack trace.
- var cause error
- if err1, ok := err.(causer); ok {
- cause = err1.Cause()
- }
- err = cerr.Underlying()
- if cause != nil && !sameError(Cause(err), cause) {
- if message != "" {
- buff = append(buff, ": "...)
- }
- buff = append(buff, cause.Error()...)
- }
- } else {
- buff = append(buff, err.Error()...)
- err = nil
- }
- lines = append(lines, string(buff))
- if err == nil {
- break
- }
- }
- // reverse the lines to get the original error, which was at the end of
- // the list, back to the start.
- var result []string
- for i := len(lines); i > 0; i-- {
- result = append(result, lines[i-1])
- }
- return result
-}
diff --git a/vendor/github.com/juju/errors/functions_test.go b/vendor/github.com/juju/errors/functions_test.go
deleted file mode 100644
index 7b1e43b..0000000
--- a/vendor/github.com/juju/errors/functions_test.go
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors_test
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "runtime"
- "strings"
-
- jc "github.com/juju/testing/checkers"
- gc "gopkg.in/check.v1"
-
- "github.com/juju/errors"
-)
-
-type functionSuite struct {
-}
-
-var _ = gc.Suite(&functionSuite{})
-
-func (*functionSuite) TestNew(c *gc.C) {
- err := errors.New("testing") //err newTest
- c.Assert(err.Error(), gc.Equals, "testing")
- c.Assert(errors.Cause(err), gc.Equals, err)
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["newTest"].String())
-}
-
-func (*functionSuite) TestErrorf(c *gc.C) {
- err := errors.Errorf("testing %d", 42) //err errorfTest
- c.Assert(err.Error(), gc.Equals, "testing 42")
- c.Assert(errors.Cause(err), gc.Equals, err)
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["errorfTest"].String())
-}
-
-func (*functionSuite) TestTrace(c *gc.C) {
- first := errors.New("first")
- err := errors.Trace(first) //err traceTest
- c.Assert(err.Error(), gc.Equals, "first")
- c.Assert(errors.Cause(err), gc.Equals, first)
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["traceTest"].String())
-
- c.Assert(errors.Trace(nil), gc.IsNil)
-}
-
-func (*functionSuite) TestAnnotate(c *gc.C) {
- first := errors.New("first")
- err := errors.Annotate(first, "annotation") //err annotateTest
- c.Assert(err.Error(), gc.Equals, "annotation: first")
- c.Assert(errors.Cause(err), gc.Equals, first)
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["annotateTest"].String())
-
- c.Assert(errors.Annotate(nil, "annotate"), gc.IsNil)
-}
-
-func (*functionSuite) TestAnnotatef(c *gc.C) {
- first := errors.New("first")
- err := errors.Annotatef(first, "annotation %d", 2) //err annotatefTest
- c.Assert(err.Error(), gc.Equals, "annotation 2: first")
- c.Assert(errors.Cause(err), gc.Equals, first)
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["annotatefTest"].String())
-
- c.Assert(errors.Annotatef(nil, "annotate"), gc.IsNil)
-}
-
-func (*functionSuite) TestDeferredAnnotatef(c *gc.C) {
- // NOTE: this test fails with gccgo
- if runtime.Compiler == "gccgo" {
- c.Skip("gccgo can't determine the location")
- }
- first := errors.New("first")
- test := func() (err error) {
- defer errors.DeferredAnnotatef(&err, "deferred %s", "annotate")
- return first
- } //err deferredAnnotate
- err := test()
- c.Assert(err.Error(), gc.Equals, "deferred annotate: first")
- c.Assert(errors.Cause(err), gc.Equals, first)
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["deferredAnnotate"].String())
-
- err = nil
- errors.DeferredAnnotatef(&err, "deferred %s", "annotate")
- c.Assert(err, gc.IsNil)
-}
-
-func (*functionSuite) TestWrap(c *gc.C) {
- first := errors.New("first") //err wrapFirst
- detailed := errors.New("detailed")
- err := errors.Wrap(first, detailed) //err wrapTest
- c.Assert(err.Error(), gc.Equals, "detailed")
- c.Assert(errors.Cause(err), gc.Equals, detailed)
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["wrapFirst"].String())
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["wrapTest"].String())
-}
-
-func (*functionSuite) TestWrapOfNil(c *gc.C) {
- detailed := errors.New("detailed")
- err := errors.Wrap(nil, detailed) //err nilWrapTest
- c.Assert(err.Error(), gc.Equals, "detailed")
- c.Assert(errors.Cause(err), gc.Equals, detailed)
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["nilWrapTest"].String())
-}
-
-func (*functionSuite) TestWrapf(c *gc.C) {
- first := errors.New("first") //err wrapfFirst
- detailed := errors.New("detailed")
- err := errors.Wrapf(first, detailed, "value %d", 42) //err wrapfTest
- c.Assert(err.Error(), gc.Equals, "value 42: detailed")
- c.Assert(errors.Cause(err), gc.Equals, detailed)
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["wrapfFirst"].String())
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["wrapfTest"].String())
-}
-
-func (*functionSuite) TestWrapfOfNil(c *gc.C) {
- detailed := errors.New("detailed")
- err := errors.Wrapf(nil, detailed, "value %d", 42) //err nilWrapfTest
- c.Assert(err.Error(), gc.Equals, "value 42: detailed")
- c.Assert(errors.Cause(err), gc.Equals, detailed)
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["nilWrapfTest"].String())
-}
-
-func (*functionSuite) TestMask(c *gc.C) {
- first := errors.New("first")
- err := errors.Mask(first) //err maskTest
- c.Assert(err.Error(), gc.Equals, "first")
- c.Assert(errors.Cause(err), gc.Equals, err)
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["maskTest"].String())
-
- c.Assert(errors.Mask(nil), gc.IsNil)
-}
-
-func (*functionSuite) TestMaskf(c *gc.C) {
- first := errors.New("first")
- err := errors.Maskf(first, "masked %d", 42) //err maskfTest
- c.Assert(err.Error(), gc.Equals, "masked 42: first")
- c.Assert(errors.Cause(err), gc.Equals, err)
- c.Assert(errors.Details(err), jc.Contains, tagToLocation["maskfTest"].String())
-
- c.Assert(errors.Maskf(nil, "mask"), gc.IsNil)
-}
-
-func (*functionSuite) TestCause(c *gc.C) {
- c.Assert(errors.Cause(nil), gc.IsNil)
- c.Assert(errors.Cause(someErr), gc.Equals, someErr)
-
- fmtErr := fmt.Errorf("simple")
- c.Assert(errors.Cause(fmtErr), gc.Equals, fmtErr)
-
- err := errors.Wrap(someErr, fmtErr)
- c.Assert(errors.Cause(err), gc.Equals, fmtErr)
-
- err = errors.Annotate(err, "annotated")
- c.Assert(errors.Cause(err), gc.Equals, fmtErr)
-
- err = errors.Maskf(err, "maksed")
- c.Assert(errors.Cause(err), gc.Equals, err)
-
- // Look for a file that we know isn't there.
- dir := c.MkDir()
- _, err = os.Stat(filepath.Join(dir, "not-there"))
- c.Assert(os.IsNotExist(err), jc.IsTrue)
-
- err = errors.Annotatef(err, "wrap it")
- // Now the error itself isn't a 'IsNotExist'.
- c.Assert(os.IsNotExist(err), jc.IsFalse)
- // However if we use the Check method, it is.
- c.Assert(os.IsNotExist(errors.Cause(err)), jc.IsTrue)
-}
-
-func (s *functionSuite) TestDetails(c *gc.C) {
- if runtime.Compiler == "gccgo" {
- c.Skip("gccgo can't determine the location")
- }
- c.Assert(errors.Details(nil), gc.Equals, "[]")
-
- otherErr := fmt.Errorf("other")
- checkDetails(c, otherErr, "[{other}]")
-
- err0 := newEmbed("foo") //err TestStack#0
- checkDetails(c, err0, "[{$TestStack#0$: foo}]")
-
- err1 := errors.Annotate(err0, "bar") //err TestStack#1
- checkDetails(c, err1, "[{$TestStack#1$: bar} {$TestStack#0$: foo}]")
-
- err2 := errors.Trace(err1) //err TestStack#2
- checkDetails(c, err2, "[{$TestStack#2$: } {$TestStack#1$: bar} {$TestStack#0$: foo}]")
-}
-
-type tracer interface {
- StackTrace() []string
-}
-
-func (*functionSuite) TestErrorStack(c *gc.C) {
- for i, test := range []struct {
- message string
- generator func() error
- expected string
- tracer bool
- }{
- {
- message: "nil",
- generator: func() error {
- return nil
- },
- }, {
- message: "raw error",
- generator: func() error {
- return fmt.Errorf("raw")
- },
- expected: "raw",
- }, {
- message: "single error stack",
- generator: func() error {
- return errors.New("first error") //err single
- },
- expected: "$single$: first error",
- tracer: true,
- }, {
- message: "annotated error",
- generator: func() error {
- err := errors.New("first error") //err annotated-0
- return errors.Annotate(err, "annotation") //err annotated-1
- },
- expected: "" +
- "$annotated-0$: first error\n" +
- "$annotated-1$: annotation",
- tracer: true,
- }, {
- message: "wrapped error",
- generator: func() error {
- err := errors.New("first error") //err wrapped-0
- return errors.Wrap(err, newError("detailed error")) //err wrapped-1
- },
- expected: "" +
- "$wrapped-0$: first error\n" +
- "$wrapped-1$: detailed error",
- tracer: true,
- }, {
- message: "annotated wrapped error",
- generator: func() error {
- err := errors.Errorf("first error") //err ann-wrap-0
- err = errors.Wrap(err, fmt.Errorf("detailed error")) //err ann-wrap-1
- return errors.Annotatef(err, "annotated") //err ann-wrap-2
- },
- expected: "" +
- "$ann-wrap-0$: first error\n" +
- "$ann-wrap-1$: detailed error\n" +
- "$ann-wrap-2$: annotated",
- tracer: true,
- }, {
- message: "traced, and annotated",
- generator: func() error {
- err := errors.New("first error") //err stack-0
- err = errors.Trace(err) //err stack-1
- err = errors.Annotate(err, "some context") //err stack-2
- err = errors.Trace(err) //err stack-3
- err = errors.Annotate(err, "more context") //err stack-4
- return errors.Trace(err) //err stack-5
- },
- expected: "" +
- "$stack-0$: first error\n" +
- "$stack-1$: \n" +
- "$stack-2$: some context\n" +
- "$stack-3$: \n" +
- "$stack-4$: more context\n" +
- "$stack-5$: ",
- tracer: true,
- }, {
- message: "uncomparable, wrapped with a value error",
- generator: func() error {
- err := newNonComparableError("first error") //err mixed-0
- err = errors.Trace(err) //err mixed-1
- err = errors.Wrap(err, newError("value error")) //err mixed-2
- err = errors.Maskf(err, "masked") //err mixed-3
- err = errors.Annotate(err, "more context") //err mixed-4
- return errors.Trace(err) //err mixed-5
- },
- expected: "" +
- "first error\n" +
- "$mixed-1$: \n" +
- "$mixed-2$: value error\n" +
- "$mixed-3$: masked\n" +
- "$mixed-4$: more context\n" +
- "$mixed-5$: ",
- tracer: true,
- },
- } {
- c.Logf("%v: %s", i, test.message)
- err := test.generator()
- expected := replaceLocations(test.expected)
- stack := errors.ErrorStack(err)
- ok := c.Check(stack, gc.Equals, expected)
- if !ok {
- c.Logf("%#v", err)
- }
- tracer, ok := err.(tracer)
- c.Check(ok, gc.Equals, test.tracer)
- if ok {
- stackTrace := tracer.StackTrace()
- c.Check(stackTrace, gc.DeepEquals, strings.Split(stack, "\n"))
- }
- }
-}
diff --git a/vendor/github.com/juju/errors/package_test.go b/vendor/github.com/juju/errors/package_test.go
deleted file mode 100644
index 5bbb8f0..0000000
--- a/vendor/github.com/juju/errors/package_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2013, 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors_test
-
-import (
- "fmt"
- "io/ioutil"
- "strings"
- "testing"
-
- gc "gopkg.in/check.v1"
-
- "github.com/juju/errors"
-)
-
-func Test(t *testing.T) {
- gc.TestingT(t)
-}
-
-func checkDetails(c *gc.C, err error, details string) {
- c.Assert(err, gc.NotNil)
- expectedDetails := replaceLocations(details)
- c.Assert(errors.Details(err), gc.Equals, expectedDetails)
-}
-
-func checkErr(c *gc.C, err, cause error, msg string, details string) {
- c.Assert(err, gc.NotNil)
- c.Assert(err.Error(), gc.Equals, msg)
- c.Assert(errors.Cause(err), gc.Equals, cause)
- expectedDetails := replaceLocations(details)
- c.Assert(errors.Details(err), gc.Equals, expectedDetails)
-}
-
-func replaceLocations(line string) string {
- result := ""
- for {
- i := strings.Index(line, "$")
- if i == -1 {
- break
- }
- result += line[0:i]
- line = line[i+1:]
- i = strings.Index(line, "$")
- if i == -1 {
- panic("no second $")
- }
- result += location(line[0:i]).String()
- line = line[i+1:]
- }
- result += line
- return result
-}
-
-func location(tag string) Location {
- loc, ok := tagToLocation[tag]
- if !ok {
- panic(fmt.Sprintf("tag %q not found", tag))
- }
- return loc
-}
-
-type Location struct {
- file string
- line int
-}
-
-func (loc Location) String() string {
- return fmt.Sprintf("%s:%d", loc.file, loc.line)
-}
-
-var tagToLocation = make(map[string]Location)
-
-func setLocationsForErrorTags(filename string) {
- data, err := ioutil.ReadFile(filename)
- if err != nil {
- panic(err)
- }
- filename = "github.com/juju/errors/" + filename
- lines := strings.Split(string(data), "\n")
- for i, line := range lines {
- if j := strings.Index(line, "//err "); j >= 0 {
- tag := line[j+len("//err "):]
- if _, found := tagToLocation[tag]; found {
- panic(fmt.Sprintf("tag %q already processed previously", tag))
- }
- tagToLocation[tag] = Location{file: filename, line: i + 1}
- }
- }
-}
-
-func init() {
- setLocationsForErrorTags("error_test.go")
- setLocationsForErrorTags("functions_test.go")
-}
diff --git a/vendor/github.com/juju/errors/path.go b/vendor/github.com/juju/errors/path.go
deleted file mode 100644
index 3ec517c..0000000
--- a/vendor/github.com/juju/errors/path.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2013, 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors
-
-import (
- "runtime"
- "strings"
-)
-
-// prefixSize is used internally to trim the user specific path from the
-// front of the returned filenames from the runtime call stack.
-var prefixSize int
-
-// goPath is the deduced path based on the location of this file as compiled.
-var goPath string
-
-func init() {
- _, file, _, ok := runtime.Caller(0)
- if ok {
- // We know that the end of the file should be:
- // github.com/juju/errors/path.go
- size := len(file)
- suffix := len("github.com/juju/errors/path.go")
- goPath = file[:size-suffix]
- prefixSize = len(goPath)
- }
-}
-
-func trimGoPath(filename string) string {
- if strings.HasPrefix(filename, goPath) {
- return filename[prefixSize:]
- }
- return filename
-}
diff --git a/vendor/github.com/juju/errors/path_test.go b/vendor/github.com/juju/errors/path_test.go
deleted file mode 100644
index ef4f34f..0000000
--- a/vendor/github.com/juju/errors/path_test.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2013, 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors_test
-
-import (
- "path"
-
- gc "gopkg.in/check.v1"
-
- "github.com/juju/errors"
-)
-
-type pathSuite struct{}
-
-var _ = gc.Suite(&pathSuite{})
-
-func (*pathSuite) TestGoPathSet(c *gc.C) {
- c.Assert(errors.GoPath(), gc.Not(gc.Equals), "")
-}
-
-func (*pathSuite) TestTrimGoPath(c *gc.C) {
- relativeImport := "github.com/foo/bar/baz.go"
- filename := path.Join(errors.GoPath(), relativeImport)
- c.Assert(errors.TrimGoPath(filename), gc.Equals, relativeImport)
-
- absoluteImport := "/usr/share/foo/bar/baz.go"
- c.Assert(errors.TrimGoPath(absoluteImport), gc.Equals, absoluteImport)
-}
diff --git a/vendor/github.com/ngaut/deadline/rw.go b/vendor/github.com/ngaut/deadline/rw.go
deleted file mode 100644
index 19d4368..0000000
--- a/vendor/github.com/ngaut/deadline/rw.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package deadline
-
-import (
- "io"
- "time"
-)
-
-type DeadlineReader interface {
- io.Reader
- SetReadDeadline(t time.Time) error
-}
-
-type DeadlineWriter interface {
- io.Writer
- SetWriteDeadline(t time.Time) error
-}
-
-type DeadlineReadWriter interface {
- io.ReadWriter
- SetReadDeadline(t time.Time) error
- SetWriteDeadline(t time.Time) error
-}
-
-type deadlineReader struct {
- DeadlineReader
- timeout time.Duration
-}
-
-func (r *deadlineReader) Read(p []byte) (int, error) {
- r.DeadlineReader.SetReadDeadline(time.Now().Add(r.timeout))
- return r.DeadlineReader.Read(p)
-}
-
-func NewDeadlineReader(r DeadlineReader, timeout time.Duration) io.Reader {
- return &deadlineReader{DeadlineReader: r, timeout: timeout}
-}
-
-type deadlineWriter struct {
- DeadlineWriter
- timeout time.Duration
-}
-
-func (r *deadlineWriter) Write(p []byte) (int, error) {
- r.DeadlineWriter.SetWriteDeadline(time.Now().Add(r.timeout))
- return r.DeadlineWriter.Write(p)
-}
-
-func NewDeadlineWriter(r DeadlineWriter, timeout time.Duration) io.Writer {
- return &deadlineWriter{DeadlineWriter: r, timeout: timeout}
-}
diff --git a/vendor/github.com/ngaut/log/LICENSE b/vendor/github.com/ngaut/log/LICENSE
deleted file mode 100644
index 6600f1c..0000000
--- a/vendor/github.com/ngaut/log/LICENSE
+++ /dev/null
@@ -1,165 +0,0 @@
-GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/vendor/github.com/ngaut/log/README.md b/vendor/github.com/ngaut/log/README.md
deleted file mode 100644
index e0e857e..0000000
--- a/vendor/github.com/ngaut/log/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-logging
-=======
diff --git a/vendor/github.com/ngaut/log/crash_unix.go b/vendor/github.com/ngaut/log/crash_unix.go
deleted file mode 100644
index 37f407d..0000000
--- a/vendor/github.com/ngaut/log/crash_unix.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build freebsd openbsd netbsd dragonfly darwin linux
-
-package log
-
-import (
- "log"
- "os"
- "syscall"
-)
-
-func CrashLog(file string) {
- f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
- if err != nil {
- log.Println(err.Error())
- } else {
- syscall.Dup2(int(f.Fd()), 2)
- }
-}
diff --git a/vendor/github.com/ngaut/log/crash_win.go b/vendor/github.com/ngaut/log/crash_win.go
deleted file mode 100644
index 7d612ee..0000000
--- a/vendor/github.com/ngaut/log/crash_win.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build windows
-
-package log
-
-import (
- "log"
- "os"
- "syscall"
-)
-
-var (
- kernel32 = syscall.MustLoadDLL("kernel32.dll")
- procSetStdHandle = kernel32.MustFindProc("SetStdHandle")
-)
-
-func setStdHandle(stdhandle int32, handle syscall.Handle) error {
- r0, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
- if r0 == 0 {
- if e1 != 0 {
- return error(e1)
- }
- return syscall.EINVAL
- }
- return nil
-}
-
-func CrashLog(file string) {
- f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
- if err != nil {
- log.Println(err.Error())
- } else {
- err = setStdHandle(syscall.STD_ERROR_HANDLE, syscall.Handle(f.Fd()))
- if err != nil {
- log.Println(err.Error())
- }
- }
-}
diff --git a/vendor/github.com/ngaut/log/log.go b/vendor/github.com/ngaut/log/log.go
deleted file mode 100644
index 896b393..0000000
--- a/vendor/github.com/ngaut/log/log.go
+++ /dev/null
@@ -1,380 +0,0 @@
-//high level log wrapper, so it can output different log based on level
-package log
-
-import (
- "fmt"
- "io"
- "log"
- "os"
- "runtime"
- "sync"
- "time"
-)
-
-const (
- Ldate = log.Ldate
- Llongfile = log.Llongfile
- Lmicroseconds = log.Lmicroseconds
- Lshortfile = log.Lshortfile
- LstdFlags = log.LstdFlags
- Ltime = log.Ltime
-)
-
-type (
- LogLevel int
- LogType int
-)
-
-const (
- LOG_FATAL = LogType(0x1)
- LOG_ERROR = LogType(0x2)
- LOG_WARNING = LogType(0x4)
- LOG_INFO = LogType(0x8)
- LOG_DEBUG = LogType(0x10)
-)
-
-const (
- LOG_LEVEL_NONE = LogLevel(0x0)
- LOG_LEVEL_FATAL = LOG_LEVEL_NONE | LogLevel(LOG_FATAL)
- LOG_LEVEL_ERROR = LOG_LEVEL_FATAL | LogLevel(LOG_ERROR)
- LOG_LEVEL_WARN = LOG_LEVEL_ERROR | LogLevel(LOG_WARNING)
- LOG_LEVEL_INFO = LOG_LEVEL_WARN | LogLevel(LOG_INFO)
- LOG_LEVEL_DEBUG = LOG_LEVEL_INFO | LogLevel(LOG_DEBUG)
- LOG_LEVEL_ALL = LOG_LEVEL_DEBUG
-)
-
-const FORMAT_TIME_DAY string = "20060102"
-const FORMAT_TIME_HOUR string = "2006010215"
-
-var _log *logger = New()
-
-func init() {
- SetFlags(Ldate | Ltime | Lshortfile)
- SetHighlighting(runtime.GOOS != "windows")
-}
-
-func Logger() *log.Logger {
- return _log._log
-}
-
-func SetLevel(level LogLevel) {
- _log.SetLevel(level)
-}
-func GetLogLevel() LogLevel {
- return _log.level
-}
-
-func SetOutput(out io.Writer) {
- _log.SetOutput(out)
-}
-
-func SetOutputByName(path string) error {
- return _log.SetOutputByName(path)
-}
-
-func SetFlags(flags int) {
- _log._log.SetFlags(flags)
-}
-
-func Info(v ...interface{}) {
- _log.Info(v...)
-}
-
-func Infof(format string, v ...interface{}) {
- _log.Infof(format, v...)
-}
-
-func Debug(v ...interface{}) {
- _log.Debug(v...)
-}
-
-func Debugf(format string, v ...interface{}) {
- _log.Debugf(format, v...)
-}
-
-func Warn(v ...interface{}) {
- _log.Warning(v...)
-}
-
-func Warnf(format string, v ...interface{}) {
- _log.Warningf(format, v...)
-}
-
-func Warning(v ...interface{}) {
- _log.Warning(v...)
-}
-
-func Warningf(format string, v ...interface{}) {
- _log.Warningf(format, v...)
-}
-
-func Error(v ...interface{}) {
- _log.Error(v...)
-}
-
-func Errorf(format string, v ...interface{}) {
- _log.Errorf(format, v...)
-}
-
-func Fatal(v ...interface{}) {
- _log.Fatal(v...)
-}
-
-func Fatalf(format string, v ...interface{}) {
- _log.Fatalf(format, v...)
-}
-
-func SetLevelByString(level string) {
- _log.SetLevelByString(level)
-}
-
-func SetHighlighting(highlighting bool) {
- _log.SetHighlighting(highlighting)
-}
-
-func SetRotateByDay() {
- _log.SetRotateByDay()
-}
-
-func SetRotateByHour() {
- _log.SetRotateByHour()
-}
-
-type logger struct {
- _log *log.Logger
- level LogLevel
- highlighting bool
-
- dailyRolling bool
- hourRolling bool
-
- fileName string
- logSuffix string
- fd *os.File
-
- lock sync.Mutex
-}
-
-func (l *logger) SetHighlighting(highlighting bool) {
- l.highlighting = highlighting
-}
-
-func (l *logger) SetLevel(level LogLevel) {
- l.level = level
-}
-
-func (l *logger) SetLevelByString(level string) {
- l.level = StringToLogLevel(level)
-}
-
-func (l *logger) SetRotateByDay() {
- l.dailyRolling = true
- l.logSuffix = genDayTime(time.Now())
-}
-
-func (l *logger) SetRotateByHour() {
- l.hourRolling = true
- l.logSuffix = genHourTime(time.Now())
-}
-
-func (l *logger) rotate() error {
- l.lock.Lock()
- defer l.lock.Unlock()
-
- var suffix string
- if l.dailyRolling {
- suffix = genDayTime(time.Now())
- } else if l.hourRolling {
- suffix = genHourTime(time.Now())
- } else {
- return nil
- }
-
- // Notice: if suffix is not equal to l.LogSuffix, then rotate
- if suffix != l.logSuffix {
- err := l.doRotate(suffix)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (l *logger) doRotate(suffix string) error {
- // Notice: Not check error, is this ok?
- l.fd.Close()
-
- lastFileName := l.fileName + "." + l.logSuffix
- err := os.Rename(l.fileName, lastFileName)
- if err != nil {
- return err
- }
-
- err = l.SetOutputByName(l.fileName)
- if err != nil {
- return err
- }
-
- l.logSuffix = suffix
-
- return nil
-}
-
-func (l *logger) SetOutput(out io.Writer) {
- l._log = log.New(out, l._log.Prefix(), l._log.Flags())
-}
-
-func (l *logger) SetOutputByName(path string) error {
- f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
- if err != nil {
- log.Fatal(err)
- }
-
- l.SetOutput(f)
-
- l.fileName = path
- l.fd = f
-
- return err
-}
-
-func (l *logger) log(t LogType, v ...interface{}) {
- if l.level|LogLevel(t) != l.level {
- return
- }
-
- err := l.rotate()
- if err != nil {
- fmt.Fprintf(os.Stderr, "%s\n", err.Error())
- return
- }
-
- v1 := make([]interface{}, len(v)+2)
- logStr, logColor := LogTypeToString(t)
- if l.highlighting {
- v1[0] = "\033" + logColor + "m[" + logStr + "]"
- copy(v1[1:], v)
- v1[len(v)+1] = "\033[0m"
- } else {
- v1[0] = "[" + logStr + "]"
- copy(v1[1:], v)
- v1[len(v)+1] = ""
- }
-
- s := fmt.Sprintln(v1...)
- l._log.Output(4, s)
-}
-
-func (l *logger) logf(t LogType, format string, v ...interface{}) {
- if l.level|LogLevel(t) != l.level {
- return
- }
-
- err := l.rotate()
- if err != nil {
- fmt.Fprintf(os.Stderr, "%s\n", err.Error())
- return
- }
-
- logStr, logColor := LogTypeToString(t)
- var s string
- if l.highlighting {
- s = "\033" + logColor + "m[" + logStr + "] " + fmt.Sprintf(format, v...) + "\033[0m"
- } else {
- s = "[" + logStr + "] " + fmt.Sprintf(format, v...)
- }
- l._log.Output(4, s)
-}
-
-func (l *logger) Fatal(v ...interface{}) {
- l.log(LOG_FATAL, v...)
- os.Exit(-1)
-}
-
-func (l *logger) Fatalf(format string, v ...interface{}) {
- l.logf(LOG_FATAL, format, v...)
- os.Exit(-1)
-}
-
-func (l *logger) Error(v ...interface{}) {
- l.log(LOG_ERROR, v...)
-}
-
-func (l *logger) Errorf(format string, v ...interface{}) {
- l.logf(LOG_ERROR, format, v...)
-}
-
-func (l *logger) Warning(v ...interface{}) {
- l.log(LOG_WARNING, v...)
-}
-
-func (l *logger) Warningf(format string, v ...interface{}) {
- l.logf(LOG_WARNING, format, v...)
-}
-
-func (l *logger) Debug(v ...interface{}) {
- l.log(LOG_DEBUG, v...)
-}
-
-func (l *logger) Debugf(format string, v ...interface{}) {
- l.logf(LOG_DEBUG, format, v...)
-}
-
-func (l *logger) Info(v ...interface{}) {
- l.log(LOG_INFO, v...)
-}
-
-func (l *logger) Infof(format string, v ...interface{}) {
- l.logf(LOG_INFO, format, v...)
-}
-
-func StringToLogLevel(level string) LogLevel {
- switch level {
- case "fatal":
- return LOG_LEVEL_FATAL
- case "error":
- return LOG_LEVEL_ERROR
- case "warn":
- return LOG_LEVEL_WARN
- case "warning":
- return LOG_LEVEL_WARN
- case "debug":
- return LOG_LEVEL_DEBUG
- case "info":
- return LOG_LEVEL_INFO
- }
- return LOG_LEVEL_ALL
-}
-
-func LogTypeToString(t LogType) (string, string) {
- switch t {
- case LOG_FATAL:
- return "fatal", "[0;31"
- case LOG_ERROR:
- return "error", "[0;31"
- case LOG_WARNING:
- return "warning", "[0;33"
- case LOG_DEBUG:
- return "debug", "[0;36"
- case LOG_INFO:
- return "info", "[0;37"
- }
- return "unknown", "[0;37"
-}
-
-func genDayTime(t time.Time) string {
- return t.Format(FORMAT_TIME_DAY)
-}
-
-func genHourTime(t time.Time) string {
- return t.Format(FORMAT_TIME_HOUR)
-}
-
-func New() *logger {
- return Newlogger(os.Stderr, "")
-}
-
-func Newlogger(w io.Writer, prefix string) *logger {
- return &logger{_log: log.New(w, prefix, LstdFlags), level: LOG_LEVEL_ALL, highlighting: true}
-}
diff --git a/vendor/github.com/ngaut/log/log_test.go b/vendor/github.com/ngaut/log/log_test.go
deleted file mode 100644
index adbcfb3..0000000
--- a/vendor/github.com/ngaut/log/log_test.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package log
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strings"
- "testing"
- "time"
-)
-
-func isFileExists(name string) bool {
- f, err := os.Stat(name)
- if err != nil {
- if os.IsNotExist(err) {
- return false
- }
- }
-
- if f.IsDir() {
- return false
- }
-
- return true
-}
-
-func parseDate(value string, format string) (time.Time, error) {
- tt, err := time.ParseInLocation(format, value, time.Local)
- if err != nil {
- fmt.Println("[Error]" + err.Error())
- return tt, err
- }
-
- return tt, nil
-}
-
-func checkLogData(fileName string, containData string, num int64) error {
- input, err := os.OpenFile(fileName, os.O_RDONLY, 0)
- if err != nil {
- return err
- }
- defer input.Close()
-
- var lineNum int64
- br := bufio.NewReader(input)
- for {
- line, err := br.ReadString('\n')
- if err == io.EOF {
- break
- }
-
- realLine := strings.TrimRight(line, "\n")
- if strings.Contains(realLine, containData) {
- lineNum += 1
- }
- }
-
- // check whether num is equal to lineNum
- if lineNum != num {
- return fmt.Errorf("checkLogData fail - %d vs %d", lineNum, num)
- }
-
- return nil
-}
-
-func TestDayRotateCase(t *testing.T) {
- _log = New()
-
- logName := "example_day_test.log"
- if isFileExists(logName) {
- err := os.Remove(logName)
- if err != nil {
- t.Errorf("Remove old log file fail - %s, %s\n", err.Error(), logName)
- }
- }
-
- SetRotateByDay()
- err := SetOutputByName(logName)
- if err != nil {
- t.Errorf("SetOutputByName fail - %s, %s\n", err.Error(), logName)
- }
-
- if _log.logSuffix == "" {
- t.Errorf("bad log suffix fail - %s\n", _log.logSuffix)
- }
-
- day, err := parseDate(_log.logSuffix, FORMAT_TIME_DAY)
- if err != nil {
- t.Errorf("parseDate fail - %s, %s\n", err.Error(), _log.logSuffix)
- }
-
- _log.Info("Test data")
- _log.Infof("Test data - %s", day.String())
-
- // mock log suffix to check rotate
- lastDay := day.AddDate(0, 0, -1)
- _log.logSuffix = genDayTime(lastDay)
- oldLogSuffix := _log.logSuffix
-
- _log.Info("Test new data")
- _log.Infof("Test new data - %s", day.String())
-
- err = _log.fd.Close()
- if err != nil {
- t.Errorf("close log fd fail - %s, %s\n", err.Error(), _log.fileName)
- }
-
- // check both old and new log file datas
- oldLogName := logName + "." + oldLogSuffix
- err = checkLogData(oldLogName, "Test data", 2)
- if err != nil {
- t.Errorf("old log file checkLogData fail - %s, %s\n", err.Error(), oldLogName)
- }
-
- err = checkLogData(logName, "Test new data", 2)
- if err != nil {
- t.Errorf("new log file checkLogData fail - %s, %s\n", err.Error(), logName)
- }
-
- // remove test log files
- err = os.Remove(oldLogName)
- if err != nil {
- t.Errorf("Remove final old log file fail - %s, %s\n", err.Error(), oldLogName)
- }
-
- err = os.Remove(logName)
- if err != nil {
- t.Errorf("Remove final new log file fail - %s, %s\n", err.Error(), logName)
- }
-}
-
-func TestHourRotateCase(t *testing.T) {
- _log = New()
-
- logName := "example_hour_test.log"
- if isFileExists(logName) {
- err := os.Remove(logName)
- if err != nil {
- t.Errorf("Remove old log file fail - %s, %s\n", err.Error(), logName)
- }
- }
-
- SetRotateByHour()
- err := SetOutputByName(logName)
- if err != nil {
- t.Errorf("SetOutputByName fail - %s, %s\n", err.Error(), logName)
- }
-
- if _log.logSuffix == "" {
- t.Errorf("bad log suffix fail - %s\n", _log.logSuffix)
- }
-
- hour, err := parseDate(_log.logSuffix, FORMAT_TIME_HOUR)
- if err != nil {
- t.Errorf("parseDate fail - %s, %s\n", err.Error(), _log.logSuffix)
- }
-
- _log.Info("Test data")
- _log.Infof("Test data - %s", hour.String())
-
- // mock log suffix to check rotate
- lastHour := hour.Add(time.Duration(-1 * time.Hour))
- _log.logSuffix = genHourTime(lastHour)
- oldLogSuffix := _log.logSuffix
-
- _log.Info("Test new data")
- _log.Infof("Test new data - %s", hour.String())
-
- err = _log.fd.Close()
- if err != nil {
- t.Errorf("close log fd fail - %s, %s\n", err.Error(), _log.fileName)
- }
-
- // check both old and new log file datas
- oldLogName := logName + "." + oldLogSuffix
- err = checkLogData(oldLogName, "Test data", 2)
- if err != nil {
- t.Errorf("old log file checkLogData fail - %s, %s\n", err.Error(), oldLogName)
- }
-
- err = checkLogData(logName, "Test new data", 2)
- if err != nil {
- t.Errorf("new log file checkLogData fail - %s, %s\n", err.Error(), logName)
- }
-
- // remove test log files
- err = os.Remove(oldLogName)
- if err != nil {
- t.Errorf("Remove final old log file fail - %s, %s\n", err.Error(), oldLogName)
- }
-
- err = os.Remove(logName)
- if err != nil {
- t.Errorf("Remove final new log file fail - %s, %s\n", err.Error(), logName)
- }
-}
diff --git a/vendor/github.com/ngaut/pools/id_pool.go b/vendor/github.com/ngaut/pools/id_pool.go
deleted file mode 100644
index 31db606..0000000
--- a/vendor/github.com/ngaut/pools/id_pool.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2014, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pools
-
-import (
- "fmt"
- "sync"
-)
-
-// IDPool is used to ensure that the set of IDs in use concurrently never
-// contains any duplicates. The IDs start at 1 and increase without bound, but
-// will never be larger than the peak number of concurrent uses.
-//
-// IDPool's Get() and Set() methods can be used concurrently.
-type IDPool struct {
- sync.Mutex
-
- // used holds the set of values that have been returned to us with Put().
- used map[uint32]bool
- // maxUsed remembers the largest value we've given out.
- maxUsed uint32
-}
-
-// NewIDPool creates and initializes an IDPool.
-func NewIDPool() *IDPool {
- return &IDPool{
- used: make(map[uint32]bool),
- }
-}
-
-// Get returns an ID that is unique among currently active users of this pool.
-func (pool *IDPool) Get() (id uint32) {
- pool.Lock()
- defer pool.Unlock()
-
- // Pick a value that's been returned, if any.
- for key, _ := range pool.used {
- delete(pool.used, key)
- return key
- }
-
- // No recycled IDs are available, so increase the pool size.
- pool.maxUsed += 1
- return pool.maxUsed
-}
-
-// Put recycles an ID back into the pool for others to use. Putting back a value
-// or 0, or a value that is not currently "checked out", will result in a panic
-// because that should never happen except in the case of a programming error.
-func (pool *IDPool) Put(id uint32) {
- pool.Lock()
- defer pool.Unlock()
-
- if id < 1 || id > pool.maxUsed {
- panic(fmt.Errorf("IDPool.Put(%v): invalid value, must be in the range [1,%v]", id, pool.maxUsed))
- }
-
- if pool.used[id] {
- panic(fmt.Errorf("IDPool.Put(%v): can't put value that was already recycled", id))
- }
-
- // If we're recycling maxUsed, just shrink the pool.
- if id == pool.maxUsed {
- pool.maxUsed = id - 1
- return
- }
-
- // Add it to the set of recycled IDs.
- pool.used[id] = true
-}
diff --git a/vendor/github.com/ngaut/pools/id_pool_test.go b/vendor/github.com/ngaut/pools/id_pool_test.go
deleted file mode 100644
index a437874..0000000
--- a/vendor/github.com/ngaut/pools/id_pool_test.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2014, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pools
-
-import (
- "reflect"
- "strings"
- "testing"
-)
-
-func (pool *IDPool) want(want *IDPool, t *testing.T) {
- if pool.maxUsed != want.maxUsed {
- t.Errorf("pool.maxUsed = %#v, want %#v", pool.maxUsed, want.maxUsed)
- }
-
- if !reflect.DeepEqual(pool.used, want.used) {
- t.Errorf("pool.used = %#v, want %#v", pool.used, want.used)
- }
-}
-
-func TestIDPoolFirstGet(t *testing.T) {
- pool := NewIDPool()
-
- if got := pool.Get(); got != 1 {
- t.Errorf("pool.Get() = %v, want 1", got)
- }
-
- pool.want(&IDPool{used: map[uint32]bool{}, maxUsed: 1}, t)
-}
-
-func TestIDPoolSecondGet(t *testing.T) {
- pool := NewIDPool()
- pool.Get()
-
- if got := pool.Get(); got != 2 {
- t.Errorf("pool.Get() = %v, want 2", got)
- }
-
- pool.want(&IDPool{used: map[uint32]bool{}, maxUsed: 2}, t)
-}
-
-func TestIDPoolPutToUsedSet(t *testing.T) {
- pool := NewIDPool()
- id1 := pool.Get()
- pool.Get()
- pool.Put(id1)
-
- pool.want(&IDPool{used: map[uint32]bool{1: true}, maxUsed: 2}, t)
-}
-
-func TestIDPoolPutMaxUsed1(t *testing.T) {
- pool := NewIDPool()
- id1 := pool.Get()
- pool.Put(id1)
-
- pool.want(&IDPool{used: map[uint32]bool{}, maxUsed: 0}, t)
-}
-
-func TestIDPoolPutMaxUsed2(t *testing.T) {
- pool := NewIDPool()
- pool.Get()
- id2 := pool.Get()
- pool.Put(id2)
-
- pool.want(&IDPool{used: map[uint32]bool{}, maxUsed: 1}, t)
-}
-
-func TestIDPoolGetFromUsedSet(t *testing.T) {
- pool := NewIDPool()
- id1 := pool.Get()
- pool.Get()
- pool.Put(id1)
-
- if got := pool.Get(); got != 1 {
- t.Errorf("pool.Get() = %v, want 1", got)
- }
-
- pool.want(&IDPool{used: map[uint32]bool{}, maxUsed: 2}, t)
-}
-
-func wantError(want string, t *testing.T) {
- rec := recover()
- if rec == nil {
- t.Errorf("expected panic, but there wasn't one")
- }
- err, ok := rec.(error)
- if !ok || !strings.Contains(err.Error(), want) {
- t.Errorf("wrong error, got '%v', want '%v'", err, want)
- }
-}
-
-func TestIDPoolPut0(t *testing.T) {
- pool := NewIDPool()
- pool.Get()
-
- defer wantError("invalid value", t)
- pool.Put(0)
-}
-
-func TestIDPoolPutInvalid(t *testing.T) {
- pool := NewIDPool()
- pool.Get()
-
- defer wantError("invalid value", t)
- pool.Put(5)
-}
-
-func TestIDPoolPutDuplicate(t *testing.T) {
- pool := NewIDPool()
- pool.Get()
- pool.Get()
- pool.Put(1)
-
- defer wantError("already recycled", t)
- pool.Put(1)
-}
diff --git a/vendor/github.com/ngaut/pools/numbered.go b/vendor/github.com/ngaut/pools/numbered.go
deleted file mode 100644
index e170e03..0000000
--- a/vendor/github.com/ngaut/pools/numbered.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2012, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pools
-
-import (
- "fmt"
- "sync"
- "time"
-)
-
-// Numbered allows you to manage resources by tracking them with numbers.
-// There are no interface restrictions on what you can track.
-type Numbered struct {
- mu sync.Mutex
- empty *sync.Cond // Broadcast when pool becomes empty
- resources map[int64]*numberedWrapper
-}
-
-type numberedWrapper struct {
- val interface{}
- inUse bool
- purpose string
- timeCreated time.Time
- timeUsed time.Time
-}
-
-func NewNumbered() *Numbered {
- n := &Numbered{resources: make(map[int64]*numberedWrapper)}
- n.empty = sync.NewCond(&n.mu)
- return n
-}
-
-// Register starts tracking a resource by the supplied id.
-// It does not lock the object.
-// It returns an error if the id already exists.
-func (nu *Numbered) Register(id int64, val interface{}) error {
- nu.mu.Lock()
- defer nu.mu.Unlock()
- if _, ok := nu.resources[id]; ok {
- return fmt.Errorf("already present")
- }
- now := time.Now()
- nu.resources[id] = &numberedWrapper{
- val: val,
- timeCreated: now,
- timeUsed: now,
- }
- return nil
-}
-
-// Unregiester forgets the specified resource.
-// If the resource is not present, it's ignored.
-func (nu *Numbered) Unregister(id int64) {
- nu.mu.Lock()
- defer nu.mu.Unlock()
- delete(nu.resources, id)
- if len(nu.resources) == 0 {
- nu.empty.Broadcast()
- }
-}
-
-// Get locks the resource for use. It accepts a purpose as a string.
-// If it cannot be found, it returns a "not found" error. If in use,
-// it returns a "in use: purpose" error.
-func (nu *Numbered) Get(id int64, purpose string) (val interface{}, err error) {
- nu.mu.Lock()
- defer nu.mu.Unlock()
- nw, ok := nu.resources[id]
- if !ok {
- return nil, fmt.Errorf("not found")
- }
- if nw.inUse {
- return nil, fmt.Errorf("in use: %s", nw.purpose)
- }
- nw.inUse = true
- nw.purpose = purpose
- return nw.val, nil
-}
-
-// Put unlocks a resource for someone else to use.
-func (nu *Numbered) Put(id int64) {
- nu.mu.Lock()
- defer nu.mu.Unlock()
- if nw, ok := nu.resources[id]; ok {
- nw.inUse = false
- nw.purpose = ""
- nw.timeUsed = time.Now()
- }
-}
-
-// GetOutdated returns a list of resources that are older than age, and locks them.
-// It does not return any resources that are already locked.
-func (nu *Numbered) GetOutdated(age time.Duration, purpose string) (vals []interface{}) {
- nu.mu.Lock()
- defer nu.mu.Unlock()
- now := time.Now()
- for _, nw := range nu.resources {
- if nw.inUse {
- continue
- }
- if nw.timeCreated.Add(age).Sub(now) <= 0 {
- nw.inUse = true
- nw.purpose = purpose
- vals = append(vals, nw.val)
- }
- }
- return vals
-}
-
-// GetIdle returns a list of resurces that have been idle for longer
-// than timeout, and locks them. It does not return any resources that
-// are already locked.
-func (nu *Numbered) GetIdle(timeout time.Duration, purpose string) (vals []interface{}) {
- nu.mu.Lock()
- defer nu.mu.Unlock()
- now := time.Now()
- for _, nw := range nu.resources {
- if nw.inUse {
- continue
- }
- if nw.timeUsed.Add(timeout).Sub(now) <= 0 {
- nw.inUse = true
- nw.purpose = purpose
- vals = append(vals, nw.val)
- }
- }
- return vals
-}
-
-// WaitForEmpty returns as soon as the pool becomes empty
-func (nu *Numbered) WaitForEmpty() {
- nu.mu.Lock()
- defer nu.mu.Unlock()
- for len(nu.resources) != 0 {
- nu.empty.Wait()
- }
-}
-
-func (nu *Numbered) StatsJSON() string {
- return fmt.Sprintf("{\"Size\": %v}", nu.Size())
-}
-
-func (nu *Numbered) Size() (size int64) {
- nu.mu.Lock()
- defer nu.mu.Unlock()
- return int64(len(nu.resources))
-}
diff --git a/vendor/github.com/ngaut/pools/numbered_test.go b/vendor/github.com/ngaut/pools/numbered_test.go
deleted file mode 100644
index 54d5946..0000000
--- a/vendor/github.com/ngaut/pools/numbered_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2012, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pools
-
-import (
- "testing"
- "time"
-)
-
-func TestNumbered(t *testing.T) {
- id := int64(0)
- p := NewNumbered()
-
- var err error
- if err = p.Register(id, id); err != nil {
- t.Errorf("Error %v", err)
- }
- if err = p.Register(id, id); err.Error() != "already present" {
- t.Errorf("want 'already present', got '%v'", err)
- }
- var v interface{}
- if v, err = p.Get(id, "test"); err != nil {
- t.Errorf("Error %v", err)
- }
- if v.(int64) != id {
- t.Errorf("want %v, got %v", id, v.(int64))
- }
- if v, err = p.Get(id, "test1"); err.Error() != "in use: test" {
- t.Errorf("want 'in use: test', got '%v'", err)
- }
- p.Put(id)
- if v, err = p.Get(1, "test2"); err.Error() != "not found" {
- t.Errorf("want 'not found', got '%v'", err)
- }
- p.Unregister(1) // Should not fail
- p.Unregister(0)
- // p is now empty
-
- p.Register(id, id)
- id++
- p.Register(id, id)
- time.Sleep(300 * time.Millisecond)
- id++
- p.Register(id, id)
- time.Sleep(100 * time.Millisecond)
-
- // p has 0, 1, 2 (0 & 1 are aged)
- vals := p.GetOutdated(200*time.Millisecond, "by outdated")
- if len(vals) != 2 {
- t.Errorf("want 2, got %v", len(vals))
- }
- if v, err = p.Get(vals[0].(int64), "test1"); err.Error() != "in use: by outdated" {
- t.Errorf("want 'in use: by outdated', got '%v'", err)
- }
- for _, v := range vals {
- p.Put(v.(int64))
- }
- time.Sleep(100 * time.Millisecond)
-
- // p has 0, 1, 2 (2 is idle)
- vals = p.GetIdle(200*time.Millisecond, "by idle")
- if len(vals) != 1 {
- t.Errorf("want 1, got %v", len(vals))
- }
- if v, err = p.Get(vals[0].(int64), "test1"); err.Error() != "in use: by idle" {
- t.Errorf("want 'in use: by idle', got '%v'", err)
- }
- if vals[0].(int64) != 2 {
- t.Errorf("want 2, got %v", vals[0])
- }
- p.Unregister(vals[0].(int64))
-
- // p has 0 & 1
- if p.Size() != 2 {
- t.Errorf("want 2, got %v", p.Size())
- }
- go func() {
- p.Unregister(0)
- p.Unregister(1)
- }()
- p.WaitForEmpty()
-}
diff --git a/vendor/github.com/ngaut/pools/resource_pool.go b/vendor/github.com/ngaut/pools/resource_pool.go
deleted file mode 100644
index b02cf04..0000000
--- a/vendor/github.com/ngaut/pools/resource_pool.go
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2012, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package pools provides functionality to manage and reuse resources
-// like connections.
-package pools
-
-import (
- "fmt"
- "time"
-
- "github.com/ngaut/sync2"
-)
-
-var (
- CLOSED_ERR = fmt.Errorf("ResourcePool is closed")
-)
-
-// Factory is a function that can be used to create a resource.
-type Factory func() (Resource, error)
-
-// Every resource needs to suport the Resource interface.
-// Thread synchronization between Close() and IsClosed()
-// is the responsibility the caller.
-type Resource interface {
- Close()
-}
-
-// ResourcePool allows you to use a pool of resources.
-type ResourcePool struct {
- resources chan resourceWrapper
- factory Factory
- capacity sync2.AtomicInt64
- idleTimeout sync2.AtomicDuration
-
- // stats
- waitCount sync2.AtomicInt64
- waitTime sync2.AtomicDuration
-}
-
-type resourceWrapper struct {
- resource Resource
- timeUsed time.Time
-}
-
-// NewResourcePool creates a new ResourcePool pool.
-// capacity is the initial capacity of the pool.
-// maxCap is the maximum capacity.
-// If a resource is unused beyond idleTimeout, it's discarded.
-// An idleTimeout of 0 means that there is no timeout.
-func NewResourcePool(factory Factory, capacity, maxCap int, idleTimeout time.Duration) *ResourcePool {
- if capacity <= 0 || maxCap <= 0 || capacity > maxCap {
- panic(fmt.Errorf("Invalid/out of range capacity"))
- }
- rp := &ResourcePool{
- resources: make(chan resourceWrapper, maxCap),
- factory: factory,
- capacity: sync2.AtomicInt64(capacity),
- idleTimeout: sync2.AtomicDuration(idleTimeout),
- }
- for i := 0; i < capacity; i++ {
- rp.resources <- resourceWrapper{}
- }
- return rp
-}
-
-// Close empties the pool calling Close on all its resources.
-// You can call Close while there are outstanding resources.
-// It waits for all resources to be returned (Put).
-// After a Close, Get and TryGet are not allowed.
-func (rp *ResourcePool) Close() {
- rp.SetCapacity(0)
-}
-
-func (rp *ResourcePool) IsClosed() (closed bool) {
- return rp.capacity.Get() == 0
-}
-
-// Get will return the next available resource. If capacity
-// has not been reached, it will create a new one using the factory. Otherwise,
-// it will indefinitely wait till the next resource becomes available.
-func (rp *ResourcePool) Get() (resource Resource, err error) {
- return rp.get(true)
-}
-
-// TryGet will return the next available resource. If none is available, and capacity
-// has not been reached, it will create a new one using the factory. Otherwise,
-// it will return nil with no error.
-func (rp *ResourcePool) TryGet() (resource Resource, err error) {
- return rp.get(false)
-}
-
-func (rp *ResourcePool) get(wait bool) (resource Resource, err error) {
- // Fetch
- var wrapper resourceWrapper
- var ok bool
- select {
- case wrapper, ok = <-rp.resources:
- default:
- if !wait {
- return nil, nil
- }
- startTime := time.Now()
- wrapper, ok = <-rp.resources
- rp.recordWait(startTime)
- }
- if !ok {
- return nil, CLOSED_ERR
- }
-
- // Unwrap
- timeout := rp.idleTimeout.Get()
- if wrapper.resource != nil && timeout > 0 && wrapper.timeUsed.Add(timeout).Sub(time.Now()) < 0 {
- wrapper.resource.Close()
- wrapper.resource = nil
- }
- if wrapper.resource == nil {
- wrapper.resource, err = rp.factory()
- if err != nil {
- rp.resources <- resourceWrapper{}
- }
- }
- return wrapper.resource, err
-}
-
-// Put will return a resource to the pool. For every successful Get,
-// a corresponding Put is required. If you no longer need a resource,
-// you will need to call Put(nil) instead of returning the closed resource.
-// The will eventually cause a new resource to be created in its place.
-func (rp *ResourcePool) Put(resource Resource) {
- var wrapper resourceWrapper
- if resource != nil {
- wrapper = resourceWrapper{resource, time.Now()}
- }
- select {
- case rp.resources <- wrapper:
- default:
- panic(fmt.Errorf("Attempt to Put into a full ResourcePool"))
- }
-}
-
-// SetCapacity changes the capacity of the pool.
-// You can use it to shrink or expand, but not beyond
-// the max capacity. If the change requires the pool
-// to be shrunk, SetCapacity waits till the necessary
-// number of resources are returned to the pool.
-// A SetCapacity of 0 is equivalent to closing the ResourcePool.
-func (rp *ResourcePool) SetCapacity(capacity int) error {
- if capacity < 0 || capacity > cap(rp.resources) {
- return fmt.Errorf("capacity %d is out of range", capacity)
- }
-
- // Atomically swap new capacity with old, but only
- // if old capacity is non-zero.
- var oldcap int
- for {
- oldcap = int(rp.capacity.Get())
- if oldcap == 0 {
- return CLOSED_ERR
- }
- if oldcap == capacity {
- return nil
- }
- if rp.capacity.CompareAndSwap(int64(oldcap), int64(capacity)) {
- break
- }
- }
-
- if capacity < oldcap {
- for i := 0; i < oldcap-capacity; i++ {
- wrapper := <-rp.resources
- if wrapper.resource != nil {
- wrapper.resource.Close()
- }
- }
- } else {
- for i := 0; i < capacity-oldcap; i++ {
- rp.resources <- resourceWrapper{}
- }
- }
- if capacity == 0 {
- close(rp.resources)
- }
- return nil
-}
-
-func (rp *ResourcePool) recordWait(start time.Time) {
- rp.waitCount.Add(1)
- rp.waitTime.Add(time.Now().Sub(start))
-}
-
-func (rp *ResourcePool) SetIdleTimeout(idleTimeout time.Duration) {
- rp.idleTimeout.Set(idleTimeout)
-}
-
-func (rp *ResourcePool) StatsJSON() string {
- c, a, mx, wc, wt, it := rp.Stats()
- return fmt.Sprintf(`{"Capacity": %v, "Available": %v, "MaxCapacity": %v, "WaitCount": %v, "WaitTime": %v, "IdleTimeout": %v}`, c, a, mx, wc, int64(wt), int64(it))
-}
-
-func (rp *ResourcePool) Stats() (capacity, available, maxCap, waitCount int64, waitTime, idleTimeout time.Duration) {
- return rp.Capacity(), rp.Available(), rp.MaxCap(), rp.WaitCount(), rp.WaitTime(), rp.IdleTimeout()
-}
-
-func (rp *ResourcePool) Capacity() int64 {
- return rp.capacity.Get()
-}
-
-func (rp *ResourcePool) Available() int64 {
- return int64(len(rp.resources))
-}
-
-func (rp *ResourcePool) MaxCap() int64 {
- return int64(cap(rp.resources))
-}
-
-func (rp *ResourcePool) WaitCount() int64 {
- return rp.waitCount.Get()
-}
-
-func (rp *ResourcePool) WaitTime() time.Duration {
- return rp.waitTime.Get()
-}
-
-func (rp *ResourcePool) IdleTimeout() time.Duration {
- return rp.idleTimeout.Get()
-}
diff --git a/vendor/github.com/ngaut/pools/resource_pool_test.go b/vendor/github.com/ngaut/pools/resource_pool_test.go
deleted file mode 100644
index 27820dd..0000000
--- a/vendor/github.com/ngaut/pools/resource_pool_test.go
+++ /dev/null
@@ -1,487 +0,0 @@
-// Copyright 2012, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pools
-
-import (
- "errors"
- "testing"
- "time"
-
- "github.com/ngaut/sync2"
-)
-
-var lastId, count sync2.AtomicInt64
-
-type TestResource struct {
- num int64
- closed bool
-}
-
-func (tr *TestResource) Close() {
- if !tr.closed {
- count.Add(-1)
- tr.closed = true
- }
-}
-
-func (tr *TestResource) IsClosed() bool {
- return tr.closed
-}
-
-func PoolFactory() (Resource, error) {
- count.Add(1)
- return &TestResource{lastId.Add(1), false}, nil
-}
-
-func FailFactory() (Resource, error) {
- return nil, errors.New("Failed")
-}
-
-func SlowFailFactory() (Resource, error) {
- time.Sleep(10 * time.Nanosecond)
- return nil, errors.New("Failed")
-}
-
-func TestOpen(t *testing.T) {
- lastId.Set(0)
- count.Set(0)
- p := NewResourcePool(PoolFactory, 6, 6, time.Second)
- p.SetCapacity(5)
- var resources [10]Resource
-
- // Test Get
- for i := 0; i < 5; i++ {
- r, err := p.Get()
- resources[i] = r
- if err != nil {
- t.Errorf("Unexpected error %v", err)
- }
- _, available, _, waitCount, waitTime, _ := p.Stats()
- if available != int64(5-i-1) {
- t.Errorf("expecting %d, received %d", 5-i-1, available)
- }
- if waitCount != 0 {
- t.Errorf("expecting 0, received %d", waitCount)
- }
- if waitTime != 0 {
- t.Errorf("expecting 0, received %d", waitTime)
- }
- if lastId.Get() != int64(i+1) {
- t.Errorf("Expecting %d, received %d", i+1, lastId.Get())
- }
- if count.Get() != int64(i+1) {
- t.Errorf("Expecting %d, received %d", i+1, count.Get())
- }
- }
-
- // Test TryGet
- r, err := p.TryGet()
- if err != nil {
- t.Errorf("Unexpected error %v", err)
- }
- if r != nil {
- t.Errorf("Expecting nil")
- }
- for i := 0; i < 5; i++ {
- p.Put(resources[i])
- _, available, _, _, _, _ := p.Stats()
- if available != int64(i+1) {
- t.Errorf("expecting %d, received %d", 5-i-1, available)
- }
- }
- for i := 0; i < 5; i++ {
- r, err := p.TryGet()
- resources[i] = r
- if err != nil {
- t.Errorf("Unexpected error %v", err)
- }
- if r == nil {
- t.Errorf("Expecting non-nil")
- }
- if lastId.Get() != 5 {
- t.Errorf("Expecting 5, received %d", lastId.Get())
- }
- if count.Get() != 5 {
- t.Errorf("Expecting 5, received %d", count.Get())
- }
- }
-
- // Test that Get waits
- ch := make(chan bool)
- go func() {
- for i := 0; i < 5; i++ {
- r, err := p.Get()
- if err != nil {
- t.Errorf("Get failed: %v", err)
- }
- resources[i] = r
- }
- for i := 0; i < 5; i++ {
- p.Put(resources[i])
- }
- ch <- true
- }()
- for i := 0; i < 5; i++ {
- // Sleep to ensure the goroutine waits
- time.Sleep(10 * time.Nanosecond)
- p.Put(resources[i])
- }
- <-ch
- _, _, _, waitCount, waitTime, _ := p.Stats()
- if waitCount != 5 {
- t.Errorf("Expecting 5, received %d", waitCount)
- }
- if waitTime == 0 {
- t.Errorf("Expecting non-zero")
- }
- if lastId.Get() != 5 {
- t.Errorf("Expecting 5, received %d", lastId.Get())
- }
-
- // Test Close resource
- r, err = p.Get()
- if err != nil {
- t.Errorf("Unexpected error %v", err)
- }
- r.Close()
- p.Put(nil)
- if count.Get() != 4 {
- t.Errorf("Expecting 4, received %d", count.Get())
- }
- for i := 0; i < 5; i++ {
- r, err := p.Get()
- if err != nil {
- t.Errorf("Get failed: %v", err)
- }
- resources[i] = r
- }
- for i := 0; i < 5; i++ {
- p.Put(resources[i])
- }
- if count.Get() != 5 {
- t.Errorf("Expecting 5, received %d", count.Get())
- }
- if lastId.Get() != 6 {
- t.Errorf("Expecting 6, received %d", lastId.Get())
- }
-
- // SetCapacity
- p.SetCapacity(3)
- if count.Get() != 3 {
- t.Errorf("Expecting 3, received %d", count.Get())
- }
- if lastId.Get() != 6 {
- t.Errorf("Expecting 6, received %d", lastId.Get())
- }
- capacity, available, _, _, _, _ := p.Stats()
- if capacity != 3 {
- t.Errorf("Expecting 3, received %d", capacity)
- }
- if available != 3 {
- t.Errorf("Expecting 3, received %d", available)
- }
- p.SetCapacity(6)
- capacity, available, _, _, _, _ = p.Stats()
- if capacity != 6 {
- t.Errorf("Expecting 6, received %d", capacity)
- }
- if available != 6 {
- t.Errorf("Expecting 6, received %d", available)
- }
- for i := 0; i < 6; i++ {
- r, err := p.Get()
- if err != nil {
- t.Errorf("Get failed: %v", err)
- }
- resources[i] = r
- }
- for i := 0; i < 6; i++ {
- p.Put(resources[i])
- }
- if count.Get() != 6 {
- t.Errorf("Expecting 5, received %d", count.Get())
- }
- if lastId.Get() != 9 {
- t.Errorf("Expecting 9, received %d", lastId.Get())
- }
-
- // Close
- p.Close()
- capacity, available, _, _, _, _ = p.Stats()
- if capacity != 0 {
- t.Errorf("Expecting 0, received %d", capacity)
- }
- if available != 0 {
- t.Errorf("Expecting 0, received %d", available)
- }
- if count.Get() != 0 {
- t.Errorf("Expecting 0, received %d", count.Get())
- }
-}
-
-func TestShrinking(t *testing.T) {
- lastId.Set(0)
- count.Set(0)
- p := NewResourcePool(PoolFactory, 5, 5, time.Second)
- var resources [10]Resource
- // Leave one empty slot in the pool
- for i := 0; i < 4; i++ {
- r, err := p.Get()
- if err != nil {
- t.Errorf("Get failed: %v", err)
- }
- resources[i] = r
- }
- go p.SetCapacity(3)
- time.Sleep(10 * time.Nanosecond)
- stats := p.StatsJSON()
- expected := `{"Capacity": 3, "Available": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000}`
- if stats != expected {
- t.Errorf(`expecting '%s', received '%s'`, expected, stats)
- }
-
- // TryGet is allowed when shrinking
- r, err := p.TryGet()
- if err != nil {
- t.Errorf("Unexpected error %v", err)
- }
- if r != nil {
- t.Errorf("Expecting nil")
- }
-
- // Get is allowed when shrinking, but it will wait
- getdone := make(chan bool)
- go func() {
- r, err := p.Get()
- if err != nil {
- t.Errorf("Unexpected error %v", err)
- }
- p.Put(r)
- getdone <- true
- }()
-
- // Put is allowed when shrinking. It's necessary.
- for i := 0; i < 4; i++ {
- p.Put(resources[i])
- }
- // Wait for Get test to complete
- <-getdone
- stats = p.StatsJSON()
- expected = `{"Capacity": 3, "Available": 3, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000}`
- if stats != expected {
- t.Errorf(`expecting '%s', received '%s'`, expected, stats)
- }
- if count.Get() != 3 {
- t.Errorf("Expecting 3, received %d", count.Get())
- }
-
- // Ensure no deadlock if SetCapacity is called after we start
- // waiting for a resource
- for i := 0; i < 3; i++ {
- resources[i], err = p.Get()
- if err != nil {
- t.Errorf("Unexpected error %v", err)
- }
- }
- // This will wait because pool is empty
- go func() {
- r, err := p.Get()
- if err != nil {
- t.Errorf("Unexpected error %v", err)
- }
- p.Put(r)
- getdone <- true
- }()
- time.Sleep(10 * time.Nanosecond)
-
- // This will wait till we Put
- go p.SetCapacity(2)
- time.Sleep(10 * time.Nanosecond)
-
- // This should not hang
- for i := 0; i < 3; i++ {
- p.Put(resources[i])
- }
- <-getdone
- capacity, available, _, _, _, _ := p.Stats()
- if capacity != 2 {
- t.Errorf("Expecting 2, received %d", capacity)
- }
- if available != 2 {
- t.Errorf("Expecting 2, received %d", available)
- }
- if count.Get() != 2 {
- t.Errorf("Expecting 2, received %d", count.Get())
- }
-
- // Test race condition of SetCapacity with itself
- p.SetCapacity(3)
- for i := 0; i < 3; i++ {
- resources[i], err = p.Get()
- if err != nil {
- t.Errorf("Unexpected error %v", err)
- }
- }
- // This will wait because pool is empty
- go func() {
- r, err := p.Get()
- if err != nil {
- t.Errorf("Unexpected error %v", err)
- }
- p.Put(r)
- getdone <- true
- }()
- time.Sleep(10 * time.Nanosecond)
-
- // This will wait till we Put
- go p.SetCapacity(2)
- time.Sleep(10 * time.Nanosecond)
- go p.SetCapacity(4)
- time.Sleep(10 * time.Nanosecond)
-
- // This should not hang
- for i := 0; i < 3; i++ {
- p.Put(resources[i])
- }
- <-getdone
-
- err = p.SetCapacity(-1)
- if err == nil {
- t.Errorf("Expecting error")
- }
- err = p.SetCapacity(255555)
- if err == nil {
- t.Errorf("Expecting error")
- }
-
- capacity, available, _, _, _, _ = p.Stats()
- if capacity != 4 {
- t.Errorf("Expecting 4, received %d", capacity)
- }
- if available != 4 {
- t.Errorf("Expecting 4, received %d", available)
- }
-}
-
-func TestClosing(t *testing.T) {
- lastId.Set(0)
- count.Set(0)
- p := NewResourcePool(PoolFactory, 5, 5, time.Second)
- var resources [10]Resource
- for i := 0; i < 5; i++ {
- r, err := p.Get()
- if err != nil {
- t.Errorf("Get failed: %v", err)
- }
- resources[i] = r
- }
- ch := make(chan bool)
- go func() {
- p.Close()
- ch <- true
- }()
-
- // Wait for goroutine to call Close
- time.Sleep(10 * time.Nanosecond)
- stats := p.StatsJSON()
- expected := `{"Capacity": 0, "Available": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000}`
- if stats != expected {
- t.Errorf(`expecting '%s', received '%s'`, expected, stats)
- }
-
- // Put is allowed when closing
- for i := 0; i < 5; i++ {
- p.Put(resources[i])
- }
-
- // Wait for Close to return
- <-ch
-
- // SetCapacity must be ignored after Close
- err := p.SetCapacity(1)
- if err == nil {
- t.Errorf("expecting error")
- }
-
- stats = p.StatsJSON()
- expected = `{"Capacity": 0, "Available": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000}`
- if stats != expected {
- t.Errorf(`expecting '%s', received '%s'`, expected, stats)
- }
- if lastId.Get() != 5 {
- t.Errorf("Expecting 5, received %d", count.Get())
- }
- if count.Get() != 0 {
- t.Errorf("Expecting 0, received %d", count.Get())
- }
-}
-
-func TestIdleTimeout(t *testing.T) {
- lastId.Set(0)
- count.Set(0)
- p := NewResourcePool(PoolFactory, 1, 1, 10*time.Nanosecond)
- defer p.Close()
-
- r, err := p.Get()
- if err != nil {
- t.Errorf("Unexpected error %v", err)
- }
- p.Put(r)
- if lastId.Get() != 1 {
- t.Errorf("Expecting 1, received %d", count.Get())
- }
- if count.Get() != 1 {
- t.Errorf("Expecting 1, received %d", count.Get())
- }
- time.Sleep(20 * time.Nanosecond)
- r, err = p.Get()
- if err != nil {
- t.Errorf("Unexpected error %v", err)
- }
- if lastId.Get() != 2 {
- t.Errorf("Expecting 2, received %d", count.Get())
- }
- if count.Get() != 1 {
- t.Errorf("Expecting 1, received %d", count.Get())
- }
- p.Put(r)
-}
-
-func TestCreateFail(t *testing.T) {
- lastId.Set(0)
- count.Set(0)
- p := NewResourcePool(FailFactory, 5, 5, time.Second)
- defer p.Close()
- if _, err := p.Get(); err.Error() != "Failed" {
- t.Errorf("Expecting Failed, received %v", err)
- }
- stats := p.StatsJSON()
- expected := `{"Capacity": 5, "Available": 5, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000}`
- if stats != expected {
- t.Errorf(`expecting '%s', received '%s'`, expected, stats)
- }
-}
-
-func TestSlowCreateFail(t *testing.T) {
- lastId.Set(0)
- count.Set(0)
- p := NewResourcePool(SlowFailFactory, 2, 2, time.Second)
- defer p.Close()
- ch := make(chan bool)
- // The third Get should not wait indefinitely
- for i := 0; i < 3; i++ {
- go func() {
- p.Get()
- ch <- true
- }()
- }
- for i := 0; i < 3; i++ {
- <-ch
- }
- _, available, _, _, _, _ := p.Stats()
- if available != 2 {
- t.Errorf("Expecting 2, received %d", available)
- }
-}
diff --git a/vendor/github.com/ngaut/pools/roundrobin.go b/vendor/github.com/ngaut/pools/roundrobin.go
deleted file mode 100644
index b06985f..0000000
--- a/vendor/github.com/ngaut/pools/roundrobin.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2012, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pools
-
-import (
- "fmt"
- "sync"
- "time"
-)
-
-// RoundRobin is deprecated. Use ResourcePool instead.
-// RoundRobin allows you to use a pool of resources in a round robin fashion.
-type RoundRobin struct {
- mu sync.Mutex
- available *sync.Cond
- resources chan fifoWrapper
- size int64
- factory Factory
- idleTimeout time.Duration
-
- // stats
- waitCount int64
- waitTime time.Duration
-}
-
-type fifoWrapper struct {
- resource Resource
- timeUsed time.Time
-}
-
-// NewRoundRobin creates a new RoundRobin pool.
-// capacity is the maximum number of resources RoundRobin will create.
-// factory will be the function used to create resources.
-// If a resource is unused beyond idleTimeout, it's discarded.
-func NewRoundRobin(capacity int, idleTimeout time.Duration) *RoundRobin {
- r := &RoundRobin{
- resources: make(chan fifoWrapper, capacity),
- size: 0,
- idleTimeout: idleTimeout,
- }
- r.available = sync.NewCond(&r.mu)
- return r
-}
-
-// Open starts allowing the creation of resources
-func (rr *RoundRobin) Open(factory Factory) {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- rr.factory = factory
-}
-
-// Close empties the pool calling Close on all its resources.
-// It waits for all resources to be returned (Put).
-func (rr *RoundRobin) Close() {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- for rr.size > 0 {
- select {
- case fw := <-rr.resources:
- go fw.resource.Close()
- rr.size--
- default:
- rr.available.Wait()
- }
- }
- rr.factory = nil
-}
-
-func (rr *RoundRobin) IsClosed() bool {
- return rr.factory == nil
-}
-
-// Get will return the next available resource. If none is available, and capacity
-// has not been reached, it will create a new one using the factory. Otherwise,
-// it will indefinitely wait till the next resource becomes available.
-func (rr *RoundRobin) Get() (resource Resource, err error) {
- return rr.get(true)
-}
-
-// TryGet will return the next available resource. If none is available, and capacity
-// has not been reached, it will create a new one using the factory. Otherwise,
-// it will return nil with no error.
-func (rr *RoundRobin) TryGet() (resource Resource, err error) {
- return rr.get(false)
-}
-
-func (rr *RoundRobin) get(wait bool) (resource Resource, err error) {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- // Any waits in this loop will release the lock, and it will be
- // reacquired before the waits return.
- for {
- select {
- case fw := <-rr.resources:
- // Found a free resource in the channel
- if rr.idleTimeout > 0 && fw.timeUsed.Add(rr.idleTimeout).Sub(time.Now()) < 0 {
- // resource has been idle for too long. Discard & go for next.
- go fw.resource.Close()
- rr.size--
- // Nobody else should be waiting, but signal anyway.
- rr.available.Signal()
- continue
- }
- return fw.resource, nil
- default:
- // resource channel is empty
- if rr.size >= int64(cap(rr.resources)) {
- // The pool is full
- if wait {
- start := time.Now()
- rr.available.Wait()
- rr.recordWait(start)
- continue
- }
- return nil, nil
- }
- // Pool is not full. Create a resource.
- if resource, err = rr.waitForCreate(); err != nil {
- // size was decremented, and somebody could be waiting.
- rr.available.Signal()
- return nil, err
- }
- // Creation successful. Account for this by incrementing size.
- rr.size++
- return resource, err
- }
- }
-}
-
-func (rr *RoundRobin) recordWait(start time.Time) {
- rr.waitCount++
- rr.waitTime += time.Now().Sub(start)
-}
-
-func (rr *RoundRobin) waitForCreate() (resource Resource, err error) {
- // Prevent thundering herd: increment size before creating resource, and decrement after.
- rr.size++
- rr.mu.Unlock()
- defer func() {
- rr.mu.Lock()
- rr.size--
- }()
- return rr.factory()
-}
-
-// Put will return a resource to the pool. You MUST return every resource to the pool,
-// even if it's closed. If a resource is closed, you should call Put(nil).
-func (rr *RoundRobin) Put(resource Resource) {
- rr.mu.Lock()
- defer rr.available.Signal()
- defer rr.mu.Unlock()
-
- if rr.size > int64(cap(rr.resources)) {
- if resource != nil {
- go resource.Close()
- }
- rr.size--
- } else if resource == nil {
- rr.size--
- } else {
- if len(rr.resources) == cap(rr.resources) {
- panic("unexpected")
- }
- rr.resources <- fifoWrapper{resource, time.Now()}
- }
-}
-
-// Set capacity changes the capacity of the pool.
-// You can use it to expand or shrink.
-func (rr *RoundRobin) SetCapacity(capacity int) error {
- rr.mu.Lock()
- defer rr.available.Broadcast()
- defer rr.mu.Unlock()
-
- nr := make(chan fifoWrapper, capacity)
- // This loop transfers resources from the old channel
- // to the new one, until it fills up or runs out.
- // It discards extras, if any.
- for {
- select {
- case fw := <-rr.resources:
- if len(nr) < cap(nr) {
- nr <- fw
- } else {
- go fw.resource.Close()
- rr.size--
- }
- continue
- default:
- }
- break
- }
- rr.resources = nr
- return nil
-}
-
-func (rr *RoundRobin) SetIdleTimeout(idleTimeout time.Duration) {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- rr.idleTimeout = idleTimeout
-}
-
-func (rr *RoundRobin) StatsJSON() string {
- s, c, a, wc, wt, it := rr.Stats()
- return fmt.Sprintf("{\"Size\": %v, \"Capacity\": %v, \"Available\": %v, \"WaitCount\": %v, \"WaitTime\": %v, \"IdleTimeout\": %v}", s, c, a, wc, int64(wt), int64(it))
-}
-
-func (rr *RoundRobin) Stats() (size, capacity, available, waitCount int64, waitTime, idleTimeout time.Duration) {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- return rr.size, int64(cap(rr.resources)), int64(len(rr.resources)), rr.waitCount, rr.waitTime, rr.idleTimeout
-}
diff --git a/vendor/github.com/ngaut/pools/roundrobin_test.go b/vendor/github.com/ngaut/pools/roundrobin_test.go
deleted file mode 100644
index 870503d..0000000
--- a/vendor/github.com/ngaut/pools/roundrobin_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2012, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pools
-
-import (
- "testing"
- "time"
-)
-
-func TestPool(t *testing.T) {
- lastId.Set(0)
- p := NewRoundRobin(5, time.Duration(10e9))
- p.Open(PoolFactory)
- defer p.Close()
-
- for i := 0; i < 2; i++ {
- r, err := p.TryGet()
- if err != nil {
- t.Errorf("TryGet failed: %v", err)
- }
- if r.(*TestResource).num != 1 {
- t.Errorf("Expecting 1, received %d", r.(*TestResource).num)
- }
- p.Put(r)
- }
- // p = [1]
-
- all := make([]Resource, 5)
- for i := 0; i < 5; i++ {
- if all[i], _ = p.TryGet(); all[i] == nil {
- t.Errorf("TryGet failed with nil")
- }
- }
- // all = [1-5], p is empty
- if none, _ := p.TryGet(); none != nil {
- t.Errorf("TryGet failed with non-nil")
- }
-
- ch := make(chan bool)
- go ResourceWait(p, t, ch)
- time.Sleep(1e8)
- for i := 0; i < 5; i++ {
- p.Put(all[i])
- }
- // p = [1-5]
- <-ch
- // p = [1-5]
- if p.waitCount != 1 {
- t.Errorf("Expecting 1, received %d", p.waitCount)
- }
-
- for i := 0; i < 5; i++ {
- all[i], _ = p.Get()
- }
- // all = [1-5], p is empty
- all[0].(*TestResource).Close()
- p.Put(nil)
- for i := 1; i < 5; i++ {
- p.Put(all[i])
- }
- // p = [2-5]
-
- for i := 0; i < 4; i++ {
- r, _ := p.Get()
- if r.(*TestResource).num != int64(i+2) {
- t.Errorf("Expecting %d, received %d", i+2, r.(*TestResource).num)
- }
- p.Put(r)
- }
-
- p.SetCapacity(3)
- // p = [2-4]
- if p.size != 3 {
- t.Errorf("Expecting 3, received %d", p.size)
- }
-
- p.SetIdleTimeout(time.Duration(1e8))
- time.Sleep(2e8)
- r, _ := p.Get()
- if r.(*TestResource).num != 6 {
- t.Errorf("Expecting 6, received %d", r.(*TestResource).num)
- }
- p.Put(r)
- // p = [6]
-}
-
-func TestPoolFail(t *testing.T) {
- p := NewRoundRobin(5, time.Duration(10e9))
- p.Open(FailFactory)
- defer p.Close()
- if _, err := p.Get(); err.Error() != "Failed" {
- t.Errorf("Expecting Failed, received %v", err)
- }
-}
-
-func TestPoolFullFail(t *testing.T) {
- p := NewRoundRobin(2, time.Duration(10e9))
- p.Open(SlowFailFactory)
- defer p.Close()
- ch := make(chan bool)
- // The third get should not wait indefinitely
- for i := 0; i < 3; i++ {
- go func() {
- p.Get()
- ch <- true
- }()
- }
- for i := 0; i < 3; i++ {
- <-ch
- }
-}
-
-func ResourceWait(p *RoundRobin, t *testing.T, ch chan bool) {
- for i := 0; i < 5; i++ {
- if r, err := p.Get(); err != nil {
- t.Errorf("TryGet failed: %v", err)
- } else if r.(*TestResource).num != int64(i+1) {
- t.Errorf("Expecting %d, received %d", i+1, r.(*TestResource).num)
- } else {
- p.Put(r)
- }
- }
- ch <- true
-}
diff --git a/vendor/github.com/ngaut/pools/vitess_license b/vendor/github.com/ngaut/pools/vitess_license
deleted file mode 100644
index 989d02e..0000000
--- a/vendor/github.com/ngaut/pools/vitess_license
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright 2012, Google Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ngaut/sync2/atomic.go b/vendor/github.com/ngaut/sync2/atomic.go
deleted file mode 100644
index 909f3b1..0000000
--- a/vendor/github.com/ngaut/sync2/atomic.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2013, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync2
-
-import (
- "sync"
- "sync/atomic"
- "time"
-)
-
-type AtomicInt32 int32
-
-func (i *AtomicInt32) Add(n int32) int32 {
- return atomic.AddInt32((*int32)(i), n)
-}
-
-func (i *AtomicInt32) Set(n int32) {
- atomic.StoreInt32((*int32)(i), n)
-}
-
-func (i *AtomicInt32) Get() int32 {
- return atomic.LoadInt32((*int32)(i))
-}
-
-func (i *AtomicInt32) CompareAndSwap(oldval, newval int32) (swapped bool) {
- return atomic.CompareAndSwapInt32((*int32)(i), oldval, newval)
-}
-
-type AtomicUint32 uint32
-
-func (i *AtomicUint32) Add(n uint32) uint32 {
- return atomic.AddUint32((*uint32)(i), n)
-}
-
-func (i *AtomicUint32) Set(n uint32) {
- atomic.StoreUint32((*uint32)(i), n)
-}
-
-func (i *AtomicUint32) Get() uint32 {
- return atomic.LoadUint32((*uint32)(i))
-}
-
-func (i *AtomicUint32) CompareAndSwap(oldval, newval uint32) (swapped bool) {
- return atomic.CompareAndSwapUint32((*uint32)(i), oldval, newval)
-}
-
-type AtomicInt64 int64
-
-func (i *AtomicInt64) Add(n int64) int64 {
- return atomic.AddInt64((*int64)(i), n)
-}
-
-func (i *AtomicInt64) Set(n int64) {
- atomic.StoreInt64((*int64)(i), n)
-}
-
-func (i *AtomicInt64) Get() int64 {
- return atomic.LoadInt64((*int64)(i))
-}
-
-func (i *AtomicInt64) CompareAndSwap(oldval, newval int64) (swapped bool) {
- return atomic.CompareAndSwapInt64((*int64)(i), oldval, newval)
-}
-
-type AtomicDuration int64
-
-func (d *AtomicDuration) Add(duration time.Duration) time.Duration {
- return time.Duration(atomic.AddInt64((*int64)(d), int64(duration)))
-}
-
-func (d *AtomicDuration) Set(duration time.Duration) {
- atomic.StoreInt64((*int64)(d), int64(duration))
-}
-
-func (d *AtomicDuration) Get() time.Duration {
- return time.Duration(atomic.LoadInt64((*int64)(d)))
-}
-
-func (d *AtomicDuration) CompareAndSwap(oldval, newval time.Duration) (swapped bool) {
- return atomic.CompareAndSwapInt64((*int64)(d), int64(oldval), int64(newval))
-}
-
-// AtomicString gives you atomic-style APIs for string, but
-// it's only a convenience wrapper that uses a mutex. So, it's
-// not as efficient as the rest of the atomic types.
-type AtomicString struct {
- mu sync.Mutex
- str string
-}
-
-func (s *AtomicString) Set(str string) {
- s.mu.Lock()
- s.str = str
- s.mu.Unlock()
-}
-
-func (s *AtomicString) Get() string {
- s.mu.Lock()
- str := s.str
- s.mu.Unlock()
- return str
-}
-
-func (s *AtomicString) CompareAndSwap(oldval, newval string) (swqpped bool) {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.str == oldval {
- s.str = newval
- return true
- }
- return false
-}
diff --git a/vendor/github.com/ngaut/sync2/atomic_test.go b/vendor/github.com/ngaut/sync2/atomic_test.go
deleted file mode 100644
index 7261159..0000000
--- a/vendor/github.com/ngaut/sync2/atomic_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2013, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync2
-
-import (
- "testing"
-)
-
-func TestAtomicString(t *testing.T) {
- var s AtomicString
- if s.Get() != "" {
- t.Errorf("want empty, got %s", s.Get())
- }
- s.Set("a")
- if s.Get() != "a" {
- t.Errorf("want a, got %s", s.Get())
- }
- if s.CompareAndSwap("b", "c") {
- t.Errorf("want false, got true")
- }
- if s.Get() != "a" {
- t.Errorf("want a, got %s", s.Get())
- }
- if !s.CompareAndSwap("a", "c") {
- t.Errorf("want true, got false")
- }
- if s.Get() != "c" {
- t.Errorf("want c, got %s", s.Get())
- }
-}
diff --git a/vendor/github.com/ngaut/sync2/cond.go b/vendor/github.com/ngaut/sync2/cond.go
deleted file mode 100644
index dea11ae..0000000
--- a/vendor/github.com/ngaut/sync2/cond.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2013, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync2
-
-import (
- "sync"
-)
-
-// Cond is an alternate implementation of sync.Cond
-type Cond struct {
- L sync.Locker
- sema chan struct{}
- waiters AtomicInt64
-}
-
-func NewCond(l sync.Locker) *Cond {
- return &Cond{L: l, sema: make(chan struct{})}
-}
-
-func (c *Cond) Wait() {
- c.waiters.Add(1)
- c.L.Unlock()
- <-c.sema
- c.L.Lock()
-}
-
-func (c *Cond) Signal() {
- for {
- w := c.waiters.Get()
- if w == 0 {
- return
- }
- if c.waiters.CompareAndSwap(w, w-1) {
- break
- }
- }
- c.sema <- struct{}{}
-}
-
-func (c *Cond) Broadcast() {
- var w int64
- for {
- w = c.waiters.Get()
- if w == 0 {
- return
- }
- if c.waiters.CompareAndSwap(w, 0) {
- break
- }
- }
- for i := int64(0); i < w; i++ {
- c.sema <- struct{}{}
- }
-}
diff --git a/vendor/github.com/ngaut/sync2/cond_test.go b/vendor/github.com/ngaut/sync2/cond_test.go
deleted file mode 100644
index 4dd3929..0000000
--- a/vendor/github.com/ngaut/sync2/cond_test.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package sync2
-
-import (
- "fmt"
- "runtime"
- "sync"
- "testing"
-)
-
-func TestCondSignal(t *testing.T) {
- var m sync.Mutex
- c := NewCond(&m)
- n := 2
- running := make(chan bool, n)
- awake := make(chan bool, n)
- for i := 0; i < n; i++ {
- go func() {
- m.Lock()
- running <- true
- c.Wait()
- awake <- true
- m.Unlock()
- }()
- }
- for i := 0; i < n; i++ {
- <-running // Wait for everyone to run.
- }
- for n > 0 {
- select {
- case <-awake:
- t.Fatal("goroutine not asleep")
- default:
- }
- m.Lock()
- c.Signal()
- m.Unlock()
- <-awake // Will deadlock if no goroutine wakes up
- select {
- case <-awake:
- t.Fatal("too many goroutines awake")
- default:
- }
- n--
- }
- c.Signal()
-}
-
-func TestCondSignalGenerations(t *testing.T) {
- var m sync.Mutex
- c := NewCond(&m)
- n := 100
- running := make(chan bool, n)
- awake := make(chan int, n)
- for i := 0; i < n; i++ {
- go func(i int) {
- m.Lock()
- running <- true
- c.Wait()
- awake <- i
- m.Unlock()
- }(i)
- if i > 0 {
- a := <-awake
- if a != i-1 {
- t.Fatalf("wrong goroutine woke up: want %d, got %d", i-1, a)
- }
- }
- <-running
- m.Lock()
- c.Signal()
- m.Unlock()
- }
-}
-
-func TestCondBroadcast(t *testing.T) {
- var m sync.Mutex
- c := NewCond(&m)
- n := 200
- running := make(chan int, n)
- awake := make(chan int, n)
- exit := false
- for i := 0; i < n; i++ {
- go func(g int) {
- m.Lock()
- for !exit {
- running <- g
- c.Wait()
- awake <- g
- }
- m.Unlock()
- }(i)
- }
- for i := 0; i < n; i++ {
- for i := 0; i < n; i++ {
- <-running // Will deadlock unless n are running.
- }
- if i == n-1 {
- m.Lock()
- exit = true
- m.Unlock()
- }
- select {
- case <-awake:
- t.Fatal("goroutine not asleep")
- default:
- }
- m.Lock()
- c.Broadcast()
- m.Unlock()
- seen := make([]bool, n)
- for i := 0; i < n; i++ {
- g := <-awake
- if seen[g] {
- t.Fatal("goroutine woke up twice")
- }
- seen[g] = true
- }
- }
- select {
- case <-running:
- t.Fatal("goroutine did not exit")
- default:
- }
- c.Broadcast()
-}
-
-func TestRace(t *testing.T) {
- x := 0
- c := NewCond(&sync.Mutex{})
- done := make(chan bool)
- go func() {
- c.L.Lock()
- x = 1
- c.Wait()
- if x != 2 {
- t.Fatal("want 2")
- }
- x = 3
- c.Signal()
- c.L.Unlock()
- done <- true
- }()
- go func() {
- c.L.Lock()
- for {
- if x == 1 {
- x = 2
- c.Signal()
- break
- }
- c.L.Unlock()
- runtime.Gosched()
- c.L.Lock()
- }
- c.L.Unlock()
- done <- true
- }()
- go func() {
- c.L.Lock()
- for {
- if x == 2 {
- c.Wait()
- if x != 3 {
- t.Fatal("want 3")
- }
- break
- }
- if x == 3 {
- break
- }
- c.L.Unlock()
- runtime.Gosched()
- c.L.Lock()
- }
- c.L.Unlock()
- done <- true
- }()
- <-done
- <-done
- <-done
-}
-
-// Bench: Rename this function to TestBench for running benchmarks
-func Bench(t *testing.T) {
- waitvals := []int{1, 2, 4, 8}
- maxprocs := []int{1, 2, 4}
- fmt.Printf("procs\twaiters\told\tnew\tdelta\n")
- for _, procs := range maxprocs {
- runtime.GOMAXPROCS(procs)
- for _, waiters := range waitvals {
- oldbench := func(b *testing.B) {
- benchmarkCond(b, waiters)
- }
- oldbr := testing.Benchmark(oldbench)
- newbench := func(b *testing.B) {
- benchmarkCond2(b, waiters)
- }
- newbr := testing.Benchmark(newbench)
- oldns := oldbr.NsPerOp()
- newns := newbr.NsPerOp()
- percent := float64(newns-oldns) * 100.0 / float64(oldns)
- fmt.Printf("%d\t%d\t%d\t%d\t%6.2f%%\n", procs, waiters, oldns, newns, percent)
- }
- }
-}
-
-func benchmarkCond2(b *testing.B, waiters int) {
- c := NewCond(&sync.Mutex{})
- done := make(chan bool)
- id := 0
-
- for routine := 0; routine < waiters+1; routine++ {
- go func() {
- for i := 0; i < b.N; i++ {
- c.L.Lock()
- if id == -1 {
- c.L.Unlock()
- break
- }
- id++
- if id == waiters+1 {
- id = 0
- c.Broadcast()
- } else {
- c.Wait()
- }
- c.L.Unlock()
- }
- c.L.Lock()
- id = -1
- c.Broadcast()
- c.L.Unlock()
- done <- true
- }()
- }
- for routine := 0; routine < waiters+1; routine++ {
- <-done
- }
-}
-
-func benchmarkCond(b *testing.B, waiters int) {
- c := sync.NewCond(&sync.Mutex{})
- done := make(chan bool)
- id := 0
-
- for routine := 0; routine < waiters+1; routine++ {
- go func() {
- for i := 0; i < b.N; i++ {
- c.L.Lock()
- if id == -1 {
- c.L.Unlock()
- break
- }
- id++
- if id == waiters+1 {
- id = 0
- c.Broadcast()
- } else {
- c.Wait()
- }
- c.L.Unlock()
- }
- c.L.Lock()
- id = -1
- c.Broadcast()
- c.L.Unlock()
- done <- true
- }()
- }
- for routine := 0; routine < waiters+1; routine++ {
- <-done
- }
-}
diff --git a/vendor/github.com/ngaut/sync2/semaphore.go b/vendor/github.com/ngaut/sync2/semaphore.go
deleted file mode 100644
index 190a27d..0000000
--- a/vendor/github.com/ngaut/sync2/semaphore.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2012, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync2
-
-// What's in a name? Channels have all you need to emulate a counting
-// semaphore with a boatload of extra functionality. However, in some
-// cases, you just want a familiar API.
-
-import (
- "time"
-)
-
-// Semaphore is a counting semaphore with the option to
-// specify a timeout.
-type Semaphore struct {
- slots chan struct{}
- timeout time.Duration
-}
-
-// NewSemaphore creates a Semaphore. The count parameter must be a positive
-// number. A timeout of zero means that there is no timeout.
-func NewSemaphore(count int, timeout time.Duration) *Semaphore {
- sem := &Semaphore{
- slots: make(chan struct{}, count),
- timeout: timeout,
- }
- for i := 0; i < count; i++ {
- sem.slots <- struct{}{}
- }
- return sem
-}
-
-// Acquire returns true on successful acquisition, and
-// false on a timeout.
-func (sem *Semaphore) Acquire() bool {
- if sem.timeout == 0 {
- <-sem.slots
- return true
- }
- select {
- case <-sem.slots:
- return true
- case <-time.After(sem.timeout):
- return false
- }
-}
-
-// Release releases the acquired semaphore. You must
-// not release more than the number of semaphores you've
-// acquired.
-func (sem *Semaphore) Release() {
- sem.slots <- struct{}{}
-}
diff --git a/vendor/github.com/ngaut/sync2/semaphore_test.go b/vendor/github.com/ngaut/sync2/semaphore_test.go
deleted file mode 100644
index 207c8f9..0000000
--- a/vendor/github.com/ngaut/sync2/semaphore_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2012, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync2
-
-import (
- "testing"
- "time"
-)
-
-func TestSemaNoTimeout(t *testing.T) {
- s := NewSemaphore(1, 0)
- s.Acquire()
- released := false
- go func() {
- time.Sleep(10 * time.Millisecond)
- released = true
- s.Release()
- }()
- s.Acquire()
- if !released {
- t.Errorf("want true, got false")
- }
-}
-
-func TestSemaTimeout(t *testing.T) {
- s := NewSemaphore(1, 5*time.Millisecond)
- s.Acquire()
- go func() {
- time.Sleep(10 * time.Millisecond)
- s.Release()
- }()
- if ok := s.Acquire(); ok {
- t.Errorf("want false, got true")
- }
- time.Sleep(10 * time.Millisecond)
- if ok := s.Acquire(); !ok {
- t.Errorf("want true, got false")
- }
-}
diff --git a/vendor/github.com/ngaut/sync2/service_manager.go b/vendor/github.com/ngaut/sync2/service_manager.go
deleted file mode 100644
index 4b85e01..0000000
--- a/vendor/github.com/ngaut/sync2/service_manager.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2013, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync2
-
-import (
- "sync"
-)
-
-// These are the three predefined states of a service.
-const (
- SERVICE_STOPPED = iota
- SERVICE_RUNNING
- SERVICE_SHUTTING_DOWN
-)
-
-var stateNames = []string{
- "Stopped",
- "Running",
- "ShuttingDown",
-}
-
-// ServiceManager manages the state of a service through its lifecycle.
-type ServiceManager struct {
- mu sync.Mutex
- wg sync.WaitGroup
- err error // err is the error returned from the service function.
- state AtomicInt64
- // shutdown is created when the service starts and is closed when the service
- // enters the SERVICE_SHUTTING_DOWN state.
- shutdown chan struct{}
-}
-
-// Go tries to change the state from SERVICE_STOPPED to SERVICE_RUNNING.
-//
-// If the current state is not SERVICE_STOPPED (already running), it returns
-// false immediately.
-//
-// On successful transition, it launches the service as a goroutine and returns
-// true. The service function is responsible for returning on its own when
-// requested, either by regularly checking svc.IsRunning(), or by waiting for
-// the svc.ShuttingDown channel to be closed.
-//
-// When the service func returns, the state is reverted to SERVICE_STOPPED.
-func (svm *ServiceManager) Go(service func(svc *ServiceContext) error) bool {
- svm.mu.Lock()
- defer svm.mu.Unlock()
- if !svm.state.CompareAndSwap(SERVICE_STOPPED, SERVICE_RUNNING) {
- return false
- }
- svm.wg.Add(1)
- svm.err = nil
- svm.shutdown = make(chan struct{})
- go func() {
- svm.err = service(&ServiceContext{ShuttingDown: svm.shutdown})
- svm.state.Set(SERVICE_STOPPED)
- svm.wg.Done()
- }()
- return true
-}
-
-// Stop tries to change the state from SERVICE_RUNNING to SERVICE_SHUTTING_DOWN.
-// If the current state is not SERVICE_RUNNING, it returns false immediately.
-// On successul transition, it waits for the service to finish, and returns true.
-// You are allowed to Go() again after a Stop().
-func (svm *ServiceManager) Stop() bool {
- svm.mu.Lock()
- defer svm.mu.Unlock()
- if !svm.state.CompareAndSwap(SERVICE_RUNNING, SERVICE_SHUTTING_DOWN) {
- return false
- }
- // Signal the service that we've transitioned to SERVICE_SHUTTING_DOWN.
- close(svm.shutdown)
- svm.shutdown = nil
- svm.wg.Wait()
- return true
-}
-
-// Wait waits for the service to terminate if it's currently running.
-func (svm *ServiceManager) Wait() {
- svm.wg.Wait()
-}
-
-// Join waits for the service to terminate and returns the value returned by the
-// service function.
-func (svm *ServiceManager) Join() error {
- svm.wg.Wait()
- return svm.err
-}
-
-// State returns the current state of the service.
-// This should only be used to report the current state.
-func (svm *ServiceManager) State() int64 {
- return svm.state.Get()
-}
-
-// StateName returns the name of the current state.
-func (svm *ServiceManager) StateName() string {
- return stateNames[svm.State()]
-}
-
-// ServiceContext is passed into the service function to give it access to
-// information about the running service.
-type ServiceContext struct {
- // ShuttingDown is a channel that the service can select on to be notified
- // when it should shut down. The channel is closed when the state transitions
- // from SERVICE_RUNNING to SERVICE_SHUTTING_DOWN.
- ShuttingDown chan struct{}
-}
-
-// IsRunning returns true if the ServiceContext.ShuttingDown channel has not
-// been closed yet.
-func (svc *ServiceContext) IsRunning() bool {
- select {
- case <-svc.ShuttingDown:
- return false
- default:
- return true
- }
-}
diff --git a/vendor/github.com/ngaut/sync2/service_manager_test.go b/vendor/github.com/ngaut/sync2/service_manager_test.go
deleted file mode 100644
index a41912e..0000000
--- a/vendor/github.com/ngaut/sync2/service_manager_test.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2013, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync2
-
-import (
- "fmt"
- "testing"
- "time"
-)
-
-type testService struct {
- activated AtomicInt64
- t *testing.T
-}
-
-func (ts *testService) service(svc *ServiceContext) error {
- if !ts.activated.CompareAndSwap(0, 1) {
- ts.t.Fatalf("service called more than once")
- }
- for svc.IsRunning() {
- time.Sleep(10 * time.Millisecond)
-
- }
- if !ts.activated.CompareAndSwap(1, 0) {
- ts.t.Fatalf("service ended more than once")
- }
- return nil
-}
-
-func (ts *testService) selectService(svc *ServiceContext) error {
- if !ts.activated.CompareAndSwap(0, 1) {
- ts.t.Fatalf("service called more than once")
- }
-serviceLoop:
- for svc.IsRunning() {
- select {
- case <-time.After(1 * time.Second):
- ts.t.Errorf("service didn't stop when shutdown channel was closed")
- case <-svc.ShuttingDown:
- break serviceLoop
- }
- }
- if !ts.activated.CompareAndSwap(1, 0) {
- ts.t.Fatalf("service ended more than once")
- }
- return nil
-}
-
-func TestServiceManager(t *testing.T) {
- ts := &testService{t: t}
- var sm ServiceManager
- if sm.StateName() != "Stopped" {
- t.Errorf("want Stopped, got %s", sm.StateName())
- }
- result := sm.Go(ts.service)
- if !result {
- t.Errorf("want true, got false")
- }
- if sm.StateName() != "Running" {
- t.Errorf("want Running, got %s", sm.StateName())
- }
- time.Sleep(5 * time.Millisecond)
- if val := ts.activated.Get(); val != 1 {
- t.Errorf("want 1, got %d", val)
- }
- result = sm.Go(ts.service)
- if result {
- t.Errorf("want false, got true")
- }
- result = sm.Stop()
- if !result {
- t.Errorf("want true, got false")
- }
- if val := ts.activated.Get(); val != 0 {
- t.Errorf("want 0, got %d", val)
- }
- result = sm.Stop()
- if result {
- t.Errorf("want false, got true")
- }
- sm.state.Set(SERVICE_SHUTTING_DOWN)
- if sm.StateName() != "ShuttingDown" {
- t.Errorf("want ShuttingDown, got %s", sm.StateName())
- }
-}
-
-func TestServiceManagerSelect(t *testing.T) {
- ts := &testService{t: t}
- var sm ServiceManager
- if sm.StateName() != "Stopped" {
- t.Errorf("want Stopped, got %s", sm.StateName())
- }
- result := sm.Go(ts.selectService)
- if !result {
- t.Errorf("want true, got false")
- }
- if sm.StateName() != "Running" {
- t.Errorf("want Running, got %s", sm.StateName())
- }
- time.Sleep(5 * time.Millisecond)
- if val := ts.activated.Get(); val != 1 {
- t.Errorf("want 1, got %d", val)
- }
- result = sm.Go(ts.service)
- if result {
- t.Errorf("want false, got true")
- }
- result = sm.Stop()
- if !result {
- t.Errorf("want true, got false")
- }
- if val := ts.activated.Get(); val != 0 {
- t.Errorf("want 0, got %d", val)
- }
- result = sm.Stop()
- if result {
- t.Errorf("want false, got true")
- }
- sm.state.Set(SERVICE_SHUTTING_DOWN)
- if sm.StateName() != "ShuttingDown" {
- t.Errorf("want ShuttingDown, got %s", sm.StateName())
- }
-}
-
-func TestServiceManagerWaitNotRunning(t *testing.T) {
- done := make(chan struct{})
- var sm ServiceManager
- go func() {
- sm.Wait()
- close(done)
- }()
- select {
- case <-done:
- case <-time.After(1 * time.Second):
- t.Errorf("Wait() blocked even though service wasn't running.")
- }
-}
-
-func TestServiceManagerWait(t *testing.T) {
- done := make(chan struct{})
- stop := make(chan struct{})
- var sm ServiceManager
- sm.Go(func(*ServiceContext) error {
- <-stop
- return nil
- })
- go func() {
- sm.Wait()
- close(done)
- }()
- time.Sleep(100 * time.Millisecond)
- select {
- case <-done:
- t.Errorf("Wait() didn't block while service was still running.")
- default:
- }
- close(stop)
- select {
- case <-done:
- case <-time.After(100 * time.Millisecond):
- t.Errorf("Wait() didn't unblock when service stopped.")
- }
-}
-
-func TestServiceManagerJoin(t *testing.T) {
- want := "error 123"
- var sm ServiceManager
- sm.Go(func(*ServiceContext) error {
- return fmt.Errorf("error 123")
- })
- if got := sm.Join().Error(); got != want {
- t.Errorf("Join().Error() = %#v, want %#v", got, want)
- }
-}
diff --git a/vendor/github.com/ngaut/systimemon/LICENSE b/vendor/github.com/ngaut/systimemon/LICENSE
deleted file mode 100644
index 8dada3e..0000000
--- a/vendor/github.com/ngaut/systimemon/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/ngaut/systimemon/README.md b/vendor/github.com/ngaut/systimemon/README.md
deleted file mode 100644
index 39e3630..0000000
--- a/vendor/github.com/ngaut/systimemon/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-# systimemon
-System time monitor
diff --git a/vendor/github.com/ngaut/systimemon/systime_mon.go b/vendor/github.com/ngaut/systimemon/systime_mon.go
deleted file mode 100644
index 0d3defe..0000000
--- a/vendor/github.com/ngaut/systimemon/systime_mon.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package systimemon
-
-import (
- "time"
-
- "github.com/ngaut/log"
-)
-
-// StartMonitor will call systimeErrHandler if system time jump backward.
-func StartMonitor(now func() time.Time, systimeErrHandler func()) {
- log.Info("start system time monitor")
- tick := time.NewTicker(100 * time.Millisecond)
- defer tick.Stop()
- for {
- last := now()
- select {
- case <-tick.C:
- if now().Sub(last) < 0 {
- log.Errorf("system time jump backward, last:%v", last)
- systimeErrHandler()
- }
- }
- }
-}
diff --git a/vendor/github.com/ngaut/systimemon/systime_mon_test.go b/vendor/github.com/ngaut/systimemon/systime_mon_test.go
deleted file mode 100644
index 3f6dc1b..0000000
--- a/vendor/github.com/ngaut/systimemon/systime_mon_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package systimemon
-
-import (
- "testing"
- "time"
-)
-
-func TestSystimeMonitor(t *testing.T) {
- jumpForward := false
- trigged := false
- go StartMonitor(
- func() time.Time {
- if !trigged {
- trigged = true
- return time.Now()
- }
-
- return time.Now().Add(-2 * time.Second)
- }, func() {
- jumpForward = true
- })
-
- time.Sleep(1 * time.Second)
-
- if !jumpForward {
- t.Error("should detect time error")
- }
-}
diff --git a/vendor/github.com/openark/golib/LICENSE b/vendor/github.com/openark/golib/LICENSE
new file mode 100644
index 0000000..6875dca
--- /dev/null
+++ b/vendor/github.com/openark/golib/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction,
+and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by
+the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all
+other entities that control, are controlled by, or are under common
+control with that entity. For the purposes of this definition,
+"control" means (i) the power, direct or indirect, to cause the
+direction or management of such entity, whether by contract or
+otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity
+exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications,
+including but not limited to software source code, documentation
+source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical
+transformation or translation of a Source form, including but
+not limited to compiled object code, generated documentation,
+and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or
+Object form, made available under the License, as indicated by a
+copyright notice that is included in or attached to the work
+(an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object
+form, that is based on (or derived from) the Work and for which the
+editorial revisions, annotations, elaborations, or other modifications
+represent, as a whole, an original work of authorship. For the purposes
+of this License, Derivative Works shall not include works that remain
+separable from, or merely link (or bind by name) to the interfaces of,
+the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including
+the original version of the Work and any modifications or additions
+to that Work or Derivative Works thereof, that is intentionally
+submitted to Licensor for inclusion in the Work by the copyright owner
+or by an individual or Legal Entity authorized to submit on behalf of
+the copyright owner. For the purposes of this definition, "submitted"
+means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems,
+and issue tracking systems that are managed by, or on behalf of, the
+Licensor for the purpose of discussing and improving the Work, but
+excluding communication that is conspicuously marked or otherwise
+designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity
+on behalf of whom a Contribution has been received by Licensor and
+subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+this License, each Contributor hereby grants to You a perpetual,
+worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the
+Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+this License, each Contributor hereby grants to You a perpetual,
+worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+(except as stated in this section) patent license to make, have made,
+use, offer to sell, sell, import, and otherwise transfer the Work,
+where such license applies only to those patent claims licensable
+by such Contributor that are necessarily infringed by their
+Contribution(s) alone or by combination of their Contribution(s)
+with the Work to which such Contribution(s) was submitted. If You
+institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work
+or a Contribution incorporated within the Work constitutes direct
+or contributory patent infringement, then any patent licenses
+granted to You under this License for that Work shall terminate
+as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+Work or Derivative Works thereof in any medium, with or without
+modifications, and in Source or Object form, provided that You
+meet the following conditions:
+
+(a) You must give any other recipients of the Work or
+Derivative Works a copy of this License; and
+
+(b) You must cause any modified files to carry prominent notices
+stating that You changed the files; and
+
+(c) You must retain, in the Source form of any Derivative Works
+that You distribute, all copyright, patent, trademark, and
+attribution notices from the Source form of the Work,
+excluding those notices that do not pertain to any part of
+the Derivative Works; and
+
+(d) If the Work includes a "NOTICE" text file as part of its
+distribution, then any Derivative Works that You distribute must
+include a readable copy of the attribution notices contained
+within such NOTICE file, excluding those notices that do not
+pertain to any part of the Derivative Works, in at least one
+of the following places: within a NOTICE text file distributed
+as part of the Derivative Works; within the Source form or
+documentation, if provided along with the Derivative Works; or,
+within a display generated by the Derivative Works, if and
+wherever such third-party notices normally appear. The contents
+of the NOTICE file are for informational purposes only and
+do not modify the License. You may add Your own attribution
+notices within Derivative Works that You distribute, alongside
+or as an addendum to the NOTICE text from the Work, provided
+that such additional attribution notices cannot be construed
+as modifying the License.
+
+You may add Your own copyright statement to Your modifications and
+may provide additional or different license terms and conditions
+for use, reproduction, or distribution of Your modifications, or
+for any such Derivative Works as a whole, provided Your use,
+reproduction, and distribution of the Work otherwise complies with
+the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+any Contribution intentionally submitted for inclusion in the Work
+by You to the Licensor shall be under the terms and conditions of
+this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify
+the terms of any separate license agreement you may have executed
+with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+names, trademarks, service marks, or product names of the Licensor,
+except as required for reasonable and customary use in describing the
+origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+agreed to in writing, Licensor provides the Work (and each
+Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied, including, without limitation, any warranties or conditions
+of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+PARTICULAR PURPOSE. You are solely responsible for determining the
+appropriateness of using or redistributing the Work and assume any
+risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+whether in tort (including negligence), contract, or otherwise,
+unless required by applicable law (such as deliberate and grossly
+negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special,
+incidental, or consequential damages of any character arising as a
+result of this License or out of the use or inability to use the
+Work (including but not limited to damages for loss of goodwill,
+work stoppage, computer failure or malfunction, or any and all
+other commercial damages or losses), even if such Contributor
+has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+the Work or Derivative Works thereof, You may choose to offer,
+and charge a fee for, acceptance of support, warranty, indemnity,
+or other liability obligations and/or rights consistent with this
+License. However, in accepting such obligations, You may act only
+on Your own behalf and on Your sole responsibility, not on behalf
+of any other Contributor, and only if You agree to indemnify,
+defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason
+of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+To apply the Apache License to your work, attach the following
+boilerplate notice, with the fields enclosed by brackets "{}"
+replaced with your own identifying information. (Don't include
+the brackets!) The text should be enclosed in the appropriate
+comment syntax for the file format. We also recommend that a
+file or class name and description of purpose be included on the
+same "printed page" as the copyright notice for easier
+identification within third-party archives.
+
+Copyright 2014 Outbrain Inc
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/outbrain/golib/log/log.go b/vendor/github.com/openark/golib/log/log.go
similarity index 93%
rename from vendor/github.com/outbrain/golib/log/log.go
rename to vendor/github.com/openark/golib/log/log.go
index d3e9c59..26d33c9 100644
--- a/vendor/github.com/outbrain/golib/log/log.go
+++ b/vendor/github.com/openark/golib/log/log.go
@@ -126,8 +126,18 @@ func logFormattedEntry(logLevel LogLevel, message string, args ...interface{}) s
if logLevel > globalLogLevel {
return ""
}
+ // if TZ env variable is set, update the timestamp timezone
+ localizedTime := time.Now()
+ tzLocation := os.Getenv("TZ")
+ if tzLocation != "" {
+ location, err := time.LoadLocation(tzLocation)
+ if err == nil { // if invalid tz location was provided, just leave it as the default
+ localizedTime = time.Now().In(location)
+ }
+ }
+
msgArgs := fmt.Sprintf(message, args...)
- entryString := fmt.Sprintf("%s %s %s", time.Now().Format(TimeFormat), logLevel, msgArgs)
+ entryString := fmt.Sprintf("%s %s %s", localizedTime.Format(TimeFormat), logLevel, msgArgs)
fmt.Fprintln(os.Stderr, entryString)
if syslogWriter != nil {
diff --git a/vendor/github.com/openark/golib/sqlutils/dialect.go b/vendor/github.com/openark/golib/sqlutils/dialect.go
new file mode 100644
index 0000000..19cb55d
--- /dev/null
+++ b/vendor/github.com/openark/golib/sqlutils/dialect.go
@@ -0,0 +1,49 @@
+/*
+ Copyright 2017 GitHub Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package sqlutils
+
+import (
+ "regexp"
+ "strings"
+)
+
+type regexpMap struct {
+ r *regexp.Regexp
+ replacement string
+}
+
+func (this *regexpMap) process(text string) (result string) {
+ return this.r.ReplaceAllString(text, this.replacement)
+}
+
+func rmap(regexpExpression string, replacement string) regexpMap {
+ return regexpMap{
+ r: regexp.MustCompile(regexpSpaces(regexpExpression)),
+ replacement: replacement,
+ }
+}
+
+func regexpSpaces(statement string) string {
+ return strings.Replace(statement, " ", `[\s]+`, -1)
+}
+
+func applyConversions(statement string, conversions []regexpMap) string {
+ for _, rmap := range conversions {
+ statement = rmap.process(statement)
+ }
+ return statement
+}
diff --git a/vendor/github.com/openark/golib/sqlutils/sqlite_dialect.go b/vendor/github.com/openark/golib/sqlutils/sqlite_dialect.go
new file mode 100644
index 0000000..5937aa4
--- /dev/null
+++ b/vendor/github.com/openark/golib/sqlutils/sqlite_dialect.go
@@ -0,0 +1,130 @@
+/*
+ Copyright 2017 GitHub Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// What's this about?
+// This is a brute-force regular-expression based conversion from MySQL syntax to sqlite3 syntax.
+// It is NOT meant to be a general purpose solution and is only expected & confirmed to run on
+// queries issued by orchestrator. There are known limitations to this design.
+// It's not even pretty.
+// In fact...
+// Well, it gets the job done at this time. Call it debt.
+
+package sqlutils
+
+import (
+ "regexp"
+)
+
+var sqlite3CreateTableConversions = []regexpMap{
+ rmap(`(?i) (character set|charset) [\S]+`, ``),
+ rmap(`(?i)int unsigned`, `int`),
+ rmap(`(?i)int[\s]*[(][\s]*([0-9]+)[\s]*[)] unsigned`, `int`),
+ rmap(`(?i)engine[\s]*=[\s]*(innodb|myisam|ndb|memory|tokudb)`, ``),
+ rmap(`(?i)DEFAULT CHARSET[\s]*=[\s]*[\S]+`, ``),
+ rmap(`(?i)[\S]*int( not null|) auto_increment`, `integer`),
+ rmap(`(?i)comment '[^']*'`, ``),
+ rmap(`(?i)after [\S]+`, ``),
+ rmap(`(?i)alter table ([\S]+) add (index|key) ([\S]+) (.+)`, `create index ${3}_${1} on $1 $4`),
+ rmap(`(?i)alter table ([\S]+) add unique (index|key) ([\S]+) (.+)`, `create unique index ${3}_${1} on $1 $4`),
+ rmap(`(?i)([\S]+) enum[\s]*([(].*?[)])`, `$1 text check($1 in $2)`),
+ rmap(`(?i)([\s\S]+[/][*] sqlite3-skip [*][/][\s\S]+)`, ``),
+ rmap(`(?i)timestamp default current_timestamp`, `timestamp default ('')`),
+ rmap(`(?i)timestamp not null default current_timestamp`, `timestamp not null default ('')`),
+
+ rmap(`(?i)add column (.*int) not null[\s]*$`, `add column $1 not null default 0`),
+ rmap(`(?i)add column (.* text) not null[\s]*$`, `add column $1 not null default ''`),
+ rmap(`(?i)add column (.* varchar.*) not null[\s]*$`, `add column $1 not null default ''`),
+}
+
+var sqlite3InsertConversions = []regexpMap{
+ rmap(`(?i)insert ignore ([\s\S]+) on duplicate key update [\s\S]+`, `insert or ignore $1`),
+ rmap(`(?i)insert ignore`, `insert or ignore`),
+ rmap(`(?i)now[(][)]`, `datetime('now')`),
+ rmap(`(?i)insert into ([\s\S]+) on duplicate key update [\s\S]+`, `replace into $1`),
+}
+
+var sqlite3GeneralConversions = []regexpMap{
+ rmap(`(?i)now[(][)][\s]*[-][\s]*interval [?] ([\w]+)`, `datetime('now', printf('-%d $1', ?))`),
+ rmap(`(?i)now[(][)][\s]*[+][\s]*interval [?] ([\w]+)`, `datetime('now', printf('+%d $1', ?))`),
+ rmap(`(?i)now[(][)][\s]*[-][\s]*interval ([0-9.]+) ([\w]+)`, `datetime('now', '-${1} $2')`),
+ rmap(`(?i)now[(][)][\s]*[+][\s]*interval ([0-9.]+) ([\w]+)`, `datetime('now', '+${1} $2')`),
+
+ rmap(`(?i)[=<>\s]([\S]+[.][\S]+)[\s]*[-][\s]*interval [?] ([\w]+)`, ` datetime($1, printf('-%d $2', ?))`),
+ rmap(`(?i)[=<>\s]([\S]+[.][\S]+)[\s]*[+][\s]*interval [?] ([\w]+)`, ` datetime($1, printf('+%d $2', ?))`),
+
+ rmap(`(?i)unix_timestamp[(][)]`, `strftime('%s', 'now')`),
+ rmap(`(?i)unix_timestamp[(]([^)]+)[)]`, `strftime('%s', $1)`),
+ rmap(`(?i)now[(][)]`, `datetime('now')`),
+ rmap(`(?i)cast[(][\s]*([\S]+) as signed[\s]*[)]`, `cast($1 as integer)`),
+
+ rmap(`(?i)\bconcat[(][\s]*([^,)]+)[\s]*,[\s]*([^,)]+)[\s]*[)]`, `($1 || $2)`),
+ rmap(`(?i)\bconcat[(][\s]*([^,)]+)[\s]*,[\s]*([^,)]+)[\s]*,[\s]*([^,)]+)[\s]*[)]`, `($1 || $2 || $3)`),
+
+ rmap(`(?i) rlike `, ` like `),
+
+ rmap(`(?i)create index([\s\S]+)[(][\s]*[0-9]+[\s]*[)]([\s\S]+)`, `create index ${1}${2}`),
+ rmap(`(?i)drop index ([\S]+) on ([\S]+)`, `drop index if exists $1`),
+}
+
+var (
+ sqlite3IdentifyCreateTableStatement = regexp.MustCompile(regexpSpaces(`(?i)^[\s]*create table`))
+ sqlite3IdentifyCreateIndexStatement = regexp.MustCompile(regexpSpaces(`(?i)^[\s]*create( unique|) index`))
+ sqlite3IdentifyDropIndexStatement = regexp.MustCompile(regexpSpaces(`(?i)^[\s]*drop index`))
+ sqlite3IdentifyAlterTableStatement = regexp.MustCompile(regexpSpaces(`(?i)^[\s]*alter table`))
+ sqlite3IdentifyInsertStatement = regexp.MustCompile(regexpSpaces(`(?i)^[\s]*(insert|replace)`))
+)
+
+func IsInsert(statement string) bool {
+ return sqlite3IdentifyInsertStatement.MatchString(statement)
+}
+
+func IsCreateTable(statement string) bool {
+ return sqlite3IdentifyCreateTableStatement.MatchString(statement)
+}
+
+func IsCreateIndex(statement string) bool {
+ return sqlite3IdentifyCreateIndexStatement.MatchString(statement)
+}
+
+func IsDropIndex(statement string) bool {
+ return sqlite3IdentifyDropIndexStatement.MatchString(statement)
+}
+
+func IsAlterTable(statement string) bool {
+ return sqlite3IdentifyAlterTableStatement.MatchString(statement)
+}
+
+func ToSqlite3CreateTable(statement string) string {
+ return applyConversions(statement, sqlite3CreateTableConversions)
+}
+
+func ToSqlite3Insert(statement string) string {
+ return applyConversions(statement, sqlite3InsertConversions)
+}
+
+func ToSqlite3Dialect(statement string) (translated string) {
+ if IsCreateTable(statement) {
+ return ToSqlite3CreateTable(statement)
+ }
+ if IsAlterTable(statement) {
+ return ToSqlite3CreateTable(statement)
+ }
+ statement = applyConversions(statement, sqlite3GeneralConversions)
+ if IsInsert(statement) {
+ return ToSqlite3Insert(statement)
+ }
+ return statement
+}
diff --git a/vendor/github.com/outbrain/golib/sqlutils/sqlutils.go b/vendor/github.com/openark/golib/sqlutils/sqlutils.go
similarity index 71%
rename from vendor/github.com/outbrain/golib/sqlutils/sqlutils.go
rename to vendor/github.com/openark/golib/sqlutils/sqlutils.go
index 8d98690..0a2eda2 100644
--- a/vendor/github.com/outbrain/golib/sqlutils/sqlutils.go
+++ b/vendor/github.com/openark/golib/sqlutils/sqlutils.go
@@ -21,13 +21,16 @@ import (
"encoding/json"
"errors"
"fmt"
- _ "github.com/go-sql-driver/mysql"
- "github.com/outbrain/golib/log"
"strconv"
"strings"
"sync"
+ "time"
+
+ "github.com/openark/golib/log"
)
+const DateTimeFormat = "2006-01-02 15:04:05.999999"
+
// RowMap represents one row in a result set. Its objective is to allow
// for easy, typed getters by column name.
type RowMap map[string]CellData
@@ -43,6 +46,18 @@ func (this *CellData) MarshalJSON() ([]byte, error) {
}
}
+// UnmarshalJSON reads this object from JSON
+func (this *CellData) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ (*this).String = s
+ (*this).Valid = true
+
+ return nil
+}
+
func (this *CellData) NullString() *sql.NullString {
return (*sql.NullString)(this)
}
@@ -60,8 +75,20 @@ func (this *RowData) MarshalJSON() ([]byte, error) {
return json.Marshal(cells)
}
+func (this *RowData) Args() []interface{} {
+ result := make([]interface{}, len(*this))
+ for i := range *this {
+ result[i] = (*(*this)[i].NullString())
+ }
+ return result
+}
+
// ResultData is an ordered row set of RowData
type ResultData []RowData
+type NamedResultData struct {
+ Columns []string
+ Data ResultData
+}
var EmptyResultData = ResultData{}
@@ -105,7 +132,7 @@ func (this *RowMap) GetIntD(key string, def int) int {
}
func (this *RowMap) GetUint(key string) uint {
- res, _ := strconv.Atoi(this.GetString(key))
+ res, _ := strconv.ParseUint(this.GetString(key), 10, 0)
return uint(res)
}
@@ -117,31 +144,63 @@ func (this *RowMap) GetUintD(key string, def uint) uint {
return uint(res)
}
+func (this *RowMap) GetUint64(key string) uint64 {
+ res, _ := strconv.ParseUint(this.GetString(key), 10, 0)
+ return res
+}
+
+func (this *RowMap) GetUint64D(key string, def uint64) uint64 {
+ res, err := strconv.ParseUint(this.GetString(key), 10, 0)
+ if err != nil {
+ return def
+ }
+ return uint64(res)
+}
+
func (this *RowMap) GetBool(key string) bool {
return this.GetInt(key) != 0
}
+func (this *RowMap) GetTime(key string) time.Time {
+ if t, err := time.Parse(DateTimeFormat, this.GetString(key)); err == nil {
+ return t
+ }
+ return time.Time{}
+}
+
// knownDBs is a DB cache by uri
var knownDBs map[string]*sql.DB = make(map[string]*sql.DB)
var knownDBsMutex = &sync.Mutex{}
// GetDB returns a DB instance based on uri.
// bool result indicates whether the DB was returned from cache; err
-func GetDB(mysql_uri string) (*sql.DB, bool, error) {
+func GetGenericDB(driverName, dataSourceName string) (*sql.DB, bool, error) {
knownDBsMutex.Lock()
defer func() {
knownDBsMutex.Unlock()
}()
var exists bool
- if _, exists = knownDBs[mysql_uri]; !exists {
- if db, err := sql.Open("mysql", mysql_uri); err == nil {
- knownDBs[mysql_uri] = db
+ if _, exists = knownDBs[dataSourceName]; !exists {
+ if db, err := sql.Open(driverName, dataSourceName); err == nil {
+ knownDBs[dataSourceName] = db
} else {
return db, exists, err
}
}
- return knownDBs[mysql_uri], exists, nil
+ return knownDBs[dataSourceName], exists, nil
+}
+
+// GetDB returns a MySQL DB instance based on uri.
+// bool result indicates whether the DB was returned from cache; err
+func GetDB(mysql_uri string) (*sql.DB, bool, error) {
+ return GetGenericDB("mysql", mysql_uri)
+}
+
+// GetSQLiteDB returns a SQLite DB instance based on DB file name.
+// bool result indicates whether the DB was returned from cache; err
+func GetSQLiteDB(dbFile string) (*sql.DB, bool, error) {
+ return GetGenericDB("sqlite3", dbFile)
}
// RowToArray is a convenience function, typically not called directly, which maps a
@@ -195,43 +254,44 @@ func ScanRowsToMaps(rows *sql.Rows, on_row func(RowMap) error) error {
// QueryRowsMap is a convenience function allowing querying a result set while poviding a callback
// function activated per read row.
-func QueryRowsMap(db *sql.DB, query string, on_row func(RowMap) error, args ...interface{}) error {
- var err error
+func QueryRowsMap(db *sql.DB, query string, on_row func(RowMap) error, args ...interface{}) (err error) {
defer func() {
if derr := recover(); derr != nil {
- err = errors.New(fmt.Sprintf("QueryRowsMap unexpected error: %+v", derr))
+ err = fmt.Errorf("QueryRowsMap unexpected error: %+v", derr)
}
}()
- rows, err := db.Query(query, args...)
- defer rows.Close()
+ var rows *sql.Rows
+ rows, err = db.Query(query, args...)
+ if rows != nil {
+ defer rows.Close()
+ }
if err != nil && err != sql.ErrNoRows {
return log.Errore(err)
}
err = ScanRowsToMaps(rows, on_row)
- return err
+ return
}
// queryResultData returns a raw array of rows for a given query, optionally reading and returning column names
-func queryResultData(db *sql.DB, query string, retrieveColumns bool, args ...interface{}) (ResultData, []string, error) {
- var err error
+func queryResultData(db *sql.DB, query string, retrieveColumns bool, args ...interface{}) (resultData ResultData, columns []string, err error) {
defer func() {
if derr := recover(); derr != nil {
err = errors.New(fmt.Sprintf("QueryRowsMap unexpected error: %+v", derr))
}
}()
- columns := []string{}
- rows, err := db.Query(query, args...)
+ var rows *sql.Rows
+ rows, err = db.Query(query, args...)
defer rows.Close()
if err != nil && err != sql.ErrNoRows {
- return EmptyResultData, columns, log.Errore(err)
+ return EmptyResultData, columns, err
}
if retrieveColumns {
// Don't pay if you don't want to
columns, _ = rows.Columns()
}
- resultData := ResultData{}
+ resultData = ResultData{}
err = ScanRowsToArrays(rows, func(rowData []CellData) error {
resultData = append(resultData, rowData)
return nil
@@ -246,8 +306,9 @@ func QueryResultData(db *sql.DB, query string, args ...interface{}) (ResultData,
}
// QueryResultDataNamed returns a raw array of rows, with column names
-func QueryResultDataNamed(db *sql.DB, query string, args ...interface{}) (ResultData, []string, error) {
- return queryResultData(db, query, true, args...)
+func QueryNamedResultData(db *sql.DB, query string, args ...interface{}) (NamedResultData, error) {
+ resultData, columns, err := queryResultData(db, query, true, args...)
+ return NamedResultData{Columns: columns, Data: resultData}, err
}
// QueryRowsMapBuffered reads data from the database into a buffer, and only then applies the given function per row.
@@ -269,15 +330,13 @@ func QueryRowsMapBuffered(db *sql.DB, query string, on_row func(RowMap) error, a
}
// ExecNoPrepare executes given query using given args on given DB, without using prepared statements.
-func ExecNoPrepare(db *sql.DB, query string, args ...interface{}) (sql.Result, error) {
- var err error
+func ExecNoPrepare(db *sql.DB, query string, args ...interface{}) (res sql.Result, err error) {
defer func() {
if derr := recover(); derr != nil {
err = errors.New(fmt.Sprintf("ExecNoPrepare unexpected error: %+v", derr))
}
}()
- var res sql.Result
res, err = db.Exec(query, args...)
if err != nil {
log.Errore(err)
@@ -287,20 +346,18 @@ func ExecNoPrepare(db *sql.DB, query string, args ...interface{}) (sql.Result, e
// ExecQuery executes given query using given args on given DB. It will safele prepare, execute and close
// the statement.
-func execInternal(silent bool, db *sql.DB, query string, args ...interface{}) (sql.Result, error) {
- var err error
+func execInternal(silent bool, db *sql.DB, query string, args ...interface{}) (res sql.Result, err error) {
defer func() {
if derr := recover(); derr != nil {
err = errors.New(fmt.Sprintf("execInternal unexpected error: %+v", derr))
}
}()
-
- stmt, err := db.Prepare(query)
+ var stmt *sql.Stmt
+ stmt, err = db.Prepare(query)
if err != nil {
return nil, err
}
defer stmt.Close()
- var res sql.Result
res, err = stmt.Exec(args...)
if err != nil && !silent {
log.Errore(err)
@@ -331,3 +388,40 @@ func InClauseStringValues(terms []string) string {
func Args(args ...interface{}) []interface{} {
return args
}
+
+func NilIfZero(i int64) interface{} {
+ if i == 0 {
+ return nil
+ }
+ return i
+}
+
+func ScanTable(db *sql.DB, tableName string) (NamedResultData, error) {
+ query := fmt.Sprintf("select * from %s", tableName)
+ return QueryNamedResultData(db, query)
+}
+
+func WriteTable(db *sql.DB, tableName string, data NamedResultData) (err error) {
+ if len(data.Data) == 0 {
+ return nil
+ }
+ if len(data.Columns) == 0 {
+ return nil
+ }
+ placeholders := make([]string, len(data.Columns))
+ for i := range placeholders {
+ placeholders[i] = "?"
+ }
+ query := fmt.Sprintf(
+ `replace into %s (%s) values (%s)`,
+ tableName,
+ strings.Join(data.Columns, ","),
+ strings.Join(placeholders, ","),
+ )
+ for _, rowData := range data.Data {
+ if _, execErr := db.Exec(query, rowData.Args()...); execErr != nil {
+ err = execErr
+ }
+ }
+ return err
+}
diff --git a/vendor/github.com/outbrain/golib/tests/spec.go b/vendor/github.com/openark/golib/tests/spec.go
similarity index 96%
rename from vendor/github.com/outbrain/golib/tests/spec.go
rename to vendor/github.com/openark/golib/tests/spec.go
index 27b15b6..a52c729 100644
--- a/vendor/github.com/outbrain/golib/tests/spec.go
+++ b/vendor/github.com/openark/golib/tests/spec.go
@@ -35,7 +35,7 @@ func (spec *Spec) ExpectEquals(actual, value interface{}) {
if actual == value {
return
}
- spec.t.Errorf("Expected %+v, got %+v", value, actual)
+ spec.t.Errorf("Expected:\n[[[%+v]]]\n- got:\n[[[%+v]]]", value, actual)
}
// ExpectNotEquals expects given values to be nonequal (comparison via `==`), or errors
diff --git a/vendor/github.com/jmoiron/sqlx/.gitignore b/vendor/github.com/pingcap/errors/.gitignore
similarity index 92%
rename from vendor/github.com/jmoiron/sqlx/.gitignore
rename to vendor/github.com/pingcap/errors/.gitignore
index 529841c..0c9e329 100644
--- a/vendor/github.com/jmoiron/sqlx/.gitignore
+++ b/vendor/github.com/pingcap/errors/.gitignore
@@ -6,6 +6,7 @@
# Folders
_obj
_test
+.idea
# Architecture specific extensions/prefixes
*.[568vq]
@@ -20,5 +21,5 @@ _cgo_export.*
_testmain.go
*.exe
-tags
-environ
+*.test
+*.prof
diff --git a/vendor/github.com/pingcap/errors/.travis.yml b/vendor/github.com/pingcap/errors/.travis.yml
new file mode 100644
index 0000000..15e5a19
--- /dev/null
+++ b/vendor/github.com/pingcap/errors/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+go_import_path: github.com/pkg/errors
+go:
+ - 1.4.x
+ - 1.5.x
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - 1.10.x
+ - tip
+
+script:
+ - go test -v ./...
diff --git a/vendor/github.com/pingcap/errors/LICENSE b/vendor/github.com/pingcap/errors/LICENSE
new file mode 100644
index 0000000..835ba3e
--- /dev/null
+++ b/vendor/github.com/pingcap/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pingcap/errors/README.md b/vendor/github.com/pingcap/errors/README.md
new file mode 100644
index 0000000..6483ba2
--- /dev/null
+++ b/vendor/github.com/pingcap/errors/README.md
@@ -0,0 +1,52 @@
+# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+ return err
+}
+```
+which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+ return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+ Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+ // handle specifically
+default:
+ // unknown error
+}
+```
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Contributing
+
+We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
+
+Before proposing a change, please discuss your change by raising an issue.
+
+## License
+
+BSD-2-Clause
diff --git a/vendor/github.com/pingcap/errors/appveyor.yml b/vendor/github.com/pingcap/errors/appveyor.yml
new file mode 100644
index 0000000..a932ead
--- /dev/null
+++ b/vendor/github.com/pingcap/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+ # some helpful output for debugging builds
+ - go version
+ - go env
+ # pre-installed MinGW at C:\MinGW is 32bit only
+ # but MSYS2 at C:\msys64 has mingw64
+ - set PATH=C:\msys64\mingw64\bin;%PATH%
+ - gcc --version
+ - g++ --version
+
+build_script:
+ - go install -v ./...
+
+test_script:
+ - set PATH=C:\gopath\bin;%PATH%
+ - go test -v ./...
+
+#artifacts:
+# - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/vendor/github.com/pingcap/errors/compatible_shim.go b/vendor/github.com/pingcap/errors/compatible_shim.go
new file mode 100644
index 0000000..ba446e0
--- /dev/null
+++ b/vendor/github.com/pingcap/errors/compatible_shim.go
@@ -0,0 +1,99 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+ "encoding/json"
+ "strconv"
+ "strings"
+)
+
+// class2RFCCode is used for compatible with old version of TiDB. When
+// marshal Error to json, old version of TiDB contain a 'class' field
+// which is represented for error class. In order to parse and convert
+// json to errors.Error, using this map to convert error class to RFC
+// error code text. here is reference:
+// https://github.com/pingcap/parser/blob/release-3.0/terror/terror.go#L58
+var class2RFCCode = map[int]string{
+ 1: "autoid",
+ 2: "ddl",
+ 3: "domain",
+ 4: "evaluator",
+ 5: "executor",
+ 6: "expression",
+ 7: "admin",
+ 8: "kv",
+ 9: "meta",
+ 10: "planner",
+ 11: "parser",
+ 12: "perfschema",
+ 13: "privilege",
+ 14: "schema",
+ 15: "server",
+ 16: "struct",
+ 17: "variable",
+ 18: "xeval",
+ 19: "table",
+ 20: "types",
+ 21: "global",
+ 22: "mocktikv",
+ 23: "json",
+ 24: "tikv",
+ 25: "session",
+ 26: "plugin",
+ 27: "util",
+}
+var rfcCode2class map[string]int
+
+func init() {
+ rfcCode2class = make(map[string]int)
+ for k, v := range class2RFCCode {
+ rfcCode2class[v] = k
+ }
+}
+
+
+// MarshalJSON implements json.Marshaler interface.
+// aware that this function cannot save a 'registered' status,
+// since we cannot access the registry when unmarshaling,
+// and the original global registry would be removed here.
+// This function is reserved for compatibility.
+func (e *Error) MarshalJSON() ([]byte, error) {
+ ec := strings.Split(string(e.codeText), ":")[0]
+ return json.Marshal(&jsonError{
+ Class: rfcCode2class[ec],
+ Code: int(e.code),
+ Msg: e.GetMsg(),
+ RFCCode: string(e.codeText),
+ })
+}
+
+// UnmarshalJSON implements json.Unmarshaler interface.
+// aware that this function cannot create a 'registered' error,
+// since we cannot access the registry in this context,
+// and the original global registry is removed.
+// This function is reserved for compatibility.
+func (e *Error) UnmarshalJSON(data []byte) error {
+ tErr := &jsonError{}
+ if err := json.Unmarshal(data, &tErr); err != nil {
+ return Trace(err)
+ }
+ e.codeText = ErrCodeText(tErr.RFCCode)
+ if tErr.RFCCode == "" && tErr.Class > 0 {
+ e.codeText = ErrCodeText(class2RFCCode[tErr.Class] + ":" + strconv.Itoa(tErr.Code))
+ }
+ e.code = ErrCode(tErr.Code)
+ e.message = tErr.Msg
+ return nil
+}
diff --git a/vendor/github.com/pingcap/errors/errors.go b/vendor/github.com/pingcap/errors/errors.go
new file mode 100644
index 0000000..2e1d3f6
--- /dev/null
+++ b/vendor/github.com/pingcap/errors/errors.go
@@ -0,0 +1,324 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+// if err != nil {
+// return err
+// }
+//
+// which applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Annotate function returns a new error that adds context to the
+// original error by recording a stack trace at the point Annotate is called,
+// and the supplied message. For example
+//
+// _, err := ioutil.ReadAll(r)
+// if err != nil {
+// return errors.Annotate(err, "read failed")
+// }
+//
+// If additional control is required the errors.AddStack and errors.WithMessage
+// functions destructure errors.Annotate into its component operations of annotating
+// an error with a stack trace and a message, respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Annotate constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Annotate to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error which does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+// switch err := errors.Cause(err).(type) {
+// case *MyError:
+// // handle specifically
+// default:
+// // unknown error
+// }
+//
+// causer interface is not exported by this package, but is considered a part
+// of stable public API.
+// errors.Unwrap is also available: this will retrieve the next error in the chain.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported
+//
+// %s print the error. If the error has a Cause it will be
+// printed recursively
+// %v see %s
+// %+v extended format. Each Frame of the error's StackTrace will
+// be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Annotate, and Annotatef record a stack trace at the point they are invoked.
+// This information can be retrieved with the StackTracer interface that returns
+// a StackTrace. Where errors.StackTrace is defined as
+//
+// type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+// if stacked := errors.GetStackTracer(err); stacked != nil {
+// for _, f := range stacked.StackTrace() {
+// fmt.Printf("%+s:%d", f)
+// }
+// }
+//
+// See the documentation for Frame.Format for more details.
+//
+// errors.Find can be used to search for an error in the error chain.
+package errors
+
+import (
+ "fmt"
+ "io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+ return &fundamental{
+ msg: message,
+ stack: callers(),
+ }
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+ return &fundamental{
+ msg: fmt.Sprintf(format, args...),
+ stack: callers(),
+ }
+}
+
+// StackTraceAware is an optimization to avoid repetitive traversals of an error chain.
+// HasStack checks for this marker first.
+// Annotate/Wrap and Annotatef/Wrapf will produce this marker.
+type StackTraceAware interface {
+ HasStack() bool
+}
+
+// HasStack tells whether a StackTracer exists in the error chain
+func HasStack(err error) bool {
+ if errWithStack, ok := err.(StackTraceAware); ok {
+ return errWithStack.HasStack()
+ }
+ return GetStackTracer(err) != nil
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+ msg string
+ *stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ io.WriteString(s, f.msg)
+ f.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, f.msg)
+ case 'q':
+ fmt.Fprintf(s, "%q", f.msg)
+ }
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+//
+// For most use cases this is deprecated and AddStack should be used (which will ensure just one stack trace).
+// However, one may want to use this in some situations, for example to create a 2nd trace across a goroutine.
+func WithStack(err error) error {
+ if err == nil {
+ return nil
+ }
+
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// AddStack is similar to WithStack.
+// However, it will first check with HasStack to see if a stack trace already exists in the causer chain before creating another one.
+func AddStack(err error) error {
+ if HasStack(err) {
+ return err
+ }
+ return WithStack(err)
+}
+
+type withStack struct {
+ error
+ *stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v", w.Cause())
+ w.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, w.Error())
+ case 'q':
+ fmt.Fprintf(s, "%q", w.Error())
+ }
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+//
+// For most use cases this is deprecated in favor of Annotate.
+// Annotate avoids creating duplicate stack traces.
+func Wrap(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ hasStack := HasStack(err)
+ err = &withMessage{
+ cause: err,
+ msg: message,
+ causeHasStack: hasStack,
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+//
+// For most use cases this is deprecated in favor of Annotatef.
+// Annotatef avoids creating duplicate stack traces.
+func Wrapf(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ hasStack := HasStack(err)
+ err = &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ causeHasStack: hasStack,
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: message,
+ causeHasStack: HasStack(err),
+ }
+}
+
+type withMessage struct {
+ cause error
+ msg string
+ causeHasStack bool
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error { return w.cause }
+func (w *withMessage) HasStack() bool { return w.causeHasStack }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v\n", w.Cause())
+ io.WriteString(s, w.msg)
+ return
+ }
+ fallthrough
+ case 's', 'q':
+ io.WriteString(s, w.Error())
+ }
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+ cause := Unwrap(err)
+ if cause == nil {
+ return err
+ }
+ return Cause(cause)
+}
+
+// Unwrap uses causer to return the next error in the chain or nil.
+// This goes one-level deeper, whereas Cause goes as far as possible
+func Unwrap(err error) error {
+ type causer interface {
+ Cause() error
+ }
+ if unErr, ok := err.(causer); ok {
+ return unErr.Cause()
+ }
+ return nil
+}
+
+// Find an error in the chain that matches a test function.
+// returns nil if no error is found.
+func Find(origErr error, test func(error) bool) error {
+ var foundErr error
+ WalkDeep(origErr, func(err error) bool {
+ if test(err) {
+ foundErr = err
+ return true
+ }
+ return false
+ })
+ return foundErr
+}
diff --git a/vendor/github.com/pingcap/errors/group.go b/vendor/github.com/pingcap/errors/group.go
new file mode 100644
index 0000000..e5a969a
--- /dev/null
+++ b/vendor/github.com/pingcap/errors/group.go
@@ -0,0 +1,42 @@
+package errors
+
+// ErrorGroup is an interface for multiple errors that are not a chain.
+// This happens for example when executing multiple operations in parallel.
+type ErrorGroup interface {
+ Errors() []error
+}
+
+// Errors uses the ErrorGroup interface to return a slice of errors.
+// If the ErrorGroup interface is not implemented it returns an array containing just the given error.
+func Errors(err error) []error {
+ if eg, ok := err.(ErrorGroup); ok {
+ return eg.Errors()
+ }
+ return []error{err}
+}
+
+// WalkDeep does a depth-first traversal of all errors.
+// Any ErrorGroup is traversed (after going deep).
+// The visitor function can return true to end the traversal early
+// In that case, WalkDeep will return true, otherwise false.
+func WalkDeep(err error, visitor func(err error) bool) bool {
+ // Go deep
+ unErr := err
+ for unErr != nil {
+ if done := visitor(unErr); done {
+ return true
+ }
+ unErr = Unwrap(unErr)
+ }
+
+ // Go wide
+ if group, ok := err.(ErrorGroup); ok {
+ for _, err := range group.Errors() {
+ if early := WalkDeep(err, visitor); early {
+ return true
+ }
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/pingcap/errors/juju_adaptor.go b/vendor/github.com/pingcap/errors/juju_adaptor.go
new file mode 100644
index 0000000..ece86cd
--- /dev/null
+++ b/vendor/github.com/pingcap/errors/juju_adaptor.go
@@ -0,0 +1,145 @@
+package errors
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ==================== juju adaptor start ========================
+
+// Trace just calls AddStack.
+func Trace(err error) error {
+ if err == nil {
+ return nil
+ }
+ return AddStack(err)
+}
+
+// Annotate adds a message and ensures there is a stack trace.
+func Annotate(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ hasStack := HasStack(err)
+ err = &withMessage{
+ cause: err,
+ msg: message,
+ causeHasStack: hasStack,
+ }
+ if hasStack {
+ return err
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// Annotatef adds a message and ensures there is a stack trace.
+func Annotatef(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ hasStack := HasStack(err)
+ err = &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ causeHasStack: hasStack,
+ }
+ if hasStack {
+ return err
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+var emptyStack stack
+
+// NewNoStackError creates error without error stack
+// later duplicate trace will no longer generate Stack too.
+func NewNoStackError(msg string) error {
+ return &fundamental{
+ msg: msg,
+ stack: &emptyStack,
+ }
+}
+
+// SuspendStack suspends stack generate for error.
+func SuspendStack(err error) error {
+ if err == nil {
+ return err
+ }
+ cleared := clearStack(err)
+ if cleared {
+ return err
+ }
+ return &withStack{
+ err,
+ &emptyStack,
+ }
+}
+
+func clearStack(err error) (cleared bool) {
+ switch typedErr := err.(type) {
+ case *withMessage:
+ return clearStack(typedErr.Cause())
+ case *fundamental:
+ typedErr.stack = &emptyStack
+ return true
+ case *withStack:
+ typedErr.stack = &emptyStack
+ clearStack(typedErr.Cause())
+ return true
+ default:
+ return false
+ }
+}
+
+// ErrorStack will format a stack trace if it is available, otherwise it will be Error()
+// If the error is nil, the empty string is returned
+// Note that this just calls fmt.Sprintf("%+v", err)
+func ErrorStack(err error) string {
+ if err == nil {
+ return ""
+ }
+ return fmt.Sprintf("%+v", err)
+}
+
+// IsNotFound reports whether err was not found error.
+func IsNotFound(err error) bool {
+ return strings.Contains(err.Error(), "not found")
+}
+
+// NotFoundf represents an error with not found message.
+func NotFoundf(format string, args ...interface{}) error {
+ return Errorf(format+" not found", args...)
+}
+
+// BadRequestf represents an error with bad request message.
+func BadRequestf(format string, args ...interface{}) error {
+ return Errorf(format+" bad request", args...)
+}
+
+// NotSupportedf represents an error with not supported message.
+func NotSupportedf(format string, args ...interface{}) error {
+ return Errorf(format+" not supported", args...)
+}
+
+// NotValidf represents an error with not valid message.
+func NotValidf(format string, args ...interface{}) error {
+ return Errorf(format+" not valid", args...)
+}
+
+// IsAlreadyExists reports whether err was already exists error.
+func IsAlreadyExists(err error) bool {
+ return strings.Contains(err.Error(), "already exists")
+}
+
+// AlreadyExistsf represents an error with already exists message.
+func AlreadyExistsf(format string, args ...interface{}) error {
+ return Errorf(format+" already exists", args...)
+}
+
+// ==================== juju adaptor end ========================
diff --git a/vendor/github.com/pingcap/errors/normalize.go b/vendor/github.com/pingcap/errors/normalize.go
new file mode 100644
index 0000000..5712fe7
--- /dev/null
+++ b/vendor/github.com/pingcap/errors/normalize.go
@@ -0,0 +1,313 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+ "fmt"
+ "runtime"
+ "strconv"
+
+ "go.uber.org/atomic"
+)
+
+// RedactLogEnabled defines whether the arguments of Error need to be redacted.
+var RedactLogEnabled atomic.Bool
+
+// ErrCode represents a specific error type in an error class.
+// Same error code can be used in different error classes.
+type ErrCode int
+
+// ErrCodeText is a textual error code that represents a specific error type in an error class.
+type ErrCodeText string
+
+type ErrorID string
+type RFCErrorCode string
+
+// Error is the 'prototype' of a type of errors.
+// Use Normalize to make a *Error:
+// var ErrUnavailable = errors.Normalize("Region %d is unavailable", errors.RFCCodeText("Unavailable"))
+//
+// "throw" it at runtime:
+// func Somewhat() error {
+// ...
+// if err != nil {
+// // generate a stackful error using the message template defined above,
+// // also see FastGen(it's stackless), GenWithStack(it uses custom message template).
+// return ErrUnavailable.GenWithStackByArgs(region.ID)
+// }
+// }
+//
+// testing whether an error belongs to a prototype:
+// if ErrUnavailable.Equal(err) {
+// // handle this error.
+// }
+type Error struct {
+ code ErrCode
+ // codeText is the textual description of the error code
+ codeText ErrCodeText
+ // message is a template of the description of this error.
+ // printf-style formatting is enabled.
+ message string
+ // redactArgsPos defines the positions of arguments in message that need to be redacted.
+ // And it is controlled by the global var RedactLogEnabled.
+ // For example, an original error is `Duplicate entry 'PRIMARY' for key 'key'`,
+ // when RedactLogEnabled is ON and redactArgsPos is [0, 1], the error is `Duplicate entry '?' for key '?'`.
+ redactArgsPos []int
+ // Cause is used to wrap some third party error.
+ cause error
+ args []interface{}
+ file string
+ line int
+}
+
+// Code returns the numeric code of this error.
+// ID() will return the textual error code if it exists;
+// when you just want the purely numeric error code
+// (e.g., for MySQL protocol transmission), this method is useful.
+func (e *Error) Code() ErrCode {
+ return e.code
+}
+
+// RFCCode returns RFCErrorCode, by the RFC:
+//
+// The error code is a 3-tuple of abbreviated component name, error class and error code,
+// joined by a colon like {Component}:{ErrorClass}:{InnerErrorCode}.
+func (e *Error) RFCCode() RFCErrorCode {
+ return RFCErrorCode(e.ID())
+}
+
+// ID returns the ID of this error.
+func (e *Error) ID() ErrorID {
+ if e.codeText != "" {
+ return ErrorID(e.codeText)
+ }
+ return ErrorID(strconv.Itoa(int(e.code)))
+}
+
+// Location returns the location where the error is created,
+// implements juju/errors locationer interface.
+func (e *Error) Location() (file string, line int) {
+ return e.file, e.line
+}
+
+// MessageTemplate returns the error message template of this error.
+func (e *Error) MessageTemplate() string {
+ return e.message
+}
+
+// Error implements error interface.
+func (e *Error) Error() string {
+ if e == nil {
+ return ""
+ }
+ describe := e.codeText
+ if len(describe) == 0 {
+ describe = ErrCodeText(strconv.Itoa(int(e.code)))
+ }
+ return fmt.Sprintf("[%s]%s", e.RFCCode(), e.GetMsg())
+}
+
+func (e *Error) GetMsg() string {
+ if len(e.args) > 0 {
+ return fmt.Sprintf(e.message, e.args...)
+ }
+ return e.message
+}
+
+func (e *Error) fillLineAndFile(skip int) {
+ // skip this
+ _, file, line, ok := runtime.Caller(skip + 1)
+ if !ok {
+ e.file = ""
+ e.line = -1
+ return
+ }
+ e.file = file
+ e.line = line
+}
+
+// GenWithStack generates a new *Error with the same class and code, and a new formatted message.
+func (e *Error) GenWithStack(format string, args ...interface{}) error {
+ // TODO: RedactErrorArg
+ err := *e
+ err.message = format
+ err.args = args
+ err.fillLineAndFile(1)
+ return AddStack(&err)
+}
+
+// GenWithStackByArgs generates a new *Error with the same class and code, and new arguments.
+func (e *Error) GenWithStackByArgs(args ...interface{}) error {
+ RedactErrorArg(args, e.redactArgsPos)
+ err := *e
+ err.args = args
+ err.fillLineAndFile(1)
+ return AddStack(&err)
+}
+
+// FastGen generates a new *Error with the same class and code, and a new formatted message.
+// This will not call runtime.Caller to get file and line.
+func (e *Error) FastGen(format string, args ...interface{}) error {
+ // TODO: RedactErrorArg
+ err := *e
+ err.message = format
+ err.args = args
+ return SuspendStack(&err)
+}
+
+// FastGenByArgs generates a new *Error with the same class and code, and new arguments.
+// This will not call runtime.Caller to get file and line.
+func (e *Error) FastGenByArgs(args ...interface{}) error {
+ RedactErrorArg(args, e.redactArgsPos)
+ err := *e
+ err.args = args
+ return SuspendStack(&err)
+}
+
+// Equal checks if err is equal to e.
+func (e *Error) Equal(err error) bool {
+ originErr := Cause(err)
+ if originErr == nil {
+ return false
+ }
+ if error(e) == originErr {
+ return true
+ }
+ inErr, ok := originErr.(*Error)
+ if !ok {
+ return false
+ }
+ idEquals := e.ID() == inErr.ID()
+ return idEquals
+}
+
+// NotEqual checks if err is not equal to e.
+func (e *Error) NotEqual(err error) bool {
+ return !e.Equal(err)
+}
+
+// RedactErrorArg redacts the args by position if RedactLogEnabled is enabled.
+func RedactErrorArg(args []interface{}, position []int) {
+ if RedactLogEnabled.Load() {
+ for _, pos := range position {
+ if len(args) > pos {
+ args[pos] = "?"
+ }
+ }
+ }
+}
+
+// ErrorEqual returns a boolean indicating whether err1 is equal to err2.
+func ErrorEqual(err1, err2 error) bool {
+ e1 := Cause(err1)
+ e2 := Cause(err2)
+
+ if e1 == e2 {
+ return true
+ }
+
+ if e1 == nil || e2 == nil {
+ return e1 == e2
+ }
+
+ te1, ok1 := e1.(*Error)
+ te2, ok2 := e2.(*Error)
+ if ok1 && ok2 {
+ return te1.Equal(te2)
+ }
+
+ return e1.Error() == e2.Error()
+}
+
+// ErrorNotEqual returns a boolean indicating whether err1 isn't equal to err2.
+func ErrorNotEqual(err1, err2 error) bool {
+ return !ErrorEqual(err1, err2)
+}
+
+type jsonError struct {
+ // Deprecated field, please use `RFCCode` instead.
+ Class int `json:"class"`
+ Code int `json:"code"`
+ Msg string `json:"message"`
+ RFCCode string `json:"rfccode"`
+}
+
+func (e *Error) Wrap(err error) *Error {
+ if err != nil {
+ newErr := *e
+ newErr.cause = err
+ return &newErr
+ }
+ return nil
+}
+
+func (e *Error) Cause() error {
+ root := Unwrap(e.cause)
+ if root == nil {
+ return e.cause
+ }
+ return root
+}
+
+func (e *Error) FastGenWithCause(args ...interface{}) error {
+ err := *e
+ if e.cause != nil {
+ err.message = e.cause.Error()
+ }
+ err.args = args
+ return SuspendStack(&err)
+}
+
+func (e *Error) GenWithStackByCause(args ...interface{}) error {
+ err := *e
+ if e.cause != nil {
+ err.message = e.cause.Error()
+ }
+ err.args = args
+ err.fillLineAndFile(1)
+ return AddStack(&err)
+}
+
+type NormalizeOption func(*Error)
+
+func RedactArgs(pos []int) NormalizeOption {
+ return func(e *Error) {
+ e.redactArgsPos = pos
+ }
+}
+
+// RFCCodeText returns a NormalizeOption to set RFC error code.
+func RFCCodeText(codeText string) NormalizeOption {
+ return func(e *Error) {
+ e.codeText = ErrCodeText(codeText)
+ }
+}
+
+// MySQLErrorCode returns a NormalizeOption to set error code.
+func MySQLErrorCode(code int) NormalizeOption {
+ return func(e *Error) {
+ e.code = ErrCode(code)
+ }
+}
+
+// Normalize creates a new Error object.
+func Normalize(message string, opts ...NormalizeOption) *Error {
+ e := &Error{
+ message: message,
+ }
+ for _, opt := range opts {
+ opt(e)
+ }
+ return e
+}
diff --git a/vendor/github.com/pingcap/errors/stack.go b/vendor/github.com/pingcap/errors/stack.go
new file mode 100644
index 0000000..bb1e6a8
--- /dev/null
+++ b/vendor/github.com/pingcap/errors/stack.go
@@ -0,0 +1,226 @@
+package errors
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "path"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// StackTracer retrieves the StackTrace
+// Generally you would want to use the GetStackTracer function to do that.
+type StackTracer interface {
+ StackTrace() StackTrace
+}
+
+// GetStackTracer will return the first StackTracer in the causer chain.
+// This function is used by AddStack to avoid creating redundant stack traces.
+//
+// You can also use the StackTracer interface on the returned error to get the stack trace.
+func GetStackTracer(origErr error) StackTracer {
+ var stacked StackTracer
+ WalkDeep(origErr, func(err error) bool {
+ if stackTracer, ok := err.(StackTracer); ok {
+ stacked = stackTracer
+ return true
+ }
+ return false
+ })
+ return stacked
+}
+
+// Frame represents a program counter inside a stack frame.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ file, _ := fn.FileLine(f.pc())
+ return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return 0
+ }
+ _, line := fn.FileLine(f.pc())
+ return line
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+// %s source file
+// %d source line
+// %n function name
+// %v equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+s function name and path of source file relative to the compile time
+// GOPATH separated by \n\t (\n\t)
+// %+v equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+ f.format(s, s, verb)
+}
+
+// format allows stack trace printing calls to be made with a bytes.Buffer.
+func (f Frame) format(w io.Writer, s fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ switch {
+ case s.Flag('+'):
+ pc := f.pc()
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ io.WriteString(w, "unknown")
+ } else {
+ file, _ := fn.FileLine(pc)
+ io.WriteString(w, fn.Name())
+ io.WriteString(w, "\n\t")
+ io.WriteString(w, file)
+ }
+ default:
+ io.WriteString(w, path.Base(f.file()))
+ }
+ case 'd':
+ io.WriteString(w, strconv.Itoa(f.line()))
+ case 'n':
+ name := runtime.FuncForPC(f.pc()).Name()
+ io.WriteString(w, funcname(name))
+ case 'v':
+ f.format(w, s, 's')
+ io.WriteString(w, ":")
+ f.format(w, s, 'd')
+ }
+}
+
+// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+// %s lists source files for each Frame in the stack
+// %v lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+v Prints filename, function, and line number for each Frame in the stack.
+func (st StackTrace) Format(s fmt.State, verb rune) {
+ var b bytes.Buffer
+ switch verb {
+ case 'v':
+ switch {
+ case s.Flag('+'):
+ b.Grow(len(st) * stackMinLen)
+ for _, fr := range st {
+ b.WriteByte('\n')
+ fr.format(&b, s, verb)
+ }
+ case s.Flag('#'):
+ fmt.Fprintf(&b, "%#v", []Frame(st))
+ default:
+ st.formatSlice(&b, s, verb)
+ }
+ case 's':
+ st.formatSlice(&b, s, verb)
+ }
+ io.Copy(s, &b)
+}
+
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(b *bytes.Buffer, s fmt.State, verb rune) {
+ b.WriteByte('[')
+ if len(st) == 0 {
+ b.WriteByte(']')
+ return
+ }
+
+ b.Grow(len(st) * (stackMinLen / 4))
+ st[0].format(b, s, verb)
+ for _, fr := range st[1:] {
+ b.WriteByte(' ')
+ fr.format(b, s, verb)
+ }
+ b.WriteByte(']')
+}
+
+// stackMinLen is a best-guess at the minimum length of a stack trace. It
+// doesn't need to be exact, just give a good enough head start for the buffer
+// to avoid the expensive early growth.
+const stackMinLen = 96
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case st.Flag('+'):
+ var b bytes.Buffer
+ b.Grow(len(*s) * stackMinLen)
+ for _, pc := range *s {
+ f := Frame(pc)
+ b.WriteByte('\n')
+ f.format(&b, st, 'v')
+ }
+ io.Copy(st, &b)
+ }
+ }
+}
+
+func (s *stack) StackTrace() StackTrace {
+ f := make([]Frame, len(*s))
+ for i := 0; i < len(f); i++ {
+ f[i] = Frame((*s)[i])
+ }
+ return f
+}
+
+func callers() *stack {
+ return callersSkip(4)
+}
+
+func callersSkip(skip int) *stack {
+ const depth = 32
+ var pcs [depth]uintptr
+ n := runtime.Callers(skip, pcs[:])
+ var st stack = pcs[0:n]
+ return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+ i := strings.LastIndex(name, "/")
+ name = name[i+1:]
+ i = strings.Index(name, ".")
+ return name[i+1:]
+}
+
+// NewStack is for library implementers that want to generate a stack trace.
+// Normally you should instead use AddStack to get an error with a stack trace.
+//
+// The result of this function can be turned into a stack trace by calling .StackTrace()
+//
+// This function takes an argument for the number of stack frames to skip.
+// This avoids putting stack generation function calls like this one in the stack trace.
+// A value of 0 will give you the line that called NewStack(0)
+// A library author wrapping this in their own function will want to use a value of at least 1.
+func NewStack(skip int) StackTracer {
+ return callersSkip(skip + 3)
+}
diff --git a/vendor/github.com/satori/go.uuid/.travis.yml b/vendor/github.com/satori/go.uuid/.travis.yml
index 56ab36c..20dd53b 100644
--- a/vendor/github.com/satori/go.uuid/.travis.yml
+++ b/vendor/github.com/satori/go.uuid/.travis.yml
@@ -1,8 +1,23 @@
language: go
+sudo: false
go:
- - 1.0
- - 1.1
- 1.2
- 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+ - 1.7
+ - 1.8
+ - 1.9
+ - tip
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+before_install:
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci
notifications:
email: false
diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE
index e085b25..926d549 100644
--- a/vendor/github.com/satori/go.uuid/LICENSE
+++ b/vendor/github.com/satori/go.uuid/LICENSE
@@ -1,4 +1,4 @@
-Copyright (C) 2013-2014 by Maxim Bublis
+Copyright (C) 2013-2018 by Maxim Bublis
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md
index a863495..7b1a722 100644
--- a/vendor/github.com/satori/go.uuid/README.md
+++ b/vendor/github.com/satori/go.uuid/README.md
@@ -1,6 +1,7 @@
# UUID package for Go language
[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
+[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)
This package provides pure Go implementation of Universally Unique Identifier (UUID). Supported both creation and parsing of UUIDs.
@@ -22,9 +23,7 @@ Use the `go` command:
## Requirements
-UUID package requires any stable version of Go Programming Language.
-
-It is tested against following versions of Go: 1.0, 1.1, 1.2
+UUID package requires Go >= 1.2.
## Example
@@ -60,7 +59,7 @@ func main() {
## Copyright
-Copyright (C) 2013-2014 by Maxim Bublis .
+Copyright (C) 2013-2018 by Maxim Bublis .
UUID package released under MIT License.
See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
diff --git a/vendor/github.com/satori/go.uuid/benchmarks_test.go b/vendor/github.com/satori/go.uuid/benchmarks_test.go
deleted file mode 100644
index 6a8bf7e..0000000
--- a/vendor/github.com/satori/go.uuid/benchmarks_test.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright (C) 2013-2014 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "testing"
-)
-
-func BenchmarkFromBytes(b *testing.B) {
- bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- for i := 0; i < b.N; i++ {
- FromBytes(bytes)
- }
-}
-
-func BenchmarkFromString(b *testing.B) {
- s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
- for i := 0; i < b.N; i++ {
- FromString(s)
- }
-}
-
-func BenchmarkFromStringUrn(b *testing.B) {
- s := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
- for i := 0; i < b.N; i++ {
- FromString(s)
- }
-}
-
-func BenchmarkFromStringWithBrackets(b *testing.B) {
- s := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"
- for i := 0; i < b.N; i++ {
- FromString(s)
- }
-}
-
-func BenchmarkNewV1(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV1()
- }
-}
-
-func BenchmarkNewV2(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV2(DomainPerson)
- }
-}
-
-func BenchmarkNewV3(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV3(NamespaceDNS, "www.example.com")
- }
-}
-
-func BenchmarkNewV4(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV4()
- }
-}
-
-func BenchmarkNewV5(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV5(NamespaceDNS, "www.example.com")
- }
-}
-
-func BenchmarkMarshalBinary(b *testing.B) {
- u := NewV4()
- for i := 0; i < b.N; i++ {
- u.MarshalBinary()
- }
-}
-
-func BenchmarkMarshalText(b *testing.B) {
- u := NewV4()
- for i := 0; i < b.N; i++ {
- u.MarshalText()
- }
-}
-
-func BenchmarkUnmarshalBinary(b *testing.B) {
- bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- u := UUID{}
- for i := 0; i < b.N; i++ {
- u.UnmarshalBinary(bytes)
- }
-}
-
-func BenchmarkUnmarshalText(b *testing.B) {
- bytes := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
- u := UUID{}
- for i := 0; i < b.N; i++ {
- u.UnmarshalText(bytes)
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/satori/go.uuid/codec.go b/vendor/github.com/satori/go.uuid/codec.go
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/satori/go.uuid/codec.go
rename to vendor/github.com/satori/go.uuid/codec.go
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/satori/go.uuid/generator.go b/vendor/github.com/satori/go.uuid/generator.go
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/satori/go.uuid/generator.go
rename to vendor/github.com/satori/go.uuid/generator.go
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/satori/go.uuid/sql.go b/vendor/github.com/satori/go.uuid/sql.go
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/satori/go.uuid/sql.go
rename to vendor/github.com/satori/go.uuid/sql.go
diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go
index 44a235f..a2b8e2c 100644
--- a/vendor/github.com/satori/go.uuid/uuid.go
+++ b/vendor/github.com/satori/go.uuid/uuid.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2013-2014 by Maxim Bublis
+// Copyright (C) 2013-2018 by Maxim Bublis
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
@@ -26,22 +26,29 @@ package uuid
import (
"bytes"
- "crypto/md5"
- "crypto/rand"
- "crypto/sha1"
- "encoding/binary"
"encoding/hex"
- "fmt"
- "hash"
- "net"
- "os"
- "sync"
- "time"
+)
+
+// Size of a UUID in bytes.
+const Size = 16
+
+// UUID representation compliant with specification
+// described in RFC 4122.
+type UUID [Size]byte
+
+// UUID versions
+const (
+ _ byte = iota
+ V1
+ V2
+ V3
+ V4
+ V5
)
// UUID layout variants.
const (
- VariantNCS = iota
+ VariantNCS byte = iota
VariantRFC4122
VariantMicrosoft
VariantFuture
@@ -54,112 +61,48 @@ const (
DomainOrg
)
-// Difference in 100-nanosecond intervals between
-// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
-const epochStart = 122192928000000000
-
-// UUID v1/v2 storage.
-var (
- storageMutex sync.Mutex
- clockSequence uint16
- lastTime uint64
- hardwareAddr [6]byte
- posixUID = uint32(os.Getuid())
- posixGID = uint32(os.Getgid())
-)
-
// String parse helpers.
var (
urnPrefix = []byte("urn:uuid:")
byteGroups = []int{8, 4, 4, 4, 12}
)
-// Epoch calculation function
-var epochFunc func() uint64
-
-// Initialize storage
-func init() {
- buf := make([]byte, 2)
- rand.Read(buf)
- clockSequence = binary.BigEndian.Uint16(buf)
-
- // Initialize hardwareAddr randomly in case
- // of real network interfaces absence
- rand.Read(hardwareAddr[:])
-
- // Set multicast bit as recommended in RFC 4122
- hardwareAddr[0] |= 0x01
-
- interfaces, err := net.Interfaces()
- if err == nil {
- for _, iface := range interfaces {
- if len(iface.HardwareAddr) >= 6 {
- copy(hardwareAddr[:], iface.HardwareAddr)
- break
- }
- }
- }
- epochFunc = unixTimeFunc
-}
-
-// Returns difference in 100-nanosecond intervals between
-// UUID epoch (October 15, 1582) and current time.
-// This is default epoch calculation function.
-func unixTimeFunc() uint64 {
- return epochStart + uint64(time.Now().UnixNano()/100)
-}
-
-// UUID representation compliant with specification
-// described in RFC 4122.
-type UUID [16]byte
+// Nil is special form of UUID that is specified to have all
+// 128 bits set to zero.
+var Nil = UUID{}
// Predefined namespace UUIDs.
var (
- NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
- NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
- NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
- NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+ NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
)
-// And returns result of binary AND of two UUIDs.
-func And(u1 UUID, u2 UUID) UUID {
- u := UUID{}
- for i := 0; i < 16; i++ {
- u[i] = u1[i] & u2[i]
- }
- return u
-}
-
-// Or returns result of binary OR of two UUIDs.
-func Or(u1 UUID, u2 UUID) UUID {
- u := UUID{}
- for i := 0; i < 16; i++ {
- u[i] = u1[i] | u2[i]
- }
- return u
-}
-
// Equal returns true if u1 and u2 equals, otherwise returns false.
func Equal(u1 UUID, u2 UUID) bool {
return bytes.Equal(u1[:], u2[:])
}
// Version returns algorithm version used to generate UUID.
-func (u UUID) Version() uint {
- return uint(u[6] >> 4)
+func (u UUID) Version() byte {
+ return u[6] >> 4
}
// Variant returns UUID layout variant.
-func (u UUID) Variant() uint {
+func (u UUID) Variant() byte {
switch {
- case (u[8] & 0x80) == 0x00:
+ case (u[8] >> 7) == 0x00:
return VariantNCS
- case (u[8]&0xc0)|0x80 == 0x80:
+ case (u[8] >> 6) == 0x02:
return VariantRFC4122
- case (u[8]&0xe0)|0xc0 == 0xc0:
+ case (u[8] >> 5) == 0x06:
return VariantMicrosoft
+ case (u[8] >> 5) == 0x07:
+ fallthrough
+ default:
+ return VariantFuture
}
- return VariantFuture
}
// Bytes returns bytes slice representation of UUID.
@@ -170,8 +113,19 @@ func (u UUID) Bytes() []byte {
// Returns canonical string representation of UUID:
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
func (u UUID) String() string {
- return fmt.Sprintf("%x-%x-%x-%x-%x",
- u[:4], u[4:6], u[6:8], u[8:10], u[10:])
+ buf := make([]byte, 36)
+
+ hex.Encode(buf[0:8], u[0:4])
+ buf[8] = '-'
+ hex.Encode(buf[9:13], u[4:6])
+ buf[13] = '-'
+ hex.Encode(buf[14:18], u[6:8])
+ buf[18] = '-'
+ hex.Encode(buf[19:23], u[8:10])
+ buf[23] = '-'
+ hex.Encode(buf[24:], u[10:])
+
+ return string(buf)
}
// SetVersion sets version bits.
@@ -179,183 +133,29 @@ func (u *UUID) SetVersion(v byte) {
u[6] = (u[6] & 0x0f) | (v << 4)
}
-// SetVariant sets variant bits as described in RFC 4122.
-func (u *UUID) SetVariant() {
- u[8] = (u[8] & 0xbf) | 0x80
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// The encoding is the same as returned by String.
-func (u UUID) MarshalText() (text []byte, err error) {
- text = []byte(u.String())
- return
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// Following formats are supported:
-// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
-// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
-// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-func (u *UUID) UnmarshalText(text []byte) (err error) {
- if len(text) < 32 {
- err = fmt.Errorf("uuid: invalid UUID string: %s", text)
- return
+// SetVariant sets variant bits.
+func (u *UUID) SetVariant(v byte) {
+ switch v {
+ case VariantNCS:
+ u[8] = (u[8]&(0xff>>1) | (0x00 << 7))
+ case VariantRFC4122:
+ u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
+ case VariantMicrosoft:
+ u[8] = (u[8]&(0xff>>3) | (0x06 << 5))
+ case VariantFuture:
+ fallthrough
+ default:
+ u[8] = (u[8]&(0xff>>3) | (0x07 << 5))
}
+}
- if bytes.Equal(text[:9], urnPrefix) {
- text = text[9:]
- } else if text[0] == '{' {
- text = text[1:]
+// Must is a helper that wraps a call to a function returning (UUID, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initializations such as
+// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"));
+func Must(u UUID, err error) UUID {
+ if err != nil {
+ panic(err)
}
-
- b := u[:]
-
- for _, byteGroup := range byteGroups {
- if text[0] == '-' {
- text = text[1:]
- }
-
- _, err = hex.Decode(b[:byteGroup/2], text[:byteGroup])
-
- if err != nil {
- return
- }
-
- text = text[byteGroup:]
- b = b[byteGroup/2:]
- }
-
- return
-}
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (u UUID) MarshalBinary() (data []byte, err error) {
- data = u.Bytes()
- return
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-// It will return error if the slice isn't 16 bytes long.
-func (u *UUID) UnmarshalBinary(data []byte) (err error) {
- if len(data) != 16 {
- err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
- return
- }
- copy(u[:], data)
-
- return
-}
-
-// FromBytes returns UUID converted from raw byte slice input.
-// It will return error if the slice isn't 16 bytes long.
-func FromBytes(input []byte) (u UUID, err error) {
- err = u.UnmarshalBinary(input)
- return
-}
-
-// FromString returns UUID parsed from string input.
-// Input is expected in a form accepted by UnmarshalText.
-func FromString(input string) (u UUID, err error) {
- err = u.UnmarshalText([]byte(input))
- return
-}
-
-// Returns UUID v1/v2 storage state.
-// Returns epoch timestamp and clock sequence.
-func getStorage() (uint64, uint16) {
- storageMutex.Lock()
- defer storageMutex.Unlock()
-
- timeNow := epochFunc()
- // Clock changed backwards since last UUID generation.
- // Should increase clock sequence.
- if timeNow <= lastTime {
- clockSequence++
- }
- lastTime = timeNow
-
- return timeNow, clockSequence
-}
-
-// NewV1 returns UUID based on current timestamp and MAC address.
-func NewV1() UUID {
- u := UUID{}
-
- timeNow, clockSeq := getStorage()
-
- binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
- binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
- binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
- binary.BigEndian.PutUint16(u[8:], clockSeq)
-
- copy(u[10:], hardwareAddr[:])
-
- u.SetVersion(1)
- u.SetVariant()
-
- return u
-}
-
-// NewV2 returns DCE Security UUID based on POSIX UID/GID.
-func NewV2(domain byte) UUID {
- u := UUID{}
-
- switch domain {
- case DomainPerson:
- binary.BigEndian.PutUint32(u[0:], posixUID)
- case DomainGroup:
- binary.BigEndian.PutUint32(u[0:], posixGID)
- }
-
- timeNow, clockSeq := getStorage()
-
- binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
- binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
- binary.BigEndian.PutUint16(u[8:], clockSeq)
- u[9] = domain
-
- copy(u[10:], hardwareAddr[:])
-
- u.SetVersion(2)
- u.SetVariant()
-
- return u
-}
-
-// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
-func NewV3(ns UUID, name string) UUID {
- u := newFromHash(md5.New(), ns, name)
- u.SetVersion(3)
- u.SetVariant()
-
- return u
-}
-
-// NewV4 returns random generated UUID.
-func NewV4() UUID {
- u := UUID{}
- rand.Read(u[:])
- u.SetVersion(4)
- u.SetVariant()
-
- return u
-}
-
-// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
-func NewV5(ns UUID, name string) UUID {
- u := newFromHash(sha1.New(), ns, name)
- u.SetVersion(5)
- u.SetVariant()
-
- return u
-}
-
-// Returns UUID based on hashing of namespace UUID and name.
-func newFromHash(h hash.Hash, ns UUID, name string) UUID {
- u := UUID{}
- h.Write(ns[:])
- h.Write([]byte(name))
- copy(u[:], h.Sum(nil))
-
return u
}
diff --git a/vendor/github.com/satori/go.uuid/uuid_test.go b/vendor/github.com/satori/go.uuid/uuid_test.go
deleted file mode 100644
index 0bc85b2..0000000
--- a/vendor/github.com/satori/go.uuid/uuid_test.go
+++ /dev/null
@@ -1,399 +0,0 @@
-// Copyright (C) 2013 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "bytes"
- "testing"
-)
-
-func TestBytes(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- bytes1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- if !bytes.Equal(u.Bytes(), bytes1) {
- t.Errorf("Incorrect bytes representation for UUID: %s", u)
- }
-}
-
-func TestString(t *testing.T) {
- if NamespaceDNS.String() != "6ba7b810-9dad-11d1-80b4-00c04fd430c8" {
- t.Errorf("Incorrect string representation for UUID: %s", NamespaceDNS.String())
- }
-}
-
-func TestEqual(t *testing.T) {
- if !Equal(NamespaceDNS, NamespaceDNS) {
- t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceDNS)
- }
-
- if Equal(NamespaceDNS, NamespaceURL) {
- t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceURL)
- }
-}
-
-func TestOr(t *testing.T) {
- u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff}
- u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00}
-
- u := UUID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
-
- if !Equal(u, Or(u1, u2)) {
- t.Errorf("Incorrect bitwise OR result %s", Or(u1, u2))
- }
-}
-
-func TestAnd(t *testing.T) {
- u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff}
- u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00}
-
- u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if !Equal(u, And(u1, u2)) {
- t.Errorf("Incorrect bitwise AND result %s", And(u1, u2))
- }
-}
-
-func TestVersion(t *testing.T) {
- u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u.Version() != 1 {
- t.Errorf("Incorrect version for UUID: %d", u.Version())
- }
-}
-
-func TestSetVersion(t *testing.T) {
- u := UUID{}
- u.SetVersion(4)
-
- if u.Version() != 4 {
- t.Errorf("Incorrect version for UUID after u.setVersion(4): %d", u.Version())
- }
-}
-
-func TestVariant(t *testing.T) {
- u1 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u1.Variant() != VariantNCS {
- t.Errorf("Incorrect variant for UUID variant %d: %d", VariantNCS, u1.Variant())
- }
-
- u2 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u2.Variant() != VariantRFC4122 {
- t.Errorf("Incorrect variant for UUID variant %d: %d", VariantRFC4122, u2.Variant())
- }
-
- u3 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u3.Variant() != VariantMicrosoft {
- t.Errorf("Incorrect variant for UUID variant %d: %d", VariantMicrosoft, u3.Variant())
- }
-
- u4 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u4.Variant() != VariantFuture {
- t.Errorf("Incorrect variant for UUID variant %d: %d", VariantFuture, u4.Variant())
- }
-}
-
-func TestSetVariant(t *testing.T) {
- u := new(UUID)
- u.SetVariant()
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("Incorrect variant for UUID after u.setVariant(): %d", u.Variant())
- }
-}
-
-func TestFromBytes(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- u1, err := FromBytes(b1)
- if err != nil {
- t.Errorf("Error parsing UUID from bytes: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- b2 := []byte{}
-
- _, err = FromBytes(b2)
- if err == nil {
- t.Errorf("Should return error parsing from empty byte slice, got %s", err)
- }
-}
-
-func TestMarshalBinary(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- b2, err := u.MarshalBinary()
- if err != nil {
- t.Errorf("Error marshaling UUID: %s", err)
- }
-
- if !bytes.Equal(b1, b2) {
- t.Errorf("Marshaled UUID should be %s, got %s", b1, b2)
- }
-}
-
-func TestUnmarshalBinary(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- u1 := UUID{}
- err := u1.UnmarshalBinary(b1)
- if err != nil {
- t.Errorf("Error unmarshaling UUID: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- b2 := []byte{}
- u2 := UUID{}
-
- err = u2.UnmarshalBinary(b2)
- if err == nil {
- t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err)
- }
-}
-
-func TestFromString(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
- s2 := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"
- s3 := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-
- _, err := FromString("")
- if err == nil {
- t.Errorf("Should return error trying to parse empty string, got %s", err)
- }
-
- u1, err := FromString(s1)
- if err != nil {
- t.Errorf("Error parsing UUID from string: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- u2, err := FromString(s2)
- if err != nil {
- t.Errorf("Error parsing UUID from string: %s", err)
- }
-
- if !Equal(u, u2) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u2)
- }
-
- u3, err := FromString(s3)
- if err != nil {
- t.Errorf("Error parsing UUID from string: %s", err)
- }
-
- if !Equal(u, u3) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u3)
- }
-}
-
-func TestMarshalText(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-
- b2, err := u.MarshalText()
- if err != nil {
- t.Errorf("Error marshaling UUID: %s", err)
- }
-
- if !bytes.Equal(b1, b2) {
- t.Errorf("Marshaled UUID should be %s, got %s", b1, b2)
- }
-}
-
-func TestUnmarshalText(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-
- u1 := UUID{}
- err := u1.UnmarshalText(b1)
- if err != nil {
- t.Errorf("Error unmarshaling UUID: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- b2 := []byte("")
- u2 := UUID{}
-
- err = u2.UnmarshalText(b2)
- if err == nil {
- t.Errorf("Should return error trying to unmarshal from empty string")
- }
-}
-
-func TestNewV1(t *testing.T) {
- u := NewV1()
-
- if u.Version() != 1 {
- t.Errorf("UUIDv1 generated with incorrect version: %d", u.Version())
- }
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv1 generated with incorrect variant: %d", u.Variant())
- }
-
- u1 := NewV1()
- u2 := NewV1()
-
- if Equal(u1, u2) {
- t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u1, u2)
- }
-
- oldFunc := epochFunc
- epochFunc = func() uint64 { return 0 }
-
- u3 := NewV1()
- u4 := NewV1()
-
- if Equal(u3, u4) {
- t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u3, u4)
- }
-
- epochFunc = oldFunc
-}
-
-func TestNewV2(t *testing.T) {
- u1 := NewV2(DomainPerson)
-
- if u1.Version() != 2 {
- t.Errorf("UUIDv2 generated with incorrect version: %d", u1.Version())
- }
-
- if u1.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv2 generated with incorrect variant: %d", u1.Variant())
- }
-
- u2 := NewV2(DomainGroup)
-
- if u2.Version() != 2 {
- t.Errorf("UUIDv2 generated with incorrect version: %d", u2.Version())
- }
-
- if u2.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv2 generated with incorrect variant: %d", u2.Variant())
- }
-}
-
-func TestNewV3(t *testing.T) {
- u := NewV3(NamespaceDNS, "www.example.com")
-
- if u.Version() != 3 {
- t.Errorf("UUIDv3 generated with incorrect version: %d", u.Version())
- }
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv3 generated with incorrect variant: %d", u.Variant())
- }
-
- if u.String() != "5df41881-3aed-3515-88a7-2f4a814cf09e" {
- t.Errorf("UUIDv3 generated incorrectly: %s", u.String())
- }
-
- u = NewV3(NamespaceDNS, "python.org")
-
- if u.String() != "6fa459ea-ee8a-3ca4-894e-db77e160355e" {
- t.Errorf("UUIDv3 generated incorrectly: %s", u.String())
- }
-
- u1 := NewV3(NamespaceDNS, "golang.org")
- u2 := NewV3(NamespaceDNS, "golang.org")
- if !Equal(u1, u2) {
- t.Errorf("UUIDv3 generated different UUIDs for same namespace and name: %s and %s", u1, u2)
- }
-
- u3 := NewV3(NamespaceDNS, "example.com")
- if Equal(u1, u3) {
- t.Errorf("UUIDv3 generated same UUIDs for different names in same namespace: %s and %s", u1, u2)
- }
-
- u4 := NewV3(NamespaceURL, "golang.org")
- if Equal(u1, u4) {
- t.Errorf("UUIDv3 generated same UUIDs for sane names in different namespaces: %s and %s", u1, u4)
- }
-}
-
-func TestNewV4(t *testing.T) {
- u := NewV4()
-
- if u.Version() != 4 {
- t.Errorf("UUIDv4 generated with incorrect version: %d", u.Version())
- }
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv4 generated with incorrect variant: %d", u.Variant())
- }
-}
-
-func TestNewV5(t *testing.T) {
- u := NewV5(NamespaceDNS, "www.example.com")
-
- if u.Version() != 5 {
- t.Errorf("UUIDv5 generated with incorrect version: %d", u.Version())
- }
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv5 generated with incorrect variant: %d", u.Variant())
- }
-
- u = NewV5(NamespaceDNS, "python.org")
-
- if u.String() != "886313e1-3b8a-5372-9b90-0c9aee199e5d" {
- t.Errorf("UUIDv5 generated incorrectly: %s", u.String())
- }
-
- u1 := NewV5(NamespaceDNS, "golang.org")
- u2 := NewV5(NamespaceDNS, "golang.org")
- if !Equal(u1, u2) {
- t.Errorf("UUIDv5 generated different UUIDs for same namespace and name: %s and %s", u1, u2)
- }
-
- u3 := NewV5(NamespaceDNS, "example.com")
- if Equal(u1, u3) {
- t.Errorf("UUIDv5 generated same UUIDs for different names in same namespace: %s and %s", u1, u2)
- }
-
- u4 := NewV5(NamespaceURL, "golang.org")
- if Equal(u1, u4) {
- t.Errorf("UUIDv3 generated same UUIDs for sane names in different namespaces: %s and %s", u1, u4)
- }
-}
diff --git a/vendor/github.com/shopspring/decimal/.gitignore b/vendor/github.com/shopspring/decimal/.gitignore
new file mode 100644
index 0000000..5357942
--- /dev/null
+++ b/vendor/github.com/shopspring/decimal/.gitignore
@@ -0,0 +1,2 @@
+.git
+*.swp
diff --git a/vendor/github.com/shopspring/decimal/.travis.yml b/vendor/github.com/shopspring/decimal/.travis.yml
new file mode 100644
index 0000000..d2d585c
--- /dev/null
+++ b/vendor/github.com/shopspring/decimal/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+ - 1.2
+ - 1.3
+ - 1.4
+ - tip
+
+install:
+ - go build .
+
+script:
+ - go test -v
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/shopspring/decimal/LICENSE b/vendor/github.com/shopspring/decimal/LICENSE
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/shopspring/decimal/LICENSE
rename to vendor/github.com/shopspring/decimal/LICENSE
diff --git a/vendor/github.com/shopspring/decimal/README.md b/vendor/github.com/shopspring/decimal/README.md
new file mode 100644
index 0000000..54c1a6b
--- /dev/null
+++ b/vendor/github.com/shopspring/decimal/README.md
@@ -0,0 +1,126 @@
+# decimal
+
+[![Build Status](https://travis-ci.org/shopspring/decimal.png?branch=master)](https://travis-ci.org/shopspring/decimal) [![GoDoc](https://godoc.org/github.com/shopspring/decimal?status.svg)](https://godoc.org/github.com/shopspring/decimal) [![Go Report Card](https://goreportcard.com/badge/github.com/shopspring/decimal)](https://goreportcard.com/report/github.com/shopspring/decimal)
+
+Arbitrary-precision fixed-point decimal numbers in go.
+
+NOTE: can "only" represent numbers with a maximum of 2^31 digits after the decimal point.
+
+## Features
+
+ * the zero-value is 0, and is safe to use without initialization
+ * addition, subtraction, multiplication with no loss of precision
+ * division with specified precision
+ * database/sql serialization/deserialization
+ * json and xml serialization/deserialization
+
+## Install
+
+Run `go get github.com/shopspring/decimal`
+
+## Usage
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/shopspring/decimal"
+)
+
+func main() {
+ price, err := decimal.NewFromString("136.02")
+ if err != nil {
+ panic(err)
+ }
+
+ quantity := decimal.NewFromFloat(3)
+
+ fee, _ := decimal.NewFromString(".035")
+ taxRate, _ := decimal.NewFromString(".08875")
+
+ subtotal := price.Mul(quantity)
+
+ preTax := subtotal.Mul(fee.Add(decimal.NewFromFloat(1)))
+
+ total := preTax.Mul(taxRate.Add(decimal.NewFromFloat(1)))
+
+ fmt.Println("Subtotal:", subtotal) // Subtotal: 408.06
+ fmt.Println("Pre-tax:", preTax) // Pre-tax: 422.3421
+ fmt.Println("Taxes:", total.Sub(preTax)) // Taxes: 37.482861375
+ fmt.Println("Total:", total) // Total: 459.824961375
+ fmt.Println("Tax rate:", total.Sub(preTax).Div(preTax)) // Tax rate: 0.08875
+}
+```
+
+## Documentation
+
+http://godoc.org/github.com/shopspring/decimal
+
+## Production Usage
+
+* [Spring](https://shopspring.com/), since August 14, 2014.
+* If you are using this in production, please let us know!
+
+## FAQ
+
+#### Why don't you just use float64?
+
+Because float64s (or any binary floating point type, actually) can't represent
+numbers such as 0.1 exactly.
+
+Consider this code: http://play.golang.org/p/TQBd4yJe6B You might expect that
+it prints out `10`, but it actually prints `9.999999999999831`. Over time,
+these small errors can really add up!
+
+#### Why don't you just use big.Rat?
+
+big.Rat is fine for representing rational numbers, but Decimal is better for
+representing money. Why? Here's a (contrived) example:
+
+Let's say you use big.Rat, and you have two numbers, x and y, both
+representing 1/3, and you have `z = 1 - x - y = 1/3`. If you print each one
+out, the string output has to stop somewhere (let's say it stops at 3 decimal
+digits, for simplicity), so you'll get 0.333, 0.333, and 0.333. But where did
+the other 0.001 go?
+
+Here's the above example as code: http://play.golang.org/p/lCZZs0w9KE
+
+With Decimal, the strings being printed out represent the number exactly. So,
+if you have `x = y = 1/3` (with precision 3), they will actually be equal to
+0.333, and when you do `z = 1 - x - y`, `z` will be equal to .334. No money is
+unaccounted for!
+
+You still have to be careful. If you want to split a number `N` 3 ways, you
+can't just send `N/3` to three different people. You have to pick one to send
+`N - (2/3*N)` to. That person will receive the fraction of a penny remainder.
+
+But, it is much easier to be careful with Decimal than with big.Rat.
+
+#### Why isn't the API similar to big.Int's?
+
+big.Int's API is built to reduce the number of memory allocations for maximal
+performance. This makes sense for its use-case, but the trade-off is that the
+API is awkward and easy to misuse.
+
+For example, to add two big.Ints, you do: `z := new(big.Int).Add(x, y)`. A
+developer unfamiliar with this API might try to do `z := a.Add(a, b)`. This
+modifies `a` and sets `z` as an alias for `a`, which they might not expect. It
+also modifies any other aliases to `a`.
+
+Here's an example of the subtle bugs you can introduce with big.Int's API:
+https://play.golang.org/p/x2R_78pa8r
+
+In contrast, it's difficult to make such mistakes with decimal. Decimals
+behave like other go numbers types: even though `a = b` will not deep copy
+`b` into `a`, it is impossible to modify a Decimal, since all Decimal methods
+return new Decimals and do not modify the originals. The downside is that
+this causes extra allocations, so Decimal is less performant. My assumption
+is that if you're using Decimals, you probably care more about correctness
+than performance.
+
+## License
+
+The MIT License (MIT)
+
+This is a heavily modified fork of [fpd.Decimal](https://github.com/oguzbilgic/fpd), which was also released under the MIT License.
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/shopspring/decimal/decimal-go.go b/vendor/github.com/shopspring/decimal/decimal-go.go
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/shopspring/decimal/decimal-go.go
rename to vendor/github.com/shopspring/decimal/decimal-go.go
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/shopspring/decimal/decimal.go b/vendor/github.com/shopspring/decimal/decimal.go
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/shopspring/decimal/decimal.go
rename to vendor/github.com/shopspring/decimal/decimal.go
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/shopspring/decimal/rounding.go b/vendor/github.com/shopspring/decimal/rounding.go
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/shopspring/decimal/rounding.go
rename to vendor/github.com/shopspring/decimal/rounding.go
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/LICENSE b/vendor/github.com/siddontang/go-log/LICENSE
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/LICENSE
rename to vendor/github.com/siddontang/go-log/LICENSE
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/log/doc.go b/vendor/github.com/siddontang/go-log/log/doc.go
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/log/doc.go
rename to vendor/github.com/siddontang/go-log/log/doc.go
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/log/filehandler.go b/vendor/github.com/siddontang/go-log/log/filehandler.go
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/log/filehandler.go
rename to vendor/github.com/siddontang/go-log/log/filehandler.go
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/log/handler.go b/vendor/github.com/siddontang/go-log/log/handler.go
similarity index 95%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/log/handler.go
rename to vendor/github.com/siddontang/go-log/log/handler.go
index 5460f06..30a6c4f 100644
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/log/handler.go
+++ b/vendor/github.com/siddontang/go-log/log/handler.go
@@ -49,6 +49,6 @@ func (h *NullHandler) Write(b []byte) (n int, err error) {
}
// Close implements Handler interface
-func (h *NullHandler) Close() {
-
+func (h *NullHandler) Close() error {
+ return nil
}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/log/log.go b/vendor/github.com/siddontang/go-log/log/log.go
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/log/log.go
rename to vendor/github.com/siddontang/go-log/log/log.go
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/log/logger.go b/vendor/github.com/siddontang/go-log/log/logger.go
similarity index 88%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/log/logger.go
rename to vendor/github.com/siddontang/go-log/log/logger.go
index b2f7ed2..4f44c3e 100644
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/log/logger.go
+++ b/vendor/github.com/siddontang/go-log/log/logger.go
@@ -62,17 +62,13 @@ type Logger struct {
// TODO: support logger.Contextual
loggers.Advanced
- sync.Mutex
-
level Level
flag int
+ hLock sync.Mutex
handler Handler
- quit chan struct{}
- msg chan []byte
-
- bufs [][]byte
+ bufs sync.Pool
}
// New creates a logger with specified handler and flag
@@ -84,13 +80,11 @@ func New(handler Handler, flag int) *Logger {
l.flag = flag
- l.quit = make(chan struct{})
-
- l.msg = make(chan []byte, 1024)
-
- l.bufs = make([][]byte, 0, 16)
-
- go l.run()
+ l.bufs = sync.Pool{
+ New: func() interface{} {
+ return make([]byte, 0, 1024)
+ },
+ }
return l
}
@@ -105,49 +99,11 @@ func newStdHandler() *StreamHandler {
return h
}
-func (l *Logger) run() {
- for {
- select {
- case msg := <-l.msg:
- l.handler.Write(msg)
- l.putBuf(msg)
- case <-l.quit:
- l.handler.Close()
- }
- }
-}
-
-func (l *Logger) popBuf() []byte {
- l.Lock()
- var buf []byte
- if len(l.bufs) == 0 {
- buf = make([]byte, 0, 1024)
- } else {
- buf = l.bufs[len(l.bufs)-1]
- l.bufs = l.bufs[0 : len(l.bufs)-1]
- }
- l.Unlock()
-
- return buf
-}
-
-func (l *Logger) putBuf(buf []byte) {
- l.Lock()
- if len(l.bufs) < maxBufPoolSize {
- buf = buf[0:0]
- l.bufs = append(l.bufs, buf)
- }
- l.Unlock()
-}
-
// Close closes the logger
func (l *Logger) Close() {
- if l.quit == nil {
- return
- }
-
- close(l.quit)
- l.quit = nil
+ l.hLock.Lock()
+ defer l.hLock.Unlock()
+ l.handler.Close()
}
// SetLevel sets log level, any log level less than it will not log
@@ -182,7 +138,9 @@ func (l *Logger) Output(callDepth int, level Level, msg string) {
return
}
- buf := l.popBuf()
+ buf := l.bufs.Get().([]byte)
+ buf = buf[0:0]
+ defer l.bufs.Put(buf)
if l.flag&Ltime > 0 {
now := time.Now().Format(timeFormat)
@@ -222,7 +180,10 @@ func (l *Logger) Output(callDepth int, level Level, msg string) {
if len(msg) == 0 || msg[len(msg)-1] != '\n' {
buf = append(buf, '\n')
}
- l.msg <- buf
+
+ l.hLock.Lock()
+ l.handler.Write(buf)
+ l.hLock.Unlock()
}
// Fatal records the log with fatal level and exits
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/loggers/loggers.go b/vendor/github.com/siddontang/go-log/loggers/loggers.go
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go-log/loggers/loggers.go
rename to vendor/github.com/siddontang/go-log/loggers/loggers.go
diff --git a/vendor/github.com/siddontang/go-mysql/.gitignore b/vendor/github.com/siddontang/go-mysql/.gitignore
deleted file mode 100644
index 68da35e..0000000
--- a/vendor/github.com/siddontang/go-mysql/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-var
-bin
-.idea
\ No newline at end of file
diff --git a/vendor/github.com/siddontang/go-mysql/.travis.yml b/vendor/github.com/siddontang/go-mysql/.travis.yml
deleted file mode 100644
index 8f8eafd..0000000
--- a/vendor/github.com/siddontang/go-mysql/.travis.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-language: go
-
-go:
- - "1.9"
- - "1.10"
-
-addons:
- apt:
- sources:
- - mysql-5.7-trusty
- packages:
- - mysql-server
- - mysql-client
-
-before_install:
- - sudo mysql -e "use mysql; update user set authentication_string=PASSWORD('') where User='root'; update user set plugin='mysql_native_password';FLUSH PRIVILEGES;"
- - sudo mysql_upgrade
-
- # stop mysql and use row-based format binlog
- - "sudo service mysql stop || true"
- - "echo '[mysqld]' | sudo tee /etc/mysql/conf.d/replication.cnf"
- - "echo 'server-id=1' | sudo tee -a /etc/mysql/conf.d/replication.cnf"
- - "echo 'log-bin=mysql' | sudo tee -a /etc/mysql/conf.d/replication.cnf"
- - "echo 'binlog-format = row' | sudo tee -a /etc/mysql/conf.d/replication.cnf"
-
- # Start mysql (avoid errors to have logs)
- - "sudo service mysql start || true"
- - "sudo tail -1000 /var/log/syslog"
-
- - mysql -e "CREATE DATABASE IF NOT EXISTS test;" -uroot
-
-
-script:
- - make test
diff --git a/vendor/github.com/siddontang/go-mysql/Gopkg.lock b/vendor/github.com/siddontang/go-mysql/Gopkg.lock
deleted file mode 100644
index ae65b1d..0000000
--- a/vendor/github.com/siddontang/go-mysql/Gopkg.lock
+++ /dev/null
@@ -1,78 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- name = "github.com/BurntSushi/toml"
- packages = ["."]
- revision = "b26d9c308763d68093482582cea63d69be07a0f0"
- version = "v0.3.0"
-
-[[projects]]
- branch = "master"
- name = "github.com/go-sql-driver/mysql"
- packages = ["."]
- revision = "99ff426eb706cffe92ff3d058e168b278cabf7c7"
-
-[[projects]]
- branch = "master"
- name = "github.com/jmoiron/sqlx"
- packages = [
- ".",
- "reflectx"
- ]
- revision = "2aeb6a910c2b94f2d5eb53d9895d80e27264ec41"
-
-[[projects]]
- branch = "master"
- name = "github.com/juju/errors"
- packages = ["."]
- revision = "c7d06af17c68cd34c835053720b21f6549d9b0ee"
-
-[[projects]]
- branch = "master"
- name = "github.com/pingcap/check"
- packages = ["."]
- revision = "1c287c953996ab3a0bf535dba9d53d809d3dc0b6"
-
-[[projects]]
- name = "github.com/satori/go.uuid"
- packages = ["."]
- revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
- version = "v1.2.0"
-
-[[projects]]
- name = "github.com/shopspring/decimal"
- packages = ["."]
- revision = "cd690d0c9e2447b1ef2a129a6b7b49077da89b8e"
- version = "1.1.0"
-
-[[projects]]
- branch = "master"
- name = "github.com/siddontang/go"
- packages = [
- "hack",
- "sync2"
- ]
- revision = "2b7082d296ba89ae7ead0f977816bddefb65df9d"
-
-[[projects]]
- branch = "master"
- name = "github.com/siddontang/go-log"
- packages = [
- "log",
- "loggers"
- ]
- revision = "a4d157e46fa3e08b7e7ff329af341fa3ff86c02c"
-
-[[projects]]
- name = "google.golang.org/appengine"
- packages = ["cloudsql"]
- revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
- version = "v1.1.0"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- inputs-digest = "a1f9939938a58551bbb3f19411c9d1386995d36296de6f6fb5d858f5923db85e"
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/vendor/github.com/siddontang/go-mysql/Gopkg.toml b/vendor/github.com/siddontang/go-mysql/Gopkg.toml
deleted file mode 100644
index 71df4b3..0000000
--- a/vendor/github.com/siddontang/go-mysql/Gopkg.toml
+++ /dev/null
@@ -1,56 +0,0 @@
-# Gopkg.toml example
-#
-# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-# name = "github.com/user/project"
-# version = "1.0.0"
-#
-# [[constraint]]
-# name = "github.com/user/project2"
-# branch = "dev"
-# source = "github.com/myfork/project2"
-#
-# [[override]]
-# name = "github.com/x/y"
-# version = "2.4.0"
-#
-# [prune]
-# non-go = false
-# go-tests = true
-# unused-packages = true
-
-
-[[constraint]]
- name = "github.com/BurntSushi/toml"
- version = "v0.3.0"
-
-[[constraint]]
- name = "github.com/go-sql-driver/mysql"
- branch = "master"
-
-[[constraint]]
- branch = "master"
- name = "github.com/juju/errors"
-
-[[constraint]]
- name = "github.com/satori/go.uuid"
- version = "v1.2.0"
-
-[[constraint]]
- name = "github.com/shopspring/decimal"
- version = "v1.1.0"
-
-[[constraint]]
- branch = "master"
- name = "github.com/siddontang/go"
-
-[prune]
- go-tests = true
- unused-packages = true
- non-go = true
-
diff --git a/vendor/github.com/siddontang/go-mysql/Makefile b/vendor/github.com/siddontang/go-mysql/Makefile
deleted file mode 100644
index 3decd6c..0000000
--- a/vendor/github.com/siddontang/go-mysql/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-all: build
-
-build:
- go build -o bin/go-mysqlbinlog cmd/go-mysqlbinlog/main.go
- go build -o bin/go-mysqldump cmd/go-mysqldump/main.go
- go build -o bin/go-canal cmd/go-canal/main.go
- go build -o bin/go-binlogparser cmd/go-binlogparser/main.go
-
-test:
- go test --race -timeout 2m ./...
-
-clean:
- go clean -i ./...
- @rm -rf ./bin
\ No newline at end of file
diff --git a/vendor/github.com/siddontang/go-mysql/README.md b/vendor/github.com/siddontang/go-mysql/README.md
deleted file mode 100644
index 0b958c7..0000000
--- a/vendor/github.com/siddontang/go-mysql/README.md
+++ /dev/null
@@ -1,263 +0,0 @@
-# go-mysql
-
-A pure go library to handle MySQL network protocol and replication.
-
-## Replication
-
-Replication package handles MySQL replication protocol like [python-mysql-replication](https://github.com/noplay/python-mysql-replication).
-
-You can use it as a MySQL slave to sync binlog from master then do something, like updating cache, etc...
-
-### Example
-
-```go
-import (
- "github.com/siddontang/go-mysql/replication"
- "os"
-)
-// Create a binlog syncer with a unique server id, the server id must be different from other MySQL's.
-// flavor is mysql or mariadb
-cfg := replication.BinlogSyncerConfig {
- ServerID: 100,
- Flavor: "mysql",
- Host: "127.0.0.1",
- Port: 3306,
- User: "root",
- Password: "",
-}
-syncer := replication.NewBinlogSyncer(cfg)
-
-// Start sync with specified binlog file and position
-streamer, _ := syncer.StartSync(mysql.Position{binlogFile, binlogPos})
-
-// or you can start a gtid replication like
-// streamer, _ := syncer.StartSyncGTID(gtidSet)
-// the mysql GTID set likes this "de278ad0-2106-11e4-9f8e-6edd0ca20947:1-2"
-// the mariadb GTID set likes this "0-1-100"
-
-for {
- ev, _ := streamer.GetEvent(context.Background())
- // Dump event
- ev.Dump(os.Stdout)
-}
-
-// or we can use a timeout context
-for {
- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
- ev, err := s.GetEvent(ctx)
- cancel()
-
- if err == context.DeadlineExceeded {
- // meet timeout
- continue
- }
-
- ev.Dump(os.Stdout)
-}
-```
-
-The output looks:
-
-```
-=== RotateEvent ===
-Date: 1970-01-01 08:00:00
-Log position: 0
-Event size: 43
-Position: 4
-Next log name: mysql.000002
-
-=== FormatDescriptionEvent ===
-Date: 2014-12-18 16:36:09
-Log position: 120
-Event size: 116
-Version: 4
-Server version: 5.6.19-log
-Create date: 2014-12-18 16:36:09
-
-=== QueryEvent ===
-Date: 2014-12-18 16:38:24
-Log position: 259
-Event size: 139
-Salve proxy ID: 1
-Execution time: 0
-Error code: 0
-Schema: test
-Query: DROP TABLE IF EXISTS `test_replication` /* generated by server */
-```
-
-## Canal
-
-Canal is a package that can sync your MySQL into everywhere, like Redis, Elasticsearch.
-
-First, canal will dump your MySQL data then sync changed data using binlog incrementally.
-
-You must use ROW format for binlog, full binlog row image is preferred, because we may meet some errors when primary key changed in update for minimal or noblob row image.
-
-A simple example:
-
-```
-cfg := NewDefaultConfig()
-cfg.Addr = "127.0.0.1:3306"
-cfg.User = "root"
-// We only care table canal_test in test db
-cfg.Dump.TableDB = "test"
-cfg.Dump.Tables = []string{"canal_test"}
-
-c, err := NewCanal(cfg)
-
-type MyEventHandler struct {
- DummyEventHandler
-}
-
-func (h *MyEventHandler) OnRow(e *RowsEvent) error {
- log.Infof("%s %v\n", e.Action, e.Rows)
- return nil
-}
-
-func (h *MyEventHandler) String() string {
- return "MyEventHandler"
-}
-
-// Register a handler to handle RowsEvent
-c.SetEventHandler(&MyEventHandler{})
-
-// Start canal
-c.Start()
-```
-
-You can see [go-mysql-elasticsearch](https://github.com/siddontang/go-mysql-elasticsearch) for how to sync MySQL data into Elasticsearch.
-
-## Client
-
-Client package supports a simple MySQL connection driver which you can use it to communicate with MySQL server.
-
-### Example
-
-```go
-import (
- "github.com/siddontang/go-mysql/client"
-)
-
-// Connect MySQL at 127.0.0.1:3306, with user root, an empty password and database test
-conn, _ := client.Connect("127.0.0.1:3306", "root", "", "test")
-
-// Or to use SSL/TLS connection if MySQL server supports TLS
-//conn, _ := client.Connect("127.0.0.1:3306", "root", "", "test", func(c *Conn) {c.UseSSL(true)})
-
-// or to set your own client-side certificates for identity verification for security
-//tlsConfig := NewClientTLSConfig(caPem, certPem, keyPem, false, "your-server-name")
-//conn, _ := client.Connect("127.0.0.1:3306", "root", "", "test", func(c *Conn) {c.SetTLSConfig(tlsConfig)})
-
-conn.Ping()
-
-// Insert
-r, _ := conn.Execute(`insert into table (id, name) values (1, "abc")`)
-
-// Get last insert id
-println(r.InsertId)
-
-// Select
-r, _ := conn.Execute(`select id, name from table where id = 1`)
-
-// Handle resultset
-v, _ := r.GetInt(0, 0)
-v, _ = r.GetIntByName(0, "id")
-```
-
-Tested MySQL versions for the client include:
-- 5.5.x
-- 5.6.x
-- 5.7.x
-- 8.0.x
-
-## Server
-
-Server package supplies a framework to implement a simple MySQL server which can handle the packets from the MySQL client.
-You can use it to build your own MySQL proxy. The server connection is compatible with MySQL 5.5, 5.6, 5.7, and 8.0 versions,
-so that most MySQL clients should be able to connect to the Server without modifications.
-
-### Example
-
-```go
-import (
- "github.com/siddontang/go-mysql/server"
- "net"
-)
-
-l, _ := net.Listen("tcp", "127.0.0.1:4000")
-
-c, _ := l.Accept()
-
-// Create a connection with user root and an empty password.
-// You can use your own handler to handle command here.
-conn, _ := server.NewConn(c, "root", "", server.EmptyHandler{})
-
-for {
- conn.HandleCommand()
-}
-```
-
-Another shell
-
-```
-mysql -h127.0.0.1 -P4000 -uroot -p
-//Becuase empty handler does nothing, so here the MySQL client can only connect the proxy server. :-)
-```
-
-> ```NewConn()``` will use default server configurations:
-> 1. automatically generate default server certificates and enable TLS/SSL support.
-> 2. support three mainstream authentication methods **'mysql_native_password'**, **'caching_sha2_password'**, and **'sha256_password'**
-> and use **'mysql_native_password'** as default.
-> 3. use an in-memory user credential provider to store user and password.
->
-> To customize server configurations, use ```NewServer()``` and create connection via ```NewCustomizedConn()```.
-
-
-## Failover
-
-Failover supports to promote a new master and let other slaves replicate from it automatically when the old master was down.
-
-Failover supports MySQL >= 5.6.9 with GTID mode, if you use lower version, e.g, MySQL 5.0 - 5.5, please use [MHA](http://code.google.com/p/mysql-master-ha/) or [orchestrator](https://github.com/outbrain/orchestrator).
-
-At the same time, Failover supports MariaDB >= 10.0.9 with GTID mode too.
-
-Why only GTID? Supporting failover with no GTID mode is very hard, because slave can not find the proper binlog filename and position with the new master.
-Although there are many companies use MySQL 5.0 - 5.5, I think upgrade MySQL to 5.6 or higher is easy.
-
-## Driver
-
-Driver is the package that you can use go-mysql with go database/sql like other drivers. A simple example:
-
-```
-package main
-
-import (
- "database/sql"
-
- _ "github.com/siddontang/go-mysql/driver"
-)
-
-func main() {
- // dsn format: "user:password@addr?dbname"
- dsn := "root@127.0.0.1:3306?test"
- db, _ := sql.Open(dsn)
- db.Close()
-}
-```
-
-We pass all tests in https://github.com/bradfitz/go-sql-test using go-mysql driver. :-)
-
-## Donate
-
-If you like the project and want to buy me a cola, you can through:
-
-|PayPal|微信|
-|------|---|
-|[![](https://www.paypalobjects.com/webstatic/paypalme/images/pp_logo_small.png)](https://paypal.me/siddontang)|[![](https://github.com/siddontang/blog/blob/master/donate/weixin.png)|
-
-## Feedback
-
-go-mysql is still in development, your feedback is very welcome.
-
-
-Gmail: siddontang@gmail.com
diff --git a/vendor/github.com/siddontang/go-mysql/canal/canal.go b/vendor/github.com/siddontang/go-mysql/canal/canal.go
deleted file mode 100644
index 64d9aec..0000000
--- a/vendor/github.com/siddontang/go-mysql/canal/canal.go
+++ /dev/null
@@ -1,474 +0,0 @@
-package canal
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "os"
- "regexp"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/juju/errors"
- "github.com/siddontang/go-log/log"
- "github.com/siddontang/go-mysql/client"
- "github.com/siddontang/go-mysql/dump"
- "github.com/siddontang/go-mysql/mysql"
- "github.com/siddontang/go-mysql/replication"
- "github.com/siddontang/go-mysql/schema"
-)
-
-// Canal can sync your MySQL data into everywhere, like Elasticsearch, Redis, etc...
-// MySQL must open row format for binlog
-type Canal struct {
- m sync.Mutex
-
- cfg *Config
-
- master *masterInfo
- dumper *dump.Dumper
- dumped bool
- dumpDoneCh chan struct{}
- syncer *replication.BinlogSyncer
-
- eventHandler EventHandler
-
- connLock sync.Mutex
- conn *client.Conn
-
- tableLock sync.RWMutex
- tables map[string]*schema.Table
- errorTablesGetTime map[string]time.Time
-
- tableMatchCache map[string]bool
- includeTableRegex []*regexp.Regexp
- excludeTableRegex []*regexp.Regexp
-
- ctx context.Context
- cancel context.CancelFunc
-}
-
-// canal will retry fetching unknown table's meta after UnknownTableRetryPeriod
-var UnknownTableRetryPeriod = time.Second * time.Duration(10)
-var ErrExcludedTable = errors.New("excluded table meta")
-
-func NewCanal(cfg *Config) (*Canal, error) {
- c := new(Canal)
- c.cfg = cfg
-
- c.ctx, c.cancel = context.WithCancel(context.Background())
-
- c.dumpDoneCh = make(chan struct{})
- c.eventHandler = &DummyEventHandler{}
-
- c.tables = make(map[string]*schema.Table)
- if c.cfg.DiscardNoMetaRowEvent {
- c.errorTablesGetTime = make(map[string]time.Time)
- }
- c.master = &masterInfo{}
-
- var err error
-
- if err = c.prepareDumper(); err != nil {
- return nil, errors.Trace(err)
- }
-
- if err = c.prepareSyncer(); err != nil {
- return nil, errors.Trace(err)
- }
-
- if err := c.checkBinlogRowFormat(); err != nil {
- return nil, errors.Trace(err)
- }
-
- // init table filter
- if n := len(c.cfg.IncludeTableRegex); n > 0 {
- c.includeTableRegex = make([]*regexp.Regexp, n)
- for i, val := range c.cfg.IncludeTableRegex {
- reg, err := regexp.Compile(val)
- if err != nil {
- return nil, errors.Trace(err)
- }
- c.includeTableRegex[i] = reg
- }
- }
-
- if n := len(c.cfg.ExcludeTableRegex); n > 0 {
- c.excludeTableRegex = make([]*regexp.Regexp, n)
- for i, val := range c.cfg.ExcludeTableRegex {
- reg, err := regexp.Compile(val)
- if err != nil {
- return nil, errors.Trace(err)
- }
- c.excludeTableRegex[i] = reg
- }
- }
-
- if c.includeTableRegex != nil || c.excludeTableRegex != nil {
- c.tableMatchCache = make(map[string]bool)
- }
-
- return c, nil
-}
-
-func (c *Canal) prepareDumper() error {
- var err error
- dumpPath := c.cfg.Dump.ExecutionPath
- if len(dumpPath) == 0 {
- // ignore mysqldump, use binlog only
- return nil
- }
-
- if c.dumper, err = dump.NewDumper(dumpPath,
- c.cfg.Addr, c.cfg.User, c.cfg.Password); err != nil {
- return errors.Trace(err)
- }
-
- if c.dumper == nil {
- //no mysqldump, use binlog only
- return nil
- }
-
- dbs := c.cfg.Dump.Databases
- tables := c.cfg.Dump.Tables
- tableDB := c.cfg.Dump.TableDB
-
- if len(tables) == 0 {
- c.dumper.AddDatabases(dbs...)
- } else {
- c.dumper.AddTables(tableDB, tables...)
- }
-
- charset := c.cfg.Charset
- c.dumper.SetCharset(charset)
-
- c.dumper.SetWhere(c.cfg.Dump.Where)
- c.dumper.SkipMasterData(c.cfg.Dump.SkipMasterData)
- c.dumper.SetMaxAllowedPacket(c.cfg.Dump.MaxAllowedPacketMB)
- // Use hex blob for mysqldump
- c.dumper.SetHexBlob(true)
-
- for _, ignoreTable := range c.cfg.Dump.IgnoreTables {
- if seps := strings.Split(ignoreTable, ","); len(seps) == 2 {
- c.dumper.AddIgnoreTables(seps[0], seps[1])
- }
- }
-
- if c.cfg.Dump.DiscardErr {
- c.dumper.SetErrOut(ioutil.Discard)
- } else {
- c.dumper.SetErrOut(os.Stderr)
- }
-
- return nil
-}
-
-// Run will first try to dump all data from MySQL master `mysqldump`,
-// then sync from the binlog position in the dump data.
-// It will run forever until meeting an error or Canal closed.
-func (c *Canal) Run() error {
- return c.run()
-}
-
-// RunFrom will sync from the binlog position directly, ignore mysqldump.
-func (c *Canal) RunFrom(pos mysql.Position) error {
- c.master.Update(pos)
-
- return c.Run()
-}
-
-func (c *Canal) StartFromGTID(set mysql.GTIDSet) error {
- c.master.UpdateGTIDSet(set)
-
- return c.Run()
-}
-
-// Dump all data from MySQL master `mysqldump`, ignore sync binlog.
-func (c *Canal) Dump() error {
- if c.dumped {
- return errors.New("the method Dump can't be called twice")
- }
- c.dumped = true
- defer close(c.dumpDoneCh)
- return c.dump()
-}
-
-func (c *Canal) run() error {
- defer func() {
- c.cancel()
- }()
-
- c.master.UpdateTimestamp(uint32(time.Now().Unix()))
-
- if !c.dumped {
- c.dumped = true
-
- err := c.tryDump()
- close(c.dumpDoneCh)
-
- if err != nil {
- log.Errorf("canal dump mysql err: %v", err)
- return errors.Trace(err)
- }
- }
-
- if err := c.runSyncBinlog(); err != nil {
- log.Errorf("canal start sync binlog err: %v", err)
- return errors.Trace(err)
- }
-
- return nil
-}
-
-func (c *Canal) Close() {
- log.Infof("closing canal")
-
- c.m.Lock()
- defer c.m.Unlock()
-
- c.cancel()
- c.connLock.Lock()
- c.conn.Close()
- c.conn = nil
- c.connLock.Unlock()
- c.syncer.Close()
-
- c.eventHandler.OnPosSynced(c.master.Position(), true)
-}
-
-func (c *Canal) WaitDumpDone() <-chan struct{} {
- return c.dumpDoneCh
-}
-
-func (c *Canal) Ctx() context.Context {
- return c.ctx
-}
-
-func (c *Canal) checkTableMatch(key string) bool {
- // no filter, return true
- if c.tableMatchCache == nil {
- return true
- }
-
- c.tableLock.RLock()
- rst, ok := c.tableMatchCache[key]
- c.tableLock.RUnlock()
- if ok {
- // cache hit
- return rst
- }
- matchFlag := false
- // check include
- if c.includeTableRegex != nil {
- for _, reg := range c.includeTableRegex {
- if reg.MatchString(key) {
- matchFlag = true
- break
- }
- }
- }
- // check exclude
- if matchFlag && c.excludeTableRegex != nil {
- for _, reg := range c.excludeTableRegex {
- if reg.MatchString(key) {
- matchFlag = false
- break
- }
- }
- }
- c.tableLock.Lock()
- c.tableMatchCache[key] = matchFlag
- c.tableLock.Unlock()
- return matchFlag
-}
-
-func (c *Canal) GetTable(db string, table string) (*schema.Table, error) {
- key := fmt.Sprintf("%s.%s", db, table)
- // if table is excluded, return error and skip parsing event or dump
- if !c.checkTableMatch(key) {
- return nil, ErrExcludedTable
- }
- c.tableLock.RLock()
- t, ok := c.tables[key]
- c.tableLock.RUnlock()
-
- if ok {
- return t, nil
- }
-
- if c.cfg.DiscardNoMetaRowEvent {
- c.tableLock.RLock()
- lastTime, ok := c.errorTablesGetTime[key]
- c.tableLock.RUnlock()
- if ok && time.Now().Sub(lastTime) < UnknownTableRetryPeriod {
- return nil, schema.ErrMissingTableMeta
- }
- }
-
- t, err := schema.NewTable(c, db, table)
- if err != nil {
- // check table not exists
- if ok, err1 := schema.IsTableExist(c, db, table); err1 == nil && !ok {
- return nil, schema.ErrTableNotExist
- }
- // work around : RDS HAHeartBeat
- // ref : https://github.com/alibaba/canal/blob/master/parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java#L385
- // issue : https://github.com/alibaba/canal/issues/222
- // This is a common error in RDS that canal can't get HAHealthCheckSchema's meta, so we mock a table meta.
- // If canal just skip and log error, as RDS HA heartbeat interval is very short, so too many HAHeartBeat errors will be logged.
- if key == schema.HAHealthCheckSchema {
- // mock ha_health_check meta
- ta := &schema.Table{
- Schema: db,
- Name: table,
- Columns: make([]schema.TableColumn, 0, 2),
- Indexes: make([]*schema.Index, 0),
- }
- ta.AddColumn("id", "bigint(20)", "", "")
- ta.AddColumn("type", "char(1)", "", "")
- c.tableLock.Lock()
- c.tables[key] = ta
- c.tableLock.Unlock()
- return ta, nil
- }
- // if DiscardNoMetaRowEvent is true, we just log this error
- if c.cfg.DiscardNoMetaRowEvent {
- c.tableLock.Lock()
- c.errorTablesGetTime[key] = time.Now()
- c.tableLock.Unlock()
- // log error and return ErrMissingTableMeta
- log.Errorf("canal get table meta err: %v", errors.Trace(err))
- return nil, schema.ErrMissingTableMeta
- }
- return nil, err
- }
-
- c.tableLock.Lock()
- c.tables[key] = t
- if c.cfg.DiscardNoMetaRowEvent {
- // if get table info success, delete this key from errorTablesGetTime
- delete(c.errorTablesGetTime, key)
- }
- c.tableLock.Unlock()
-
- return t, nil
-}
-
-// ClearTableCache clear table cache
-func (c *Canal) ClearTableCache(db []byte, table []byte) {
- key := fmt.Sprintf("%s.%s", db, table)
- c.tableLock.Lock()
- delete(c.tables, key)
- if c.cfg.DiscardNoMetaRowEvent {
- delete(c.errorTablesGetTime, key)
- }
- c.tableLock.Unlock()
-}
-
-// Check MySQL binlog row image, must be in FULL, MINIMAL, NOBLOB
-func (c *Canal) CheckBinlogRowImage(image string) error {
- // need to check MySQL binlog row image? full, minimal or noblob?
- // now only log
- if c.cfg.Flavor == mysql.MySQLFlavor {
- if res, err := c.Execute(`SHOW GLOBAL VARIABLES LIKE "binlog_row_image"`); err != nil {
- return errors.Trace(err)
- } else {
- // MySQL has binlog row image from 5.6, so older will return empty
- rowImage, _ := res.GetString(0, 1)
- if rowImage != "" && !strings.EqualFold(rowImage, image) {
- return errors.Errorf("MySQL uses %s binlog row image, but we want %s", rowImage, image)
- }
- }
- }
-
- return nil
-}
-
-func (c *Canal) checkBinlogRowFormat() error {
- res, err := c.Execute(`SHOW GLOBAL VARIABLES LIKE "binlog_format";`)
- if err != nil {
- return errors.Trace(err)
- } else if f, _ := res.GetString(0, 1); f != "ROW" {
- return errors.Errorf("binlog must ROW format, but %s now", f)
- }
-
- return nil
-}
-
-func (c *Canal) prepareSyncer() error {
- cfg := replication.BinlogSyncerConfig{
- ServerID: c.cfg.ServerID,
- Flavor: c.cfg.Flavor,
- User: c.cfg.User,
- Password: c.cfg.Password,
- Charset: c.cfg.Charset,
- HeartbeatPeriod: c.cfg.HeartbeatPeriod,
- ReadTimeout: c.cfg.ReadTimeout,
- UseDecimal: c.cfg.UseDecimal,
- ParseTime: c.cfg.ParseTime,
- SemiSyncEnabled: c.cfg.SemiSyncEnabled,
- }
-
- if strings.Contains(c.cfg.Addr, "/") {
- cfg.Host = c.cfg.Addr
- } else {
- seps := strings.Split(c.cfg.Addr, ":")
- if len(seps) != 2 {
- return errors.Errorf("invalid mysql addr format %s, must host:port", c.cfg.Addr)
- }
-
- port, err := strconv.ParseUint(seps[1], 10, 16)
- if err != nil {
- return errors.Trace(err)
- }
-
- cfg.Host = seps[0]
- cfg.Port = uint16(port)
- }
-
- c.syncer = replication.NewBinlogSyncer(cfg)
-
- return nil
-}
-
-// Execute a SQL
-func (c *Canal) Execute(cmd string, args ...interface{}) (rr *mysql.Result, err error) {
- c.connLock.Lock()
- defer c.connLock.Unlock()
-
- retryNum := 3
- for i := 0; i < retryNum; i++ {
- if c.conn == nil {
- c.conn, err = client.Connect(c.cfg.Addr, c.cfg.User, c.cfg.Password, "")
- if err != nil {
- return nil, errors.Trace(err)
- }
- }
-
- rr, err = c.conn.Execute(cmd, args...)
- if err != nil && !mysql.ErrorEqual(err, mysql.ErrBadConn) {
- return
- } else if mysql.ErrorEqual(err, mysql.ErrBadConn) {
- c.conn.Close()
- c.conn = nil
- continue
- } else {
- return
- }
- }
- return
-}
-
-func (c *Canal) SyncedPosition() mysql.Position {
- return c.master.Position()
-}
-
-func (c *Canal) SyncedTimestamp() uint32 {
- return c.master.timestamp
-}
-
-func (c *Canal) SyncedGTIDSet() mysql.GTIDSet {
- return c.master.GTIDSet()
-}
diff --git a/vendor/github.com/siddontang/go-mysql/canal/canal_test.go b/vendor/github.com/siddontang/go-mysql/canal/canal_test.go
deleted file mode 100755
index bd16bd2..0000000
--- a/vendor/github.com/siddontang/go-mysql/canal/canal_test.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package canal
-
-import (
- "bytes"
- "flag"
- "fmt"
- "testing"
- "time"
-
- "github.com/juju/errors"
- . "github.com/pingcap/check"
- "github.com/siddontang/go-log/log"
- "github.com/siddontang/go-mysql/mysql"
-)
-
-var testHost = flag.String("host", "127.0.0.1", "MySQL host")
-
-func Test(t *testing.T) {
- TestingT(t)
-}
-
-type canalTestSuite struct {
- c *Canal
-}
-
-var _ = Suite(&canalTestSuite{})
-
-func (s *canalTestSuite) SetUpSuite(c *C) {
- cfg := NewDefaultConfig()
- cfg.Addr = fmt.Sprintf("%s:3306", *testHost)
- cfg.User = "root"
- cfg.HeartbeatPeriod = 200 * time.Millisecond
- cfg.ReadTimeout = 300 * time.Millisecond
- cfg.Dump.ExecutionPath = "mysqldump"
- cfg.Dump.TableDB = "test"
- cfg.Dump.Tables = []string{"canal_test"}
- cfg.Dump.Where = "id>0"
-
- // include & exclude config
- cfg.IncludeTableRegex = make([]string, 1)
- cfg.IncludeTableRegex[0] = ".*\\.canal_test"
- cfg.ExcludeTableRegex = make([]string, 2)
- cfg.ExcludeTableRegex[0] = "mysql\\..*"
- cfg.ExcludeTableRegex[1] = ".*\\..*_inner"
-
- var err error
- s.c, err = NewCanal(cfg)
- c.Assert(err, IsNil)
- s.execute(c, "DROP TABLE IF EXISTS test.canal_test")
- sql := `
- CREATE TABLE IF NOT EXISTS test.canal_test (
- id int AUTO_INCREMENT,
- content blob DEFAULT NULL,
- name varchar(100),
- PRIMARY KEY(id)
- )ENGINE=innodb;
- `
-
- s.execute(c, sql)
-
- s.execute(c, "DELETE FROM test.canal_test")
- s.execute(c, "INSERT INTO test.canal_test (content, name) VALUES (?, ?), (?, ?), (?, ?)", "1", "a", `\0\ndsfasdf`, "b", "", "c")
-
- s.execute(c, "SET GLOBAL binlog_format = 'ROW'")
-
- s.c.SetEventHandler(&testEventHandler{c: c})
- go func() {
- err = s.c.Run()
- c.Assert(err, IsNil)
- }()
-}
-
-func (s *canalTestSuite) TearDownSuite(c *C) {
- // To test the heartbeat and read timeout,so need to sleep 1 seconds without data transmission
- c.Logf("Start testing the heartbeat and read timeout")
- time.Sleep(time.Second)
-
- if s.c != nil {
- s.c.Close()
- s.c = nil
- }
-}
-
-func (s *canalTestSuite) execute(c *C, query string, args ...interface{}) *mysql.Result {
- r, err := s.c.Execute(query, args...)
- c.Assert(err, IsNil)
- return r
-}
-
-type testEventHandler struct {
- DummyEventHandler
-
- c *C
-}
-
-func (h *testEventHandler) OnRow(e *RowsEvent) error {
- log.Infof("OnRow %s %v\n", e.Action, e.Rows)
- return nil
-}
-
-func (h *testEventHandler) String() string {
- return "testEventHandler"
-}
-
-func (s *canalTestSuite) TestCanal(c *C) {
- <-s.c.WaitDumpDone()
-
- for i := 1; i < 10; i++ {
- s.execute(c, "INSERT INTO test.canal_test (name) VALUES (?)", fmt.Sprintf("%d", i))
- }
- s.execute(c, "ALTER TABLE test.canal_test ADD `age` INT(5) NOT NULL AFTER `name`")
- s.execute(c, "INSERT INTO test.canal_test (name,age) VALUES (?,?)", "d", "18")
-
- err := s.c.CatchMasterPos(10 * time.Second)
- c.Assert(err, IsNil)
-}
-
-func (s *canalTestSuite) TestCanalFilter(c *C) {
- // included
- sch, err := s.c.GetTable("test", "canal_test")
- c.Assert(err, IsNil)
- c.Assert(sch, NotNil)
- _, err = s.c.GetTable("not_exist_db", "canal_test")
- c.Assert(errors.Trace(err), Not(Equals), ErrExcludedTable)
- // excluded
- sch, err = s.c.GetTable("test", "canal_test_inner")
- c.Assert(errors.Cause(err), Equals, ErrExcludedTable)
- c.Assert(sch, IsNil)
- sch, err = s.c.GetTable("mysql", "canal_test")
- c.Assert(errors.Cause(err), Equals, ErrExcludedTable)
- c.Assert(sch, IsNil)
- sch, err = s.c.GetTable("not_exist_db", "not_canal_test")
- c.Assert(errors.Cause(err), Equals, ErrExcludedTable)
- c.Assert(sch, IsNil)
-}
-
-func TestCreateTableExp(t *testing.T) {
- cases := []string{
- "CREATE TABLE `mydb.mytable` (`id` int(10)) ENGINE=InnoDB",
- "CREATE TABLE `mytable` (`id` int(10)) ENGINE=InnoDB",
- "CREATE TABLE IF NOT EXISTS `mytable` (`id` int(10)) ENGINE=InnoDB",
- "CREATE TABLE IF NOT EXISTS mytable (`id` int(10)) ENGINE=InnoDB",
- }
- table := []byte("mytable")
- db := []byte("mydb")
- for _, s := range cases {
- m := expCreateTable.FindSubmatch([]byte(s))
- mLen := len(m)
- if m == nil || !bytes.Equal(m[mLen-1], table) || (len(m[mLen-2]) > 0 && !bytes.Equal(m[mLen-2], db)) {
- t.Fatalf("TestCreateTableExp: case %s failed\n", s)
- }
- }
-}
-
-func TestAlterTableExp(t *testing.T) {
- cases := []string{
- "ALTER TABLE `mydb`.`mytable` ADD `field2` DATE NULL AFTER `field1`;",
- "ALTER TABLE `mytable` ADD `field2` DATE NULL AFTER `field1`;",
- "ALTER TABLE mydb.mytable ADD `field2` DATE NULL AFTER `field1`;",
- "ALTER TABLE mytable ADD `field2` DATE NULL AFTER `field1`;",
- "ALTER TABLE mydb.mytable ADD field2 DATE NULL AFTER `field1`;",
- }
-
- table := []byte("mytable")
- db := []byte("mydb")
- for _, s := range cases {
- m := expAlterTable.FindSubmatch([]byte(s))
- mLen := len(m)
- if m == nil || !bytes.Equal(m[mLen-1], table) || (len(m[mLen-2]) > 0 && !bytes.Equal(m[mLen-2], db)) {
- t.Fatalf("TestAlterTableExp: case %s failed\n", s)
- }
- }
-}
-
-func TestRenameTableExp(t *testing.T) {
- cases := []string{
- "rename table `mydb`.`mytable` to `mydb`.`mytable1`",
- "rename table `mytable` to `mytable1`",
- "rename table mydb.mytable to mydb.mytable1",
- "rename table mytable to mytable1",
-
- "rename table `mydb`.`mytable` to `mydb`.`mytable2`, `mydb`.`mytable3` to `mydb`.`mytable1`",
- "rename table `mytable` to `mytable2`, `mytable3` to `mytable1`",
- "rename table mydb.mytable to mydb.mytable2, mydb.mytable3 to mydb.mytable1",
- "rename table mytable to mytable2, mytable3 to mytable1",
- }
- table := []byte("mytable")
- db := []byte("mydb")
- for _, s := range cases {
- m := expRenameTable.FindSubmatch([]byte(s))
- mLen := len(m)
- if m == nil || !bytes.Equal(m[mLen-1], table) || (len(m[mLen-2]) > 0 && !bytes.Equal(m[mLen-2], db)) {
- t.Fatalf("TestRenameTableExp: case %s failed\n", s)
- }
- }
-}
-
-func TestDropTableExp(t *testing.T) {
- cases := []string{
- "drop table test1",
- "DROP TABLE test1",
- "DROP TABLE test1",
- "DROP table IF EXISTS test.test1",
- "drop table `test1`",
- "DROP TABLE `test1`",
- "DROP table IF EXISTS `test`.`test1`",
- "DROP TABLE `test1` /* generated by server */",
- "DROP table if exists test1",
- "DROP table if exists `test1`",
- "DROP table if exists test.test1",
- "DROP table if exists `test`.test1",
- "DROP table if exists `test`.`test1`",
- "DROP table if exists test.`test1`",
- "DROP table if exists test.`test1`",
- }
-
- table := []byte("test1")
- for _, s := range cases {
- m := expDropTable.FindSubmatch([]byte(s))
- mLen := len(m)
- if m == nil {
- t.Fatalf("TestDropTableExp: case %s failed\n", s)
- return
- }
- if mLen < 4 {
- t.Fatalf("TestDropTableExp: case %s failed\n", s)
- return
- }
- if !bytes.Equal(m[mLen-1], table) {
- t.Fatalf("TestDropTableExp: case %s failed\n", s)
- }
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/canal/config.go b/vendor/github.com/siddontang/go-mysql/canal/config.go
deleted file mode 100644
index d10513c..0000000
--- a/vendor/github.com/siddontang/go-mysql/canal/config.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package canal
-
-import (
- "io/ioutil"
- "math/rand"
- "time"
-
- "github.com/BurntSushi/toml"
- "github.com/juju/errors"
- "github.com/siddontang/go-mysql/mysql"
-)
-
-type DumpConfig struct {
- // mysqldump execution path, like mysqldump or /usr/bin/mysqldump, etc...
- // If not set, ignore using mysqldump.
- ExecutionPath string `toml:"mysqldump"`
-
- // Will override Databases, tables is in database table_db
- Tables []string `toml:"tables"`
- TableDB string `toml:"table_db"`
-
- Databases []string `toml:"dbs"`
-
- // Ignore table format is db.table
- IgnoreTables []string `toml:"ignore_tables"`
-
- // Dump only selected records. Quotes are mandatory
- Where string `toml:"where"`
-
- // If true, discard error msg, else, output to stderr
- DiscardErr bool `toml:"discard_err"`
-
- // Set true to skip --master-data if we have no privilege to do
- // 'FLUSH TABLES WITH READ LOCK'
- SkipMasterData bool `toml:"skip_master_data"`
-
- // Set to change the default max_allowed_packet size
- MaxAllowedPacketMB int `toml:"max_allowed_packet_mb"`
-}
-
-type Config struct {
- Addr string `toml:"addr"`
- User string `toml:"user"`
- Password string `toml:"password"`
-
- Charset string `toml:"charset"`
- ServerID uint32 `toml:"server_id"`
- Flavor string `toml:"flavor"`
- HeartbeatPeriod time.Duration `toml:"heartbeat_period"`
- ReadTimeout time.Duration `toml:"read_timeout"`
-
- // IncludeTableRegex or ExcludeTableRegex should contain database name
- // Only a table which matches IncludeTableRegex and dismatches ExcludeTableRegex will be processed
- // eg, IncludeTableRegex : [".*\\.canal"], ExcludeTableRegex : ["mysql\\..*"]
- // this will include all database's 'canal' table, except database 'mysql'
- // Default IncludeTableRegex and ExcludeTableRegex are empty, this will include all tables
- IncludeTableRegex []string `toml:"include_table_regex"`
- ExcludeTableRegex []string `toml:"exclude_table_regex"`
-
- // discard row event without table meta
- DiscardNoMetaRowEvent bool `toml:"discard_no_meta_row_event"`
-
- Dump DumpConfig `toml:"dump"`
-
- UseDecimal bool `toml:"use_decimal"`
- ParseTime bool `toml:"parse_time"`
-
- // SemiSyncEnabled enables semi-sync or not.
- SemiSyncEnabled bool `toml:"semi_sync_enabled"`
-}
-
-func NewConfigWithFile(name string) (*Config, error) {
- data, err := ioutil.ReadFile(name)
- if err != nil {
- return nil, errors.Trace(err)
- }
-
- return NewConfig(string(data))
-}
-
-func NewConfig(data string) (*Config, error) {
- var c Config
-
- _, err := toml.Decode(data, &c)
- if err != nil {
- return nil, errors.Trace(err)
- }
-
- return &c, nil
-}
-
-func NewDefaultConfig() *Config {
- c := new(Config)
-
- c.Addr = "127.0.0.1:3306"
- c.User = "root"
- c.Password = ""
-
- c.Charset = mysql.DEFAULT_CHARSET
- c.ServerID = uint32(rand.New(rand.NewSource(time.Now().Unix())).Intn(1000)) + 1001
-
- c.Flavor = "mysql"
-
- c.Dump.ExecutionPath = "mysqldump"
- c.Dump.DiscardErr = true
- c.Dump.SkipMasterData = false
-
- return c
-}
diff --git a/vendor/github.com/siddontang/go-mysql/canal/dump.go b/vendor/github.com/siddontang/go-mysql/canal/dump.go
deleted file mode 100644
index 8dcac2b..0000000
--- a/vendor/github.com/siddontang/go-mysql/canal/dump.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package canal
-
-import (
- "encoding/hex"
- "fmt"
- "strconv"
- "strings"
- "time"
-
- "github.com/juju/errors"
- "github.com/shopspring/decimal"
- "github.com/siddontang/go-log/log"
- "github.com/siddontang/go-mysql/mysql"
- "github.com/siddontang/go-mysql/schema"
-)
-
-type dumpParseHandler struct {
- c *Canal
- name string
- pos uint64
- gset mysql.GTIDSet
-}
-
-func (h *dumpParseHandler) BinLog(name string, pos uint64) error {
- h.name = name
- h.pos = pos
- return nil
-}
-
-func (h *dumpParseHandler) Data(db string, table string, values []string) error {
- if err := h.c.ctx.Err(); err != nil {
- return err
- }
-
- tableInfo, err := h.c.GetTable(db, table)
- if err != nil {
- e := errors.Cause(err)
- if e == ErrExcludedTable ||
- e == schema.ErrTableNotExist ||
- e == schema.ErrMissingTableMeta {
- return nil
- }
- log.Errorf("get %s.%s information err: %v", db, table, err)
- return errors.Trace(err)
- }
-
- vs := make([]interface{}, len(values))
-
- for i, v := range values {
- if v == "NULL" {
- vs[i] = nil
- } else if v == "_binary ''" {
- vs[i] = []byte{}
- } else if v[0] != '\'' {
- if tableInfo.Columns[i].Type == schema.TYPE_NUMBER {
- n, err := strconv.ParseInt(v, 10, 64)
- if err != nil {
- return fmt.Errorf("parse row %v at %d error %v, int expected", values, i, err)
- }
- vs[i] = n
- } else if tableInfo.Columns[i].Type == schema.TYPE_FLOAT {
- f, err := strconv.ParseFloat(v, 64)
- if err != nil {
- return fmt.Errorf("parse row %v at %d error %v, float expected", values, i, err)
- }
- vs[i] = f
- } else if tableInfo.Columns[i].Type == schema.TYPE_DECIMAL {
- if h.c.cfg.UseDecimal {
- d, err := decimal.NewFromString(v)
- if err != nil {
- return fmt.Errorf("parse row %v at %d error %v, decimal expected", values, i, err)
- }
- vs[i] = d
- } else {
- f, err := strconv.ParseFloat(v, 64)
- if err != nil {
- return fmt.Errorf("parse row %v at %d error %v, float expected", values, i, err)
- }
- vs[i] = f
- }
- } else if strings.HasPrefix(v, "0x") {
- buf, err := hex.DecodeString(v[2:])
- if err != nil {
- return fmt.Errorf("parse row %v at %d error %v, hex literal expected", values, i, err)
- }
- vs[i] = string(buf)
- } else {
- return fmt.Errorf("parse row %v error, invalid type at %d", values, i)
- }
- } else {
- vs[i] = v[1 : len(v)-1]
- }
- }
-
- events := newRowsEvent(tableInfo, InsertAction, [][]interface{}{vs}, nil)
- return h.c.eventHandler.OnRow(events)
-}
-
-func (c *Canal) AddDumpDatabases(dbs ...string) {
- if c.dumper == nil {
- return
- }
-
- c.dumper.AddDatabases(dbs...)
-}
-
-func (c *Canal) AddDumpTables(db string, tables ...string) {
- if c.dumper == nil {
- return
- }
-
- c.dumper.AddTables(db, tables...)
-}
-
-func (c *Canal) AddDumpIgnoreTables(db string, tables ...string) {
- if c.dumper == nil {
- return
- }
-
- c.dumper.AddIgnoreTables(db, tables...)
-}
-
-func (c *Canal) dump() error {
- if c.dumper == nil {
- return errors.New("mysqldump does not exist")
- }
-
- c.master.UpdateTimestamp(uint32(time.Now().Unix()))
-
- h := &dumpParseHandler{c: c}
- // If users call StartFromGTID with empty position to start dumping with gtid,
- // we record the current gtid position before dump starts.
- //
- // See tryDump() to see when dump is skipped.
- if c.master.GTIDSet() != nil {
- gset, err := c.GetMasterGTIDSet()
- if err != nil {
- return errors.Trace(err)
- }
- h.gset = gset
- }
-
- if c.cfg.Dump.SkipMasterData {
- pos, err := c.GetMasterPos()
- if err != nil {
- return errors.Trace(err)
- }
- log.Infof("skip master data, get current binlog position %v", pos)
- h.name = pos.Name
- h.pos = uint64(pos.Pos)
- }
-
- start := time.Now()
- log.Info("try dump MySQL and parse")
- if err := c.dumper.DumpAndParse(h); err != nil {
- return errors.Trace(err)
- }
-
- pos := mysql.Position{Name: h.name, Pos: uint32(h.pos)}
- c.master.Update(pos)
- if err := c.eventHandler.OnPosSynced(pos, true); err != nil {
- return errors.Trace(err)
- }
- var startPos fmt.Stringer = pos
- if h.gset != nil {
- c.master.UpdateGTIDSet(h.gset)
- startPos = h.gset
- }
- log.Infof("dump MySQL and parse OK, use %0.2f seconds, start binlog replication at %s",
- time.Now().Sub(start).Seconds(), startPos)
- return nil
-}
-
-func (c *Canal) tryDump() error {
- pos := c.master.Position()
- gset := c.master.GTIDSet()
- if (len(pos.Name) > 0 && pos.Pos > 0) ||
- (gset != nil && gset.String() != "") {
- // we will sync with binlog name and position
- log.Infof("skip dump, use last binlog replication pos %s or GTID set %s", pos, gset)
- return nil
- }
-
- if c.dumper == nil {
- log.Info("skip dump, no mysqldump")
- return nil
- }
-
- return c.dump()
-}
diff --git a/vendor/github.com/siddontang/go-mysql/canal/handler.go b/vendor/github.com/siddontang/go-mysql/canal/handler.go
deleted file mode 100644
index 4e47cb9..0000000
--- a/vendor/github.com/siddontang/go-mysql/canal/handler.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package canal
-
-import (
- "github.com/siddontang/go-mysql/mysql"
- "github.com/siddontang/go-mysql/replication"
-)
-
-type EventHandler interface {
- OnRotate(roateEvent *replication.RotateEvent) error
- // OnTableChanged is called when the table is created, altered, renamed or dropped.
- // You need to clear the associated data like cache with the table.
- // It will be called before OnDDL.
- OnTableChanged(schema string, table string) error
- OnDDL(nextPos mysql.Position, queryEvent *replication.QueryEvent) error
- OnRow(e *RowsEvent) error
- OnXID(nextPos mysql.Position) error
- OnGTID(gtid mysql.GTIDSet) error
- // OnPosSynced Use your own way to sync position. When force is true, sync position immediately.
- OnPosSynced(pos mysql.Position, force bool) error
- String() string
-}
-
-type DummyEventHandler struct {
-}
-
-func (h *DummyEventHandler) OnRotate(*replication.RotateEvent) error { return nil }
-func (h *DummyEventHandler) OnTableChanged(schema string, table string) error { return nil }
-func (h *DummyEventHandler) OnDDL(nextPos mysql.Position, queryEvent *replication.QueryEvent) error {
- return nil
-}
-func (h *DummyEventHandler) OnRow(*RowsEvent) error { return nil }
-func (h *DummyEventHandler) OnXID(mysql.Position) error { return nil }
-func (h *DummyEventHandler) OnGTID(mysql.GTIDSet) error { return nil }
-func (h *DummyEventHandler) OnPosSynced(mysql.Position, bool) error { return nil }
-func (h *DummyEventHandler) String() string { return "DummyEventHandler" }
-
-// `SetEventHandler` registers the sync handler, you must register your
-// own handler before starting Canal.
-func (c *Canal) SetEventHandler(h EventHandler) {
- c.eventHandler = h
-}
diff --git a/vendor/github.com/siddontang/go-mysql/canal/master.go b/vendor/github.com/siddontang/go-mysql/canal/master.go
deleted file mode 100644
index 10a230b..0000000
--- a/vendor/github.com/siddontang/go-mysql/canal/master.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package canal
-
-import (
- "sync"
-
- "github.com/siddontang/go-log/log"
- "github.com/siddontang/go-mysql/mysql"
-)
-
-type masterInfo struct {
- sync.RWMutex
-
- pos mysql.Position
-
- gset mysql.GTIDSet
-
- timestamp uint32
-}
-
-func (m *masterInfo) Update(pos mysql.Position) {
- log.Debugf("update master position %s", pos)
-
- m.Lock()
- m.pos = pos
- m.Unlock()
-}
-
-func (m *masterInfo) UpdateTimestamp(ts uint32) {
- log.Debugf("update master timestamp %s", ts)
-
- m.Lock()
- m.timestamp = ts
- m.Unlock()
-}
-
-func (m *masterInfo) UpdateGTIDSet(gset mysql.GTIDSet) {
- log.Debugf("update master gtid set %s", gset)
-
- m.Lock()
- m.gset = gset
- m.Unlock()
-}
-
-func (m *masterInfo) Position() mysql.Position {
- m.RLock()
- defer m.RUnlock()
-
- return m.pos
-}
-
-func (m *masterInfo) Timestamp() uint32 {
- m.RLock()
- defer m.RUnlock()
-
- return m.timestamp
-}
-
-func (m *masterInfo) GTIDSet() mysql.GTIDSet {
- m.RLock()
- defer m.RUnlock()
-
- if m.gset == nil {
- return nil
- }
- return m.gset.Clone()
-}
diff --git a/vendor/github.com/siddontang/go-mysql/canal/rows.go b/vendor/github.com/siddontang/go-mysql/canal/rows.go
deleted file mode 100644
index e246ee5..0000000
--- a/vendor/github.com/siddontang/go-mysql/canal/rows.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package canal
-
-import (
- "fmt"
-
- "github.com/siddontang/go-mysql/replication"
- "github.com/siddontang/go-mysql/schema"
-)
-
-// The action name for sync.
-const (
- UpdateAction = "update"
- InsertAction = "insert"
- DeleteAction = "delete"
-)
-
-// RowsEvent is the event for row replication.
-type RowsEvent struct {
- Table *schema.Table
- Action string
- // changed row list
- // binlog has three update event version, v0, v1 and v2.
- // for v1 and v2, the rows number must be even.
- // Two rows for one event, format is [before update row, after update row]
- // for update v0, only one row for a event, and we don't support this version.
- Rows [][]interface{}
- // Header can be used to inspect the event
- Header *replication.EventHeader
-}
-
-func newRowsEvent(table *schema.Table, action string, rows [][]interface{}, header *replication.EventHeader) *RowsEvent {
- e := new(RowsEvent)
-
- e.Table = table
- e.Action = action
- e.Rows = rows
- e.Header = header
-
- e.handleUnsigned()
-
- return e
-}
-
-func (r *RowsEvent) handleUnsigned() {
- // Handle Unsigned Columns here, for binlog replication, we can't know the integer is unsigned or not,
- // so we use int type but this may cause overflow outside sometimes, so we must convert to the really .
- // unsigned type
- if len(r.Table.UnsignedColumns) == 0 {
- return
- }
-
- for i := 0; i < len(r.Rows); i++ {
- for _, index := range r.Table.UnsignedColumns {
- switch t := r.Rows[i][index].(type) {
- case int8:
- r.Rows[i][index] = uint8(t)
- case int16:
- r.Rows[i][index] = uint16(t)
- case int32:
- r.Rows[i][index] = uint32(t)
- case int64:
- r.Rows[i][index] = uint64(t)
- case int:
- r.Rows[i][index] = uint(t)
- default:
- // nothing to do
- }
- }
- }
-}
-
-// String implements fmt.Stringer interface.
-func (r *RowsEvent) String() string {
- return fmt.Sprintf("%s %s %v", r.Action, r.Table, r.Rows)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/canal/sync.go b/vendor/github.com/siddontang/go-mysql/canal/sync.go
deleted file mode 100644
index 4146a2d..0000000
--- a/vendor/github.com/siddontang/go-mysql/canal/sync.go
+++ /dev/null
@@ -1,275 +0,0 @@
-package canal
-
-import (
- "fmt"
- "regexp"
- "time"
-
- "github.com/juju/errors"
- "github.com/satori/go.uuid"
- "github.com/siddontang/go-log/log"
- "github.com/siddontang/go-mysql/mysql"
- "github.com/siddontang/go-mysql/replication"
- "github.com/siddontang/go-mysql/schema"
-)
-
-var (
- expCreateTable = regexp.MustCompile("(?i)^CREATE\\sTABLE(\\sIF\\sNOT\\sEXISTS)?\\s`{0,1}(.*?)`{0,1}\\.{0,1}`{0,1}([^`\\.]+?)`{0,1}\\s.*")
- expAlterTable = regexp.MustCompile("(?i)^ALTER\\sTABLE\\s.*?`{0,1}(.*?)`{0,1}\\.{0,1}`{0,1}([^`\\.]+?)`{0,1}\\s.*")
- expRenameTable = regexp.MustCompile("(?i)^RENAME\\sTABLE\\s.*?`{0,1}(.*?)`{0,1}\\.{0,1}`{0,1}([^`\\.]+?)`{0,1}\\s{1,}TO\\s.*?")
- expDropTable = regexp.MustCompile("(?i)^DROP\\sTABLE(\\sIF\\sEXISTS){0,1}\\s`{0,1}(.*?)`{0,1}\\.{0,1}`{0,1}([^`\\.]+?)`{0,1}(?:$|\\s)")
- expTruncateTable = regexp.MustCompile("(?i)^TRUNCATE\\s+(?:TABLE\\s+)?(?:`?([^`\\s]+)`?\\.`?)?([^`\\s]+)`?")
-)
-
-func (c *Canal) startSyncer() (*replication.BinlogStreamer, error) {
- gset := c.master.GTIDSet()
- if gset == nil {
- pos := c.master.Position()
- s, err := c.syncer.StartSync(pos)
- if err != nil {
- return nil, errors.Errorf("start sync replication at binlog %v error %v", pos, err)
- }
- log.Infof("start sync binlog at binlog file %v", pos)
- return s, nil
- } else {
- s, err := c.syncer.StartSyncGTID(gset)
- if err != nil {
- return nil, errors.Errorf("start sync replication at GTID set %v error %v", gset, err)
- }
- log.Infof("start sync binlog at GTID set %v", gset)
- return s, nil
- }
-}
-
-func (c *Canal) runSyncBinlog() error {
- s, err := c.startSyncer()
- if err != nil {
- return err
- }
-
- savePos := false
- force := false
- for {
- ev, err := s.GetEvent(c.ctx)
-
- if err != nil {
- return errors.Trace(err)
- }
- savePos = false
- force = false
- pos := c.master.Position()
-
- curPos := pos.Pos
- //next binlog pos
- pos.Pos = ev.Header.LogPos
-
- // We only save position with RotateEvent and XIDEvent.
- // For RowsEvent, we can't save the position until meeting XIDEvent
- // which tells the whole transaction is over.
- // TODO: If we meet any DDL query, we must save too.
- switch e := ev.Event.(type) {
- case *replication.RotateEvent:
- pos.Name = string(e.NextLogName)
- pos.Pos = uint32(e.Position)
- log.Infof("rotate binlog to %s", pos)
- savePos = true
- force = true
- if err = c.eventHandler.OnRotate(e); err != nil {
- return errors.Trace(err)
- }
- case *replication.RowsEvent:
- // we only focus row based event
- err = c.handleRowsEvent(ev)
- if err != nil {
- e := errors.Cause(err)
- // if error is not ErrExcludedTable or ErrTableNotExist or ErrMissingTableMeta, stop canal
- if e != ErrExcludedTable &&
- e != schema.ErrTableNotExist &&
- e != schema.ErrMissingTableMeta {
- log.Errorf("handle rows event at (%s, %d) error %v", pos.Name, curPos, err)
- return errors.Trace(err)
- }
- }
- continue
- case *replication.XIDEvent:
- if e.GSet != nil {
- c.master.UpdateGTIDSet(e.GSet)
- }
- savePos = true
- // try to save the position later
- if err := c.eventHandler.OnXID(pos); err != nil {
- return errors.Trace(err)
- }
- case *replication.MariadbGTIDEvent:
- // try to save the GTID later
- gtid, err := mysql.ParseMariadbGTIDSet(e.GTID.String())
- if err != nil {
- return errors.Trace(err)
- }
- if err := c.eventHandler.OnGTID(gtid); err != nil {
- return errors.Trace(err)
- }
- case *replication.GTIDEvent:
- u, _ := uuid.FromBytes(e.SID)
- gtid, err := mysql.ParseMysqlGTIDSet(fmt.Sprintf("%s:%d", u.String(), e.GNO))
- if err != nil {
- return errors.Trace(err)
- }
- if err := c.eventHandler.OnGTID(gtid); err != nil {
- return errors.Trace(err)
- }
- case *replication.QueryEvent:
- if e.GSet != nil {
- c.master.UpdateGTIDSet(e.GSet)
- }
- var (
- mb [][]byte
- db []byte
- table []byte
- )
- regexps := []regexp.Regexp{*expCreateTable, *expAlterTable, *expRenameTable, *expDropTable, *expTruncateTable}
- for _, reg := range regexps {
- mb = reg.FindSubmatch(e.Query)
- if len(mb) != 0 {
- break
- }
- }
- mbLen := len(mb)
- if mbLen == 0 {
- continue
- }
-
- // the first last is table name, the second last is database name(if exists)
- if len(mb[mbLen-2]) == 0 {
- db = e.Schema
- } else {
- db = mb[mbLen-2]
- }
- table = mb[mbLen-1]
-
- savePos = true
- force = true
- c.ClearTableCache(db, table)
- log.Infof("table structure changed, clear table cache: %s.%s\n", db, table)
- if err = c.eventHandler.OnTableChanged(string(db), string(table)); err != nil && errors.Cause(err) != schema.ErrTableNotExist {
- return errors.Trace(err)
- }
-
- // Now we only handle Table Changed DDL, maybe we will support more later.
- if err = c.eventHandler.OnDDL(pos, e); err != nil {
- return errors.Trace(err)
- }
- default:
- continue
- }
-
- if savePos {
- c.master.Update(pos)
- c.master.UpdateTimestamp(ev.Header.Timestamp)
- if err := c.eventHandler.OnPosSynced(pos, force); err != nil {
- return errors.Trace(err)
- }
- }
- }
-
- return nil
-}
-
-func (c *Canal) handleRowsEvent(e *replication.BinlogEvent) error {
- ev := e.Event.(*replication.RowsEvent)
-
- // Caveat: table may be altered at runtime.
- schema := string(ev.Table.Schema)
- table := string(ev.Table.Table)
-
- t, err := c.GetTable(schema, table)
- if err != nil {
- return err
- }
- var action string
- switch e.Header.EventType {
- case replication.WRITE_ROWS_EVENTv1, replication.WRITE_ROWS_EVENTv2:
- action = InsertAction
- case replication.DELETE_ROWS_EVENTv1, replication.DELETE_ROWS_EVENTv2:
- action = DeleteAction
- case replication.UPDATE_ROWS_EVENTv1, replication.UPDATE_ROWS_EVENTv2:
- action = UpdateAction
- default:
- return errors.Errorf("%s not supported now", e.Header.EventType)
- }
- events := newRowsEvent(t, action, ev.Rows, e.Header)
- return c.eventHandler.OnRow(events)
-}
-
-func (c *Canal) FlushBinlog() error {
- _, err := c.Execute("FLUSH BINARY LOGS")
- return errors.Trace(err)
-}
-
-func (c *Canal) WaitUntilPos(pos mysql.Position, timeout time.Duration) error {
- timer := time.NewTimer(timeout)
- for {
- select {
- case <-timer.C:
- return errors.Errorf("wait position %v too long > %s", pos, timeout)
- default:
- err := c.FlushBinlog()
- if err != nil {
- return errors.Trace(err)
- }
- curPos := c.master.Position()
- if curPos.Compare(pos) >= 0 {
- return nil
- } else {
- log.Debugf("master pos is %v, wait catching %v", curPos, pos)
- time.Sleep(100 * time.Millisecond)
- }
- }
- }
-
- return nil
-}
-
-func (c *Canal) GetMasterPos() (mysql.Position, error) {
- rr, err := c.Execute("SHOW MASTER STATUS")
- if err != nil {
- return mysql.Position{}, errors.Trace(err)
- }
-
- name, _ := rr.GetString(0, 0)
- pos, _ := rr.GetInt(0, 1)
-
- return mysql.Position{Name: name, Pos: uint32(pos)}, nil
-}
-
-func (c *Canal) GetMasterGTIDSet() (mysql.GTIDSet, error) {
- query := ""
- switch c.cfg.Flavor {
- case mysql.MariaDBFlavor:
- query = "SELECT @@GLOBAL.gtid_current_pos"
- default:
- query = "SELECT @@GLOBAL.GTID_EXECUTED"
- }
- rr, err := c.Execute(query)
- if err != nil {
- return nil, errors.Trace(err)
- }
- gx, err := rr.GetString(0, 0)
- if err != nil {
- return nil, errors.Trace(err)
- }
- gset, err := mysql.ParseGTIDSet(c.cfg.Flavor, gx)
- if err != nil {
- return nil, errors.Trace(err)
- }
- return gset, nil
-}
-
-func (c *Canal) CatchMasterPos(timeout time.Duration) error {
- pos, err := c.GetMasterPos()
- if err != nil {
- return errors.Trace(err)
- }
-
- return c.WaitUntilPos(pos, timeout)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/clear_vendor.sh b/vendor/github.com/siddontang/go-mysql/clear_vendor.sh
deleted file mode 100755
index 81ba6b1..0000000
--- a/vendor/github.com/siddontang/go-mysql/clear_vendor.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-find vendor \( -type f -or -type l \) -not -name "*.go" -not -name "LICENSE" -not -name "*.s" -not -name "PATENTS" -not -name "*.h" -not -name "*.c" | xargs -I {} rm {}
-# delete all test files
-find vendor -type f -name "*_generated.go" | xargs -I {} rm {}
-find vendor -type f -name "*_test.go" | xargs -I {} rm {}
-find vendor -type d -name "_vendor" | xargs -I {} rm -rf {}
-find vendor -type d -empty | xargs -I {} rm -rf {}
\ No newline at end of file
diff --git a/vendor/github.com/siddontang/go-mysql/client/client_test.go b/vendor/github.com/siddontang/go-mysql/client/client_test.go
deleted file mode 100644
index 04bfdb2..0000000
--- a/vendor/github.com/siddontang/go-mysql/client/client_test.go
+++ /dev/null
@@ -1,397 +0,0 @@
-package client
-
-import (
- "flag"
- "fmt"
- "strings"
- "testing"
-
- "github.com/juju/errors"
- . "github.com/pingcap/check"
- "github.com/siddontang/go-mysql/test_util/test_keys"
-
- "github.com/siddontang/go-mysql/mysql"
-)
-
-var testHost = flag.String("host", "127.0.0.1", "MySQL server host")
-// We cover the whole range of MySQL server versions using docker-compose to bind them to different ports for testing.
-// MySQL is constantly updating auth plugin to make it secure:
-// starting from MySQL 8.0.4, a new auth plugin is introduced, causing plain password auth to fail with error:
-// ERROR 1251 (08004): Client does not support authentication protocol requested by server; consider upgrading MySQL client
-// Hint: use docker-compose to start corresponding MySQL docker containers and add the their ports here
-var testPort = flag.String("port", "3306", "MySQL server port") // choose one or more form 5561,5641,3306,5722,8003,8012,8013, e.g. '3306,5722,8003'
-var testUser = flag.String("user", "root", "MySQL user")
-var testPassword = flag.String("pass", "", "MySQL password")
-var testDB = flag.String("db", "test", "MySQL test database")
-
-func Test(t *testing.T) {
- segs := strings.Split(*testPort, ",")
- for _, seg := range segs {
- Suite(&clientTestSuite{port: seg})
- }
- TestingT(t)
-}
-
-type clientTestSuite struct {
- c *Conn
- port string
-}
-
-func (s *clientTestSuite) SetUpSuite(c *C) {
- var err error
- addr := fmt.Sprintf("%s:%s", *testHost, s.port)
- s.c, err = Connect(addr, *testUser, *testPassword, "")
- if err != nil {
- c.Fatal(err)
- }
-
- _, err = s.c.Execute("CREATE DATABASE IF NOT EXISTS " + *testDB)
- c.Assert(err, IsNil)
-
- _, err = s.c.Execute("USE " + *testDB)
- c.Assert(err, IsNil)
-
- s.testConn_CreateTable(c)
- s.testStmt_CreateTable(c)
-}
-
-func (s *clientTestSuite) TearDownSuite(c *C) {
- if s.c == nil {
- return
- }
-
- s.testConn_DropTable(c)
- s.testStmt_DropTable(c)
-
- if s.c != nil {
- s.c.Close()
- }
-}
-
-func (s *clientTestSuite) testConn_DropTable(c *C) {
- _, err := s.c.Execute("drop table if exists mixer_test_conn")
- c.Assert(err, IsNil)
-}
-
-func (s *clientTestSuite) testConn_CreateTable(c *C) {
- str := `CREATE TABLE IF NOT EXISTS mixer_test_conn (
- id BIGINT(64) UNSIGNED NOT NULL,
- str VARCHAR(256),
- f DOUBLE,
- e enum("test1", "test2"),
- u tinyint unsigned,
- i tinyint,
- PRIMARY KEY (id)
- ) ENGINE=InnoDB DEFAULT CHARSET=utf8`
-
- _, err := s.c.Execute(str)
- c.Assert(err, IsNil)
-}
-
-func (s *clientTestSuite) TestConn_Ping(c *C) {
- err := s.c.Ping()
- c.Assert(err, IsNil)
-}
-
-// NOTE for MySQL 5.5 and 5.6, server side has to config SSL to pass the TLS test, otherwise, it will throw error that
-// MySQL server does not support TLS required by the client. However, for MySQL 5.7 and above, auto generated certificates
-// are used by default so that manual config is no longer necessary.
-func (s *clientTestSuite) TestConn_TLS_Verify(c *C) {
- // Verify that the provided tls.Config is used when attempting to connect to mysql.
- // An empty tls.Config will result in a connection error.
- addr := fmt.Sprintf("%s:%s", *testHost, s.port)
- _, err := Connect(addr, *testUser, *testPassword, *testDB, func(c *Conn) {
- c.UseSSL(false)
- })
- if err == nil {
- c.Fatal("expected error")
- }
-
- expected := "either ServerName or InsecureSkipVerify must be specified in the tls.Config"
- if !strings.Contains(err.Error(), expected) {
- c.Fatalf("expected '%s' to contain '%s'", err.Error(), expected)
- }
-}
-
-func (s *clientTestSuite) TestConn_TLS_Skip_Verify(c *C) {
- // An empty tls.Config will result in a connection error but we can configure to skip it.
- addr := fmt.Sprintf("%s:%s", *testHost, s.port)
- _, err := Connect(addr, *testUser, *testPassword, *testDB, func(c *Conn) {
- c.UseSSL(true)
- })
- c.Assert(err, Equals, nil)
-}
-
-func (s *clientTestSuite) TestConn_TLS_Certificate(c *C) {
- // This test uses the TLS suite in 'go-mysql/docker/resources'. The certificates are not valid for any names.
- // And if server uses auto-generated certificates, it will be an error like:
- // "x509: certificate is valid for MySQL_Server_8.0.12_Auto_Generated_Server_Certificate, not not-a-valid-name"
- tlsConfig := NewClientTLSConfig(test_keys.CaPem, test_keys.CertPem, test_keys.KeyPem, false, "not-a-valid-name")
- addr := fmt.Sprintf("%s:%s", *testHost, s.port)
- _, err := Connect(addr, *testUser, *testPassword, *testDB, func(c *Conn) {
- c.SetTLSConfig(tlsConfig)
- })
- if err == nil {
- c.Fatal("expected error")
- }
- if !strings.Contains(errors.Details(err), "certificate is not valid for any names") &&
- !strings.Contains(errors.Details(err), "certificate is valid for") {
- c.Fatalf("expected errors for server name verification, but got unknown error: %s", errors.Details(err))
- }
-}
-
-func (s *clientTestSuite) TestConn_Insert(c *C) {
- str := `insert into mixer_test_conn (id, str, f, e) values(1, "a", 3.14, "test1")`
-
- pkg, err := s.c.Execute(str)
- c.Assert(err, IsNil)
- c.Assert(pkg.AffectedRows, Equals, uint64(1))
-}
-
-func (s *clientTestSuite) TestConn_Select(c *C) {
- str := `select str, f, e from mixer_test_conn where id = 1`
-
- result, err := s.c.Execute(str)
- c.Assert(err, IsNil)
- c.Assert(result.Fields, HasLen, 3)
- c.Assert(result.Values, HasLen, 1)
-
- ss, _ := result.GetString(0, 0)
- c.Assert(ss, Equals, "a")
-
- f, _ := result.GetFloat(0, 1)
- c.Assert(f, Equals, float64(3.14))
-
- e, _ := result.GetString(0, 2)
- c.Assert(e, Equals, "test1")
-
- ss, _ = result.GetStringByName(0, "str")
- c.Assert(ss, Equals, "a")
-
- f, _ = result.GetFloatByName(0, "f")
- c.Assert(f, Equals, float64(3.14))
-
- e, _ = result.GetStringByName(0, "e")
- c.Assert(e, Equals, "test1")
-}
-
-func (s *clientTestSuite) TestConn_Escape(c *C) {
- e := `""''\abc`
- str := fmt.Sprintf(`insert into mixer_test_conn (id, str) values(5, "%s")`,
- mysql.Escape(e))
-
- _, err := s.c.Execute(str)
- c.Assert(err, IsNil)
-
- str = `select str from mixer_test_conn where id = ?`
-
- r, err := s.c.Execute(str, 5)
- c.Assert(err, IsNil)
-
- ss, _ := r.GetString(0, 0)
- c.Assert(ss, Equals, e)
-}
-
-func (s *clientTestSuite) TestConn_SetCharset(c *C) {
- err := s.c.SetCharset("gb2312")
- c.Assert(err, IsNil)
-
- err = s.c.SetCharset("utf8")
- c.Assert(err, IsNil)
-}
-
-func (s *clientTestSuite) testStmt_DropTable(c *C) {
- str := `drop table if exists mixer_test_stmt`
-
- stmt, err := s.c.Prepare(str)
- c.Assert(err, IsNil)
-
- defer stmt.Close()
-
- _, err = stmt.Execute()
- c.Assert(err, IsNil)
-}
-
-func (s *clientTestSuite) testStmt_CreateTable(c *C) {
- str := `CREATE TABLE IF NOT EXISTS mixer_test_stmt (
- id BIGINT(64) UNSIGNED NOT NULL,
- str VARCHAR(256),
- f DOUBLE,
- e enum("test1", "test2"),
- u tinyint unsigned,
- i tinyint,
- PRIMARY KEY (id)
- ) ENGINE=InnoDB DEFAULT CHARSET=utf8`
-
- stmt, err := s.c.Prepare(str)
- c.Assert(err, IsNil)
-
- defer stmt.Close()
-
- _, err = stmt.Execute()
- c.Assert(err, IsNil)
-}
-
-func (s *clientTestSuite) TestStmt_Delete(c *C) {
- str := `delete from mixer_test_stmt`
-
- stmt, err := s.c.Prepare(str)
- c.Assert(err, IsNil)
-
- defer stmt.Close()
-
- _, err = stmt.Execute()
- c.Assert(err, IsNil)
-}
-
-func (s *clientTestSuite) TestStmt_Insert(c *C) {
- str := `insert into mixer_test_stmt (id, str, f, e, u, i) values (?, ?, ?, ?, ?, ?)`
-
- stmt, err := s.c.Prepare(str)
- c.Assert(err, IsNil)
-
- defer stmt.Close()
-
- r, err := stmt.Execute(1, "a", 3.14, "test1", 255, -127)
- c.Assert(err, IsNil)
-
- c.Assert(r.AffectedRows, Equals, uint64(1))
-}
-
-func (s *clientTestSuite) TestStmt_Select(c *C) {
- str := `select str, f, e from mixer_test_stmt where id = ?`
-
- stmt, err := s.c.Prepare(str)
- c.Assert(err, IsNil)
-
- defer stmt.Close()
-
- result, err := stmt.Execute(1)
- c.Assert(err, IsNil)
- c.Assert(result.Values, HasLen, 1)
- c.Assert(result.Fields, HasLen, 3)
-
- ss, _ := result.GetString(0, 0)
- c.Assert(ss, Equals, "a")
-
- f, _ := result.GetFloat(0, 1)
- c.Assert(f, Equals, float64(3.14))
-
- e, _ := result.GetString(0, 2)
- c.Assert(e, Equals, "test1")
-
- ss, _ = result.GetStringByName(0, "str")
- c.Assert(ss, Equals, "a")
-
- f, _ = result.GetFloatByName(0, "f")
- c.Assert(f, Equals, float64(3.14))
-
- e, _ = result.GetStringByName(0, "e")
- c.Assert(e, Equals, "test1")
-
-}
-
-func (s *clientTestSuite) TestStmt_NULL(c *C) {
- str := `insert into mixer_test_stmt (id, str, f, e) values (?, ?, ?, ?)`
-
- stmt, err := s.c.Prepare(str)
- c.Assert(err, IsNil)
-
- defer stmt.Close()
-
- result, err := stmt.Execute(2, nil, 3.14, nil)
- c.Assert(err, IsNil)
-
- c.Assert(result.AffectedRows, Equals, uint64(1))
-
- stmt.Close()
-
- str = `select * from mixer_test_stmt where id = ?`
- stmt, err = s.c.Prepare(str)
- defer stmt.Close()
-
- c.Assert(err, IsNil)
-
- result, err = stmt.Execute(2)
- b, err := result.IsNullByName(0, "id")
- c.Assert(err, IsNil)
- c.Assert(b, Equals, false)
-
- b, err = result.IsNullByName(0, "str")
- c.Assert(err, IsNil)
- c.Assert(b, Equals, true)
-
- b, err = result.IsNullByName(0, "f")
- c.Assert(err, IsNil)
- c.Assert(b, Equals, false)
-
- b, err = result.IsNullByName(0, "e")
- c.Assert(err, IsNil)
- c.Assert(b, Equals, true)
-}
-
-func (s *clientTestSuite) TestStmt_Unsigned(c *C) {
- str := `insert into mixer_test_stmt (id, u) values (?, ?)`
-
- stmt, err := s.c.Prepare(str)
- c.Assert(err, IsNil)
- defer stmt.Close()
-
- result, err := stmt.Execute(3, uint8(255))
- c.Assert(err, IsNil)
- c.Assert(result.AffectedRows, Equals, uint64(1))
-
- str = `select u from mixer_test_stmt where id = ?`
-
- stmt, err = s.c.Prepare(str)
- c.Assert(err, IsNil)
- defer stmt.Close()
-
- result, err = stmt.Execute(3)
- c.Assert(err, IsNil)
-
- u, err := result.GetUint(0, 0)
- c.Assert(err, IsNil)
- c.Assert(u, Equals, uint64(255))
-}
-
-func (s *clientTestSuite) TestStmt_Signed(c *C) {
- str := `insert into mixer_test_stmt (id, i) values (?, ?)`
-
- stmt, err := s.c.Prepare(str)
- c.Assert(err, IsNil)
- defer stmt.Close()
-
- _, err = stmt.Execute(4, 127)
- c.Assert(err, IsNil)
-
- _, err = stmt.Execute(uint64(18446744073709551516), int8(-128))
- c.Assert(err, IsNil)
-}
-
-func (s *clientTestSuite) TestStmt_Trans(c *C) {
- _, err := s.c.Execute(`insert into mixer_test_stmt (id, str) values (1002, "abc")`)
- c.Assert(err, IsNil)
-
- err = s.c.Begin()
- c.Assert(err, IsNil)
-
- str := `select str from mixer_test_stmt where id = ?`
-
- stmt, err := s.c.Prepare(str)
- c.Assert(err, IsNil)
-
- defer stmt.Close()
-
- _, err = stmt.Execute(1002)
- c.Assert(err, IsNil)
-
- err = s.c.Commit()
- c.Assert(err, IsNil)
-
- r, err := stmt.Execute(1002)
- c.Assert(err, IsNil)
-
- str, _ = r.GetString(0, 0)
- c.Assert(str, Equals, `abc`)
-}
\ No newline at end of file
diff --git a/vendor/github.com/siddontang/go-mysql/cmd/go-binlogparser/main.go b/vendor/github.com/siddontang/go-mysql/cmd/go-binlogparser/main.go
deleted file mode 100644
index fa5b5cf..0000000
--- a/vendor/github.com/siddontang/go-mysql/cmd/go-binlogparser/main.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package main
-
-import (
- "flag"
- "os"
-
- "github.com/siddontang/go-mysql/replication"
-)
-
-var name = flag.String("name", "", "binlog file name")
-var offset = flag.Int64("offset", 0, "parse start offset")
-
-func main() {
- flag.Parse()
-
- p := replication.NewBinlogParser()
-
- f := func(e *replication.BinlogEvent) error {
- e.Dump(os.Stdout)
- return nil
- }
-
- err := p.ParseFile(*name, *offset, f)
-
- if err != nil {
- println(err.Error())
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/cmd/go-canal/main.go b/vendor/github.com/siddontang/go-mysql/cmd/go-canal/main.go
deleted file mode 100644
index 6f60a4a..0000000
--- a/vendor/github.com/siddontang/go-mysql/cmd/go-canal/main.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package main
-
-import (
- "flag"
- "fmt"
- "os"
- "os/signal"
- "strings"
- "syscall"
- "time"
-
- "github.com/siddontang/go-mysql/canal"
- "github.com/siddontang/go-mysql/mysql"
-)
-
-var host = flag.String("host", "127.0.0.1", "MySQL host")
-var port = flag.Int("port", 3306, "MySQL port")
-var user = flag.String("user", "root", "MySQL user, must have replication privilege")
-var password = flag.String("password", "", "MySQL password")
-
-var flavor = flag.String("flavor", "mysql", "Flavor: mysql or mariadb")
-
-var serverID = flag.Int("server-id", 101, "Unique Server ID")
-var mysqldump = flag.String("mysqldump", "mysqldump", "mysqldump execution path")
-
-var dbs = flag.String("dbs", "test", "dump databases, seperated by comma")
-var tables = flag.String("tables", "", "dump tables, seperated by comma, will overwrite dbs")
-var tableDB = flag.String("table_db", "test", "database for dump tables")
-var ignoreTables = flag.String("ignore_tables", "", "ignore tables, must be database.table format, separated by comma")
-
-var startName = flag.String("bin_name", "", "start sync from binlog name")
-var startPos = flag.Uint("bin_pos", 0, "start sync from binlog position of")
-
-var heartbeatPeriod = flag.Duration("heartbeat", 60*time.Second, "master heartbeat period")
-var readTimeout = flag.Duration("read_timeout", 90*time.Second, "connection read timeout")
-
-func main() {
- flag.Parse()
-
- cfg := canal.NewDefaultConfig()
- cfg.Addr = fmt.Sprintf("%s:%d", *host, *port)
- cfg.User = *user
- cfg.Password = *password
- cfg.Flavor = *flavor
- cfg.UseDecimal = true
-
- cfg.ReadTimeout = *readTimeout
- cfg.HeartbeatPeriod = *heartbeatPeriod
- cfg.ServerID = uint32(*serverID)
- cfg.Dump.ExecutionPath = *mysqldump
- cfg.Dump.DiscardErr = false
-
- c, err := canal.NewCanal(cfg)
- if err != nil {
- fmt.Printf("create canal err %v", err)
- os.Exit(1)
- }
-
- if len(*ignoreTables) == 0 {
- subs := strings.Split(*ignoreTables, ",")
- for _, sub := range subs {
- if seps := strings.Split(sub, "."); len(seps) == 2 {
- c.AddDumpIgnoreTables(seps[0], seps[1])
- }
- }
- }
-
- if len(*tables) > 0 && len(*tableDB) > 0 {
- subs := strings.Split(*tables, ",")
- c.AddDumpTables(*tableDB, subs...)
- } else if len(*dbs) > 0 {
- subs := strings.Split(*dbs, ",")
- c.AddDumpDatabases(subs...)
- }
-
- c.SetEventHandler(&handler{})
-
- startPos := mysql.Position{
- Name: *startName,
- Pos: uint32(*startPos),
- }
-
- go func() {
- err = c.RunFrom(startPos)
- if err != nil {
- fmt.Printf("start canal err %v", err)
- }
- }()
-
- sc := make(chan os.Signal, 1)
- signal.Notify(sc,
- os.Kill,
- os.Interrupt,
- syscall.SIGHUP,
- syscall.SIGINT,
- syscall.SIGTERM,
- syscall.SIGQUIT)
-
- <-sc
-
- c.Close()
-}
-
-type handler struct {
- canal.DummyEventHandler
-}
-
-func (h *handler) OnRow(e *canal.RowsEvent) error {
- fmt.Printf("%v\n", e)
-
- return nil
-}
-
-func (h *handler) String() string {
- return "TestHandler"
-}
diff --git a/vendor/github.com/siddontang/go-mysql/cmd/go-mysqlbinlog/main.go b/vendor/github.com/siddontang/go-mysql/cmd/go-mysqlbinlog/main.go
deleted file mode 100644
index 2c19c87..0000000
--- a/vendor/github.com/siddontang/go-mysql/cmd/go-mysqlbinlog/main.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// go-mysqlbinlog: a simple binlog tool to sync remote MySQL binlog.
-// go-mysqlbinlog supports semi-sync mode like facebook mysqlbinlog.
-// see http://yoshinorimatsunobu.blogspot.com/2014/04/semi-synchronous-replication-at-facebook.html
-package main
-
-import (
- "context"
- "flag"
- "fmt"
- "os"
-
- "github.com/juju/errors"
- "github.com/siddontang/go-mysql/mysql"
- "github.com/siddontang/go-mysql/replication"
-)
-
-var host = flag.String("host", "127.0.0.1", "MySQL host")
-var port = flag.Int("port", 3306, "MySQL port")
-var user = flag.String("user", "root", "MySQL user, must have replication privilege")
-var password = flag.String("password", "", "MySQL password")
-
-var flavor = flag.String("flavor", "mysql", "Flavor: mysql or mariadb")
-
-var file = flag.String("file", "", "Binlog filename")
-var pos = flag.Int("pos", 4, "Binlog position")
-
-var semiSync = flag.Bool("semisync", false, "Support semi sync")
-var backupPath = flag.String("backup_path", "", "backup path to store binlog files")
-
-var rawMode = flag.Bool("raw", false, "Use raw mode")
-
-func main() {
- flag.Parse()
-
- cfg := replication.BinlogSyncerConfig{
- ServerID: 101,
- Flavor: *flavor,
-
- Host: *host,
- Port: uint16(*port),
- User: *user,
- Password: *password,
- RawModeEnabled: *rawMode,
- SemiSyncEnabled: *semiSync,
- UseDecimal: true,
- }
-
- b := replication.NewBinlogSyncer(cfg)
-
- pos := mysql.Position{Name: *file, Pos: uint32(*pos)}
- if len(*backupPath) > 0 {
- // Backup will always use RawMode.
- err := b.StartBackup(*backupPath, pos, 0)
- if err != nil {
- fmt.Printf("Start backup error: %v\n", errors.ErrorStack(err))
- return
- }
- } else {
- s, err := b.StartSync(pos)
- if err != nil {
- fmt.Printf("Start sync error: %v\n", errors.ErrorStack(err))
- return
- }
-
- for {
- e, err := s.GetEvent(context.Background())
- if err != nil {
- // Try to output all left events
- events := s.DumpEvents()
- for _, e := range events {
- e.Dump(os.Stdout)
- }
- fmt.Printf("Get event error: %v\n", errors.ErrorStack(err))
- return
- }
-
- e.Dump(os.Stdout)
- }
- }
-
-}
diff --git a/vendor/github.com/siddontang/go-mysql/cmd/go-mysqldump/main.go b/vendor/github.com/siddontang/go-mysql/cmd/go-mysqldump/main.go
deleted file mode 100644
index 80a145a..0000000
--- a/vendor/github.com/siddontang/go-mysql/cmd/go-mysqldump/main.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package main
-
-import (
- "flag"
- "fmt"
- "os"
- "strings"
-
- "github.com/juju/errors"
- "github.com/siddontang/go-mysql/dump"
-)
-
-var addr = flag.String("addr", "127.0.0.1:3306", "MySQL addr")
-var user = flag.String("user", "root", "MySQL user")
-var password = flag.String("password", "", "MySQL password")
-var execution = flag.String("exec", "mysqldump", "mysqldump execution path")
-var output = flag.String("o", "", "dump output, empty for stdout")
-
-var dbs = flag.String("dbs", "", "dump databases, seperated by comma")
-var tables = flag.String("tables", "", "dump tables, seperated by comma, will overwrite dbs")
-var tableDB = flag.String("table_db", "", "database for dump tables")
-var ignoreTables = flag.String("ignore_tables", "", "ignore tables, must be database.table format, separated by comma")
-
-func main() {
- flag.Parse()
-
- d, err := dump.NewDumper(*execution, *addr, *user, *password)
- if err != nil {
- fmt.Printf("Create Dumper error %v\n", errors.ErrorStack(err))
- os.Exit(1)
- }
-
- if len(*ignoreTables) == 0 {
- subs := strings.Split(*ignoreTables, ",")
- for _, sub := range subs {
- if seps := strings.Split(sub, "."); len(seps) == 2 {
- d.AddIgnoreTables(seps[0], seps[1])
- }
- }
- }
-
- if len(*tables) > 0 && len(*tableDB) > 0 {
- subs := strings.Split(*tables, ",")
- d.AddTables(*tableDB, subs...)
- } else if len(*dbs) > 0 {
- subs := strings.Split(*dbs, ",")
- d.AddDatabases(subs...)
- }
-
- var f = os.Stdout
-
- if len(*output) > 0 {
- f, err = os.OpenFile(*output, os.O_CREATE|os.O_WRONLY, 0644)
- if err != nil {
- fmt.Printf("Open file error %v\n", errors.ErrorStack(err))
- os.Exit(1)
- }
- }
-
- defer f.Close()
-
- if err = d.Dump(f); err != nil {
- fmt.Printf("Dump MySQL error %v\n", errors.ErrorStack(err))
- os.Exit(1)
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/docker/Makefile b/vendor/github.com/siddontang/go-mysql/docker/Makefile
deleted file mode 100644
index afce000..0000000
--- a/vendor/github.com/siddontang/go-mysql/docker/Makefile
+++ /dev/null
@@ -1,53 +0,0 @@
-MYSQL_IMAGE=siddontang/mysql:latest
-MARIADB_IMAGE=siddontang/mariadb:latest
-
-all: run_mysql1 run_mysql2 run_mysql3 run_mariadb1 run_mariadb2 run_mariadb3
-
-run_mysql1:
- @docker run -d -p 3306:3306 --name=mysql1 -e "GTID_MODE=on" -e "SERVER_ID=1" ${MYSQL_IMAGE}
-
-run_mysql2:
- @docker run -d -p 3307:3306 --name=mysql2 -e "GTID_MODE=on" -e "SERVER_ID=2" ${MYSQL_IMAGE}
-
-run_mysql3:
- @docker run -d -p 3308:3306 --name=mysql3 -e "GTID_MODE=on" -e "SERVER_ID=3" ${MYSQL_IMAGE}
-
-run_mariadb1:
- @docker run -d -p 3316:3306 --name=mariadb1 -e "SERVER_ID=4" ${MARIADB_IMAGE}
-
-run_mariadb2:
- @docker run -d -p 3317:3306 --name=mariadb2 -e "SERVER_ID=5" ${MARIADB_IMAGE}
-
-run_mariadb3:
- @docker run -d -p 3318:3306 --name=mariadb3 -e "SERVER_ID=6" ${MARIADB_IMAGE}
-
-image_mysql:
- @docker pull ${MYSQL_IMAGE}
-
-image_maridb:
- @docker pull ${MARIADB_IMAGE}
-
-image: image_mysql image_maridb
-
-stop_mysql1:
- @docker stop mysql1
-
-stop_mysql2:
- @docker stop mysql2
-
-stop_mysql3:
- @docker stop mysql3
-
-stop_mariadb1:
- @docker stop mariadb1
-
-stop_mariadb2:
- @docker stop mariadb2
-
-stop_mariadb3:
- @docker stop mariadb3
-
-stop: stop_mysql1 stop_mysql2 stop_mysql3 stop_mariadb1 stop_mariadb2 stop_mariadb3
-
-clean:
- @docker rm -f mysql1 mysql2 mysql3 mariadb1 mariadb2 mariadb3
diff --git a/vendor/github.com/siddontang/go-mysql/docker/docker-compose.yaml b/vendor/github.com/siddontang/go-mysql/docker/docker-compose.yaml
deleted file mode 100644
index 151786e..0000000
--- a/vendor/github.com/siddontang/go-mysql/docker/docker-compose.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-version: '3'
-services:
-
- mysql-5.5.61:
- image: "mysql:5.5.61"
- container_name: "mysql-server-5.5.61"
- ports:
- - "5561:3306"
- command: --ssl=TRUE --ssl-ca=/usr/local/mysql/ca.pem --ssl-cert=/usr/local/mysql/server-cert.pem --ssl-key=/usr/local/mysql/server-key.pem
- volumes:
- - ./resources/ca.pem:/usr/local/mysql/ca.pem
- - ./resources/server-cert.pem:/usr/local/mysql/server-cert.pem
- - ./resources/server-key.pem:/usr/local/mysql/server-key.pem
- environment:
- - MYSQL_ALLOW_EMPTY_PASSWORD=true
- - bind-address=0.0.0.0
-
- mysql-5.6.41:
- image: "mysql:5.6.41"
- container_name: "mysql-server-5.6.41"
- ports:
- - "5641:3306"
- command: --ssl=TRUE --ssl-ca=/usr/local/mysql/ca.pem --ssl-cert=/usr/local/mysql/server-cert.pem --ssl-key=/usr/local/mysql/server-key.pem
- volumes:
- - ./resources/ca.pem:/usr/local/mysql/ca.pem
- - ./resources/server-cert.pem:/usr/local/mysql/server-cert.pem
- - ./resources/server-key.pem:/usr/local/mysql/server-key.pem
- environment:
- - MYSQL_ALLOW_EMPTY_PASSWORD=true
- - bind-address=0.0.0.0
-
- mysql-default:
- image: "mysql:5.7.22"
- container_name: "mysql-server-default"
- ports:
- - "3306:3306"
- command: ["mysqld", "--log-bin=mysql-bin", "--server-id=1"]
- environment:
- - MYSQL_ALLOW_EMPTY_PASSWORD=true
- - bind-address=0.0.0.0
-
- mysql-5.7.22:
- image: "mysql:5.7.22"
- container_name: "mysql-server-5.7.22"
- ports:
- - "5722:3306"
- environment:
- - MYSQL_ALLOW_EMPTY_PASSWORD=true
- - bind-address=0.0.0.0
-
- mysql-8.0.3:
- image: "mysql:8.0.3"
- container_name: "mysql-server-8.0.3"
- ports:
- - "8003:3306"
- environment:
- - MYSQL_ALLOW_EMPTY_PASSWORD=true
- - bind-address=0.0.0.0
-
- mysql-8.0.12:
- image: "mysql:8.0.12"
- container_name: "mysql-server-8.0.12"
- ports:
- - "8012:3306"
- environment:
- #- MYSQL_ROOT_PASSWORD=abc123
- - MYSQL_ALLOW_EMPTY_PASSWORD=true
- - bind-address=0.0.0.0
-
- mysql-8.0.12-sha256:
- image: "mysql:8.0.12"
- container_name: "mysql-server-8.0.12-sha256"
- ports:
- - "8013:3306"
- entrypoint: ['/entrypoint.sh', '--default-authentication-plugin=sha256_password']
- environment:
- #- MYSQL_ROOT_PASSWORD=abc123
- - MYSQL_ALLOW_EMPTY_PASSWORD=true
- - bind-address=0.0.0.0
-
diff --git a/vendor/github.com/siddontang/go-mysql/docker/resources/ca.key b/vendor/github.com/siddontang/go-mysql/docker/resources/ca.key
deleted file mode 100644
index 8344ed2..0000000
--- a/vendor/github.com/siddontang/go-mysql/docker/resources/ca.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAsV6xlhFxMn14Pn7XBRGLt8/HXmhVVu20IKFgIOyX7gAZr0QL
-suT1fGf5zH9HrlgOMkfdhV847U03KPfUnBsi9lS6/xOxnH/OzTYM0WW0eNMGF7eo
-xrS64GSbPVX4pLi5+uwrrZT5HmDgZi49ANmuX6UYmH/eRRvSIoYUTV6t0aYsLyKv
-lpEAtRAe4AlKB236j5ggmJ36QUhTFTbeNbeOOgloTEdPK8Y/kgpnhiqzMdPqqIc7
-IeXUc456yX8MJUgniTM2qCNTFdEw+C2Ok0RbU6TI2SuEgVF4jtCcVEKxZ8kYbioO
-NaePQKFR/EhdXO+/ag1IEdXElH9knLOfB+zCgwIDAQABAoIBAC2U0jponRiGmgIl
-gohw6+D+6pNeaKAAUkwYbKXJZ3noWLFr4T3GDTg9WDqvcvJg+rT9NvZxdCW3tDc5
-CVBcwO1g9PVcUEaRqcme3EhrxKdQQ76QmjUGeQf1ktd+YnmiZ1kOnGLtZ9/gsYpQ
-06iGSIOX3+xA4BQOhEAPCOShMjYv+pWvWGhZCSmeulKulNVPBbG2H1I9EoT5Wd+Q
-8LUfgZOuUXrtcsuvEf2XeacCo0pUbjx8ErhDHP6aPasFAXq15Bm8DnsUOrrsjcLy
-sPy/mHwpd6kTw+O3EzjTdaYSFRoDSpfpIS5Bk+yicdxOmTwp1pzDu6HyYnuOnc9Q
-JQ8HvlECgYEA2z+1HKVz5k7NYyRihW4l30vAcAGcgG1RObB6DmLbGu4MPvMymgLO
-1QhYjlCcKfRHhVS2864op3Oba2fIgCc2am0DIQQ6kZ23ick78aj9G2ZXYpdpIPLu
-Kl1AZHj6XDrOPVqidwcE6iYHLLWp9x4Atgw5d44XmhQ0kwrqAfccOX8CgYEAzxnl
-7Uu+v5WI3hBVJpxS6eoS1TdztVIJaumyE43pBoHEuJrp4MRf0Lu2DiDpH8R3/RoE
-o+ykn6xzphYwUopYaCWzYTKoXvxCvmLkDjHcpdzLtwWbKG+MJih2nTADEDI7sK4e
-a3IU8miK6FeqkQHfs/5dlQa8q31yxiukw0qQEP0CgYAtLg6jTZD5l6mJUZkfx9f0
-EMciDaLzcBN54Nz2E/b0sLNDUZhO1l9K1QJyqTfVCWqnlhJxWqU0BIW1d1iA2BPF
-kJtBdX6gPTDyKs64eMtXlxpQzcSzLnxXrIm1apyk3tVbHU83WfHwUk/OLc1NiBg7
-a394HIbOkHVZC7m3F/Xv/wKBgQDHrM2du8D+kJs0l4SxxFjAxPlBb8R01tLTrNwP
-tGwu5OEZp+rE1jEXXFRMTPjXsyKI+hPtRJT4ilm6kXwnqNFSIL9RgHkLk6Z6T3hY
-I0T8+ePD43jURLBYffzW0tqxO+2HDGmx6H0/twHuv89pHehkb2Qk8ijoIvyNCrlB
-vVsntQKBgCK04nbb+G45D6TKCcZ6XKT/+qneJQE5cfvHl5EqrfjSmlnEUpJjJfyc
-6Q1PtXtWOtOScU93f1JKL7+JBbWDn9uBlboM8BSkAVVd/2vyg88RuEtIru1syxcW
-d1rMxqaMRJuhuqaS33CoPUpn30b4zVrPhQJ2+TwDAol4qIGHaie8
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/siddontang/go-mysql/docker/resources/ca.pem b/vendor/github.com/siddontang/go-mysql/docker/resources/ca.pem
deleted file mode 100644
index e251bd6..0000000
--- a/vendor/github.com/siddontang/go-mysql/docker/resources/ca.pem
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDtTCCAp2gAwIBAgIJANeS1FOzWXlZMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
-BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
-aWRnaXRzIFB0eSBMdGQwHhcNMTgwODE2MTUxNDE5WhcNMjEwNjA1MTUxNDE5WjBF
-MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50
-ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
-CgKCAQEAsV6xlhFxMn14Pn7XBRGLt8/HXmhVVu20IKFgIOyX7gAZr0QLsuT1fGf5
-zH9HrlgOMkfdhV847U03KPfUnBsi9lS6/xOxnH/OzTYM0WW0eNMGF7eoxrS64GSb
-PVX4pLi5+uwrrZT5HmDgZi49ANmuX6UYmH/eRRvSIoYUTV6t0aYsLyKvlpEAtRAe
-4AlKB236j5ggmJ36QUhTFTbeNbeOOgloTEdPK8Y/kgpnhiqzMdPqqIc7IeXUc456
-yX8MJUgniTM2qCNTFdEw+C2Ok0RbU6TI2SuEgVF4jtCcVEKxZ8kYbioONaePQKFR
-/EhdXO+/ag1IEdXElH9knLOfB+zCgwIDAQABo4GnMIGkMB0GA1UdDgQWBBQgHiwD
-00upIbCOunlK4HRw89DhjjB1BgNVHSMEbjBsgBQgHiwD00upIbCOunlK4HRw89Dh
-jqFJpEcwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNV
-BAoTGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZIIJANeS1FOzWXlZMAwGA1UdEwQF
-MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAFMZFQTFKU5tWIpWh8BbVZeVZcng0Kiq
-qwbhVwaTkqtfmbqw8/w+faOWylmLncQEMmgvnUltGMQlQKBwQM2byzPkz9phal3g
-uI0JWJYqtcMyIQUB9QbbhrDNC9kdt/ji/x6rrIqzaMRuiBXqH5LQ9h856yXzArqd
-cAQGzzYpbUCIv7ciSB93cKkU73fQLZVy5ZBy1+oAa1V9U4cb4G/20/PDmT+G3Gxz
-pEjeDKtz8XINoWgA2cSdfAhNZt5vqJaCIZ8qN0z6C7SUKwUBderERUMLUXdhUldC
-KTVHyEPvd0aULd5S5vEpKCnHcQmFcLdoN8t9k9pR9ZgwqXbyJHlxWFo=
------END CERTIFICATE-----
diff --git a/vendor/github.com/siddontang/go-mysql/docker/resources/client-cert.pem b/vendor/github.com/siddontang/go-mysql/docker/resources/client-cert.pem
deleted file mode 100644
index e478e78..0000000
--- a/vendor/github.com/siddontang/go-mysql/docker/resources/client-cert.pem
+++ /dev/null
@@ -1,19 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDBjCCAe4CCQDg06wCf7hcuTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB
-VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0
-cyBQdHkgTHRkMB4XDTE4MDgxOTA4NDY0N1oXDTI4MDgxNjA4NDY0N1owRTELMAkG
-A1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0
-IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-AMmivNyk3Rc1ZvLPhb3WPNkf9f2G4g9nMc0+eMrR1IKJ1U1A98ojeIBT+pfk1bSq
-Ol0UDm66Vd3YQ+4HpyYHaYV6mwoTEulL9Quk8RLa7TRwQu3PLi3o567RhVIrx8Z3
-umuWb9UUzJfSFH04Uy9+By4CJCqIQXU4BocLIKHhIkNjmAQ9fWO1hZ8zmPHSEfvu
-Wqa/DYKGvF0MJr4Lnkm/sKUd+O94p9suvwM6OGIDibACiKRF2H+JbgQLbA58zkLv
-DHtXOqsCL7HxiONX8VDrQjN/66Nh9omk/Bx2Ec8IqappHvWf768HSH79x/znaial
-VEV+6K0gP+voJHfnA10laWMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAPD+Fn1qj
-HN62GD3eIgx6wJxYuemhdbgmEwrZZf4V70lS6e9Iloif0nBiISDxJUpXVWNRCN3Z
-3QVC++F7deDmWL/3dSpXRvWsapzbCUhVQ2iBcnZ7QCOdvAqYR1ecZx70zvXCwBcd
-6XKmRtdeNV6B211KRFmTYtVyPq4rcWrkTPGwPBncJI1eQQmyFv2T9SwVVp96Nbrq
-sf7zrJGmuVCdXGPRi/ALVHtJCz6oPoft3I707eMe+ijnFqwGbmMD4fMD6Ync/hEz
-PyR5FMZkXSXHS0gkA5pfwW7wJ2WSWDhI6JMS1gbatY7QzgHbKoQpxBPUXlnzzj2h
-7O9cgFTh/XOZXQ==
------END CERTIFICATE-----
diff --git a/vendor/github.com/siddontang/go-mysql/docker/resources/client-key.pem b/vendor/github.com/siddontang/go-mysql/docker/resources/client-key.pem
deleted file mode 100644
index 996a97b..0000000
--- a/vendor/github.com/siddontang/go-mysql/docker/resources/client-key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAyaK83KTdFzVm8s+FvdY82R/1/YbiD2cxzT54ytHUgonVTUD3
-yiN4gFP6l+TVtKo6XRQObrpV3dhD7genJgdphXqbChMS6Uv1C6TxEtrtNHBC7c8u
-LejnrtGFUivHxne6a5Zv1RTMl9IUfThTL34HLgIkKohBdTgGhwsgoeEiQ2OYBD19
-Y7WFnzOY8dIR++5apr8Ngoa8XQwmvgueSb+wpR3473in2y6/Azo4YgOJsAKIpEXY
-f4luBAtsDnzOQu8Me1c6qwIvsfGI41fxUOtCM3/ro2H2iaT8HHYRzwipqmke9Z/v
-rwdIfv3H/OdqJqVURX7orSA/6+gkd+cDXSVpYwIDAQABAoIBAAGLY5L1GFRzLkSx
-3j5kA7dODV5RyC2CBtmhnt8+2DffwmiDFOLRfrzM5+B9+j0WCLhpzOqANuQqIesS
-1+7so5xIIiPjnYN393qNWuNgFe0O5xRXP+1OGWg3ZqQIfdFBXYYxcs3ZCPAoxctn
-wQteFcP+dDR3MrkpIrOqHCfhR5foieOMP+9k5kCjk+aZqhEmFyko+X+xVO/32xs+
-+3qXhUrHt3Op5on30QMOFguniQlYwLJkd9qVjGuGMIrVPxoUz0rya4SKrGKgkAr8
-mvQe2+sZo7cc6zC2ceaGMJU7z1RalTrCObbg5mynlu+Vf0E/YiES0abkQhSbcSB9
-mAkJC7ECgYEA/H1NDEiO164yYK9ji4HM/8CmHegWS4qsgrzAs8lU0yAcgdg9e19A
-mNi8yssfIBCw62RRE4UGWS5F82myhmvq/mXbf8eCJ2CMgdCHQh1rT7WFD/Uc5Pe/
-8Lv2jNMQ61POguPyq6D0qcf8iigKIMHa1MIgAOmrgWrxulfbSUhm370CgYEAzHBu
-J9p4dAqW32+Hrtv2XE0KUjH72TXr13WErosgeGTfsIW2exXByvLasxOJSY4Wb8oS
-OLZ7bgp/EBchAc7my+nF8n5uOJxipWQUB5BoeB9aUJZ9AnWF4RDl94Jlm5PYBG/J
-lRXrMtSTTIgmSw3Ft2A1vRMOQaHX89lNwOZL758CgYAXOT84/gOFexRPKFKzpkDA
-1WtyHMLQN/UeIVZoMwCGWtHEb6tYCa7bYDQdQwmd3Wsoe5WpgfbPhR4SAYrWKl72
-/09tNWCXVp4V4qRORH52Wm/ew+Dgfpk8/0zyLwfDXXYFPAo6Fxfp9ecYng4wbSQ/
-pYtkChooUTniteoJl4s+0QKBgHbFEpoAqF3yEPi52L/TdmrlLwvVkhT86IkB8xVc
-Kn8HS5VH+V3EpBN9x2SmAupCq/JCGRftnAOwAWWdqkVcqGTq6V8Z6HrnD8A6RhCm
-6qpuvI94/iNBl4fLw25pyRH7cFITh68fTsb3DKQ3rNeJpsYEFPRFb9Ddb5JxOmTI
-5nDNAoGBAM+SyOhUGU+0Uw2WJaGWzmEutjeMRr5Z+cZ8keC/ZJNdji/faaQoeOQR
-OXI8O6RBTBwVNQMyDyttT8J8BkISwfAhSdPkjgPw9GZ1pGREl53uCFDIlX2nvtQM
-ioNzG5WHB7Gd7eUUTA91kRF9MZJTHPqNiNGR0Udj/trGyGqJebni
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/siddontang/go-mysql/docker/resources/server-cert.pem b/vendor/github.com/siddontang/go-mysql/docker/resources/server-cert.pem
deleted file mode 100644
index 3cb3b9c..0000000
--- a/vendor/github.com/siddontang/go-mysql/docker/resources/server-cert.pem
+++ /dev/null
@@ -1,19 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDBjCCAe4CCQDg06wCf7hcuDANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB
-VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0
-cyBQdHkgTHRkMB4XDTE4MDgxOTA4NDUyNVoXDTI4MDgxNjA4NDUyNVowRTELMAkG
-A1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0
-IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-ALK2gqK4uvTlxJANO2JKdibvmh899z6oCo9Km0mz5unj4dpnq9hljsQuKtcHUcM4
-HXcE06knaJ4TOF7lcsjaqoDO7r/SaFgjjXCqNvHD0Su4B+7qe52BZZTRV1AANP10
-PvebarXSEzaZUCyHHhSF8+Qb4vX04XKX/TOqinTVGtlnduKzP5+qsaFBtpLAw1V0
-At9EQB5BgnTYtdIsmvD4/2WhBvOjVxab75yx0R4oof4F3u528tbEegcWhBtmy2Xd
-HI3S+TLljj3kOOdB+pgrVUl+KaDavWK3T+F1vTNDe56HEVNKeWlLy1scul61E0j9
-IkZAu6aRDxtKdl7bKu0BkzMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAma3yFqR7
-xkeaZBg4/1I3jSlaNe5+2JB4iybAkMOu77fG5zytLomTbzdhewsuBwpTVMJdga8T
-IdPeIFCin1U+5SkbjSMlpKf+krE+5CyrNJ5jAgO9ATIqx66oCTYXfGlNapGRLfSE
-sa0iMqCe/dr4GPU+flW2DZFWiyJVDSF1JjReQnfrWY+SD2SpP/lmlgltnY8MJngd
-xBLG5nsZCpUXGB713Q8ZyIm2ThVAMiskcxBleIZDDghLuhGvY/9eFJhZpvOkjWa6
-XGEi4E1G/SA+zVKFl41nHKCdqXdmIOnpcLlFBUVloQok5a95Kqc1TYw3f+WbdFff
-99dAgk3gWwWZQA==
------END CERTIFICATE-----
\ No newline at end of file
diff --git a/vendor/github.com/siddontang/go-mysql/docker/resources/server-key.pem b/vendor/github.com/siddontang/go-mysql/docker/resources/server-key.pem
deleted file mode 100644
index babaaae..0000000
--- a/vendor/github.com/siddontang/go-mysql/docker/resources/server-key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEAsraCori69OXEkA07Ykp2Ju+aHz33PqgKj0qbSbPm6ePh2mer
-2GWOxC4q1wdRwzgddwTTqSdonhM4XuVyyNqqgM7uv9JoWCONcKo28cPRK7gH7up7
-nYFllNFXUAA0/XQ+95tqtdITNplQLIceFIXz5Bvi9fThcpf9M6qKdNUa2Wd24rM/
-n6qxoUG2ksDDVXQC30RAHkGCdNi10iya8Pj/ZaEG86NXFpvvnLHRHiih/gXe7nby
-1sR6BxaEG2bLZd0cjdL5MuWOPeQ450H6mCtVSX4poNq9YrdP4XW9M0N7nocRU0p5
-aUvLWxy6XrUTSP0iRkC7ppEPG0p2Xtsq7QGTMwIDAQABAoIBAGh1m8hHWCg7gXh9
-838RbRx3IswuKS27hWiaQEiFWmzOIb7KqDy1qAxtu+ayRY1paHegH6QY/+Kd824s
-ibpzbgQacJ04/HrAVTVMmQ8Z2VLHoAN7lcPL1bd14aZGaLLZVtDeTDJ413grhxxv
-4ho27gcgcbo4Z+rWgk7H2WRPCAGYqWYAycm3yF5vy9QaO6edU+T588YsEQOos5iy
-5pVFSGDGZkcUp1ukL3BJYR+jvygn6WPCobQ/LScUdi+ucitaI9i+UdlLokZARVRG
-M/msqcTM73thR8yVRcexU6NUDxRBfZ/f7moSAEbBmGDXuxDcIyH9KGMQ2rMtN1X3
-lK8UNwkCgYEA2STJq/IUQHjdqd3Dqh/Q7Zm8/pMWFqLJSkqpnFtFsXPyUOx9zDOy
-KqkIfGeyKwvsj9X9BcZ0FUKj9zoct1/WpPY+h7i7+z0MIujBh4AMjAcDrt4o76yK
-UHuVmG2xKTdJoAbqOdToQeX6E82Ioal5pbB2W7AbCQScNBPZ52jxgtcCgYEA0rE7
-2dFiRm0YmuszFYxft2+GP6NgP3R2TQNEooi1uCXG2xgwObie1YCHzpZ5CfSqJIxP
-XB7DXpIWi7PxJoeai2F83LnmdFz6F1BPRobwDoSFNdaSKLg4Yf856zpgYNKhL1fE
-OoOXj4VBWBZh1XDfZV44fgwlMIf7edOF1XOagwUCgYAw953O+7FbdKYwF0V3iOM5
-oZDAK/UwN5eC/GFRVDfcM5RycVJRCVtlSWcTfuLr2C2Jpiz/72fgH34QU3eEVsV1
-v94MBznFB1hESw7ReqvZq/9FoO3EVrl+OtBaZmosLD6bKtQJJJ0Xtz/01UW5hxla
-pveZ55XBK9v51nwuNjk4UwKBgHD8fJUllSchUCWb5cwzeAz98Kdl7LJ6uQo5q2/i
-EllLYOWThiEeIYdrIuklholRPIDXAaPsF2c6vn5yo+q+o6EFSZlw0+YpCjDAb5Lp
-wAh5BprFk6HkkM/0t9Guf4rMyYWC8odSlE9x7YXYkuSMYDCTI4Zs6vCoq7I8PbQn
-B4AlAoGAZ6Ee5m/ph5UVp/3+cR6jCY7aHBUU/M3pbJSkVjBW+ymEBVJ6sUdz8k3P
-x8BiPEQggNN7faWBqRWP7KXPnDYHh6shYUgPJwI5HX6NE/ZDnnXjeysHRyf0oCo5
-S6tHXwHNKB5HS1c/KDyyNGjP2oi/MF4o/MGWNWEcK6TJA3RGOYM=
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/siddontang/go-mysql/driver/dirver_test.go b/vendor/github.com/siddontang/go-mysql/driver/dirver_test.go
deleted file mode 100644
index d43580f..0000000
--- a/vendor/github.com/siddontang/go-mysql/driver/dirver_test.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package driver
-
-import (
- "flag"
- "fmt"
- "testing"
-
- "github.com/jmoiron/sqlx"
- . "github.com/pingcap/check"
-)
-
-// Use docker mysql to test, mysql is 3306
-var testHost = flag.String("host", "127.0.0.1", "MySQL master host")
-// possible choices for different MySQL versions are: 5561,5641,3306,5722,8003,8012
-var testPort = flag.Int("port", 3306, "MySQL server port")
-var testUser = flag.String("user", "root", "MySQL user")
-var testPassword = flag.String("pass", "", "MySQL password")
-var testDB = flag.String("db", "test", "MySQL test database")
-
-func TestDriver(t *testing.T) {
- TestingT(t)
-}
-
-type testDriverSuite struct {
- db *sqlx.DB
-}
-
-var _ = Suite(&testDriverSuite{})
-
-func (s *testDriverSuite) SetUpSuite(c *C) {
- addr := fmt.Sprintf("%s:%d", *testHost, *testPort)
- dsn := fmt.Sprintf("%s:%s@%s?%s", *testUser, *testPassword, addr, *testDB)
-
- var err error
- s.db, err = sqlx.Open("mysql", dsn)
- c.Assert(err, IsNil)
-}
-
-func (s *testDriverSuite) TearDownSuite(c *C) {
- if s.db != nil {
- s.db.Close()
- }
-}
-
-func (s *testDriverSuite) TestConn(c *C) {
- var n int
- err := s.db.Get(&n, "SELECT 1")
- c.Assert(err, IsNil)
- c.Assert(n, Equals, 1)
-
- _, err = s.db.Exec("USE test")
- c.Assert(err, IsNil)
-}
-
-func (s *testDriverSuite) TestStmt(c *C) {
- stmt, err := s.db.Preparex("SELECT ? + ?")
- c.Assert(err, IsNil)
-
- var n int
- err = stmt.Get(&n, 1, 1)
- c.Assert(err, IsNil)
- c.Assert(n, Equals, 2)
-
- err = stmt.Close()
- c.Assert(err, IsNil)
-}
-
-func (s *testDriverSuite) TestTransaction(c *C) {
- tx, err := s.db.Beginx()
- c.Assert(err, IsNil)
-
- var n int
- err = tx.Get(&n, "SELECT 1")
- c.Assert(err, IsNil)
- c.Assert(n, Equals, 1)
-
- err = tx.Commit()
- c.Assert(err, IsNil)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/driver/driver.go b/vendor/github.com/siddontang/go-mysql/driver/driver.go
deleted file mode 100644
index e131548..0000000
--- a/vendor/github.com/siddontang/go-mysql/driver/driver.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// This package implements database/sql/driver interface,
-// so we can use go-mysql with database/sql
-package driver
-
-import (
- "database/sql"
- sqldriver "database/sql/driver"
- "fmt"
- "io"
- "strings"
-
- "github.com/juju/errors"
- "github.com/siddontang/go-mysql/client"
- "github.com/siddontang/go-mysql/mysql"
- "github.com/siddontang/go/hack"
-)
-
-type driver struct {
-}
-
-// DSN user:password@addr[?db]
-func (d driver) Open(dsn string) (sqldriver.Conn, error) {
- lastIndex := strings.LastIndex(dsn, "@")
- seps := []string{dsn[:lastIndex], dsn[lastIndex+1:]}
- if len(seps) != 2 {
- return nil, errors.Errorf("invalid dsn, must user:password@addr[?db]")
- }
-
- var user string
- var password string
- var addr string
- var db string
-
- if ss := strings.Split(seps[0], ":"); len(ss) == 2 {
- user, password = ss[0], ss[1]
- } else if len(ss) == 1 {
- user = ss[0]
- } else {
- return nil, errors.Errorf("invalid dsn, must user:password@addr[?db]")
- }
-
- if ss := strings.Split(seps[1], "?"); len(ss) == 2 {
- addr, db = ss[0], ss[1]
- } else if len(ss) == 1 {
- addr = ss[0]
- } else {
- return nil, errors.Errorf("invalid dsn, must user:password@addr[?db]")
- }
-
- c, err := client.Connect(addr, user, password, db)
- if err != nil {
- return nil, err
- }
-
- return &conn{c}, nil
-}
-
-type conn struct {
- *client.Conn
-}
-
-func (c *conn) Prepare(query string) (sqldriver.Stmt, error) {
- st, err := c.Conn.Prepare(query)
- if err != nil {
- return nil, errors.Trace(err)
- }
-
- return &stmt{st}, nil
-}
-
-func (c *conn) Close() error {
- return c.Conn.Close()
-}
-
-func (c *conn) Begin() (sqldriver.Tx, error) {
- err := c.Conn.Begin()
- if err != nil {
- return nil, errors.Trace(err)
- }
-
- return &tx{c.Conn}, nil
-}
-
-func buildArgs(args []sqldriver.Value) []interface{} {
- a := make([]interface{}, len(args))
-
- for i, arg := range args {
- a[i] = arg
- }
-
- return a
-}
-
-func replyError(err error) error {
- if mysql.ErrorEqual(err, mysql.ErrBadConn) {
- return sqldriver.ErrBadConn
- } else {
- return errors.Trace(err)
- }
-}
-
-func (c *conn) Exec(query string, args []sqldriver.Value) (sqldriver.Result, error) {
- a := buildArgs(args)
- r, err := c.Conn.Execute(query, a...)
- if err != nil {
- return nil, replyError(err)
- }
- return &result{r}, nil
-}
-
-func (c *conn) Query(query string, args []sqldriver.Value) (sqldriver.Rows, error) {
- a := buildArgs(args)
- r, err := c.Conn.Execute(query, a...)
- if err != nil {
- return nil, replyError(err)
- }
- return newRows(r.Resultset)
-}
-
-type stmt struct {
- *client.Stmt
-}
-
-func (s *stmt) Close() error {
- return s.Stmt.Close()
-}
-
-func (s *stmt) NumInput() int {
- return s.Stmt.ParamNum()
-}
-
-func (s *stmt) Exec(args []sqldriver.Value) (sqldriver.Result, error) {
- a := buildArgs(args)
- r, err := s.Stmt.Execute(a...)
- if err != nil {
- return nil, replyError(err)
- }
- return &result{r}, nil
-}
-
-func (s *stmt) Query(args []sqldriver.Value) (sqldriver.Rows, error) {
- a := buildArgs(args)
- r, err := s.Stmt.Execute(a...)
- if err != nil {
- return nil, replyError(err)
- }
- return newRows(r.Resultset)
-}
-
-type tx struct {
- *client.Conn
-}
-
-func (t *tx) Commit() error {
- return t.Conn.Commit()
-}
-
-func (t *tx) Rollback() error {
- return t.Conn.Rollback()
-}
-
-type result struct {
- *mysql.Result
-}
-
-func (r *result) LastInsertId() (int64, error) {
- return int64(r.Result.InsertId), nil
-}
-
-func (r *result) RowsAffected() (int64, error) {
- return int64(r.Result.AffectedRows), nil
-}
-
-type rows struct {
- *mysql.Resultset
-
- columns []string
- step int
-}
-
-func newRows(r *mysql.Resultset) (*rows, error) {
- if r == nil {
- return nil, fmt.Errorf("invalid mysql query, no correct result")
- }
-
- rs := new(rows)
- rs.Resultset = r
-
- rs.columns = make([]string, len(r.Fields))
-
- for i, f := range r.Fields {
- rs.columns[i] = hack.String(f.Name)
- }
- rs.step = 0
-
- return rs, nil
-}
-
-func (r *rows) Columns() []string {
- return r.columns
-}
-
-func (r *rows) Close() error {
- r.step = -1
- return nil
-}
-
-func (r *rows) Next(dest []sqldriver.Value) error {
- if r.step >= r.Resultset.RowNumber() {
- return io.EOF
- } else if r.step == -1 {
- return io.ErrUnexpectedEOF
- }
-
- for i := 0; i < r.Resultset.ColumnNumber(); i++ {
- value, err := r.Resultset.GetValue(r.step, i)
- if err != nil {
- return err
- }
-
- dest[i] = sqldriver.Value(value)
- }
-
- r.step++
-
- return nil
-}
-
-func init() {
- sql.Register("mysql", driver{})
-}
diff --git a/vendor/github.com/siddontang/go-mysql/dump/dump.go b/vendor/github.com/siddontang/go-mysql/dump/dump.go
deleted file mode 100644
index 1f8384d..0000000
--- a/vendor/github.com/siddontang/go-mysql/dump/dump.go
+++ /dev/null
@@ -1,220 +0,0 @@
-package dump
-
-import (
- "fmt"
- "io"
- "os"
- "os/exec"
- "strings"
-
- "github.com/juju/errors"
- "github.com/siddontang/go-log/log"
- . "github.com/siddontang/go-mysql/mysql"
-)
-
-// Unlick mysqldump, Dumper is designed for parsing and syning data easily.
-type Dumper struct {
- // mysqldump execution path, like mysqldump or /usr/bin/mysqldump, etc...
- ExecutionPath string
-
- Addr string
- User string
- Password string
-
- // Will override Databases
- Tables []string
- TableDB string
-
- Databases []string
-
- Where string
- Charset string
-
- IgnoreTables map[string][]string
-
- ErrOut io.Writer
-
- masterDataSkipped bool
- maxAllowedPacket int
- hexBlob bool
-}
-
-func NewDumper(executionPath string, addr string, user string, password string) (*Dumper, error) {
- if len(executionPath) == 0 {
- return nil, nil
- }
-
- path, err := exec.LookPath(executionPath)
- if err != nil {
- return nil, errors.Trace(err)
- }
-
- d := new(Dumper)
- d.ExecutionPath = path
- d.Addr = addr
- d.User = user
- d.Password = password
- d.Tables = make([]string, 0, 16)
- d.Databases = make([]string, 0, 16)
- d.Charset = DEFAULT_CHARSET
- d.IgnoreTables = make(map[string][]string)
- d.masterDataSkipped = false
-
- d.ErrOut = os.Stderr
-
- return d, nil
-}
-
-func (d *Dumper) SetCharset(charset string) {
- d.Charset = charset
-}
-
-func (d *Dumper) SetWhere(where string) {
- d.Where = where
-}
-
-func (d *Dumper) SetErrOut(o io.Writer) {
- d.ErrOut = o
-}
-
-// In some cloud MySQL, we have no privilege to use `--master-data`.
-func (d *Dumper) SkipMasterData(v bool) {
- d.masterDataSkipped = v
-}
-
-func (d *Dumper) SetMaxAllowedPacket(i int) {
- d.maxAllowedPacket = i
-}
-
-func (d *Dumper) SetHexBlob(v bool) {
- d.hexBlob = v
-}
-
-func (d *Dumper) AddDatabases(dbs ...string) {
- d.Databases = append(d.Databases, dbs...)
-}
-
-func (d *Dumper) AddTables(db string, tables ...string) {
- if d.TableDB != db {
- d.TableDB = db
- d.Tables = d.Tables[0:0]
- }
-
- d.Tables = append(d.Tables, tables...)
-}
-
-func (d *Dumper) AddIgnoreTables(db string, tables ...string) {
- t, _ := d.IgnoreTables[db]
- t = append(t, tables...)
- d.IgnoreTables[db] = t
-}
-
-func (d *Dumper) Reset() {
- d.Tables = d.Tables[0:0]
- d.TableDB = ""
- d.IgnoreTables = make(map[string][]string)
- d.Databases = d.Databases[0:0]
- d.Where = ""
-}
-
-func (d *Dumper) Dump(w io.Writer) error {
- args := make([]string, 0, 16)
-
- // Common args
- if strings.Contains(d.Addr, "/") {
- args = append(args, fmt.Sprintf("--socket=%s", d.Addr))
- } else {
- seps := strings.SplitN(d.Addr, ":", 2)
- args = append(args, fmt.Sprintf("--host=%s", seps[0]))
- if len(seps) > 1 {
- args = append(args, fmt.Sprintf("--port=%s", seps[1]))
- }
- }
-
- args = append(args, fmt.Sprintf("--user=%s", d.User))
- args = append(args, fmt.Sprintf("--password=%s", d.Password))
-
- if !d.masterDataSkipped {
- args = append(args, "--master-data")
- }
-
- if d.maxAllowedPacket > 0 {
- // mysqldump param should be --max-allowed-packet=%dM not be --max_allowed_packet=%dM
- args = append(args, fmt.Sprintf("--max-allowed-packet=%dM", d.maxAllowedPacket))
- }
-
- args = append(args, "--single-transaction")
- args = append(args, "--skip-lock-tables")
-
- // Disable uncessary data
- args = append(args, "--compact")
- args = append(args, "--skip-opt")
- args = append(args, "--quick")
-
- // We only care about data
- args = append(args, "--no-create-info")
-
- // Multi row is easy for us to parse the data
- args = append(args, "--skip-extended-insert")
-
- if d.hexBlob {
- // Use hex for the binary type
- args = append(args, "--hex-blob")
- }
-
- for db, tables := range d.IgnoreTables {
- for _, table := range tables {
- args = append(args, fmt.Sprintf("--ignore-table=%s.%s", db, table))
- }
- }
-
- if len(d.Charset) != 0 {
- args = append(args, fmt.Sprintf("--default-character-set=%s", d.Charset))
- }
-
- if len(d.Where) != 0 {
- args = append(args, fmt.Sprintf("--where=%s", d.Where))
- }
-
- if len(d.Tables) == 0 && len(d.Databases) == 0 {
- args = append(args, "--all-databases")
- } else if len(d.Tables) == 0 {
- args = append(args, "--databases")
- args = append(args, d.Databases...)
- } else {
- args = append(args, d.TableDB)
- args = append(args, d.Tables...)
-
- // If we only dump some tables, the dump data will not have database name
- // which makes us hard to parse, so here we add it manually.
-
- w.Write([]byte(fmt.Sprintf("USE `%s`;\n", d.TableDB)))
- }
-
- log.Infof("exec mysqldump with %v", args)
- cmd := exec.Command(d.ExecutionPath, args...)
-
- cmd.Stderr = d.ErrOut
- cmd.Stdout = w
-
- return cmd.Run()
-}
-
-// Dump MySQL and parse immediately
-func (d *Dumper) DumpAndParse(h ParseHandler) error {
- r, w := io.Pipe()
-
- done := make(chan error, 1)
- go func() {
- err := Parse(r, h, !d.masterDataSkipped)
- r.CloseWithError(err)
- done <- err
- }()
-
- err := d.Dump(w)
- w.CloseWithError(err)
-
- err = <-done
-
- return errors.Trace(err)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/dump/dump_test.go b/vendor/github.com/siddontang/go-mysql/dump/dump_test.go
deleted file mode 100644
index eed4c75..0000000
--- a/vendor/github.com/siddontang/go-mysql/dump/dump_test.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package dump
-
-import (
- "bytes"
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "strings"
- "testing"
-
- . "github.com/pingcap/check"
- "github.com/siddontang/go-mysql/client"
-)
-
-// use docker mysql for test
-var host = flag.String("host", "127.0.0.1", "MySQL host")
-var port = flag.Int("port", 3306, "MySQL host")
-
-var execution = flag.String("exec", "mysqldump", "mysqldump execution path")
-
-func Test(t *testing.T) {
- TestingT(t)
-}
-
-type schemaTestSuite struct {
- conn *client.Conn
- d *Dumper
-}
-
-var _ = Suite(&schemaTestSuite{})
-
-func (s *schemaTestSuite) SetUpSuite(c *C) {
- var err error
- s.conn, err = client.Connect(fmt.Sprintf("%s:%d", *host, *port), "root", "", "")
- c.Assert(err, IsNil)
-
- s.d, err = NewDumper(*execution, fmt.Sprintf("%s:%d", *host, *port), "root", "")
- c.Assert(err, IsNil)
- c.Assert(s.d, NotNil)
-
- s.d.SetCharset("utf8")
- s.d.SetErrOut(os.Stderr)
-
- _, err = s.conn.Execute("CREATE DATABASE IF NOT EXISTS test1")
- c.Assert(err, IsNil)
-
- _, err = s.conn.Execute("CREATE DATABASE IF NOT EXISTS test2")
- c.Assert(err, IsNil)
-
- str := `CREATE TABLE IF NOT EXISTS test%d.t%d (
- id int AUTO_INCREMENT,
- name varchar(256),
- PRIMARY KEY(id)
- ) ENGINE=INNODB`
- _, err = s.conn.Execute(fmt.Sprintf(str, 1, 1))
- c.Assert(err, IsNil)
-
- _, err = s.conn.Execute(fmt.Sprintf(str, 2, 1))
- c.Assert(err, IsNil)
-
- _, err = s.conn.Execute(fmt.Sprintf(str, 1, 2))
- c.Assert(err, IsNil)
-
- _, err = s.conn.Execute(fmt.Sprintf(str, 2, 2))
- c.Assert(err, IsNil)
-
- str = `INSERT INTO test%d.t%d (name) VALUES ("a"), ("b"), ("\\"), ("''")`
-
- _, err = s.conn.Execute(fmt.Sprintf(str, 1, 1))
- c.Assert(err, IsNil)
-
- _, err = s.conn.Execute(fmt.Sprintf(str, 2, 1))
- c.Assert(err, IsNil)
-
- _, err = s.conn.Execute(fmt.Sprintf(str, 1, 2))
- c.Assert(err, IsNil)
-
- _, err = s.conn.Execute(fmt.Sprintf(str, 2, 2))
- c.Assert(err, IsNil)
-}
-
-func (s *schemaTestSuite) TearDownSuite(c *C) {
- if s.conn != nil {
- _, err := s.conn.Execute("DROP DATABASE IF EXISTS test1")
- c.Assert(err, IsNil)
-
- _, err = s.conn.Execute("DROP DATABASE IF EXISTS test2")
- c.Assert(err, IsNil)
-
- s.conn.Close()
- }
-}
-
-func (s *schemaTestSuite) TestDump(c *C) {
- // Using mysql 5.7 can't work, error:
- // mysqldump: Error 1412: Table definition has changed,
- // please retry transaction when dumping table `test_replication` at row: 0
- // err := s.d.Dump(ioutil.Discard)
- // c.Assert(err, IsNil)
-
- s.d.AddDatabases("test1", "test2")
-
- s.d.AddIgnoreTables("test1", "t2")
-
- err := s.d.Dump(ioutil.Discard)
- c.Assert(err, IsNil)
-
- s.d.AddTables("test1", "t1")
-
- err = s.d.Dump(ioutil.Discard)
- c.Assert(err, IsNil)
-}
-
-type testParseHandler struct {
-}
-
-func (h *testParseHandler) BinLog(name string, pos uint64) error {
- return nil
-}
-
-func (h *testParseHandler) Data(schema string, table string, values []string) error {
- return nil
-}
-
-func (s *parserTestSuite) TestParseFindTable(c *C) {
- tbl := []struct {
- sql string
- table string
- }{
- {"INSERT INTO `note` VALUES ('title', 'here is sql: INSERT INTO `table` VALUES (\\'some value\\')');", "note"},
- {"INSERT INTO `note` VALUES ('1', '2', '3');", "note"},
- {"INSERT INTO `a.b` VALUES ('1');", "a.b"},
- }
-
- for _, t := range tbl {
- res := valuesExp.FindAllStringSubmatch(t.sql, -1)[0][1]
- c.Assert(res, Equals, t.table)
- }
-}
-
-type parserTestSuite struct {
-}
-
-var _ = Suite(&parserTestSuite{})
-
-func (s *parserTestSuite) TestUnescape(c *C) {
- tbl := []struct {
- escaped string
- expected string
- }{
- {`\\n`, `\n`},
- {`\\t`, `\t`},
- {`\\"`, `\"`},
- {`\\'`, `\'`},
- {`\\0`, `\0`},
- {`\\b`, `\b`},
- {`\\Z`, `\Z`},
- {`\\r`, `\r`},
- {`abc`, `abc`},
- {`abc\`, `abc`},
- {`ab\c`, `abc`},
- {`\abc`, `abc`},
- }
-
- for _, t := range tbl {
- unesacped := unescapeString(t.escaped)
- c.Assert(unesacped, Equals, t.expected)
- }
-}
-
-func (s *schemaTestSuite) TestParse(c *C) {
- var buf bytes.Buffer
-
- s.d.Reset()
-
- s.d.AddDatabases("test1", "test2")
-
- err := s.d.Dump(&buf)
- c.Assert(err, IsNil)
-
- err = Parse(&buf, new(testParseHandler), true)
- c.Assert(err, IsNil)
-}
-
-func (s *parserTestSuite) TestParseValue(c *C) {
- str := `'abc\\',''`
- values, err := parseValues(str)
- c.Assert(err, IsNil)
- c.Assert(values, DeepEquals, []string{`'abc\'`, `''`})
-
- str = `123,'\Z#÷QÎx£. Æ‘ÇoPâÅ_\r—\\','','qn'`
- values, err = parseValues(str)
- c.Assert(err, IsNil)
- c.Assert(values, HasLen, 4)
-
- str = `123,'\Z#÷QÎx£. Æ‘ÇoPâÅ_\r—\\','','qn\'`
- values, err = parseValues(str)
- c.Assert(err, NotNil)
-}
-
-func (s *parserTestSuite) TestParseLine(c *C) {
- lines := []struct {
- line string
- expected string
- }{
- {line: "INSERT INTO `test` VALUES (1, 'first', 'hello mysql; 2', 'e1', 'a,b');",
- expected: "1, 'first', 'hello mysql; 2', 'e1', 'a,b'"},
- {line: "INSERT INTO `test` VALUES (0x22270073646661736661736466, 'first', 'hello mysql; 2', 'e1', 'a,b');",
- expected: "0x22270073646661736661736466, 'first', 'hello mysql; 2', 'e1', 'a,b'"},
- }
-
- f := func(c rune) bool {
- return c == '\r' || c == '\n'
- }
-
- for _, t := range lines {
- l := strings.TrimRightFunc(t.line, f)
-
- m := valuesExp.FindAllStringSubmatch(l, -1)
-
- c.Assert(m, HasLen, 1)
- c.Assert(m[0][1], Matches, "test")
- c.Assert(m[0][2], Matches, t.expected)
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/dump/parser.go b/vendor/github.com/siddontang/go-mysql/dump/parser.go
deleted file mode 100644
index ad40925..0000000
--- a/vendor/github.com/siddontang/go-mysql/dump/parser.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package dump
-
-import (
- "bufio"
- "fmt"
- "io"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/juju/errors"
- "github.com/siddontang/go-mysql/mysql"
-)
-
-var (
- ErrSkip = errors.New("Handler error, but skipped")
-)
-
-type ParseHandler interface {
- // Parse CHANGE MASTER TO MASTER_LOG_FILE=name, MASTER_LOG_POS=pos;
- BinLog(name string, pos uint64) error
-
- Data(schema string, table string, values []string) error
-}
-
-var binlogExp *regexp.Regexp
-var useExp *regexp.Regexp
-var valuesExp *regexp.Regexp
-
-func init() {
- binlogExp = regexp.MustCompile("^CHANGE MASTER TO MASTER_LOG_FILE='(.+)', MASTER_LOG_POS=(\\d+);")
- useExp = regexp.MustCompile("^USE `(.+)`;")
- valuesExp = regexp.MustCompile("^INSERT INTO `(.+?)` VALUES \\((.+)\\);$")
-}
-
-// Parse the dump data with Dumper generate.
-// It can not parse all the data formats with mysqldump outputs
-func Parse(r io.Reader, h ParseHandler, parseBinlogPos bool) error {
- rb := bufio.NewReaderSize(r, 1024*16)
-
- var db string
- var binlogParsed bool
-
- for {
- line, err := rb.ReadString('\n')
- if err != nil && err != io.EOF {
- return errors.Trace(err)
- } else if mysql.ErrorEqual(err, io.EOF) {
- break
- }
-
- // Ignore '\n' on Linux or '\r\n' on Windows
- line = strings.TrimRightFunc(line, func(c rune) bool {
- return c == '\r' || c == '\n'
- })
-
- if parseBinlogPos && !binlogParsed {
- if m := binlogExp.FindAllStringSubmatch(line, -1); len(m) == 1 {
- name := m[0][1]
- pos, err := strconv.ParseUint(m[0][2], 10, 64)
- if err != nil {
- return errors.Errorf("parse binlog %v err, invalid number", line)
- }
-
- if err = h.BinLog(name, pos); err != nil && err != ErrSkip {
- return errors.Trace(err)
- }
-
- binlogParsed = true
- }
- }
-
- if m := useExp.FindAllStringSubmatch(line, -1); len(m) == 1 {
- db = m[0][1]
- }
-
- if m := valuesExp.FindAllStringSubmatch(line, -1); len(m) == 1 {
- table := m[0][1]
-
- values, err := parseValues(m[0][2])
- if err != nil {
- return errors.Errorf("parse values %v err", line)
- }
-
- if err = h.Data(db, table, values); err != nil && err != ErrSkip {
- return errors.Trace(err)
- }
- }
- }
-
- return nil
-}
-
-func parseValues(str string) ([]string, error) {
- // values are seperated by comma, but we can not split using comma directly
- // string is enclosed by single quote
-
- // a simple implementation, may be more robust later.
-
- values := make([]string, 0, 8)
-
- i := 0
- for i < len(str) {
- if str[i] != '\'' {
- // no string, read until comma
- j := i + 1
- for ; j < len(str) && str[j] != ','; j++ {
- }
- values = append(values, str[i:j])
- // skip ,
- i = j + 1
- } else {
- // read string until another single quote
- j := i + 1
-
- escaped := false
- for j < len(str) {
- if str[j] == '\\' {
- // skip escaped character
- j += 2
- escaped = true
- continue
- } else if str[j] == '\'' {
- break
- } else {
- j++
- }
- }
-
- if j >= len(str) {
- return nil, fmt.Errorf("parse quote values error")
- }
-
- value := str[i : j+1]
- if escaped {
- value = unescapeString(value)
- }
- values = append(values, value)
- // skip ' and ,
- i = j + 2
- }
-
- // need skip blank???
- }
-
- return values, nil
-}
-
-// unescapeString un-escapes the string.
-// mysqldump will escape the string when dumps,
-// Refer http://dev.mysql.com/doc/refman/5.7/en/string-literals.html
-func unescapeString(s string) string {
- i := 0
-
- value := make([]byte, 0, len(s))
- for i < len(s) {
- if s[i] == '\\' {
- j := i + 1
- if j == len(s) {
- // The last char is \, remove
- break
- }
-
- value = append(value, unescapeChar(s[j]))
- i += 2
- } else {
- value = append(value, s[i])
- i++
- }
- }
-
- return string(value)
-}
-
-func unescapeChar(ch byte) byte {
- // \" \' \\ \n \0 \b \Z \r \t ==> escape to one char
- switch ch {
- case 'n':
- ch = '\n'
- case '0':
- ch = 0
- case 'b':
- ch = 8
- case 'Z':
- ch = 26
- case 'r':
- ch = '\r'
- case 't':
- ch = '\t'
- }
- return ch
-}
diff --git a/vendor/github.com/siddontang/go-mysql/failover/const.go b/vendor/github.com/siddontang/go-mysql/failover/const.go
deleted file mode 100644
index 15e27d2..0000000
--- a/vendor/github.com/siddontang/go-mysql/failover/const.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package failover
-
-const (
- IOThreadType = "IO_THREAD"
- SQLThreadType = "SQL_THREAD"
-)
-
-const (
- GTIDModeOn = "ON"
- GTIDModeOff = "OFF"
-)
diff --git a/vendor/github.com/siddontang/go-mysql/failover/doc.go b/vendor/github.com/siddontang/go-mysql/failover/doc.go
deleted file mode 100644
index feb037d..0000000
--- a/vendor/github.com/siddontang/go-mysql/failover/doc.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Failover supports to promote a new master and let other slaves
-// replicate from it automatically.
-//
-// Failover does not support monitoring whether a master is alive or not,
-// and will think the master is down.
-//
-// This package is still in development and could not be used in production environment.
-package failover
diff --git a/vendor/github.com/siddontang/go-mysql/failover/failover.go b/vendor/github.com/siddontang/go-mysql/failover/failover.go
deleted file mode 100644
index 945c046..0000000
--- a/vendor/github.com/siddontang/go-mysql/failover/failover.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package failover
-
-import (
- "github.com/juju/errors"
- "github.com/siddontang/go-mysql/mysql"
-)
-
-// Failover will do below things after the master down
-// 1. Elect a slave which has the most up-to-date data with old master
-// 2. Promote the slave to new master
-// 3. Change other slaves to the new master
-//
-// Limitation:
-// 1, All slaves must have the same master before, Failover will check using master server id or uuid
-// 2, If the failover error, the whole topology may be wrong, we must handle this error manually
-// 3, Slaves must have same replication mode, all use GTID or not
-//
-func Failover(flavor string, slaves []*Server) ([]*Server, error) {
- var h Handler
- var err error
-
- switch flavor {
- case mysql.MySQLFlavor:
- h = new(MysqlGTIDHandler)
- case mysql.MariaDBFlavor:
- return nil, errors.Errorf("MariaDB failover is not supported now")
- default:
- return nil, errors.Errorf("invalid flavor %s", flavor)
- }
-
- // First check slaves use gtid or not
- if err := h.CheckGTIDMode(slaves); err != nil {
- return nil, errors.Trace(err)
- }
-
- // Stop all slave IO_THREAD and wait the relay log done
- for _, slave := range slaves {
- if err = h.WaitRelayLogDone(slave); err != nil {
- return nil, errors.Trace(err)
- }
- }
-
- var bestSlave *Server
- // Find best slave which has the most up-to-data data
- if bestSlaves, err := h.FindBestSlaves(slaves); err != nil {
- return nil, errors.Trace(err)
- } else {
- bestSlave = bestSlaves[0]
- }
-
- // Promote the best slave to master
- if err = h.Promote(bestSlave); err != nil {
- return nil, errors.Trace(err)
- }
-
- // Change master
- for i := 0; i < len(slaves); i++ {
- if bestSlave == slaves[i] {
- continue
- }
-
- if err = h.ChangeMasterTo(slaves[i], bestSlave); err != nil {
- return nil, errors.Trace(err)
- }
- }
-
- return slaves, nil
-}
diff --git a/vendor/github.com/siddontang/go-mysql/failover/failover_test.go b/vendor/github.com/siddontang/go-mysql/failover/failover_test.go
deleted file mode 100644
index f3e4f19..0000000
--- a/vendor/github.com/siddontang/go-mysql/failover/failover_test.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package failover
-
-import (
- "flag"
- "fmt"
- "testing"
-
- . "github.com/pingcap/check"
-)
-
-// We will use go-mysql docker to test
-// go-mysql docker will build mysql 1-3 instances
-var host = flag.String("host", "127.0.0.1", "go-mysql docker container address")
-var enable_failover_test = flag.Bool("test-failover", false, "enable test failover")
-
-func Test(t *testing.T) {
- TestingT(t)
-}
-
-type failoverTestSuite struct {
- s []*Server
-}
-
-var _ = Suite(&failoverTestSuite{})
-
-func (s *failoverTestSuite) SetUpSuite(c *C) {
- if !*enable_failover_test {
- c.Skip("skip test failover")
- }
-
- ports := []int{3306, 3307, 3308, 3316, 3317, 3318}
-
- s.s = make([]*Server, len(ports))
-
- for i := 0; i < len(ports); i++ {
- s.s[i] = NewServer(fmt.Sprintf("%s:%d", *host, ports[i]), User{"root", ""}, User{"root", ""})
- }
-
- var err error
- for i := 0; i < len(ports); i++ {
- err = s.s[i].StopSlave()
- c.Assert(err, IsNil)
-
- err = s.s[i].ResetSlaveALL()
- c.Assert(err, IsNil)
-
- _, err = s.s[i].Execute(`SET GLOBAL BINLOG_FORMAT = "ROW"`)
- c.Assert(err, IsNil)
-
- _, err = s.s[i].Execute("DROP TABLE IF EXISTS test.go_mysql_test")
- c.Assert(err, IsNil)
-
- _, err = s.s[i].Execute("CREATE TABLE IF NOT EXISTS test.go_mysql_test (id INT AUTO_INCREMENT, name VARCHAR(256), PRIMARY KEY(id)) engine=innodb")
- c.Assert(err, IsNil)
-
- err = s.s[i].ResetMaster()
- c.Assert(err, IsNil)
- }
-}
-
-func (s *failoverTestSuite) TearDownSuite(c *C) {
-}
-
-func (s *failoverTestSuite) TestMysqlFailover(c *C) {
- h := new(MysqlGTIDHandler)
-
- m := s.s[0]
- s1 := s.s[1]
- s2 := s.s[2]
-
- s.testFailover(c, h, m, s1, s2)
-}
-
-func (s *failoverTestSuite) TestMariadbFailover(c *C) {
- h := new(MariadbGTIDHandler)
-
- for i := 3; i <= 5; i++ {
- _, err := s.s[i].Execute("SET GLOBAL gtid_slave_pos = ''")
- c.Assert(err, IsNil)
- }
-
- m := s.s[3]
- s1 := s.s[4]
- s2 := s.s[5]
-
- s.testFailover(c, h, m, s1, s2)
-}
-
-func (s *failoverTestSuite) testFailover(c *C, h Handler, m *Server, s1 *Server, s2 *Server) {
- var err error
- err = h.ChangeMasterTo(s1, m)
- c.Assert(err, IsNil)
-
- err = h.ChangeMasterTo(s2, m)
- c.Assert(err, IsNil)
-
- id := s.checkInsert(c, m, "a")
-
- err = h.WaitCatchMaster(s1, m)
- c.Assert(err, IsNil)
-
- err = h.WaitCatchMaster(s2, m)
- c.Assert(err, IsNil)
-
- s.checkSelect(c, s1, id, "a")
- s.checkSelect(c, s2, id, "a")
-
- err = s2.StopSlaveIOThread()
- c.Assert(err, IsNil)
-
- id = s.checkInsert(c, m, "b")
- id = s.checkInsert(c, m, "c")
-
- err = h.WaitCatchMaster(s1, m)
- c.Assert(err, IsNil)
-
- s.checkSelect(c, s1, id, "c")
-
- best, err := h.FindBestSlaves([]*Server{s1, s2})
- c.Assert(err, IsNil)
- c.Assert(best, DeepEquals, []*Server{s1})
-
- // promote s1 to master
- err = h.Promote(s1)
- c.Assert(err, IsNil)
-
- // change s2 to master s1
- err = h.ChangeMasterTo(s2, s1)
- c.Assert(err, IsNil)
-
- err = h.WaitCatchMaster(s2, s1)
- c.Assert(err, IsNil)
-
- s.checkSelect(c, s2, id, "c")
-
- // change m to master s1
- err = h.ChangeMasterTo(m, s1)
- c.Assert(err, IsNil)
-
- m, s1 = s1, m
- id = s.checkInsert(c, m, "d")
-
- err = h.WaitCatchMaster(s1, m)
- c.Assert(err, IsNil)
-
- err = h.WaitCatchMaster(s2, m)
- c.Assert(err, IsNil)
-
- best, err = h.FindBestSlaves([]*Server{s1, s2})
- c.Assert(err, IsNil)
- c.Assert(best, DeepEquals, []*Server{s1, s2})
-
- err = s2.StopSlaveIOThread()
- c.Assert(err, IsNil)
-
- id = s.checkInsert(c, m, "e")
- err = h.WaitCatchMaster(s1, m)
-
- best, err = h.FindBestSlaves([]*Server{s1, s2})
- c.Assert(err, IsNil)
- c.Assert(best, DeepEquals, []*Server{s1})
-}
-
-func (s *failoverTestSuite) checkSelect(c *C, m *Server, id uint64, name string) {
- rr, err := m.Execute("SELECT name FROM test.go_mysql_test WHERE id = ?", id)
- c.Assert(err, IsNil)
- str, _ := rr.GetString(0, 0)
- c.Assert(str, Equals, name)
-}
-
-func (s *failoverTestSuite) checkInsert(c *C, m *Server, name string) uint64 {
- r, err := m.Execute("INSERT INTO test.go_mysql_test (name) VALUES (?)", name)
- c.Assert(err, IsNil)
-
- return r.InsertId
-}
diff --git a/vendor/github.com/siddontang/go-mysql/failover/handler.go b/vendor/github.com/siddontang/go-mysql/failover/handler.go
deleted file mode 100644
index f750767..0000000
--- a/vendor/github.com/siddontang/go-mysql/failover/handler.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package failover
-
-type Handler interface {
- // Promote slave s to master
- Promote(s *Server) error
-
- // Change slave s to master m and replicate from it
- ChangeMasterTo(s *Server, m *Server) error
-
- // Ensure all relay log done, it will stop slave IO_THREAD
- // You must start slave again if you want to do replication continuatively
- WaitRelayLogDone(s *Server) error
-
- // Wait until slave s catch all data from master m at current time
- WaitCatchMaster(s *Server, m *Server) error
-
- // Find best slave which has the most up-to-date data from master
- FindBestSlaves(slaves []*Server) ([]*Server, error)
-
- // Check all slaves have gtid enabled
- CheckGTIDMode(slaves []*Server) error
-}
diff --git a/vendor/github.com/siddontang/go-mysql/failover/mariadb_gtid_handler.go b/vendor/github.com/siddontang/go-mysql/failover/mariadb_gtid_handler.go
deleted file mode 100644
index 2798241..0000000
--- a/vendor/github.com/siddontang/go-mysql/failover/mariadb_gtid_handler.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package failover
-
-import (
- "fmt"
- "net"
-
- "github.com/juju/errors"
- . "github.com/siddontang/go-mysql/mysql"
-)
-
-// Limiatation
-// + Multi source replication is not supported
-// + Slave can not handle write transactions, so maybe readonly or strict_mode = 1 is better
-type MariadbGTIDHandler struct {
- Handler
-}
-
-func (h *MariadbGTIDHandler) Promote(s *Server) error {
- if err := h.WaitRelayLogDone(s); err != nil {
- return errors.Trace(err)
- }
-
- if err := s.StopSlave(); err != nil {
- return errors.Trace(err)
- }
-
- return nil
-}
-
-func (h *MariadbGTIDHandler) FindBestSlaves(slaves []*Server) ([]*Server, error) {
- bestSlaves := []*Server{}
-
- ps := make([]uint64, len(slaves))
-
- lastIndex := -1
- var seq uint64
-
- for i, slave := range slaves {
- rr, err := slave.Execute("SELECT @@gtid_current_pos")
-
- if err != nil {
- return nil, errors.Trace(err)
- }
-
- str, _ := rr.GetString(0, 0)
- if len(str) == 0 {
- seq = 0
- } else {
- g, err := ParseMariadbGTID(str)
- if err != nil {
- return nil, errors.Trace(err)
- }
-
- seq = g.SequenceNumber
- }
-
- ps[i] = seq
-
- if lastIndex == -1 {
- lastIndex = i
- bestSlaves = []*Server{slave}
- } else {
- if ps[lastIndex] < seq {
- lastIndex = i
- bestSlaves = []*Server{slave}
- } else if ps[lastIndex] == seq {
- // these two slaves have same data,
- bestSlaves = append(bestSlaves, slave)
- }
- }
- }
-
- return bestSlaves, nil
-}
-
-const changeMasterToWithCurrentPos = `CHANGE MASTER TO
- MASTER_HOST = "%s", MASTER_PORT = %s,
- MASTER_USER = "%s", MASTER_PASSWORD = "%s",
- MASTER_USE_GTID = current_pos`
-
-func (h *MariadbGTIDHandler) ChangeMasterTo(s *Server, m *Server) error {
- if err := h.WaitRelayLogDone(s); err != nil {
- return errors.Trace(err)
- }
-
- if err := s.StopSlave(); err != nil {
- return errors.Trace(err)
- }
-
- if err := s.ResetSlave(); err != nil {
- return errors.Trace(err)
- }
-
- host, port, _ := net.SplitHostPort(m.Addr)
-
- if _, err := s.Execute(fmt.Sprintf(changeMasterToWithCurrentPos,
- host, port, m.ReplUser.Name, m.ReplUser.Password)); err != nil {
- return errors.Trace(err)
- }
-
- if err := s.StartSlave(); err != nil {
- return errors.Trace(err)
- }
-
- return nil
-}
-
-func (h *MariadbGTIDHandler) WaitRelayLogDone(s *Server) error {
- if err := s.StopSlaveIOThread(); err != nil {
- return errors.Trace(err)
- }
-
- r, err := s.SlaveStatus()
- if err != nil {
- return errors.Trace(err)
- }
-
- fname, _ := r.GetStringByName(0, "Master_Log_File")
- pos, _ := r.GetIntByName(0, "Read_Master_Log_Pos")
-
- return s.MasterPosWait(Position{Name: fname, Pos: uint32(pos)}, 0)
-}
-
-func (h *MariadbGTIDHandler) WaitCatchMaster(s *Server, m *Server) error {
- r, err := m.Execute("SELECT @@gtid_binlog_pos")
- if err != nil {
- return errors.Trace(err)
- }
-
- pos, _ := r.GetString(0, 0)
-
- return h.waitUntilAfterGTID(s, pos)
-}
-
-func (h *MariadbGTIDHandler) CheckGTIDMode(slaves []*Server) error {
- return nil
-}
-
-func (h *MariadbGTIDHandler) waitUntilAfterGTID(s *Server, pos string) error {
- _, err := s.Execute(fmt.Sprintf("SELECT MASTER_GTID_WAIT('%s')", pos))
- return err
-}
diff --git a/vendor/github.com/siddontang/go-mysql/failover/mysql_gtid_handler.go b/vendor/github.com/siddontang/go-mysql/failover/mysql_gtid_handler.go
deleted file mode 100644
index 322913c..0000000
--- a/vendor/github.com/siddontang/go-mysql/failover/mysql_gtid_handler.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package failover
-
-import (
- "fmt"
- "net"
-
- "github.com/juju/errors"
- . "github.com/siddontang/go-mysql/mysql"
-)
-
-type MysqlGTIDHandler struct {
- Handler
-}
-
-func (h *MysqlGTIDHandler) Promote(s *Server) error {
- if err := h.WaitRelayLogDone(s); err != nil {
- return errors.Trace(err)
- }
-
- if err := s.StopSlave(); err != nil {
- return errors.Trace(err)
- }
-
- return nil
-}
-
-func (h *MysqlGTIDHandler) FindBestSlaves(slaves []*Server) ([]*Server, error) {
- // MHA use Relay_Master_Log_File and Exec_Master_Log_Pos to determind which is the best slave
-
- bestSlaves := []*Server{}
-
- ps := make([]Position, len(slaves))
-
- lastIndex := -1
-
- for i, slave := range slaves {
- pos, err := slave.FetchSlaveExecutePos()
-
- if err != nil {
- return nil, errors.Trace(err)
- }
-
- ps[i] = pos
-
- if lastIndex == -1 {
- lastIndex = i
- bestSlaves = []*Server{slave}
- } else {
- switch ps[lastIndex].Compare(pos) {
- case 1:
- //do nothing
- case -1:
- lastIndex = i
- bestSlaves = []*Server{slave}
- case 0:
- // these two slaves have same data,
- bestSlaves = append(bestSlaves, slave)
- }
- }
- }
-
- return bestSlaves, nil
-}
-
-const changeMasterToWithAuto = `CHANGE MASTER TO
- MASTER_HOST = "%s", MASTER_PORT = %s,
- MASTER_USER = "%s", MASTER_PASSWORD = "%s",
- MASTER_AUTO_POSITION = 1`
-
-func (h *MysqlGTIDHandler) ChangeMasterTo(s *Server, m *Server) error {
- if err := h.WaitRelayLogDone(s); err != nil {
- return errors.Trace(err)
- }
-
- if err := s.StopSlave(); err != nil {
- return errors.Trace(err)
- }
-
- if err := s.ResetSlave(); err != nil {
- return errors.Trace(err)
- }
-
- host, port, _ := net.SplitHostPort(m.Addr)
-
- if _, err := s.Execute(fmt.Sprintf(changeMasterToWithAuto,
- host, port, m.ReplUser.Name, m.ReplUser.Password)); err != nil {
- return errors.Trace(err)
- }
-
- if err := s.StartSlave(); err != nil {
- return errors.Trace(err)
- }
-
- return nil
-}
-
-func (h *MysqlGTIDHandler) WaitRelayLogDone(s *Server) error {
- if err := s.StopSlaveIOThread(); err != nil {
- return errors.Trace(err)
- }
-
- r, err := s.SlaveStatus()
- if err != nil {
- return errors.Trace(err)
- }
-
- retrieved, _ := r.GetStringByName(0, "Retrieved_Gtid_Set")
-
- // may only support MySQL version >= 5.6.9
- // see http://dev.mysql.com/doc/refman/5.6/en/gtid-functions.html
- return h.waitUntilAfterGTIDs(s, retrieved)
-}
-
-func (h *MysqlGTIDHandler) WaitCatchMaster(s *Server, m *Server) error {
- r, err := m.MasterStatus()
- if err != nil {
- return errors.Trace(err)
- }
-
- masterGTIDSet, _ := r.GetStringByName(0, "Executed_Gtid_Set")
-
- return h.waitUntilAfterGTIDs(s, masterGTIDSet)
-}
-
-func (h *MysqlGTIDHandler) CheckGTIDMode(slaves []*Server) error {
- for i := 0; i < len(slaves); i++ {
- mode, err := slaves[i].MysqlGTIDMode()
- if err != nil {
- return errors.Trace(err)
- } else if mode != GTIDModeOn {
- return errors.Errorf("%s use not GTID mode", slaves[i].Addr)
- }
- }
-
- return nil
-}
-
-func (h *MysqlGTIDHandler) waitUntilAfterGTIDs(s *Server, gtids string) error {
- _, err := s.Execute(fmt.Sprintf("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')", gtids))
- return err
-}
diff --git a/vendor/github.com/siddontang/go-mysql/failover/server.go b/vendor/github.com/siddontang/go-mysql/failover/server.go
deleted file mode 100644
index c02d6c8..0000000
--- a/vendor/github.com/siddontang/go-mysql/failover/server.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package failover
-
-import (
- "fmt"
-
- "github.com/siddontang/go-mysql/client"
- . "github.com/siddontang/go-mysql/mysql"
-)
-
-type User struct {
- Name string
- Password string
-}
-
-type Server struct {
- Addr string
-
- User User
- ReplUser User
-
- conn *client.Conn
-}
-
-func NewServer(addr string, user User, replUser User) *Server {
- s := new(Server)
-
- s.Addr = addr
-
- s.User = user
- s.ReplUser = replUser
-
- return s
-}
-
-func (s *Server) Close() {
- if s.conn != nil {
- s.conn.Close()
- }
-}
-
-func (s *Server) Execute(cmd string, args ...interface{}) (r *Result, err error) {
- retryNum := 3
- for i := 0; i < retryNum; i++ {
- if s.conn == nil {
- s.conn, err = client.Connect(s.Addr, s.User.Name, s.User.Password, "")
- if err != nil {
- return nil, err
- }
- }
-
- r, err = s.conn.Execute(cmd, args...)
- if err != nil && ErrorEqual(err, ErrBadConn) {
- return
- } else if ErrorEqual(err, ErrBadConn) {
- s.conn = nil
- continue
- } else {
- return
- }
- }
- return
-}
-
-func (s *Server) StartSlave() error {
- _, err := s.Execute("START SLAVE")
- return err
-}
-
-func (s *Server) StopSlave() error {
- _, err := s.Execute("STOP SLAVE")
- return err
-}
-
-func (s *Server) StopSlaveIOThread() error {
- _, err := s.Execute("STOP SLAVE IO_THREAD")
- return err
-}
-
-func (s *Server) SlaveStatus() (*Resultset, error) {
- r, err := s.Execute("SHOW SLAVE STATUS")
- if err != nil {
- return nil, err
- } else {
- return r.Resultset, nil
- }
-}
-
-func (s *Server) MasterStatus() (*Resultset, error) {
- r, err := s.Execute("SHOW MASTER STATUS")
- if err != nil {
- return nil, err
- } else {
- return r.Resultset, nil
- }
-}
-
-func (s *Server) ResetSlave() error {
- _, err := s.Execute("RESET SLAVE")
- return err
-}
-
-func (s *Server) ResetSlaveALL() error {
- _, err := s.Execute("RESET SLAVE ALL")
- return err
-}
-
-func (s *Server) ResetMaster() error {
- _, err := s.Execute("RESET MASTER")
- return err
-}
-
-func (s *Server) MysqlGTIDMode() (string, error) {
- r, err := s.Execute("SELECT @@gtid_mode")
- if err != nil {
- return GTIDModeOff, err
- }
- on, _ := r.GetString(0, 0)
- if on != GTIDModeOn {
- return GTIDModeOff, nil
- } else {
- return GTIDModeOn, nil
- }
-}
-
-func (s *Server) SetReadonly(b bool) error {
- var err error
- if b {
- _, err = s.Execute("SET GLOBAL read_only = ON")
- } else {
- _, err = s.Execute("SET GLOBAL read_only = OFF")
- }
- return err
-}
-
-func (s *Server) LockTables() error {
- _, err := s.Execute("FLUSH TABLES WITH READ LOCK")
- return err
-}
-
-func (s *Server) UnlockTables() error {
- _, err := s.Execute("UNLOCK TABLES")
- return err
-}
-
-// Get current binlog filename and position read from master
-func (s *Server) FetchSlaveReadPos() (Position, error) {
- r, err := s.SlaveStatus()
- if err != nil {
- return Position{}, err
- }
-
- fname, _ := r.GetStringByName(0, "Master_Log_File")
- pos, _ := r.GetIntByName(0, "Read_Master_Log_Pos")
-
- return Position{Name: fname, Pos: uint32(pos)}, nil
-}
-
-// Get current executed binlog filename and position from master
-func (s *Server) FetchSlaveExecutePos() (Position, error) {
- r, err := s.SlaveStatus()
- if err != nil {
- return Position{}, err
- }
-
- fname, _ := r.GetStringByName(0, "Relay_Master_Log_File")
- pos, _ := r.GetIntByName(0, "Exec_Master_Log_Pos")
-
- return Position{Name: fname, Pos: uint32(pos)}, nil
-}
-
-func (s *Server) MasterPosWait(pos Position, timeout int) error {
- _, err := s.Execute(fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d, %d)", pos.Name, pos.Pos, timeout))
- return err
-}
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/mariadb_gtid_test.go b/vendor/github.com/siddontang/go-mysql/mysql/mariadb_gtid_test.go
deleted file mode 100644
index 1455e26..0000000
--- a/vendor/github.com/siddontang/go-mysql/mysql/mariadb_gtid_test.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package mysql
-
-import (
- "github.com/pingcap/check"
-)
-
-type mariaDBTestSuite struct {
-}
-
-var _ = check.Suite(&mariaDBTestSuite{})
-
-func (t *mariaDBTestSuite) SetUpSuite(c *check.C) {
-
-}
-
-func (t *mariaDBTestSuite) TearDownSuite(c *check.C) {
-
-}
-
-func (t *mariaDBTestSuite) TestParseMariaDBGTID(c *check.C) {
- cases := []struct {
- gtidStr string
- hashError bool
- }{
- {"0-1-1", false},
- {"", false},
- {"0-1-1-1", true},
- {"1", true},
- {"0-1-seq", true},
- }
-
- for _, cs := range cases {
- gtid, err := ParseMariadbGTID(cs.gtidStr)
- if cs.hashError {
- c.Assert(err, check.NotNil)
- } else {
- c.Assert(err, check.IsNil)
- c.Assert(gtid.String(), check.Equals, cs.gtidStr)
- }
- }
-}
-
-func (t *mariaDBTestSuite) TestMariaDBGTIDConatin(c *check.C) {
- cases := []struct {
- originGTIDStr, otherGTIDStr string
- contain bool
- }{
- {"0-1-1", "0-1-2", false},
- {"0-1-1", "", true},
- {"2-1-1", "1-1-1", false},
- {"1-2-1", "1-1-1", true},
- {"1-2-2", "1-1-1", true},
- }
-
- for _, cs := range cases {
- originGTID, err := ParseMariadbGTID(cs.originGTIDStr)
- c.Assert(err, check.IsNil)
- otherGTID, err := ParseMariadbGTID(cs.otherGTIDStr)
- c.Assert(err, check.IsNil)
-
- c.Assert(originGTID.Contain(otherGTID), check.Equals, cs.contain)
- }
-}
-
-func (t *mariaDBTestSuite) TestMariaDBGTIDClone(c *check.C) {
- gtid, err := ParseMariadbGTID("1-1-1")
- c.Assert(err, check.IsNil)
-
- clone := gtid.Clone()
- c.Assert(gtid, check.DeepEquals, clone)
-}
-
-func (t *mariaDBTestSuite) TestMariaDBForward(c *check.C) {
- cases := []struct {
- currentGTIDStr, newerGTIDStr string
- hashError bool
- }{
- {"0-1-1", "0-1-2", false},
- {"0-1-1", "", false},
- {"2-1-1", "1-1-1", true},
- {"1-2-1", "1-1-1", false},
- {"1-2-2", "1-1-1", false},
- }
-
- for _, cs := range cases {
- currentGTID, err := ParseMariadbGTID(cs.currentGTIDStr)
- c.Assert(err, check.IsNil)
- newerGTID, err := ParseMariadbGTID(cs.newerGTIDStr)
- c.Assert(err, check.IsNil)
-
- err = currentGTID.forward(newerGTID)
- if cs.hashError {
- c.Assert(err, check.NotNil)
- c.Assert(currentGTID.String(), check.Equals, cs.currentGTIDStr)
- } else {
- c.Assert(err, check.IsNil)
- c.Assert(currentGTID.String(), check.Equals, cs.newerGTIDStr)
- }
- }
-}
-
-func (t *mariaDBTestSuite) TestParseMariaDBGTIDSet(c *check.C) {
- cases := []struct {
- gtidStr string
- subGTIDs map[uint32]string //domain ID => gtid string
- expectedStr []string // test String()
- hasError bool
- }{
- {"0-1-1", map[uint32]string{0: "0-1-1"}, []string{"0-1-1"}, false},
- {"", nil, []string{""}, false},
- {"0-1-1,1-2-3", map[uint32]string{0: "0-1-1", 1: "1-2-3"}, []string{"0-1-1,1-2-3", "1-2-3,0-1-1"}, false},
- {"0-1--1", nil, nil, true},
- }
-
- for _, cs := range cases {
- gtidSet, err := ParseMariadbGTIDSet(cs.gtidStr)
- if cs.hasError {
- c.Assert(err, check.NotNil)
- } else {
- c.Assert(err, check.IsNil)
- mariadbGTIDSet, ok := gtidSet.(*MariadbGTIDSet)
- c.Assert(ok, check.IsTrue)
-
- // check sub gtid
- c.Assert(mariadbGTIDSet.Sets, check.HasLen, len(cs.subGTIDs))
- for domainID, gtid := range mariadbGTIDSet.Sets {
- c.Assert(mariadbGTIDSet.Sets, check.HasKey, domainID)
- c.Assert(gtid.String(), check.Equals, cs.subGTIDs[domainID])
- }
-
- // check String() function
- inExpectedResult := false
- actualStr := mariadbGTIDSet.String()
- for _, str := range cs.expectedStr {
- if str == actualStr {
- inExpectedResult = true
- break
- }
- }
- c.Assert(inExpectedResult, check.IsTrue)
- }
- }
-}
-
-func (t *mariaDBTestSuite) TestMariaDBGTIDSetUpdate(c *check.C) {
- cases := []struct {
- isNilGTID bool
- gtidStr string
- subGTIDs map[uint32]string
- }{
- {true, "", map[uint32]string{1: "1-1-1", 2: "2-2-2"}},
- {false, "1-2-2", map[uint32]string{1: "1-2-2", 2: "2-2-2"}},
- {false, "1-2-1", map[uint32]string{1: "1-2-1", 2: "2-2-2"}},
- {false, "3-2-1", map[uint32]string{1: "1-1-1", 2: "2-2-2", 3: "3-2-1"}},
- }
-
- for _, cs := range cases {
- gtidSet, err := ParseMariadbGTIDSet("1-1-1,2-2-2")
- c.Assert(err, check.IsNil)
- mariadbGTIDSet, ok := gtidSet.(*MariadbGTIDSet)
- c.Assert(ok, check.IsTrue)
-
- if cs.isNilGTID {
- c.Assert(mariadbGTIDSet.AddSet(nil), check.IsNil)
- } else {
- err := gtidSet.Update(cs.gtidStr)
- c.Assert(err, check.IsNil)
- }
- // check sub gtid
- c.Assert(mariadbGTIDSet.Sets, check.HasLen, len(cs.subGTIDs))
- for domainID, gtid := range mariadbGTIDSet.Sets {
- c.Assert(mariadbGTIDSet.Sets, check.HasKey, domainID)
- c.Assert(gtid.String(), check.Equals, cs.subGTIDs[domainID])
- }
- }
-}
-
-func (t *mariaDBTestSuite) TestMariaDBGTIDSetEqual(c *check.C) {
- cases := []struct {
- originGTIDStr, otherGTIDStr string
- equals bool
- }{
- {"", "", true},
- {"1-1-1", "1-1-1,2-2-2", false},
- {"1-1-1,2-2-2", "1-1-1", false},
- {"1-1-1,2-2-2", "1-1-1,2-2-2", true},
- {"1-1-1,2-2-2", "1-1-1,2-2-3", false},
- }
-
- for _, cs := range cases {
- originGTID, err := ParseMariadbGTIDSet(cs.originGTIDStr)
- c.Assert(err, check.IsNil)
-
- otherGTID, err := ParseMariadbGTIDSet(cs.otherGTIDStr)
- c.Assert(err, check.IsNil)
-
- c.Assert(originGTID.Equal(otherGTID), check.Equals, cs.equals)
- }
-}
-
-func (t *mariaDBTestSuite) TestMariaDBGTIDSetContain(c *check.C) {
- cases := []struct {
- originGTIDStr, otherGTIDStr string
- contain bool
- }{
- {"", "", true},
- {"1-1-1", "1-1-1,2-2-2", false},
- {"1-1-1,2-2-2", "1-1-1", true},
- {"1-1-1,2-2-2", "1-1-1,2-2-2", true},
- {"1-1-1,2-2-2", "1-1-1,2-2-1", true},
- {"1-1-1,2-2-2", "1-1-1,2-2-3", false},
- }
-
- for _, cs := range cases {
- originGTIDSet, err := ParseMariadbGTIDSet(cs.originGTIDStr)
- c.Assert(err, check.IsNil)
-
- otherGTIDSet, err := ParseMariadbGTIDSet(cs.otherGTIDStr)
- c.Assert(err, check.IsNil)
-
- c.Assert(originGTIDSet.Contain(otherGTIDSet), check.Equals, cs.contain)
- }
-}
-
-func (t *mariaDBTestSuite) TestMariaDBGTIDSetClone(c *check.C) {
- cases := []string{"", "1-1-1", "1-1-1,2-2-2"}
-
- for _, str := range cases {
- gtidSet, err := ParseMariadbGTIDSet(str)
- c.Assert(err, check.IsNil)
-
- c.Assert(gtidSet.Clone(), check.DeepEquals, gtidSet)
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/mysql_test.go b/vendor/github.com/siddontang/go-mysql/mysql/mysql_test.go
deleted file mode 100644
index df4b206..0000000
--- a/vendor/github.com/siddontang/go-mysql/mysql/mysql_test.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package mysql
-
-import (
- "strings"
- "testing"
-
- "github.com/pingcap/check"
-)
-
-func Test(t *testing.T) {
- check.TestingT(t)
-}
-
-type mysqlTestSuite struct {
-}
-
-var _ = check.Suite(&mysqlTestSuite{})
-
-func (t *mysqlTestSuite) SetUpSuite(c *check.C) {
-
-}
-
-func (t *mysqlTestSuite) TearDownSuite(c *check.C) {
-
-}
-
-func (t *mysqlTestSuite) TestMysqlGTIDInterval(c *check.C) {
- i, err := parseInterval("1-2")
- c.Assert(err, check.IsNil)
- c.Assert(i, check.DeepEquals, Interval{1, 3})
-
- i, err = parseInterval("1")
- c.Assert(err, check.IsNil)
- c.Assert(i, check.DeepEquals, Interval{1, 2})
-
- i, err = parseInterval("1-1")
- c.Assert(err, check.IsNil)
- c.Assert(i, check.DeepEquals, Interval{1, 2})
-
- i, err = parseInterval("1-2")
- c.Assert(err, check.IsNil)
-}
-
-func (t *mysqlTestSuite) TestMysqlGTIDIntervalSlice(c *check.C) {
- i := IntervalSlice{Interval{1, 2}, Interval{2, 4}, Interval{2, 3}}
- i.Sort()
- c.Assert(i, check.DeepEquals, IntervalSlice{Interval{1, 2}, Interval{2, 3}, Interval{2, 4}})
- n := i.Normalize()
- c.Assert(n, check.DeepEquals, IntervalSlice{Interval{1, 4}})
-
- i = IntervalSlice{Interval{1, 2}, Interval{3, 5}, Interval{1, 3}}
- i.Sort()
- c.Assert(i, check.DeepEquals, IntervalSlice{Interval{1, 2}, Interval{1, 3}, Interval{3, 5}})
- n = i.Normalize()
- c.Assert(n, check.DeepEquals, IntervalSlice{Interval{1, 5}})
-
- i = IntervalSlice{Interval{1, 2}, Interval{4, 5}, Interval{1, 3}}
- i.Sort()
- c.Assert(i, check.DeepEquals, IntervalSlice{Interval{1, 2}, Interval{1, 3}, Interval{4, 5}})
- n = i.Normalize()
- c.Assert(n, check.DeepEquals, IntervalSlice{Interval{1, 3}, Interval{4, 5}})
-
- i = IntervalSlice{Interval{1, 4}, Interval{2, 3}}
- i.Sort()
- c.Assert(i, check.DeepEquals, IntervalSlice{Interval{1, 4}, Interval{2, 3}})
- n = i.Normalize()
- c.Assert(n, check.DeepEquals, IntervalSlice{Interval{1, 4}})
-
- n1 := IntervalSlice{Interval{1, 3}, Interval{4, 5}}
- n2 := IntervalSlice{Interval{1, 2}}
-
- c.Assert(n1.Contain(n2), check.Equals, true)
- c.Assert(n2.Contain(n1), check.Equals, false)
-
- n1 = IntervalSlice{Interval{1, 3}, Interval{4, 5}}
- n2 = IntervalSlice{Interval{1, 6}}
-
- c.Assert(n1.Contain(n2), check.Equals, false)
- c.Assert(n2.Contain(n1), check.Equals, true)
-}
-
-func (t *mysqlTestSuite) TestMysqlGTIDCodec(c *check.C) {
- us, err := ParseUUIDSet("de278ad0-2106-11e4-9f8e-6edd0ca20947:1-2")
- c.Assert(err, check.IsNil)
-
- c.Assert(us.String(), check.Equals, "de278ad0-2106-11e4-9f8e-6edd0ca20947:1-2")
-
- buf := us.Encode()
- err = us.Decode(buf)
- c.Assert(err, check.IsNil)
-
- gs, err := ParseMysqlGTIDSet("de278ad0-2106-11e4-9f8e-6edd0ca20947:1-2,de278ad0-2106-11e4-9f8e-6edd0ca20948:1-2")
- c.Assert(err, check.IsNil)
-
- buf = gs.Encode()
- o, err := DecodeMysqlGTIDSet(buf)
- c.Assert(err, check.IsNil)
- c.Assert(gs, check.DeepEquals, o)
-}
-
-func (t *mysqlTestSuite) TestMysqlUpdate(c *check.C) {
- g1, err := ParseMysqlGTIDSet("3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57")
- c.Assert(err, check.IsNil)
-
- g1.Update("3E11FA47-71CA-11E1-9E33-C80AA9429562:21-58")
-
- c.Assert(strings.ToUpper(g1.String()), check.Equals, "3E11FA47-71CA-11E1-9E33-C80AA9429562:21-58")
-}
-
-func (t *mysqlTestSuite) TestMysqlGTIDContain(c *check.C) {
- g1, err := ParseMysqlGTIDSet("3E11FA47-71CA-11E1-9E33-C80AA9429562:23")
- c.Assert(err, check.IsNil)
-
- g2, err := ParseMysqlGTIDSet("3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57")
- c.Assert(err, check.IsNil)
-
- c.Assert(g2.Contain(g1), check.Equals, true)
- c.Assert(g1.Contain(g2), check.Equals, false)
-}
-
-func (t *mysqlTestSuite) TestMysqlParseBinaryInt8(c *check.C) {
- i8 := ParseBinaryInt8([]byte{128})
- c.Assert(i8, check.Equals, int8(-128))
-}
-
-func (t *mysqlTestSuite) TestMysqlParseBinaryUint8(c *check.C) {
- u8 := ParseBinaryUint8([]byte{128})
- c.Assert(u8, check.Equals, uint8(128))
-}
-
-func (t *mysqlTestSuite) TestMysqlParseBinaryInt16(c *check.C) {
- i16 := ParseBinaryInt16([]byte{1, 128})
- c.Assert(i16, check.Equals, int16(-128*256+1))
-}
-
-func (t *mysqlTestSuite) TestMysqlParseBinaryUint16(c *check.C) {
- u16 := ParseBinaryUint16([]byte{1, 128})
- c.Assert(u16, check.Equals, uint16(128*256+1))
-}
-
-func (t *mysqlTestSuite) TestMysqlParseBinaryInt24(c *check.C) {
- i32 := ParseBinaryInt24([]byte{1, 2, 128})
- c.Assert(i32, check.Equals, int32(-128*65536+2*256+1))
-}
-
-func (t *mysqlTestSuite) TestMysqlParseBinaryUint24(c *check.C) {
- u32 := ParseBinaryUint24([]byte{1, 2, 128})
- c.Assert(u32, check.Equals, uint32(128*65536+2*256+1))
-}
-
-func (t *mysqlTestSuite) TestMysqlParseBinaryInt32(c *check.C) {
- i32 := ParseBinaryInt32([]byte{1, 2, 3, 128})
- c.Assert(i32, check.Equals, int32(-128*16777216+3*65536+2*256+1))
-}
-
-func (t *mysqlTestSuite) TestMysqlParseBinaryUint32(c *check.C) {
- u32 := ParseBinaryUint32([]byte{1, 2, 3, 128})
- c.Assert(u32, check.Equals, uint32(128*16777216+3*65536+2*256+1))
-}
-
-func (t *mysqlTestSuite) TestMysqlParseBinaryInt64(c *check.C) {
- i64 := ParseBinaryInt64([]byte{1, 2, 3, 4, 5, 6, 7, 128})
- c.Assert(i64, check.Equals, -128*int64(72057594037927936)+7*int64(281474976710656)+6*int64(1099511627776)+5*int64(4294967296)+4*16777216+3*65536+2*256+1)
-}
-
-func (t *mysqlTestSuite) TestMysqlParseBinaryUint64(c *check.C) {
- u64 := ParseBinaryUint64([]byte{1, 2, 3, 4, 5, 6, 7, 128})
- c.Assert(u64, check.Equals, 128*uint64(72057594037927936)+7*uint64(281474976710656)+6*uint64(1099511627776)+5*uint64(4294967296)+4*16777216+3*65536+2*256+1)
-}
-
-func (t *mysqlTestSuite) TestErrorCode(c *check.C) {
- tbls := []struct {
- msg string
- code int
- }{
- {"ERROR 1094 (HY000): Unknown thread id: 1094", 1094},
- {"error string", 0},
- {"abcdefg", 0},
- {"123455 ks094", 0},
- {"ERROR 1046 (3D000): Unknown error 1046", 1046},
- }
- for _, v := range tbls {
- c.Assert(ErrorCode(v.msg), check.Equals, v.code)
- }
-}
-
-func (t *mysqlTestSuite) TestMysqlNullDecode(c *check.C) {
- _, isNull, n := LengthEncodedInt([]byte{0xfb})
-
- c.Assert(isNull, check.IsTrue)
- c.Assert(n, check.Equals, 1)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/position.go b/vendor/github.com/siddontang/go-mysql/mysql/position.go
deleted file mode 100644
index bee5485..0000000
--- a/vendor/github.com/siddontang/go-mysql/mysql/position.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package mysql
-
-import (
- "fmt"
-)
-
-// For binlog filename + position based replication
-type Position struct {
- Name string
- Pos uint32
-}
-
-func (p Position) Compare(o Position) int {
- // First compare binlog name
- if p.Name > o.Name {
- return 1
- } else if p.Name < o.Name {
- return -1
- } else {
- // Same binlog file, compare position
- if p.Pos > o.Pos {
- return 1
- } else if p.Pos < o.Pos {
- return -1
- } else {
- return 0
- }
- }
-}
-
-func (p Position) String() string {
- return fmt.Sprintf("(%s, %d)", p.Name, p.Pos)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/packet/conn.go b/vendor/github.com/siddontang/go-mysql/packet/conn.go
deleted file mode 100644
index 41b1bf1..0000000
--- a/vendor/github.com/siddontang/go-mysql/packet/conn.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package packet
-
-import "C"
-import (
- "bytes"
- "io"
- "net"
-
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/x509"
- "encoding/pem"
-
- "github.com/juju/errors"
- . "github.com/siddontang/go-mysql/mysql"
-)
-
-/*
- Conn is the base class to handle MySQL protocol.
-*/
-type Conn struct {
- net.Conn
-
- // we removed the buffer reader because it will cause the SSLRequest to block (tls connection handshake won't be
- // able to read the "Client Hello" data since it has been buffered into the buffer reader)
-
- Sequence uint8
-}
-
-func NewConn(conn net.Conn) *Conn {
- c := new(Conn)
-
- c.Conn = conn
-
- return c
-}
-
-func (c *Conn) ReadPacket() ([]byte, error) {
- var buf bytes.Buffer
-
- if err := c.ReadPacketTo(&buf); err != nil {
- return nil, errors.Trace(err)
- } else {
- return buf.Bytes(), nil
- }
-}
-
-func (c *Conn) ReadPacketTo(w io.Writer) error {
- header := []byte{0, 0, 0, 0}
-
- if _, err := io.ReadFull(c.Conn, header); err != nil {
- return ErrBadConn
- }
-
- length := int(uint32(header[0]) | uint32(header[1])<<8 | uint32(header[2])<<16)
- // bug fixed: caching_sha2_password will send 0-length payload (the unscrambled password) when the password is empty
- //if length < 1 {
- // return errors.Errorf("invalid payload length %d", length)
- //}
-
- sequence := uint8(header[3])
-
- if sequence != c.Sequence {
- return errors.Errorf("invalid sequence %d != %d", sequence, c.Sequence)
- }
-
- c.Sequence++
-
- if n, err := io.CopyN(w, c.Conn, int64(length)); err != nil {
- return ErrBadConn
- } else if n != int64(length) {
- return ErrBadConn
- } else {
- if length < MaxPayloadLen {
- return nil
- }
-
- if err := c.ReadPacketTo(w); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// data already has 4 bytes header
-// will modify data inplace
-func (c *Conn) WritePacket(data []byte) error {
- length := len(data) - 4
-
- for length >= MaxPayloadLen {
- data[0] = 0xff
- data[1] = 0xff
- data[2] = 0xff
-
- data[3] = c.Sequence
-
- if n, err := c.Write(data[:4+MaxPayloadLen]); err != nil {
- return ErrBadConn
- } else if n != (4 + MaxPayloadLen) {
- return ErrBadConn
- } else {
- c.Sequence++
- length -= MaxPayloadLen
- data = data[MaxPayloadLen:]
- }
- }
-
- data[0] = byte(length)
- data[1] = byte(length >> 8)
- data[2] = byte(length >> 16)
- data[3] = c.Sequence
-
- if n, err := c.Write(data); err != nil {
- return ErrBadConn
- } else if n != len(data) {
- return ErrBadConn
- } else {
- c.Sequence++
- return nil
- }
-}
-
-// Client clear text authentication packet
-// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
-func (c *Conn) WriteClearAuthPacket(password string) error {
- // Calculate the packet length and add a tailing 0
- pktLen := len(password) + 1
- data := make([]byte, 4 + pktLen)
-
- // Add the clear password [null terminated string]
- copy(data[4:], password)
- data[4+pktLen-1] = 0x00
-
- return c.WritePacket(data)
-}
-
-// Caching sha2 authentication. Public key request and send encrypted password
-// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
-func (c *Conn) WritePublicKeyAuthPacket(password string, cipher []byte) error {
- // request public key
- data := make([]byte, 4 + 1)
- data[4] = 2 // cachingSha2PasswordRequestPublicKey
- c.WritePacket(data)
-
- data, err := c.ReadPacket()
- if err != nil {
- return err
- }
-
- block, _ := pem.Decode(data[1:])
- pub, err := x509.ParsePKIXPublicKey(block.Bytes)
- if err != nil {
- return err
- }
-
- plain := make([]byte, len(password)+1)
- copy(plain, password)
- for i := range plain {
- j := i % len(cipher)
- plain[i] ^= cipher[j]
- }
- sha1v := sha1.New()
- enc, _ := rsa.EncryptOAEP(sha1v, rand.Reader, pub.(*rsa.PublicKey), plain, nil)
- data = make([]byte, 4 + len(enc))
- copy(data[4:], enc)
- return c.WritePacket(data)
-}
-
-func (c *Conn) WriteEncryptedPassword(password string, seed []byte, pub *rsa.PublicKey) error {
- enc, err := EncryptPassword(password, seed, pub)
- if err != nil {
- return err
- }
- return c.WriteAuthSwitchPacket(enc, false)
-}
-
-// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
-func (c *Conn) WriteAuthSwitchPacket(authData []byte, addNUL bool) error {
- pktLen := 4 + len(authData)
- if addNUL {
- pktLen++
- }
- data := make([]byte, pktLen)
-
- // Add the auth data [EOF]
- copy(data[4:], authData)
- if addNUL {
- data[pktLen-1] = 0x00
- }
-
- return c.WritePacket(data)
-}
-
-func (c *Conn) ResetSequence() {
- c.Sequence = 0
-}
-
-func (c *Conn) Close() error {
- c.Sequence = 0
- if c.Conn != nil {
- return c.Conn.Close()
- }
- return nil
-}
diff --git a/vendor/github.com/siddontang/go-mysql/replication/backup_test.go b/vendor/github.com/siddontang/go-mysql/replication/backup_test.go
deleted file mode 100644
index 5e39e7f..0000000
--- a/vendor/github.com/siddontang/go-mysql/replication/backup_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package replication
-
-import (
- "context"
- "github.com/juju/errors"
- . "github.com/pingcap/check"
- "github.com/siddontang/go-mysql/mysql"
- "os"
- "sync"
- "time"
-)
-
-func (t *testSyncerSuite) TestStartBackupEndInGivenTime(c *C) {
- t.setupTest(c, mysql.MySQLFlavor)
-
- t.testExecute(c, "RESET MASTER")
-
- var wg sync.WaitGroup
- wg.Add(1)
- defer wg.Wait()
-
- go func() {
- defer wg.Done()
-
- t.testSync(c, nil)
-
- t.testExecute(c, "FLUSH LOGS")
-
- t.testSync(c, nil)
- }()
-
- os.RemoveAll("./var")
- timeout := 2 * time.Second
-
- done := make(chan bool)
-
- go func() {
- err := t.b.StartBackup("./var", mysql.Position{Name: "", Pos: uint32(0)}, timeout)
- c.Assert(err, IsNil)
- done <- true
- }()
- failTimeout := 5 * timeout
- ctx, _ := context.WithTimeout(context.Background(), failTimeout)
- select {
- case <-done:
- return
- case <-ctx.Done():
- c.Assert(errors.New("time out error"), IsNil)
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/replication/parser_test.go b/vendor/github.com/siddontang/go-mysql/replication/parser_test.go
deleted file mode 100644
index d4efc98..0000000
--- a/vendor/github.com/siddontang/go-mysql/replication/parser_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package replication
-
-import (
- . "github.com/pingcap/check"
-)
-
-func (t *testSyncerSuite) TestIndexOutOfRange(c *C) {
- parser := NewBinlogParser()
-
- parser.format = &FormatDescriptionEvent{
- Version: 0x4,
- ServerVersion: []uint8{0x35, 0x2e, 0x36, 0x2e, 0x32, 0x30, 0x2d, 0x6c, 0x6f, 0x67, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
- CreateTimestamp: 0x0,
- EventHeaderLength: 0x13,
- EventTypeHeaderLengths: []uint8{0x38, 0xd, 0x0, 0x8, 0x0, 0x12, 0x0, 0x4, 0x4, 0x4, 0x4, 0x12, 0x0, 0x0, 0x5c, 0x0, 0x4, 0x1a, 0x8, 0x0, 0x0, 0x0, 0x8, 0x8, 0x8, 0x2, 0x0, 0x0, 0x0, 0xa, 0xa, 0xa, 0x19, 0x19, 0x0},
- ChecksumAlgorithm: 0x1,
- }
-
- parser.tables = map[uint64]*TableMapEvent{
- 0x3043b: &TableMapEvent{tableIDSize: 6, TableID: 0x3043b, Flags: 0x1, Schema: []uint8{0x73, 0x65, 0x69, 0x75, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72}, Table: []uint8{0x61, 0x70, 0x70, 0x5f, 0x63, 0x72, 0x6f, 0x6e}, ColumnCount: 0x15, ColumnType: []uint8{0x3, 0xf, 0xc, 0xc, 0xf, 0x3, 0xc, 0x3, 0xfc, 0xf, 0x1, 0xfe, 0x2, 0xc, 0xf, 0xf, 0xc, 0xf, 0xf, 0x3, 0xf}, ColumnMeta: []uint16{0x0, 0x180, 0x0, 0x0, 0x2fd, 0x0, 0x0, 0x0, 0x2, 0x180, 0x0, 0xfe78, 0x0, 0x0, 0x180, 0x180, 0x0, 0x180, 0x180, 0x0, 0x2fd}, NullBitmap: []uint8{0xf8, 0xfb, 0x17}},
- 0x30453: &TableMapEvent{tableIDSize: 6, TableID: 0x30453, Flags: 0x1, Schema: []uint8{0x73, 0x65, 0x69, 0x75, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72}, Table: []uint8{0x73, 0x74, 0x67, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x75, 0x70}, ColumnCount: 0x36, ColumnType: []uint8{0x3, 0x3, 0x3, 0x3, 0x3, 0xf, 0xf, 0x8, 0x3, 0x3, 0x3, 0xf, 0xf, 0x1, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xfe, 0x12, 0xf, 0xf, 0xf, 0xf6, 0x1, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xfe, 0xf6, 0x12, 0x3, 0xf, 0xf, 0x1, 0x1, 0x12, 0xf, 0xf, 0xf, 0xf, 0x3, 0xf, 0x3}, ColumnMeta: []uint16{0x0, 0x0, 0x0, 0x0, 0x0, 0x2fd, 0x12c, 0x0, 0x0, 0x0, 0x0, 0x180, 0x180, 0x0, 0x30, 0x180, 0x180, 0x180, 0x30, 0xc0, 0xfe03, 0x0, 0x180, 0x180, 0x180, 0xc02, 0x0, 0x5a, 0x5a, 0x5a, 0x5a, 0x2fd, 0x2fd, 0x2fd, 0xc0, 0x12c, 0x30, 0xc, 0xfe06, 0xb02, 0x0, 0x0, 0x180, 0x180, 0x0, 0x0, 0x0, 0x180, 0x180, 0x2d, 0x2fd, 0x0, 0x2fd, 0x0}, NullBitmap: []uint8{0xee, 0xdf, 0xff, 0xff, 0xff, 0xff, 0x17}},
- 0x30504: &TableMapEvent{tableIDSize: 6, TableID: 0x30504, Flags: 0x1, Schema: []uint8{0x73, 0x65, 0x69, 0x75, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72}, Table: []uint8{0x6c, 0x6f, 0x67, 0x5f, 0x73, 0x74, 0x67, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x75, 0x70}, ColumnCount: 0x13, ColumnType: []uint8{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0xf, 0xc, 0xc, 0xc, 0xf, 0xf, 0x3, 0xf}, ColumnMeta: []uint16{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x180, 0x0, 0x0, 0x0, 0x180, 0x180, 0x0, 0x2fd}, NullBitmap: []uint8{0x6, 0xfb, 0x5}},
- 0x30450: &TableMapEvent{tableIDSize: 6, TableID: 0x30450, Flags: 0x1, Schema: []uint8{0x73, 0x65, 0x69, 0x75, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72}, Table: []uint8{0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74}, ColumnCount: 0x16, ColumnType: []uint8{0x3, 0xfc, 0xc, 0x3, 0xc, 0xf, 0x3, 0xf, 0xc, 0xf, 0xf, 0xf, 0xf, 0x3, 0xc, 0xf, 0xf, 0xf, 0xf, 0x3, 0x3, 0xf}, ColumnMeta: []uint16{0x0, 0x2, 0x0, 0x0, 0x0, 0x2d, 0x0, 0x180, 0x0, 0x180, 0x180, 0x2fd, 0x2d, 0x0, 0x0, 0x180, 0x180, 0x2fd, 0x2d, 0x0, 0x0, 0x2fd}, NullBitmap: []uint8{0xfe, 0xff, 0x2f}},
- 0x305bb: &TableMapEvent{tableIDSize: 6, TableID: 0x305bb, Flags: 0x1, Schema: []uint8{0x79, 0x6d, 0x63, 0x61, 0x63, 0x68, 0x67, 0x6f}, Table: []uint8{0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x6c, 0x6f, 0x67}, ColumnCount: 0x11, ColumnType: []uint8{0x3, 0x3, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xc, 0xf, 0xf, 0xc, 0xf, 0xf, 0x3, 0xf}, ColumnMeta: []uint16{0x0, 0x0, 0x2fd, 0x12c, 0x2fd, 0x2fd, 0x2d, 0x12c, 0x2fd, 0x0, 0x180, 0x180, 0x0, 0x180, 0x180, 0x0, 0x2fd}, NullBitmap: []uint8{0xfe, 0x7f, 0x1}},
- 0x16c36b: &TableMapEvent{tableIDSize: 6, TableID: 0x16c36b, Flags: 0x1, Schema: []uint8{0x61, 0x63, 0x70}, Table: []uint8{0x73, 0x74, 0x67, 0x5f, 0x6d, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x32}, ColumnCount: 0xe, ColumnType: []uint8{0x8, 0x8, 0x3, 0x3, 0x2, 0x2, 0xf, 0x12, 0xf, 0xf, 0x12, 0xf, 0xf, 0xf}, ColumnMeta: []uint16{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2d, 0x0, 0x180, 0x180, 0x0, 0x180, 0x180, 0x2fd}, NullBitmap: []uint8{0xba, 0x3f}},
- 0x16c368: &TableMapEvent{tableIDSize: 6, TableID: 0x16c368, Flags: 0x1, Schema: []uint8{0x73, 0x65, 0x69, 0x75, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72}, Table: []uint8{0x73, 0x74, 0x67, 0x5f, 0x6d, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x32}, ColumnCount: 0xe, ColumnType: []uint8{0x8, 0x8, 0x3, 0x3, 0x2, 0x2, 0xf, 0x12, 0xf, 0xf, 0x12, 0xf, 0xf, 0xf}, ColumnMeta: []uint16{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2d, 0x0, 0x180, 0x180, 0x0, 0x180, 0x180, 0x2fd}, NullBitmap: []uint8{0xba, 0x3f}},
- 0x3045a: &TableMapEvent{tableIDSize: 6, TableID: 0x3045a, Flags: 0x1, Schema: []uint8{0x73, 0x65, 0x69, 0x75, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72}, Table: []uint8{0x63, 0x6f, 0x6e, 0x73}, ColumnCount: 0x1e, ColumnType: []uint8{0x3, 0x3, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xfe, 0x12, 0xf, 0xf, 0xf, 0xf6, 0xf, 0xf, 0xf, 0xf, 0x1, 0x1, 0x1, 0x12, 0xf, 0xf, 0x12, 0xf, 0xf, 0x3, 0xf, 0x1}, ColumnMeta: []uint16{0x0, 0x0, 0x30, 0x180, 0x180, 0x180, 0x30, 0xc0, 0xfe03, 0x0, 0x180, 0x180, 0x180, 0xc02, 0x180, 0x180, 0x180, 0x180, 0x0, 0x0, 0x0, 0x0, 0x180, 0x180, 0x0, 0x180, 0x180, 0x0, 0x2fd, 0x0}, NullBitmap: []uint8{0xfc, 0xff, 0xe3, 0x37}},
- 0x3045f: &TableMapEvent{tableIDSize: 6, TableID: 0x3045f, Flags: 0x1, Schema: []uint8{0x73, 0x65, 0x69, 0x75, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72}, Table: []uint8{0x63, 0x6f, 0x6e, 0x73, 0x5f, 0x61, 0x64, 0x64, 0x72}, ColumnCount: 0x19, ColumnType: []uint8{0x3, 0x3, 0x3, 0x1, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xfe, 0x3, 0xc, 0x1, 0xc, 0xf, 0xf, 0xc, 0xf, 0xf, 0x3, 0xf, 0x4, 0x4}, ColumnMeta: []uint16{0x0, 0x0, 0x0, 0x0, 0x2fd, 0x2fd, 0x2fd, 0xc0, 0x12c, 0x30, 0xc, 0xfe06, 0x0, 0x0, 0x0, 0x0, 0x180, 0x180, 0x0, 0x180, 0x180, 0x0, 0x2fd, 0x4, 0x4}, NullBitmap: []uint8{0xf0, 0xef, 0x5f, 0x0}},
- 0x3065f: &TableMapEvent{tableIDSize: 6, TableID: 0x3065f, Flags: 0x1, Schema: []uint8{0x73, 0x65, 0x69, 0x75, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72}, Table: []uint8{0x63, 0x6f, 0x6e, 0x73, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x61, 0x6b, 0x6f, 0x75, 0x74, 0x5f, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72}, ColumnCount: 0xd, ColumnType: []uint8{0x3, 0x3, 0x3, 0x3, 0x1, 0x12, 0xf, 0xf, 0x12, 0xf, 0xf, 0x3, 0xf}, ColumnMeta: []uint16{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x180, 0x180, 0x0, 0x180, 0x180, 0x0, 0x2fd}, NullBitmap: []uint8{0xe0, 0x17}},
- }
-
- _, err := parser.Parse([]byte{
- /* 0x00, */ 0xc1, 0x86, 0x8e, 0x55, 0x1e, 0xa5, 0x14, 0x80, 0xa, 0x55, 0x0, 0x0, 0x0, 0x7, 0xc,
- 0xbf, 0xe, 0x0, 0x0, 0x5f, 0x6, 0x3, 0x0, 0x0, 0x0, 0x1, 0x0, 0x2, 0x0, 0xd, 0xff,
- 0x0, 0x0, 0x19, 0x63, 0x7, 0x0, 0xca, 0x61, 0x5, 0x0, 0x5e, 0xf7, 0xc, 0x0, 0xf5, 0x7,
- 0x0, 0x0, 0x1, 0x99, 0x96, 0x76, 0x74, 0xdd, 0x10, 0x0, 0x73, 0x69, 0x67, 0x6e, 0x75, 0x70,
- 0x5f, 0x64, 0x62, 0x5f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6, 0x0, 0x73, 0x79, 0x73, 0x74,
- 0x65, 0x6d, 0xb1, 0x3c, 0x38, 0xcb,
- })
-
- c.Assert(err, IsNil)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/replication/replication_test.go b/vendor/github.com/siddontang/go-mysql/replication/replication_test.go
deleted file mode 100644
index 50fd1ee..0000000
--- a/vendor/github.com/siddontang/go-mysql/replication/replication_test.go
+++ /dev/null
@@ -1,428 +0,0 @@
-package replication
-
-import (
- "context"
- "flag"
- "fmt"
- "os"
- "path"
- "sync"
- "testing"
- "time"
-
- . "github.com/pingcap/check"
- uuid "github.com/satori/go.uuid"
- "github.com/siddontang/go-mysql/client"
- "github.com/siddontang/go-mysql/mysql"
-)
-
-// Use docker mysql to test, mysql is 3306, mariadb is 3316
-var testHost = flag.String("host", "127.0.0.1", "MySQL master host")
-
-var testOutputLogs = flag.Bool("out", false, "output binlog event")
-
-func TestBinLogSyncer(t *testing.T) {
- TestingT(t)
-}
-
-type testSyncerSuite struct {
- b *BinlogSyncer
- c *client.Conn
-
- wg sync.WaitGroup
-
- flavor string
-}
-
-var _ = Suite(&testSyncerSuite{})
-
-func (t *testSyncerSuite) SetUpSuite(c *C) {
-
-}
-
-func (t *testSyncerSuite) TearDownSuite(c *C) {
-}
-
-func (t *testSyncerSuite) SetUpTest(c *C) {
-}
-
-func (t *testSyncerSuite) TearDownTest(c *C) {
- if t.b != nil {
- t.b.Close()
- t.b = nil
- }
-
- if t.c != nil {
- t.c.Close()
- t.c = nil
- }
-}
-
-func (t *testSyncerSuite) testExecute(c *C, query string) {
- _, err := t.c.Execute(query)
- c.Assert(err, IsNil)
-}
-
-func (t *testSyncerSuite) testSync(c *C, s *BinlogStreamer) {
- t.wg.Add(1)
- go func() {
- defer t.wg.Done()
-
- if s == nil {
- return
- }
-
- eventCount := 0
- for {
- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
- e, err := s.GetEvent(ctx)
- cancel()
-
- if err == context.DeadlineExceeded {
- eventCount += 1
- return
- }
-
- c.Assert(err, IsNil)
-
- if *testOutputLogs {
- e.Dump(os.Stdout)
- os.Stdout.Sync()
- }
- }
- }()
-
- //use mixed format
- t.testExecute(c, "SET SESSION binlog_format = 'MIXED'")
-
- str := `DROP TABLE IF EXISTS test_replication`
- t.testExecute(c, str)
-
- str = `CREATE TABLE test_replication (
- id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
- str VARCHAR(256),
- f FLOAT,
- d DOUBLE,
- de DECIMAL(10,2),
- i INT,
- bi BIGINT,
- e enum ("e1", "e2"),
- b BIT(8),
- y YEAR,
- da DATE,
- ts TIMESTAMP,
- dt DATETIME,
- tm TIME,
- t TEXT,
- bb BLOB,
- se SET('a', 'b', 'c'),
- PRIMARY KEY (id)
- ) ENGINE=InnoDB DEFAULT CHARSET=utf8`
-
- t.testExecute(c, str)
-
- //use row format
- t.testExecute(c, "SET SESSION binlog_format = 'ROW'")
-
- t.testExecute(c, `INSERT INTO test_replication (str, f, i, e, b, y, da, ts, dt, tm, de, t, bb, se)
- VALUES ("3", -3.14, 10, "e1", 0b0011, 1985,
- "2012-05-07", "2012-05-07 14:01:01", "2012-05-07 14:01:01",
- "14:01:01", -45363.64, "abc", "12345", "a,b")`)
-
- id := 100
-
- if t.flavor == mysql.MySQLFlavor {
- t.testExecute(c, "SET SESSION binlog_row_image = 'MINIMAL'")
-
- t.testExecute(c, fmt.Sprintf(`INSERT INTO test_replication (id, str, f, i, bb, de) VALUES (%d, "4", -3.14, 100, "abc", -45635.64)`, id))
- t.testExecute(c, fmt.Sprintf(`UPDATE test_replication SET f = -12.14, de = 555.34 WHERE id = %d`, id))
- t.testExecute(c, fmt.Sprintf(`DELETE FROM test_replication WHERE id = %d`, id))
- }
-
- // check whether we can create the table including the json field
- str = `DROP TABLE IF EXISTS test_json`
- t.testExecute(c, str)
-
- str = `CREATE TABLE test_json (
- id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
- c1 JSON,
- c2 DECIMAL(10, 0),
- PRIMARY KEY (id)
- ) ENGINE=InnoDB`
-
- if _, err := t.c.Execute(str); err == nil {
- t.testExecute(c, `INSERT INTO test_json (c2) VALUES (1)`)
- t.testExecute(c, `INSERT INTO test_json (c1, c2) VALUES ('{"key1": "value1", "key2": "value2"}', 1)`)
- }
-
- t.testExecute(c, "DROP TABLE IF EXISTS test_json_v2")
-
- str = `CREATE TABLE test_json_v2 (
- id INT,
- c JSON,
- PRIMARY KEY (id)
- ) ENGINE=InnoDB`
-
- if _, err := t.c.Execute(str); err == nil {
- tbls := []string{
- // Refer: https://github.com/shyiko/mysql-binlog-connector-java/blob/c8e81c879710dc19941d952f9031b0a98f8b7c02/src/test/java/com/github/shyiko/mysql/binlog/event/deserialization/json/JsonBinaryValueIntegrationTest.java#L84
- // License: https://github.com/shyiko/mysql-binlog-connector-java#license
- `INSERT INTO test_json_v2 VALUES (0, NULL)`,
- `INSERT INTO test_json_v2 VALUES (1, '{\"a\": 2}')`,
- `INSERT INTO test_json_v2 VALUES (2, '[1,2]')`,
- `INSERT INTO test_json_v2 VALUES (3, '{\"a\":\"b\", \"c\":\"d\",\"ab\":\"abc\", \"bc\": [\"x\", \"y\"]}')`,
- `INSERT INTO test_json_v2 VALUES (4, '[\"here\", [\"I\", \"am\"], \"!!!\"]')`,
- `INSERT INTO test_json_v2 VALUES (5, '\"scalar string\"')`,
- `INSERT INTO test_json_v2 VALUES (6, 'true')`,
- `INSERT INTO test_json_v2 VALUES (7, 'false')`,
- `INSERT INTO test_json_v2 VALUES (8, 'null')`,
- `INSERT INTO test_json_v2 VALUES (9, '-1')`,
- `INSERT INTO test_json_v2 VALUES (10, CAST(CAST(1 AS UNSIGNED) AS JSON))`,
- `INSERT INTO test_json_v2 VALUES (11, '32767')`,
- `INSERT INTO test_json_v2 VALUES (12, '32768')`,
- `INSERT INTO test_json_v2 VALUES (13, '-32768')`,
- `INSERT INTO test_json_v2 VALUES (14, '-32769')`,
- `INSERT INTO test_json_v2 VALUES (15, '2147483647')`,
- `INSERT INTO test_json_v2 VALUES (16, '2147483648')`,
- `INSERT INTO test_json_v2 VALUES (17, '-2147483648')`,
- `INSERT INTO test_json_v2 VALUES (18, '-2147483649')`,
- `INSERT INTO test_json_v2 VALUES (19, '18446744073709551615')`,
- `INSERT INTO test_json_v2 VALUES (20, '18446744073709551616')`,
- `INSERT INTO test_json_v2 VALUES (21, '3.14')`,
- `INSERT INTO test_json_v2 VALUES (22, '{}')`,
- `INSERT INTO test_json_v2 VALUES (23, '[]')`,
- `INSERT INTO test_json_v2 VALUES (24, CAST(CAST('2015-01-15 23:24:25' AS DATETIME) AS JSON))`,
- `INSERT INTO test_json_v2 VALUES (25, CAST(CAST('23:24:25' AS TIME) AS JSON))`,
- `INSERT INTO test_json_v2 VALUES (125, CAST(CAST('23:24:25.12' AS TIME(3)) AS JSON))`,
- `INSERT INTO test_json_v2 VALUES (225, CAST(CAST('23:24:25.0237' AS TIME(3)) AS JSON))`,
- `INSERT INTO test_json_v2 VALUES (26, CAST(CAST('2015-01-15' AS DATE) AS JSON))`,
- `INSERT INTO test_json_v2 VALUES (27, CAST(TIMESTAMP'2015-01-15 23:24:25' AS JSON))`,
- `INSERT INTO test_json_v2 VALUES (127, CAST(TIMESTAMP'2015-01-15 23:24:25.12' AS JSON))`,
- `INSERT INTO test_json_v2 VALUES (227, CAST(TIMESTAMP'2015-01-15 23:24:25.0237' AS JSON))`,
- `INSERT INTO test_json_v2 VALUES (327, CAST(UNIX_TIMESTAMP('2015-01-15 23:24:25') AS JSON))`,
- `INSERT INTO test_json_v2 VALUES (28, CAST(ST_GeomFromText('POINT(1 1)') AS JSON))`,
- `INSERT INTO test_json_v2 VALUES (29, CAST('[]' AS CHAR CHARACTER SET 'ascii'))`,
- // TODO: 30 and 31 are BIT type from JSON_TYPE, may support later.
- `INSERT INTO test_json_v2 VALUES (30, CAST(x'cafe' AS JSON))`,
- `INSERT INTO test_json_v2 VALUES (31, CAST(x'cafebabe' AS JSON))`,
- `INSERT INTO test_json_v2 VALUES (100, CONCAT('{\"', REPEAT('a', 64 * 1024 - 1), '\":123}'))`,
- }
-
- for _, query := range tbls {
- t.testExecute(c, query)
- }
-
- // If MySQL supports JSON, it must supports GEOMETRY.
- t.testExecute(c, "DROP TABLE IF EXISTS test_geo")
-
- str = `CREATE TABLE test_geo (g GEOMETRY)`
- _, err = t.c.Execute(str)
- c.Assert(err, IsNil)
-
- tbls = []string{
- `INSERT INTO test_geo VALUES (POINT(1, 1))`,
- `INSERT INTO test_geo VALUES (LINESTRING(POINT(0,0), POINT(1,1), POINT(2,2)))`,
- // TODO: add more geometry tests
- }
-
- for _, query := range tbls {
- t.testExecute(c, query)
- }
- }
-
- str = `DROP TABLE IF EXISTS test_parse_time`
- t.testExecute(c, str)
-
- // Must allow zero time.
- t.testExecute(c, `SET sql_mode=''`)
- str = `CREATE TABLE test_parse_time (
- a1 DATETIME,
- a2 DATETIME(3),
- a3 DATETIME(6),
- b1 TIMESTAMP,
- b2 TIMESTAMP(3) ,
- b3 TIMESTAMP(6))`
- t.testExecute(c, str)
-
- t.testExecute(c, `INSERT INTO test_parse_time VALUES
- ("2014-09-08 17:51:04.123456", "2014-09-08 17:51:04.123456", "2014-09-08 17:51:04.123456",
- "2014-09-08 17:51:04.123456","2014-09-08 17:51:04.123456","2014-09-08 17:51:04.123456"),
- ("0000-00-00 00:00:00.000000", "0000-00-00 00:00:00.000000", "0000-00-00 00:00:00.000000",
- "0000-00-00 00:00:00.000000", "0000-00-00 00:00:00.000000", "0000-00-00 00:00:00.000000"),
- ("2014-09-08 17:51:04.000456", "2014-09-08 17:51:04.000456", "2014-09-08 17:51:04.000456",
- "2014-09-08 17:51:04.000456","2014-09-08 17:51:04.000456","2014-09-08 17:51:04.000456")`)
-
- t.wg.Wait()
-}
-
-func (t *testSyncerSuite) setupTest(c *C, flavor string) {
- var port uint16 = 3306
- switch flavor {
- case mysql.MariaDBFlavor:
- port = 3316
- }
-
- t.flavor = flavor
-
- var err error
- if t.c != nil {
- t.c.Close()
- }
-
- t.c, err = client.Connect(fmt.Sprintf("%s:%d", *testHost, port), "root", "", "")
- if err != nil {
- c.Skip(err.Error())
- }
-
- // _, err = t.c.Execute("CREATE DATABASE IF NOT EXISTS test")
- // c.Assert(err, IsNil)
-
- _, err = t.c.Execute("USE test")
- c.Assert(err, IsNil)
-
- if t.b != nil {
- t.b.Close()
- }
-
- cfg := BinlogSyncerConfig{
- ServerID: 100,
- Flavor: flavor,
- Host: *testHost,
- Port: port,
- User: "root",
- Password: "",
- UseDecimal: true,
- }
-
- t.b = NewBinlogSyncer(cfg)
-}
-
-func (t *testSyncerSuite) testPositionSync(c *C) {
- //get current master binlog file and position
- r, err := t.c.Execute("SHOW MASTER STATUS")
- c.Assert(err, IsNil)
- binFile, _ := r.GetString(0, 0)
- binPos, _ := r.GetInt(0, 1)
-
- s, err := t.b.StartSync(mysql.Position{Name: binFile, Pos: uint32(binPos)})
- c.Assert(err, IsNil)
-
- // Test re-sync.
- time.Sleep(100 * time.Millisecond)
- t.b.c.SetReadDeadline(time.Now().Add(time.Millisecond))
- time.Sleep(100 * time.Millisecond)
-
- t.testSync(c, s)
-}
-
-func (t *testSyncerSuite) TestMysqlPositionSync(c *C) {
- t.setupTest(c, mysql.MySQLFlavor)
- t.testPositionSync(c)
-}
-
-func (t *testSyncerSuite) TestMysqlGTIDSync(c *C) {
- t.setupTest(c, mysql.MySQLFlavor)
-
- r, err := t.c.Execute("SELECT @@gtid_mode")
- c.Assert(err, IsNil)
- modeOn, _ := r.GetString(0, 0)
- if modeOn != "ON" {
- c.Skip("GTID mode is not ON")
- }
-
- r, err = t.c.Execute("SHOW GLOBAL VARIABLES LIKE 'SERVER_UUID'")
- c.Assert(err, IsNil)
-
- var masterUuid uuid.UUID
- if s, _ := r.GetString(0, 1); len(s) > 0 && s != "NONE" {
- masterUuid, err = uuid.FromString(s)
- c.Assert(err, IsNil)
- }
-
- set, _ := mysql.ParseMysqlGTIDSet(fmt.Sprintf("%s:%d-%d", masterUuid.String(), 1, 2))
-
- s, err := t.b.StartSyncGTID(set)
- c.Assert(err, IsNil)
-
- t.testSync(c, s)
-}
-
-func (t *testSyncerSuite) TestMariadbPositionSync(c *C) {
- t.setupTest(c, mysql.MariaDBFlavor)
-
- t.testPositionSync(c)
-}
-
-func (t *testSyncerSuite) TestMariadbGTIDSync(c *C) {
- t.setupTest(c, mysql.MariaDBFlavor)
-
- // get current master gtid binlog pos
- r, err := t.c.Execute("SELECT @@gtid_binlog_pos")
- c.Assert(err, IsNil)
-
- str, _ := r.GetString(0, 0)
- set, _ := mysql.ParseMariadbGTIDSet(str)
-
- s, err := t.b.StartSyncGTID(set)
- c.Assert(err, IsNil)
-
- t.testSync(c, s)
-}
-
-func (t *testSyncerSuite) TestMysqlSemiPositionSync(c *C) {
- t.setupTest(c, mysql.MySQLFlavor)
-
- t.b.cfg.SemiSyncEnabled = true
-
- t.testPositionSync(c)
-}
-
-func (t *testSyncerSuite) TestMysqlBinlogCodec(c *C) {
- t.setupTest(c, mysql.MySQLFlavor)
-
- t.testExecute(c, "RESET MASTER")
-
- var wg sync.WaitGroup
- wg.Add(1)
- defer wg.Wait()
-
- go func() {
- defer wg.Done()
-
- t.testSync(c, nil)
-
- t.testExecute(c, "FLUSH LOGS")
-
- t.testSync(c, nil)
- }()
-
- binlogDir := "./var"
-
- os.RemoveAll(binlogDir)
-
- err := t.b.StartBackup(binlogDir, mysql.Position{Name: "", Pos: uint32(0)}, 2*time.Second)
- c.Assert(err, IsNil)
-
- p := NewBinlogParser()
- p.SetVerifyChecksum(true)
-
- f := func(e *BinlogEvent) error {
- if *testOutputLogs {
- e.Dump(os.Stdout)
- os.Stdout.Sync()
- }
- return nil
- }
-
- dir, err := os.Open(binlogDir)
- c.Assert(err, IsNil)
- defer dir.Close()
-
- files, err := dir.Readdirnames(-1)
- c.Assert(err, IsNil)
-
- for _, file := range files {
- err = p.ParseFile(path.Join(binlogDir, file), 0, f)
- c.Assert(err, IsNil)
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/replication/row_event_test.go b/vendor/github.com/siddontang/go-mysql/replication/row_event_test.go
deleted file mode 100644
index 533a876..0000000
--- a/vendor/github.com/siddontang/go-mysql/replication/row_event_test.go
+++ /dev/null
@@ -1,662 +0,0 @@
-package replication
-
-import (
- "fmt"
- "strconv"
-
- . "github.com/pingcap/check"
- "github.com/shopspring/decimal"
-)
-
-type testDecodeSuite struct{}
-
-var _ = Suite(&testDecodeSuite{})
-
-type decodeDecimalChecker struct {
- *CheckerInfo
-}
-
-func (_ *decodeDecimalChecker) Check(params []interface{}, names []string) (bool, string) {
- var test int
- val := struct {
- Value decimal.Decimal
- Pos int
- Err error
- EValue decimal.Decimal
- EPos int
- EErr error
- }{}
-
- for i, name := range names {
- switch name {
- case "obtainedValue":
- val.Value, _ = params[i].(decimal.Decimal)
- case "obtainedPos":
- val.Pos, _ = params[i].(int)
- case "obtainedErr":
- val.Err, _ = params[i].(error)
- case "expectedValue":
- val.EValue, _ = params[i].(decimal.Decimal)
- case "expectedPos":
- val.EPos, _ = params[i].(int)
- case "expectedErr":
- val.EErr, _ = params[i].(error)
- case "caseNumber":
- test = params[i].(int)
- }
- }
- errorMsgFmt := fmt.Sprintf("For Test %v: ", test) + "Did not get expected %v(%v), got %v instead."
- if val.Err != val.EErr {
- return false, fmt.Sprintf(errorMsgFmt, "error", val.EErr, val.Err)
- }
- if val.Pos != val.EPos {
- return false, fmt.Sprintf(errorMsgFmt, "position", val.EPos, val.Pos)
- }
- if !val.Value.Equal(val.EValue) {
- return false, fmt.Sprintf(errorMsgFmt, "value", val.EValue, val.Value)
- }
- return true, ""
-}
-
-var DecodeDecimalsEquals = &decodeDecimalChecker{
- &CheckerInfo{Name: "Equals", Params: []string{"obtainedValue", "obtainedPos", "obtainedErr", "expectedValue", "expectedPos", "expectedErr", "caseNumber"}},
-}
-
-func (_ *testDecodeSuite) TestDecodeDecimal(c *C) {
- // _PLACEHOLDER_ := 0
- testcases := []struct {
- Data []byte
- Precision int
- Decimals int
- Expected string
- ExpectedPos int
- ExpectedErr error
- }{
- // These are cases from the mysql test cases
- /*
- -- Generated with gentestsql.go --
- DROP TABLE IF EXISTS decodedecimal;
- CREATE TABLE decodedecimal (
- id int(11) not null auto_increment,
- v4_2 decimal(4,2),
- v5_0 decimal(5,0),
- v7_3 decimal(7,3),
- v10_2 decimal(10,2),
- v10_3 decimal(10,3),
- v13_2 decimal(13,2),
- v15_14 decimal(15,14),
- v20_10 decimal(20,10),
- v30_5 decimal(30,5),
- v30_20 decimal(30,20),
- v30_25 decimal(30,25),
- prec int(11),
- scale int(11),
- PRIMARY KEY(id)
- ) engine=InnoDB;
- INSERT INTO decodedecimal (v4_2,v5_0,v7_3,v10_2,v10_3,v13_2,v15_14,v20_10,v30_5,v30_20,v30_25,prec,scale) VALUES
- ("-10.55","-10.55","-10.55","-10.55","-10.55","-10.55","-10.55","-10.55","-10.55","-10.55","-10.55",4,2),
- ("0.0123456789012345678912345","0.0123456789012345678912345","0.0123456789012345678912345","0.0123456789012345678912345","0.0123456789012345678912345","0.0123456789012345678912345","0.0123456789012345678912345","0.0123456789012345678912345","0.0123456789012345678912345","0.0123456789012345678912345","0.0123456789012345678912345",30,25),
- ("12345","12345","12345","12345","12345","12345","12345","12345","12345","12345","12345",5,0),
- ("12345","12345","12345","12345","12345","12345","12345","12345","12345","12345","12345",10,3),
- ("123.45","123.45","123.45","123.45","123.45","123.45","123.45","123.45","123.45","123.45","123.45",10,3),
- ("-123.45","-123.45","-123.45","-123.45","-123.45","-123.45","-123.45","-123.45","-123.45","-123.45","-123.45",20,10),
- (".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",15,14),
- (".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",".00012345000098765",22,20),
- (".12345000098765",".12345000098765",".12345000098765",".12345000098765",".12345000098765",".12345000098765",".12345000098765",".12345000098765",".12345000098765",".12345000098765",".12345000098765",30,20),
- ("-.000000012345000098765","-.000000012345000098765","-.000000012345000098765","-.000000012345000098765","-.000000012345000098765","-.000000012345000098765","-.000000012345000098765","-.000000012345000098765","-.000000012345000098765","-.000000012345000098765","-.000000012345000098765",30,20),
- ("1234500009876.5","1234500009876.5","1234500009876.5","1234500009876.5","1234500009876.5","1234500009876.5","1234500009876.5","1234500009876.5","1234500009876.5","1234500009876.5","1234500009876.5",30,5),
- ("111111111.11","111111111.11","111111111.11","111111111.11","111111111.11","111111111.11","111111111.11","111111111.11","111111111.11","111111111.11","111111111.11",10,2),
- ("000000000.01","000000000.01","000000000.01","000000000.01","000000000.01","000000000.01","000000000.01","000000000.01","000000000.01","000000000.01","000000000.01",7,3),
- ("123.4","123.4","123.4","123.4","123.4","123.4","123.4","123.4","123.4","123.4","123.4",10,2),
- ("-562.58","-562.58","-562.58","-562.58","-562.58","-562.58","-562.58","-562.58","-562.58","-562.58","-562.58",13,2),
- ("-3699.01","-3699.01","-3699.01","-3699.01","-3699.01","-3699.01","-3699.01","-3699.01","-3699.01","-3699.01","-3699.01",13,2),
- ("-1948.14","-1948.14","-1948.14","-1948.14","-1948.14","-1948.14","-1948.14","-1948.14","-1948.14","-1948.14","-1948.14",13,2)
- ;
- select * from decodedecimal;
- +----+--------+-------+-----------+-------------+-------------+----------------+-------------------+-----------------------+---------------------+---------------------------------+---------------------------------+------+-------+
- | id | v4_2 | v5_0 | v7_3 | v10_2 | v10_3 | v13_2 | v15_14 | v20_10 | v30_5 | v30_20 | v30_25 | prec | scale |
- +----+--------+-------+-----------+-------------+-------------+----------------+-------------------+-----------------------+---------------------+---------------------------------+---------------------------------+------+-------+
- | 1 | -10.55 | -11 | -10.550 | -10.55 | -10.550 | -10.55 | -9.99999999999999 | -10.5500000000 | -10.55000 | -10.55000000000000000000 | -10.5500000000000000000000000 | 4 | 2 |
- | 2 | 0.01 | 0 | 0.012 | 0.01 | 0.012 | 0.01 | 0.01234567890123 | 0.0123456789 | 0.01235 | 0.01234567890123456789 | 0.0123456789012345678912345 | 30 | 25 |
- | 3 | 99.99 | 12345 | 9999.999 | 12345.00 | 12345.000 | 12345.00 | 9.99999999999999 | 12345.0000000000 | 12345.00000 | 12345.00000000000000000000 | 12345.0000000000000000000000000 | 5 | 0 |
- | 4 | 99.99 | 12345 | 9999.999 | 12345.00 | 12345.000 | 12345.00 | 9.99999999999999 | 12345.0000000000 | 12345.00000 | 12345.00000000000000000000 | 12345.0000000000000000000000000 | 10 | 3 |
- | 5 | 99.99 | 123 | 123.450 | 123.45 | 123.450 | 123.45 | 9.99999999999999 | 123.4500000000 | 123.45000 | 123.45000000000000000000 | 123.4500000000000000000000000 | 10 | 3 |
- | 6 | -99.99 | -123 | -123.450 | -123.45 | -123.450 | -123.45 | -9.99999999999999 | -123.4500000000 | -123.45000 | -123.45000000000000000000 | -123.4500000000000000000000000 | 20 | 10 |
- | 7 | 0.00 | 0 | 0.000 | 0.00 | 0.000 | 0.00 | 0.00012345000099 | 0.0001234500 | 0.00012 | 0.00012345000098765000 | 0.0001234500009876500000000 | 15 | 14 |
- | 8 | 0.00 | 0 | 0.000 | 0.00 | 0.000 | 0.00 | 0.00012345000099 | 0.0001234500 | 0.00012 | 0.00012345000098765000 | 0.0001234500009876500000000 | 22 | 20 |
- | 9 | 0.12 | 0 | 0.123 | 0.12 | 0.123 | 0.12 | 0.12345000098765 | 0.1234500010 | 0.12345 | 0.12345000098765000000 | 0.1234500009876500000000000 | 30 | 20 |
- | 10 | 0.00 | 0 | 0.000 | 0.00 | 0.000 | 0.00 | -0.00000001234500 | -0.0000000123 | 0.00000 | -0.00000001234500009877 | -0.0000000123450000987650000 | 30 | 20 |
- | 11 | 99.99 | 99999 | 9999.999 | 99999999.99 | 9999999.999 | 99999999999.99 | 9.99999999999999 | 9999999999.9999999999 | 1234500009876.50000 | 9999999999.99999999999999999999 | 99999.9999999999999999999999999 | 30 | 5 |
- | 12 | 99.99 | 99999 | 9999.999 | 99999999.99 | 9999999.999 | 111111111.11 | 9.99999999999999 | 111111111.1100000000 | 111111111.11000 | 111111111.11000000000000000000 | 99999.9999999999999999999999999 | 10 | 2 |
- | 13 | 0.01 | 0 | 0.010 | 0.01 | 0.010 | 0.01 | 0.01000000000000 | 0.0100000000 | 0.01000 | 0.01000000000000000000 | 0.0100000000000000000000000 | 7 | 3 |
- | 14 | 99.99 | 123 | 123.400 | 123.40 | 123.400 | 123.40 | 9.99999999999999 | 123.4000000000 | 123.40000 | 123.40000000000000000000 | 123.4000000000000000000000000 | 10 | 2 |
- | 15 | -99.99 | -563 | -562.580 | -562.58 | -562.580 | -562.58 | -9.99999999999999 | -562.5800000000 | -562.58000 | -562.58000000000000000000 | -562.5800000000000000000000000 | 13 | 2 |
- | 16 | -99.99 | -3699 | -3699.010 | -3699.01 | -3699.010 | -3699.01 | -9.99999999999999 | -3699.0100000000 | -3699.01000 | -3699.01000000000000000000 | -3699.0100000000000000000000000 | 13 | 2 |
- | 17 | -99.99 | -1948 | -1948.140 | -1948.14 | -1948.140 | -1948.14 | -9.99999999999999 | -1948.1400000000 | -1948.14000 | -1948.14000000000000000000 | -1948.1400000000000000000000000 | 13 | 2 |
- +----+--------+-------+-----------+-------------+-------------+----------------+-------------------+-----------------------+---------------------+---------------------------------+---------------------------------+------+-------+
- */
- {[]byte{117, 200, 127, 255}, 4, 2, "-10.55", 2, nil},
- {[]byte{127, 255, 244, 127, 245}, 5, 0, "-11", 3, nil},
- {[]byte{127, 245, 253, 217, 127, 255}, 7, 3, "-10.550", 4, nil},
- {[]byte{127, 255, 255, 245, 200, 127, 255}, 10, 2, "-10.55", 5, nil},
- {[]byte{127, 255, 255, 245, 253, 217, 127, 255}, 10, 3, "-10.550", 6, nil},
- {[]byte{127, 255, 255, 255, 245, 200, 118, 196}, 13, 2, "-10.55", 6, nil},
- {[]byte{118, 196, 101, 54, 0, 254, 121, 96, 127, 255}, 15, 14, "-9.99999999999999", 8, nil},
- {[]byte{127, 255, 255, 255, 245, 223, 55, 170, 127, 255, 127, 255}, 20, 10, "-10.5500000000", 10, nil},
- {[]byte{127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 245, 255, 41, 39, 127, 255}, 30, 5, "-10.55000", 15, nil},
- {[]byte{127, 255, 255, 255, 245, 223, 55, 170, 127, 255, 255, 255, 255, 255, 127, 255}, 30, 20, "-10.55000000000000000000", 14, nil},
- {[]byte{127, 255, 245, 223, 55, 170, 127, 255, 255, 255, 255, 255, 255, 255, 255, 4, 0}, 30, 25, "-10.5500000000000000000000000", 15, nil},
- {[]byte{128, 1, 128, 0}, 4, 2, "0.01", 2, nil},
- {[]byte{128, 0, 0, 128, 0}, 5, 0, "0", 3, nil},
- {[]byte{128, 0, 0, 12, 128, 0}, 7, 3, "0.012", 4, nil},
- {[]byte{128, 0, 0, 0, 1, 128, 0}, 10, 2, "0.01", 5, nil},
- {[]byte{128, 0, 0, 0, 0, 12, 128, 0}, 10, 3, "0.012", 6, nil},
- {[]byte{128, 0, 0, 0, 0, 1, 128, 0}, 13, 2, "0.01", 6, nil},
- {[]byte{128, 0, 188, 97, 78, 1, 96, 11, 128, 0}, 15, 14, "0.01234567890123", 8, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 188, 97, 78, 9, 128, 0}, 20, 10, "0.0123456789", 10, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 211, 128, 0}, 30, 5, "0.01235", 15, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 188, 97, 78, 53, 183, 191, 135, 89, 128, 0}, 30, 20, "0.01234567890123456789", 14, nil},
- {[]byte{128, 0, 0, 0, 188, 97, 78, 53, 183, 191, 135, 0, 135, 253, 217, 30, 0}, 30, 25, "0.0123456789012345678912345", 15, nil},
- {[]byte{227, 99, 128, 48}, 4, 2, "99.99", 2, nil},
- {[]byte{128, 48, 57, 167, 15}, 5, 0, "12345", 3, nil},
- {[]byte{167, 15, 3, 231, 128, 0}, 7, 3, "9999.999", 4, nil},
- {[]byte{128, 0, 48, 57, 0, 128, 0}, 10, 2, "12345.00", 5, nil},
- {[]byte{128, 0, 48, 57, 0, 0, 128, 0}, 10, 3, "12345.000", 6, nil},
- {[]byte{128, 0, 0, 48, 57, 0, 137, 59}, 13, 2, "12345.00", 6, nil},
- {[]byte{137, 59, 154, 201, 255, 1, 134, 159, 128, 0}, 15, 14, "9.99999999999999", 8, nil},
- {[]byte{128, 0, 0, 48, 57, 0, 0, 0, 0, 0, 128, 0}, 20, 10, "12345.0000000000", 10, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 57, 0, 0, 0, 128, 0}, 30, 5, "12345.00000", 15, nil},
- {[]byte{128, 0, 0, 48, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 48}, 30, 20, "12345.00000000000000000000", 14, nil},
- {[]byte{128, 48, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0}, 30, 25, "12345.0000000000000000000000000", 15, nil},
- {[]byte{227, 99, 128, 48}, 4, 2, "99.99", 2, nil},
- {[]byte{128, 48, 57, 167, 15}, 5, 0, "12345", 3, nil},
- {[]byte{167, 15, 3, 231, 128, 0}, 7, 3, "9999.999", 4, nil},
- {[]byte{128, 0, 48, 57, 0, 128, 0}, 10, 2, "12345.00", 5, nil},
- {[]byte{128, 0, 48, 57, 0, 0, 128, 0}, 10, 3, "12345.000", 6, nil},
- {[]byte{128, 0, 0, 48, 57, 0, 137, 59}, 13, 2, "12345.00", 6, nil},
- {[]byte{137, 59, 154, 201, 255, 1, 134, 159, 128, 0}, 15, 14, "9.99999999999999", 8, nil},
- {[]byte{128, 0, 0, 48, 57, 0, 0, 0, 0, 0, 128, 0}, 20, 10, "12345.0000000000", 10, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 57, 0, 0, 0, 128, 0}, 30, 5, "12345.00000", 15, nil},
- {[]byte{128, 0, 0, 48, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 48}, 30, 20, "12345.00000000000000000000", 14, nil},
- {[]byte{128, 48, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0}, 30, 25, "12345.0000000000000000000000000", 15, nil},
- {[]byte{227, 99, 128, 0}, 4, 2, "99.99", 2, nil},
- {[]byte{128, 0, 123, 128, 123}, 5, 0, "123", 3, nil},
- {[]byte{128, 123, 1, 194, 128, 0}, 7, 3, "123.450", 4, nil},
- {[]byte{128, 0, 0, 123, 45, 128, 0}, 10, 2, "123.45", 5, nil},
- {[]byte{128, 0, 0, 123, 1, 194, 128, 0}, 10, 3, "123.450", 6, nil},
- {[]byte{128, 0, 0, 0, 123, 45, 137, 59}, 13, 2, "123.45", 6, nil},
- {[]byte{137, 59, 154, 201, 255, 1, 134, 159, 128, 0}, 15, 14, "9.99999999999999", 8, nil},
- {[]byte{128, 0, 0, 0, 123, 26, 210, 116, 128, 0, 128, 0}, 20, 10, "123.4500000000", 10, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 123, 0, 175, 200, 128, 0}, 30, 5, "123.45000", 15, nil},
- {[]byte{128, 0, 0, 0, 123, 26, 210, 116, 128, 0, 0, 0, 0, 0, 128, 0}, 30, 20, "123.45000000000000000000", 14, nil},
- {[]byte{128, 0, 123, 26, 210, 116, 128, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0}, 30, 25, "123.4500000000000000000000000", 15, nil},
- {[]byte{28, 156, 127, 255}, 4, 2, "-99.99", 2, nil},
- {[]byte{127, 255, 132, 127, 132}, 5, 0, "-123", 3, nil},
- {[]byte{127, 132, 254, 61, 127, 255}, 7, 3, "-123.450", 4, nil},
- {[]byte{127, 255, 255, 132, 210, 127, 255}, 10, 2, "-123.45", 5, nil},
- {[]byte{127, 255, 255, 132, 254, 61, 127, 255}, 10, 3, "-123.450", 6, nil},
- {[]byte{127, 255, 255, 255, 132, 210, 118, 196}, 13, 2, "-123.45", 6, nil},
- {[]byte{118, 196, 101, 54, 0, 254, 121, 96, 127, 255}, 15, 14, "-9.99999999999999", 8, nil},
- {[]byte{127, 255, 255, 255, 132, 229, 45, 139, 127, 255, 127, 255}, 20, 10, "-123.4500000000", 10, nil},
- {[]byte{127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 132, 255, 80, 55, 127, 255}, 30, 5, "-123.45000", 15, nil},
- {[]byte{127, 255, 255, 255, 132, 229, 45, 139, 127, 255, 255, 255, 255, 255, 127, 255}, 30, 20, "-123.45000000000000000000", 14, nil},
- {[]byte{127, 255, 132, 229, 45, 139, 127, 255, 255, 255, 255, 255, 255, 255, 255, 20, 0}, 30, 25, "-123.4500000000000000000000000", 15, nil},
- {[]byte{128, 0, 128, 0}, 4, 2, "0.00", 2, nil},
- {[]byte{128, 0, 0, 128, 0}, 5, 0, "0", 3, nil},
- {[]byte{128, 0, 0, 0, 128, 0}, 7, 3, "0.000", 4, nil},
- {[]byte{128, 0, 0, 0, 0, 128, 0}, 10, 2, "0.00", 5, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 128, 0}, 10, 3, "0.000", 6, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 128, 0}, 13, 2, "0.00", 6, nil},
- {[]byte{128, 0, 1, 226, 58, 0, 0, 99, 128, 0}, 15, 14, "0.00012345000099", 8, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 1, 226, 58, 0, 128, 0}, 20, 10, "0.0001234500", 10, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 128, 0}, 30, 5, "0.00012", 15, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 1, 226, 58, 0, 15, 18, 2, 0, 128, 0}, 30, 20, "0.00012345000098765000", 14, nil},
- {[]byte{128, 0, 0, 0, 1, 226, 58, 0, 15, 18, 2, 0, 0, 0, 0, 15, 0}, 30, 25, "0.0001234500009876500000000", 15, nil},
- {[]byte{128, 0, 128, 0}, 4, 2, "0.00", 2, nil},
- {[]byte{128, 0, 0, 128, 0}, 5, 0, "0", 3, nil},
- {[]byte{128, 0, 0, 0, 128, 0}, 7, 3, "0.000", 4, nil},
- {[]byte{128, 0, 0, 0, 0, 128, 0}, 10, 2, "0.00", 5, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 128, 0}, 10, 3, "0.000", 6, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 128, 0}, 13, 2, "0.00", 6, nil},
- {[]byte{128, 0, 1, 226, 58, 0, 0, 99, 128, 0}, 15, 14, "0.00012345000099", 8, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 1, 226, 58, 0, 128, 0}, 20, 10, "0.0001234500", 10, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 128, 0}, 30, 5, "0.00012", 15, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 1, 226, 58, 0, 15, 18, 2, 0, 128, 0}, 30, 20, "0.00012345000098765000", 14, nil},
- {[]byte{128, 0, 0, 0, 1, 226, 58, 0, 15, 18, 2, 0, 0, 0, 0, 22, 0}, 30, 25, "0.0001234500009876500000000", 15, nil},
- {[]byte{128, 12, 128, 0}, 4, 2, "0.12", 2, nil},
- {[]byte{128, 0, 0, 128, 0}, 5, 0, "0", 3, nil},
- {[]byte{128, 0, 0, 123, 128, 0}, 7, 3, "0.123", 4, nil},
- {[]byte{128, 0, 0, 0, 12, 128, 0}, 10, 2, "0.12", 5, nil},
- {[]byte{128, 0, 0, 0, 0, 123, 128, 0}, 10, 3, "0.123", 6, nil},
- {[]byte{128, 0, 0, 0, 0, 12, 128, 7}, 13, 2, "0.12", 6, nil},
- {[]byte{128, 7, 91, 178, 144, 1, 129, 205, 128, 0}, 15, 14, "0.12345000098765", 8, nil},
- {[]byte{128, 0, 0, 0, 0, 7, 91, 178, 145, 0, 128, 0}, 20, 10, "0.1234500010", 10, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 57, 128, 0}, 30, 5, "0.12345", 15, nil},
- {[]byte{128, 0, 0, 0, 0, 7, 91, 178, 144, 58, 222, 87, 208, 0, 128, 0}, 30, 20, "0.12345000098765000000", 14, nil},
- {[]byte{128, 0, 0, 7, 91, 178, 144, 58, 222, 87, 208, 0, 0, 0, 0, 30, 0}, 30, 25, "0.1234500009876500000000000", 15, nil},
- {[]byte{128, 0, 128, 0}, 4, 2, "0.00", 2, nil},
- {[]byte{128, 0, 0, 128, 0}, 5, 0, "0", 3, nil},
- {[]byte{128, 0, 0, 0, 128, 0}, 7, 3, "0.000", 4, nil},
- {[]byte{128, 0, 0, 0, 0, 128, 0}, 10, 2, "0.00", 5, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 128, 0}, 10, 3, "0.000", 6, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 127, 255}, 13, 2, "0.00", 6, nil},
- {[]byte{127, 255, 255, 255, 243, 255, 121, 59, 127, 255}, 15, 14, "-0.00000001234500", 8, nil},
- {[]byte{127, 255, 255, 255, 255, 255, 255, 255, 243, 252, 128, 0}, 20, 10, "-0.0000000123", 10, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255}, 30, 5, "0.00000", 15, nil},
- {[]byte{127, 255, 255, 255, 255, 255, 255, 255, 243, 235, 111, 183, 93, 178, 127, 255}, 30, 20, "-0.00000001234500009877", 14, nil},
- {[]byte{127, 255, 255, 255, 255, 255, 243, 235, 111, 183, 93, 255, 139, 69, 47, 30, 0}, 30, 25, "-0.0000000123450000987650000", 15, nil},
- {[]byte{227, 99, 129, 134}, 4, 2, "99.99", 2, nil},
- {[]byte{129, 134, 159, 167, 15}, 5, 0, "99999", 3, nil},
- {[]byte{167, 15, 3, 231, 133, 245}, 7, 3, "9999.999", 4, nil},
- {[]byte{133, 245, 224, 255, 99, 128, 152}, 10, 2, "99999999.99", 5, nil},
- {[]byte{128, 152, 150, 127, 3, 231, 227, 59}, 10, 3, "9999999.999", 6, nil},
- {[]byte{227, 59, 154, 201, 255, 99, 137, 59}, 13, 2, "99999999999.99", 6, nil},
- {[]byte{137, 59, 154, 201, 255, 1, 134, 159, 137, 59}, 15, 14, "9.99999999999999", 8, nil},
- {[]byte{137, 59, 154, 201, 255, 59, 154, 201, 255, 9, 128, 0}, 20, 10, "9999999999.9999999999", 10, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 4, 210, 29, 205, 139, 148, 0, 195, 80, 137, 59}, 30, 5, "1234500009876.50000", 15, nil},
- {[]byte{137, 59, 154, 201, 255, 59, 154, 201, 255, 59, 154, 201, 255, 99, 129, 134}, 30, 20, "9999999999.99999999999999999999", 14, nil},
- {[]byte{129, 134, 159, 59, 154, 201, 255, 59, 154, 201, 255, 0, 152, 150, 127, 30, 0}, 30, 25, "99999.9999999999999999999999999", 15, nil},
- {[]byte{227, 99, 129, 134}, 4, 2, "99.99", 2, nil},
- {[]byte{129, 134, 159, 167, 15}, 5, 0, "99999", 3, nil},
- {[]byte{167, 15, 3, 231, 133, 245}, 7, 3, "9999.999", 4, nil},
- {[]byte{133, 245, 224, 255, 99, 128, 152}, 10, 2, "99999999.99", 5, nil},
- {[]byte{128, 152, 150, 127, 3, 231, 128, 6}, 10, 3, "9999999.999", 6, nil},
- {[]byte{128, 6, 159, 107, 199, 11, 137, 59}, 13, 2, "111111111.11", 6, nil},
- {[]byte{137, 59, 154, 201, 255, 1, 134, 159, 128, 6}, 15, 14, "9.99999999999999", 8, nil},
- {[]byte{128, 6, 159, 107, 199, 6, 142, 119, 128, 0, 128, 0}, 20, 10, "111111111.1100000000", 10, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 0, 0, 6, 159, 107, 199, 0, 42, 248, 128, 6}, 30, 5, "111111111.11000", 15, nil},
- {[]byte{128, 6, 159, 107, 199, 6, 142, 119, 128, 0, 0, 0, 0, 0, 129, 134}, 30, 20, "111111111.11000000000000000000", 14, nil},
- {[]byte{129, 134, 159, 59, 154, 201, 255, 59, 154, 201, 255, 0, 152, 150, 127, 10, 0}, 30, 25, "99999.9999999999999999999999999", 15, nil},
- {[]byte{128, 1, 128, 0}, 4, 2, "0.01", 2, nil},
- {[]byte{128, 0, 0, 128, 0}, 5, 0, "0", 3, nil},
- {[]byte{128, 0, 0, 10, 128, 0}, 7, 3, "0.010", 4, nil},
- {[]byte{128, 0, 0, 0, 1, 128, 0}, 10, 2, "0.01", 5, nil},
- {[]byte{128, 0, 0, 0, 0, 10, 128, 0}, 10, 3, "0.010", 6, nil},
- {[]byte{128, 0, 0, 0, 0, 1, 128, 0}, 13, 2, "0.01", 6, nil},
- {[]byte{128, 0, 152, 150, 128, 0, 0, 0, 128, 0}, 15, 14, "0.01000000000000", 8, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 152, 150, 128, 0, 128, 0}, 20, 10, "0.0100000000", 10, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 232, 128, 0}, 30, 5, "0.01000", 15, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 152, 150, 128, 0, 0, 0, 0, 0, 128, 0}, 30, 20, "0.01000000000000000000", 14, nil},
- {[]byte{128, 0, 0, 0, 152, 150, 128, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0}, 30, 25, "0.0100000000000000000000000", 15, nil},
- {[]byte{227, 99, 128, 0}, 4, 2, "99.99", 2, nil},
- {[]byte{128, 0, 123, 128, 123}, 5, 0, "123", 3, nil},
- {[]byte{128, 123, 1, 144, 128, 0}, 7, 3, "123.400", 4, nil},
- {[]byte{128, 0, 0, 123, 40, 128, 0}, 10, 2, "123.40", 5, nil},
- {[]byte{128, 0, 0, 123, 1, 144, 128, 0}, 10, 3, "123.400", 6, nil},
- {[]byte{128, 0, 0, 0, 123, 40, 137, 59}, 13, 2, "123.40", 6, nil},
- {[]byte{137, 59, 154, 201, 255, 1, 134, 159, 128, 0}, 15, 14, "9.99999999999999", 8, nil},
- {[]byte{128, 0, 0, 0, 123, 23, 215, 132, 0, 0, 128, 0}, 20, 10, "123.4000000000", 10, nil},
- {[]byte{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 123, 0, 156, 64, 128, 0}, 30, 5, "123.40000", 15, nil},
- {[]byte{128, 0, 0, 0, 123, 23, 215, 132, 0, 0, 0, 0, 0, 0, 128, 0}, 30, 20, "123.40000000000000000000", 14, nil},
- {[]byte{128, 0, 123, 23, 215, 132, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0}, 30, 25, "123.4000000000000000000000000", 15, nil},
- {[]byte{28, 156, 127, 253}, 4, 2, "-99.99", 2, nil},
- {[]byte{127, 253, 204, 125, 205}, 5, 0, "-563", 3, nil},
- {[]byte{125, 205, 253, 187, 127, 255}, 7, 3, "-562.580", 4, nil},
- {[]byte{127, 255, 253, 205, 197, 127, 255}, 10, 2, "-562.58", 5, nil},
- {[]byte{127, 255, 253, 205, 253, 187, 127, 255}, 10, 3, "-562.580", 6, nil},
- {[]byte{127, 255, 255, 253, 205, 197, 118, 196}, 13, 2, "-562.58", 6, nil},
- {[]byte{118, 196, 101, 54, 0, 254, 121, 96, 127, 255}, 15, 14, "-9.99999999999999", 8, nil},
- {[]byte{127, 255, 255, 253, 205, 221, 109, 230, 255, 255, 127, 255}, 20, 10, "-562.5800000000", 10, nil},
- {[]byte{127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 253, 205, 255, 29, 111, 127, 255}, 30, 5, "-562.58000", 15, nil},
- {[]byte{127, 255, 255, 253, 205, 221, 109, 230, 255, 255, 255, 255, 255, 255, 127, 253}, 30, 20, "-562.58000000000000000000", 14, nil},
- {[]byte{127, 253, 205, 221, 109, 230, 255, 255, 255, 255, 255, 255, 255, 255, 255, 13, 0}, 30, 25, "-562.5800000000000000000000000", 15, nil},
- {[]byte{28, 156, 127, 241}, 4, 2, "-99.99", 2, nil},
- {[]byte{127, 241, 140, 113, 140}, 5, 0, "-3699", 3, nil},
- {[]byte{113, 140, 255, 245, 127, 255}, 7, 3, "-3699.010", 4, nil},
- {[]byte{127, 255, 241, 140, 254, 127, 255}, 10, 2, "-3699.01", 5, nil},
- {[]byte{127, 255, 241, 140, 255, 245, 127, 255}, 10, 3, "-3699.010", 6, nil},
- {[]byte{127, 255, 255, 241, 140, 254, 118, 196}, 13, 2, "-3699.01", 6, nil},
- {[]byte{118, 196, 101, 54, 0, 254, 121, 96, 127, 255}, 15, 14, "-9.99999999999999", 8, nil},
- {[]byte{127, 255, 255, 241, 140, 255, 103, 105, 127, 255, 127, 255}, 20, 10, "-3699.0100000000", 10, nil},
- {[]byte{127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 241, 140, 255, 252, 23, 127, 255}, 30, 5, "-3699.01000", 15, nil},
- {[]byte{127, 255, 255, 241, 140, 255, 103, 105, 127, 255, 255, 255, 255, 255, 127, 241}, 30, 20, "-3699.01000000000000000000", 14, nil},
- {[]byte{127, 241, 140, 255, 103, 105, 127, 255, 255, 255, 255, 255, 255, 255, 255, 13, 0}, 30, 25, "-3699.0100000000000000000000000", 15, nil},
- {[]byte{28, 156, 127, 248}, 4, 2, "-99.99", 2, nil},
- {[]byte{127, 248, 99, 120, 99}, 5, 0, "-1948", 3, nil},
- {[]byte{120, 99, 255, 115, 127, 255}, 7, 3, "-1948.140", 4, nil},
- {[]byte{127, 255, 248, 99, 241, 127, 255}, 10, 2, "-1948.14", 5, nil},
- {[]byte{127, 255, 248, 99, 255, 115, 127, 255}, 10, 3, "-1948.140", 6, nil},
- {[]byte{127, 255, 255, 248, 99, 241, 118, 196}, 13, 2, "-1948.14", 6, nil},
- {[]byte{118, 196, 101, 54, 0, 254, 121, 96, 127, 255}, 15, 14, "-9.99999999999999", 8, nil},
- {[]byte{127, 255, 255, 248, 99, 247, 167, 196, 255, 255, 127, 255}, 20, 10, "-1948.1400000000", 10, nil},
- {[]byte{127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 248, 99, 255, 201, 79, 127, 255}, 30, 5, "-1948.14000", 15, nil},
- {[]byte{127, 255, 255, 248, 99, 247, 167, 196, 255, 255, 255, 255, 255, 255, 127, 248}, 30, 20, "-1948.14000000000000000000", 14, nil},
- {[]byte{127, 248, 99, 247, 167, 196, 255, 255, 255, 255, 255, 255, 255, 255, 255, 13, 0}, 30, 25, "-1948.1400000000000000000000000", 15, nil},
- }
- for i, tc := range testcases {
- value, pos, err := decodeDecimal(tc.Data, tc.Precision, tc.Decimals, false)
- expectedFloat, _ := strconv.ParseFloat(tc.Expected, 64)
- c.Assert(value.(float64), DecodeDecimalsEquals, pos, err, expectedFloat, tc.ExpectedPos, tc.ExpectedErr, i)
-
- value, pos, err = decodeDecimal(tc.Data, tc.Precision, tc.Decimals, true)
- expectedDecimal, _ := decimal.NewFromString(tc.Expected)
- c.Assert(value.(decimal.Decimal), DecodeDecimalsEquals, pos, err, expectedDecimal, tc.ExpectedPos, tc.ExpectedErr, i)
- }
-}
-
-func (_ *testDecodeSuite) TestLastNull(c *C) {
- // Table format:
- // desc funnytable;
- // +-------+------------+------+-----+---------+-------+
- // | Field | Type | Null | Key | Default | Extra |
- // +-------+------------+------+-----+---------+-------+
- // | value | tinyint(4) | YES | | NULL | |
- // +-------+------------+------+-----+---------+-------+
-
- // insert into funnytable values (1), (2), (null);
- // insert into funnytable values (1), (null), (2);
- // all must get 3 rows
-
- tableMapEventData := []byte("\xd3\x01\x00\x00\x00\x00\x01\x00\x04test\x00\nfunnytable\x00\x01\x01\x00\x01")
-
- tableMapEvent := new(TableMapEvent)
- tableMapEvent.tableIDSize = 6
- err := tableMapEvent.Decode(tableMapEventData)
- c.Assert(err, IsNil)
-
- rows := new(RowsEvent)
- rows.tableIDSize = 6
- rows.tables = make(map[uint64]*TableMapEvent)
- rows.tables[tableMapEvent.TableID] = tableMapEvent
- rows.Version = 2
-
- tbls := [][]byte{
- []byte("\xd3\x01\x00\x00\x00\x00\x01\x00\x02\x00\x01\xff\xfe\x01\xff\xfe\x02"),
- []byte("\xd3\x01\x00\x00\x00\x00\x01\x00\x02\x00\x01\xff\xfe\x01\xfe\x02\xff"),
- }
-
- for _, tbl := range tbls {
- rows.Rows = nil
- err = rows.Decode(tbl)
- c.Assert(err, IsNil)
- c.Assert(rows.Rows, HasLen, 3)
- }
-}
-
-func (_ *testDecodeSuite) TestParseRowPanic(c *C) {
- tableMapEvent := new(TableMapEvent)
- tableMapEvent.tableIDSize = 6
- tableMapEvent.TableID = 1810
- tableMapEvent.ColumnType = []byte{3, 15, 15, 15, 9, 15, 15, 252, 3, 3, 3, 15, 3, 3, 3, 15, 3, 15, 1, 15, 3, 1, 252, 15, 15, 15}
- tableMapEvent.ColumnMeta = []uint16{0, 108, 60, 765, 0, 765, 765, 4, 0, 0, 0, 765, 0, 0, 0, 3, 0, 3, 0, 765, 0, 0, 2, 108, 108, 108}
-
- rows := new(RowsEvent)
- rows.tableIDSize = 6
- rows.tables = make(map[uint64]*TableMapEvent)
- rows.tables[tableMapEvent.TableID] = tableMapEvent
- rows.Version = 2
-
- data := []byte{18, 7, 0, 0, 0, 0, 1, 0, 2, 0, 26, 1, 1, 16, 252, 248, 142, 63, 0, 0, 13, 0, 0, 0, 13, 0, 0, 0}
-
- err := rows.Decode(data)
- c.Assert(err, IsNil)
- c.Assert(rows.Rows[0][0], Equals, int32(16270))
-}
-
-type simpleDecimalEqualsChecker struct {
- *CheckerInfo
-}
-
-var SimpleDecimalEqualsChecker Checker = &simpleDecimalEqualsChecker{
- &CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}},
-}
-
-func (checker *simpleDecimalEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {
- defer func() {
- if v := recover(); v != nil {
- result = false
- error = fmt.Sprint(v)
- }
- }()
-
- return params[0].(decimal.Decimal).Equal(params[1].(decimal.Decimal)), ""
-}
-
-func (_ *testDecodeSuite) TestParseJson(c *C) {
- // Table format:
- // mysql> desc t10;
- // +-------+---------------+------+-----+---------+-------+
- // | Field | Type | Null | Key | Default | Extra |
- // +-------+---------------+------+-----+---------+-------+
- // | c1 | json | YES | | NULL | |
- // | c2 | decimal(10,0) | YES | | NULL | |
- // +-------+---------------+------+-----+---------+-------+
-
- // CREATE TABLE `t10` (
- // `c1` json DEFAULT NULL,
- // `c2` decimal(10,0)
- // ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
- // INSERT INTO `t10` (`c2`) VALUES (1);
- // INSERT INTO `t10` (`c1`, `c2`) VALUES ('{"key1": "value1", "key2": "value2"}', 1);
- // test json deserialization
- // INSERT INTO `t10`(`c1`,`c2`) VALUES ('{"text":"Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu. In enim justo, rhoncus ut, imperdiet a, venenatis vitae, justo. Nullam dictum felis eu pede mollis pretium. Integer tincidunt. Cras dapibus. Vivamus elementum semper nisi. Aenean vulputate eleifend tellus. Aenean leo ligula, porttitor eu, consequat vitae, eleifend ac, enim. Aliquam lorem ante, dapibus in, viverra quis, feugiat a, tellus. Phasellus viverra nulla ut metus varius laoreet. Quisque rutrum. Aenean imperdiet. Etiam ultricies nisi vel augue. Curabitur ullamcorper ultricies nisi. Nam eget dui. Etiam rhoncus. Maecenas tempus, tellus eget condimentum rhoncus, sem quam semper libero, sit amet adipiscing sem neque sed ipsum. Nam quam nunc, blandit vel, luctus pulvinar, hendrerit id, lorem. Maecenas nec odio et ante tincidunt tempus. Donec vitae sapien ut libero venenatis faucibus. Nullam quis ante. Etiam sit amet orci eget eros faucibus tincidunt. Duis leo. Sed fringilla mauris sit amet nibh. Donec sodales sagittis magna. Sed consequat, leo eget bibendum sodales, augue velit cursus nunc, quis gravida magna mi a libero. Fusce vulputate eleifend sapien. Vestibulum purus quam, scelerisque ut, mollis sed, nonummy id, metus. Nullam accumsan lorem in dui. Cras ultricies mi eu turpis hendrerit fringilla. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; In ac dui quis mi consectetuer lacinia. Nam pretium turpis et arcu. Duis arcu tortor, suscipit eget, imperdiet nec, imperdiet iaculis, ipsum. Sed aliquam ultrices mauris. Integer ante arcu, accumsan a, consectetuer eget, posuere ut, mauris. Praesent adipiscing. 
Phasellus ullamcorper ipsum rutrum nunc. Nunc nonummy metus. Vestibulum volutpat pretium libero. Cras id dui. Aenean ut eros et nisl sagittis vestibulum. Nullam nulla eros, ultricies sit amet, nonummy id, imperdiet feugiat, pede. Sed lectus. Donec mollis hendrerit risus. Phasellus nec sem in justo pellentesque facilisis. Etiam imperdiet imperdiet orci. Nunc nec neque. Phasellus leo dolor, tempus non, auctor et, hendrerit quis, nisi. Curabitur ligula sapien, tincidunt non, euismod vitae, posuere imperdiet, leo. Maecenas malesuada. Praesent congue erat at massa. Sed cursus turpis vitae tortor. Donec posuere vulputate arcu. Phasellus accumsan cursus velit. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Sed aliquam, nisi quis porttitor congue, elit erat euismod orci, ac"}',101);
- tableMapEventData := []byte("m\x00\x00\x00\x00\x00\x01\x00\x04test\x00\x03t10\x00\x02\xf5\xf6\x03\x04\n\x00\x03")
-
- tableMapEvent := new(TableMapEvent)
- tableMapEvent.tableIDSize = 6
- err := tableMapEvent.Decode(tableMapEventData)
- c.Assert(err, IsNil)
-
- rows := new(RowsEvent)
- rows.tableIDSize = 6
- rows.tables = make(map[uint64]*TableMapEvent)
- rows.tables[tableMapEvent.TableID] = tableMapEvent
- rows.Version = 2
-
- tbls := [][]byte{
- []byte("m\x00\x00\x00\x00\x00\x01\x00\x02\x00\x02\xff\xfd\x80\x00\x00\x00\x01"),
- []byte("m\x00\x00\x00\x00\x00\x01\x00\x02\x00\x02\xff\xfc)\x00\x00\x00\x00\x02\x00(\x00\x12\x00\x04\x00\x16\x00\x04\x00\f\x1a\x00\f!\x00key1key2\x06value1\x06value2\x80\x00\x00\x00\x01"),
- }
-
- for _, tbl := range tbls {
- rows.Rows = nil
- err = rows.Decode(tbl)
- c.Assert(err, IsNil)
- c.Assert(rows.Rows[0][1], Equals, float64(1))
- }
-
- longTbls := [][]byte{
- []byte("m\x00\x00\x00\x00\x00\x01\x00\x02\x00\x02\xff\xfc\xd0\n\x00\x00\x00\x01\x00\xcf\n\v\x00\x04\x00\f\x0f\x00text\xbe\x15Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu. In enim justo, rhoncus ut, imperdiet a, venenatis vitae, justo. Nullam dictum felis eu pede mollis pretium. Integer tincidunt. Cras dapibus. Vivamus elementum semper nisi. Aenean vulputate eleifend tellus. Aenean leo ligula, porttitor eu, consequat vitae, eleifend ac, enim. Aliquam lorem ante, dapibus in, viverra quis, feugiat a, tellus. Phasellus viverra nulla ut metus varius laoreet. Quisque rutrum. Aenean imperdiet. Etiam ultricies nisi vel augue. Curabitur ullamcorper ultricies nisi. Nam eget dui. Etiam rhoncus. Maecenas tempus, tellus eget condimentum rhoncus, sem quam semper libero, sit amet adipiscing sem neque sed ipsum. Nam quam nunc, blandit vel, luctus pulvinar, hendrerit id, lorem. Maecenas nec odio et ante tincidunt tempus. Donec vitae sapien ut libero venenatis faucibus. Nullam quis ante. Etiam sit amet orci eget eros faucibus tincidunt. Duis leo. Sed fringilla mauris sit amet nibh. Donec sodales sagittis magna. Sed consequat, leo eget bibendum sodales, augue velit cursus nunc, quis gravida magna mi a libero. Fusce vulputate eleifend sapien. Vestibulum purus quam, scelerisque ut, mollis sed, nonummy id, metus. Nullam accumsan lorem in dui. Cras ultricies mi eu turpis hendrerit fringilla. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; In ac dui quis mi consectetuer lacinia. Nam pretium turpis et arcu. Duis arcu tortor, suscipit eget, imperdiet nec, imperdiet iaculis, ipsum. Sed aliquam ultrices mauris. 
Integer ante arcu, accumsan a, consectetuer eget, posuere ut, mauris. Praesent adipiscing. Phasellus ullamcorper ipsum rutrum nunc. Nunc nonummy metus. Vestibulum volutpat pretium libero. Cras id dui. Aenean ut eros et nisl sagittis vestibulum. Nullam nulla eros, ultricies sit amet, nonummy id, imperdiet feugiat, pede. Sed lectus. Donec mollis hendrerit risus. Phasellus nec sem in justo pellentesque facilisis. Etiam imperdiet imperdiet orci. Nunc nec neque. Phasellus leo dolor, tempus non, auctor et, hendrerit quis, nisi. Curabitur ligula sapien, tincidunt non, euismod vitae, posuere imperdiet, leo. Maecenas malesuada. Praesent congue erat at massa. Sed cursus turpis vitae tortor. Donec posuere vulputate arcu. Phasellus accumsan cursus velit. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Sed aliquam, nisi quis porttitor congue, elit erat euismod orci, ac\x80\x00\x00\x00e"),
- }
-
- for _, ltbl := range longTbls {
- rows.Rows = nil
- err = rows.Decode(ltbl)
- c.Assert(err, IsNil)
- c.Assert(rows.Rows[0][1], Equals, float64(101))
- }
-}
-func (_ *testDecodeSuite) TestParseJsonDecimal(c *C) {
- // Table format:
- // mysql> desc t10;
- // +-------+---------------+------+-----+---------+-------+
- // | Field | Type | Null | Key | Default | Extra |
- // +-------+---------------+------+-----+---------+-------+
- // | c1 | json | YES | | NULL | |
- // | c2 | decimal(10,0) | YES | | NULL | |
- // +-------+---------------+------+-----+---------+-------+
-
- // CREATE TABLE `t10` (
- // `c1` json DEFAULT NULL,
- // `c2` decimal(10,0)
- // ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
- // INSERT INTO `t10` (`c2`) VALUES (1);
- // INSERT INTO `t10` (`c1`, `c2`) VALUES ('{"key1": "value1", "key2": "value2"}', 1);
- // test json deserialization
- // INSERT INTO `t10`(`c1`,`c2`) VALUES ('{"text":"Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu. In enim justo, rhoncus ut, imperdiet a, venenatis vitae, justo. Nullam dictum felis eu pede mollis pretium. Integer tincidunt. Cras dapibus. Vivamus elementum semper nisi. Aenean vulputate eleifend tellus. Aenean leo ligula, porttitor eu, consequat vitae, eleifend ac, enim. Aliquam lorem ante, dapibus in, viverra quis, feugiat a, tellus. Phasellus viverra nulla ut metus varius laoreet. Quisque rutrum. Aenean imperdiet. Etiam ultricies nisi vel augue. Curabitur ullamcorper ultricies nisi. Nam eget dui. Etiam rhoncus. Maecenas tempus, tellus eget condimentum rhoncus, sem quam semper libero, sit amet adipiscing sem neque sed ipsum. Nam quam nunc, blandit vel, luctus pulvinar, hendrerit id, lorem. Maecenas nec odio et ante tincidunt tempus. Donec vitae sapien ut libero venenatis faucibus. Nullam quis ante. Etiam sit amet orci eget eros faucibus tincidunt. Duis leo. Sed fringilla mauris sit amet nibh. Donec sodales sagittis magna. Sed consequat, leo eget bibendum sodales, augue velit cursus nunc, quis gravida magna mi a libero. Fusce vulputate eleifend sapien. Vestibulum purus quam, scelerisque ut, mollis sed, nonummy id, metus. Nullam accumsan lorem in dui. Cras ultricies mi eu turpis hendrerit fringilla. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; In ac dui quis mi consectetuer lacinia. Nam pretium turpis et arcu. Duis arcu tortor, suscipit eget, imperdiet nec, imperdiet iaculis, ipsum. Sed aliquam ultrices mauris. Integer ante arcu, accumsan a, consectetuer eget, posuere ut, mauris. Praesent adipiscing. 
Phasellus ullamcorper ipsum rutrum nunc. Nunc nonummy metus. Vestibulum volutpat pretium libero. Cras id dui. Aenean ut eros et nisl sagittis vestibulum. Nullam nulla eros, ultricies sit amet, nonummy id, imperdiet feugiat, pede. Sed lectus. Donec mollis hendrerit risus. Phasellus nec sem in justo pellentesque facilisis. Etiam imperdiet imperdiet orci. Nunc nec neque. Phasellus leo dolor, tempus non, auctor et, hendrerit quis, nisi. Curabitur ligula sapien, tincidunt non, euismod vitae, posuere imperdiet, leo. Maecenas malesuada. Praesent congue erat at massa. Sed cursus turpis vitae tortor. Donec posuere vulputate arcu. Phasellus accumsan cursus velit. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Sed aliquam, nisi quis porttitor congue, elit erat euismod orci, ac"}',101);
- tableMapEventData := []byte("m\x00\x00\x00\x00\x00\x01\x00\x04test\x00\x03t10\x00\x02\xf5\xf6\x03\x04\n\x00\x03")
-
- tableMapEvent := new(TableMapEvent)
- tableMapEvent.tableIDSize = 6
- err := tableMapEvent.Decode(tableMapEventData)
- c.Assert(err, IsNil)
-
- rows := RowsEvent{useDecimal: true}
- rows.tableIDSize = 6
- rows.tables = make(map[uint64]*TableMapEvent)
- rows.tables[tableMapEvent.TableID] = tableMapEvent
- rows.Version = 2
-
- tbls := [][]byte{
- []byte("m\x00\x00\x00\x00\x00\x01\x00\x02\x00\x02\xff\xfd\x80\x00\x00\x00\x01"),
- []byte("m\x00\x00\x00\x00\x00\x01\x00\x02\x00\x02\xff\xfc)\x00\x00\x00\x00\x02\x00(\x00\x12\x00\x04\x00\x16\x00\x04\x00\f\x1a\x00\f!\x00key1key2\x06value1\x06value2\x80\x00\x00\x00\x01"),
- }
-
- for _, tbl := range tbls {
- rows.Rows = nil
- err = rows.Decode(tbl)
- c.Assert(err, IsNil)
- c.Assert(rows.Rows[0][1], SimpleDecimalEqualsChecker, decimal.NewFromFloat(1))
- }
-
- longTbls := [][]byte{
- []byte("m\x00\x00\x00\x00\x00\x01\x00\x02\x00\x02\xff\xfc\xd0\n\x00\x00\x00\x01\x00\xcf\n\v\x00\x04\x00\f\x0f\x00text\xbe\x15Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu. In enim justo, rhoncus ut, imperdiet a, venenatis vitae, justo. Nullam dictum felis eu pede mollis pretium. Integer tincidunt. Cras dapibus. Vivamus elementum semper nisi. Aenean vulputate eleifend tellus. Aenean leo ligula, porttitor eu, consequat vitae, eleifend ac, enim. Aliquam lorem ante, dapibus in, viverra quis, feugiat a, tellus. Phasellus viverra nulla ut metus varius laoreet. Quisque rutrum. Aenean imperdiet. Etiam ultricies nisi vel augue. Curabitur ullamcorper ultricies nisi. Nam eget dui. Etiam rhoncus. Maecenas tempus, tellus eget condimentum rhoncus, sem quam semper libero, sit amet adipiscing sem neque sed ipsum. Nam quam nunc, blandit vel, luctus pulvinar, hendrerit id, lorem. Maecenas nec odio et ante tincidunt tempus. Donec vitae sapien ut libero venenatis faucibus. Nullam quis ante. Etiam sit amet orci eget eros faucibus tincidunt. Duis leo. Sed fringilla mauris sit amet nibh. Donec sodales sagittis magna. Sed consequat, leo eget bibendum sodales, augue velit cursus nunc, quis gravida magna mi a libero. Fusce vulputate eleifend sapien. Vestibulum purus quam, scelerisque ut, mollis sed, nonummy id, metus. Nullam accumsan lorem in dui. Cras ultricies mi eu turpis hendrerit fringilla. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; In ac dui quis mi consectetuer lacinia. Nam pretium turpis et arcu. Duis arcu tortor, suscipit eget, imperdiet nec, imperdiet iaculis, ipsum. Sed aliquam ultrices mauris. 
Integer ante arcu, accumsan a, consectetuer eget, posuere ut, mauris. Praesent adipiscing. Phasellus ullamcorper ipsum rutrum nunc. Nunc nonummy metus. Vestibulum volutpat pretium libero. Cras id dui. Aenean ut eros et nisl sagittis vestibulum. Nullam nulla eros, ultricies sit amet, nonummy id, imperdiet feugiat, pede. Sed lectus. Donec mollis hendrerit risus. Phasellus nec sem in justo pellentesque facilisis. Etiam imperdiet imperdiet orci. Nunc nec neque. Phasellus leo dolor, tempus non, auctor et, hendrerit quis, nisi. Curabitur ligula sapien, tincidunt non, euismod vitae, posuere imperdiet, leo. Maecenas malesuada. Praesent congue erat at massa. Sed cursus turpis vitae tortor. Donec posuere vulputate arcu. Phasellus accumsan cursus velit. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Sed aliquam, nisi quis porttitor congue, elit erat euismod orci, ac\x80\x00\x00\x00e"),
- }
-
- for _, ltbl := range longTbls {
- rows.Rows = nil
- err = rows.Decode(ltbl)
- c.Assert(err, IsNil)
- c.Assert(rows.Rows[0][1], SimpleDecimalEqualsChecker, decimal.NewFromFloat(101))
- }
-}
-
-func (_ *testDecodeSuite) TestEnum(c *C) {
- // mysql> desc aenum;
- // +-------+-------------------------------------------+------+-----+---------+-------+
- // | Field | Type | Null | Key | Default | Extra |
- // +-------+-------------------------------------------+------+-----+---------+-------+
- // | id | int(11) | YES | | NULL | |
- // | aset | enum('0','1','2','3','4','5','6','7','8') | YES | | NULL | |
- // +-------+-------------------------------------------+------+-----+---------+-------+
- // 2 rows in set (0.00 sec)
- //
- // insert into aenum(id, aset) values(1, '0');
- tableMapEventData := []byte("\x42\x0f\x00\x00\x00\x00\x01\x00\x05\x74\x74\x65\x73\x74\x00\x05")
- tableMapEventData = append(tableMapEventData, []byte("\x61\x65\x6e\x75\x6d\x00\x02\x03\xfe\x02\xf7\x01\x03")...)
- tableMapEvent := new(TableMapEvent)
- tableMapEvent.tableIDSize = 6
- err := tableMapEvent.Decode(tableMapEventData)
- c.Assert(err, IsNil)
-
- rows := new(RowsEvent)
- rows.tableIDSize = 6
- rows.tables = make(map[uint64]*TableMapEvent)
- rows.tables[tableMapEvent.TableID] = tableMapEvent
- rows.Version = 2
-
- data := []byte("\x42\x0f\x00\x00\x00\x00\x01\x00\x02\x00\x02\xff\xfc\x01\x00\x00\x00\x01")
-
- rows.Rows = nil
- err = rows.Decode(data)
- c.Assert(err, IsNil)
- c.Assert(rows.Rows[0][1], Equals, int64(1))
-}
-
-func (_ *testDecodeSuite) TestMultiBytesEnum(c *C) {
- // CREATE TABLE numbers (
- // id int auto_increment,
- // num ENUM( '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255','256','257'
-
- // ),
- // primary key(id)
- // );
-
- //
- // insert into numbers(num) values ('0'), ('256');
- tableMapEventData := []byte("\x84\x0f\x00\x00\x00\x00\x01\x00\x05\x74\x74\x65\x73\x74\x00\x07")
- tableMapEventData = append(tableMapEventData, []byte("\x6e\x75\x6d\x62\x65\x72\x73\x00\x02\x03\xfe\x02\xf7\x02\x02")...)
- tableMapEvent := new(TableMapEvent)
- tableMapEvent.tableIDSize = 6
- err := tableMapEvent.Decode(tableMapEventData)
- c.Assert(err, IsNil)
-
- rows := new(RowsEvent)
- rows.tableIDSize = 6
- rows.tables = make(map[uint64]*TableMapEvent)
- rows.tables[tableMapEvent.TableID] = tableMapEvent
- rows.Version = 2
-
- data := []byte("\x84\x0f\x00\x00\x00\x00\x01\x00\x02\x00\x02\xff\xfc\x01\x00\x00\x00\x01\x00\xfc\x02\x00\x00\x00\x01\x01")
-
- rows.Rows = nil
- err = rows.Decode(data)
- c.Assert(err, IsNil)
- c.Assert(rows.Rows[0][1], Equals, int64(1))
- c.Assert(rows.Rows[1][1], Equals, int64(257))
-}
-
-func (_ *testDecodeSuite) TestSet(c *C) {
- // mysql> desc aset;
- // +--------+---------------------------------------------------------------------------------------+------+-----+---------+-------+
- // | Field | Type | Null | Key | Default | Extra |
- // +--------+---------------------------------------------------------------------------------------+------+-----+---------+-------+
- // | id | int(11) | YES | | NULL | |
- // | region | set('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18') | YES | | NULL | |
- // +--------+---------------------------------------------------------------------------------------+------+-----+---------+-------+
- // 2 rows in set (0.00 sec)
- //
- // insert into aset(id, region) values(1, '1,3');
-
- tableMapEventData := []byte("\xe7\x0e\x00\x00\x00\x00\x01\x00\x05\x74\x74\x65\x73\x74\x00\x04")
- tableMapEventData = append(tableMapEventData, []byte("\x61\x73\x65\x74\x00\x02\x03\xfe\x02\xf8\x03\x03")...)
- tableMapEvent := new(TableMapEvent)
- tableMapEvent.tableIDSize = 6
- err := tableMapEvent.Decode(tableMapEventData)
- c.Assert(err, IsNil)
-
- rows := new(RowsEvent)
- rows.tableIDSize = 6
- rows.tables = make(map[uint64]*TableMapEvent)
- rows.tables[tableMapEvent.TableID] = tableMapEvent
- rows.Version = 2
-
- data := []byte("\xe7\x0e\x00\x00\x00\x00\x01\x00\x02\x00\x02\xff\xfc\x01\x00\x00\x00\x05\x00\x00")
-
- rows.Rows = nil
- err = rows.Decode(data)
- c.Assert(err, IsNil)
- c.Assert(rows.Rows[0][1], Equals, int64(5))
-}
-
-func (_ *testDecodeSuite) TestJsonNull(c *C) {
- // Table:
- // desc hj_order_preview
- // +------------------+------------+------+-----+-------------------+----------------+
- // | Field | Type | Null | Key | Default | Extra |
- // +------------------+------------+------+-----+-------------------+----------------+
- // | id | int(13) | NO | PRI | | auto_increment |
- // | buyer_id | bigint(13) | NO | | | |
- // | order_sn | bigint(13) | NO | | | |
- // | order_detail | json | NO | | | |
- // | is_del | tinyint(1) | NO | | 0 | |
- // | add_time | int(13) | NO | | | |
- // | last_update_time | timestamp | NO | | CURRENT_TIMESTAMP | |
- // +------------------+------------+------+-----+-------------------+----------------+
- // insert into hj_order_preview
- // (id, buyer_id, order_sn, is_del, add_time, last_update_time)
- // values (1, 95891865464386, 13376222192996417, 0, 1479983995, 1479983995)
-
- tableMapEventData := []byte("r\x00\x00\x00\x00\x00\x01\x00\x04test\x00\x10hj_order_preview\x00\a\x03\b\b\xf5\x01\x03\x11\x02\x04\x00\x00")
-
- tableMapEvent := new(TableMapEvent)
- tableMapEvent.tableIDSize = 6
- err := tableMapEvent.Decode(tableMapEventData)
- c.Assert(err, IsNil)
-
- rows := new(RowsEvent)
- rows.tableIDSize = 6
- rows.tables = make(map[uint64]*TableMapEvent)
- rows.tables[tableMapEvent.TableID] = tableMapEvent
- rows.Version = 2
-
- data :=
- []byte("r\x00\x00\x00\x00\x00\x01\x00\x02\x00\a\xff\x80\x01\x00\x00\x00B\ue4d06W\x00\x00A\x10@l\x9a\x85/\x00\x00\x00\x00\x00\x00{\xc36X\x00\x00\x00\x00")
-
- rows.Rows = nil
- err = rows.Decode(data)
- c.Assert(err, IsNil)
- c.Assert(rows.Rows[0][3], HasLen, 0)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/replication/time_test.go b/vendor/github.com/siddontang/go-mysql/replication/time_test.go
deleted file mode 100644
index 3a06aaf..0000000
--- a/vendor/github.com/siddontang/go-mysql/replication/time_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package replication
-
-import (
- "time"
-
- . "github.com/pingcap/check"
-)
-
-type testTimeSuite struct{}
-
-var _ = Suite(&testTimeSuite{})
-
-func (s *testTimeSuite) TestTime(c *C) {
- tbls := []struct {
- year int
- month int
- day int
- hour int
- min int
- sec int
- microSec int
- frac int
- expected string
- }{
- {2000, 1, 1, 1, 1, 1, 1, 0, "2000-01-01 01:01:01"},
- {2000, 1, 1, 1, 1, 1, 1, 1, "2000-01-01 01:01:01.0"},
- {2000, 1, 1, 1, 1, 1, 1, 6, "2000-01-01 01:01:01.000001"},
- }
-
- for _, t := range tbls {
- t1 := fracTime{time.Date(t.year, time.Month(t.month), t.day, t.hour, t.min, t.sec, t.microSec*1000, time.UTC), t.frac, nil}
- c.Assert(t1.String(), Equals, t.expected)
- }
-
- zeroTbls := []struct {
- frac int
- dec int
- expected string
- }{
- {0, 1, "0000-00-00 00:00:00.0"},
- {1, 1, "0000-00-00 00:00:00.0"},
- {123, 3, "0000-00-00 00:00:00.000"},
- {123000, 3, "0000-00-00 00:00:00.123"},
- {123, 6, "0000-00-00 00:00:00.000123"},
- {123000, 6, "0000-00-00 00:00:00.123000"},
- }
-
- for _, t := range zeroTbls {
- c.Assert(formatZeroTime(t.frac, t.dec), Equals, t.expected)
- }
-}
-
-func (s *testTimeSuite) TestTimeStringLocation(c *C) {
- t := fracTime{
- time.Date(2018, time.Month(7), 30, 10, 0, 0, 0, time.FixedZone("EST", -5*3600)),
- 0,
- nil,
- }
-
- c.Assert(t.String(), Equals, "2018-07-30 10:00:00")
-
- t = fracTime{
- time.Date(2018, time.Month(7), 30, 10, 0, 0, 0, time.FixedZone("EST", -5*3600)),
- 0,
- time.UTC,
- }
- c.Assert(t.String(), Equals, "2018-07-30 15:00:00")
-}
-
-var _ = Suite(&testTimeSuite{})
diff --git a/vendor/github.com/siddontang/go-mysql/schema/schema.go b/vendor/github.com/siddontang/go-mysql/schema/schema.go
deleted file mode 100644
index c98b9ac..0000000
--- a/vendor/github.com/siddontang/go-mysql/schema/schema.go
+++ /dev/null
@@ -1,398 +0,0 @@
-// Copyright 2012, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package schema
-
-import (
- "database/sql"
- "fmt"
- "strings"
-
- "github.com/juju/errors"
- "github.com/siddontang/go-mysql/mysql"
-)
-
-var ErrTableNotExist = errors.New("table is not exist")
-var ErrMissingTableMeta = errors.New("missing table meta")
-var HAHealthCheckSchema = "mysql.ha_health_check"
-
-// Different column type
-const (
- TYPE_NUMBER = iota + 1 // tinyint, smallint, mediumint, int, bigint, year
- TYPE_FLOAT // float, double
- TYPE_ENUM // enum
- TYPE_SET // set
- TYPE_STRING // other
- TYPE_DATETIME // datetime
- TYPE_TIMESTAMP // timestamp
- TYPE_DATE // date
- TYPE_TIME // time
- TYPE_BIT // bit
- TYPE_JSON // json
- TYPE_DECIMAL // decimal
-)
-
-type TableColumn struct {
- Name string
- Type int
- Collation string
- RawType string
- IsAuto bool
- IsUnsigned bool
- EnumValues []string
- SetValues []string
-}
-
-type Index struct {
- Name string
- Columns []string
- Cardinality []uint64
-}
-
-type Table struct {
- Schema string
- Name string
-
- Columns []TableColumn
- Indexes []*Index
- PKColumns []int
-
- UnsignedColumns []int
-}
-
-func (ta *Table) String() string {
- return fmt.Sprintf("%s.%s", ta.Schema, ta.Name)
-}
-
-func (ta *Table) AddColumn(name string, columnType string, collation string, extra string) {
- index := len(ta.Columns)
- ta.Columns = append(ta.Columns, TableColumn{Name: name, Collation: collation})
- ta.Columns[index].RawType = columnType
-
- if strings.HasPrefix(columnType, "float") ||
- strings.HasPrefix(columnType, "double") {
- ta.Columns[index].Type = TYPE_FLOAT
- } else if strings.HasPrefix(columnType, "decimal") {
- ta.Columns[index].Type = TYPE_DECIMAL
- } else if strings.HasPrefix(columnType, "enum") {
- ta.Columns[index].Type = TYPE_ENUM
- ta.Columns[index].EnumValues = strings.Split(strings.Replace(
- strings.TrimSuffix(
- strings.TrimPrefix(
- columnType, "enum("),
- ")"),
- "'", "", -1),
- ",")
- } else if strings.HasPrefix(columnType, "set") {
- ta.Columns[index].Type = TYPE_SET
- ta.Columns[index].SetValues = strings.Split(strings.Replace(
- strings.TrimSuffix(
- strings.TrimPrefix(
- columnType, "set("),
- ")"),
- "'", "", -1),
- ",")
- } else if strings.HasPrefix(columnType, "datetime") {
- ta.Columns[index].Type = TYPE_DATETIME
- } else if strings.HasPrefix(columnType, "timestamp") {
- ta.Columns[index].Type = TYPE_TIMESTAMP
- } else if strings.HasPrefix(columnType, "time") {
- ta.Columns[index].Type = TYPE_TIME
- } else if "date" == columnType {
- ta.Columns[index].Type = TYPE_DATE
- } else if strings.HasPrefix(columnType, "bit") {
- ta.Columns[index].Type = TYPE_BIT
- } else if strings.HasPrefix(columnType, "json") {
- ta.Columns[index].Type = TYPE_JSON
- } else if strings.Contains(columnType, "int") || strings.HasPrefix(columnType, "year") {
- ta.Columns[index].Type = TYPE_NUMBER
- } else {
- ta.Columns[index].Type = TYPE_STRING
- }
-
- if strings.Contains(columnType, "unsigned") || strings.Contains(columnType, "zerofill") {
- ta.Columns[index].IsUnsigned = true
- ta.UnsignedColumns = append(ta.UnsignedColumns, index)
- }
-
- if extra == "auto_increment" {
- ta.Columns[index].IsAuto = true
- }
-}
-
-func (ta *Table) FindColumn(name string) int {
- for i, col := range ta.Columns {
- if col.Name == name {
- return i
- }
- }
- return -1
-}
-
-func (ta *Table) GetPKColumn(index int) *TableColumn {
- return &ta.Columns[ta.PKColumns[index]]
-}
-
-func (ta *Table) AddIndex(name string) (index *Index) {
- index = NewIndex(name)
- ta.Indexes = append(ta.Indexes, index)
- return index
-}
-
-func NewIndex(name string) *Index {
- return &Index{name, make([]string, 0, 8), make([]uint64, 0, 8)}
-}
-
-func (idx *Index) AddColumn(name string, cardinality uint64) {
- idx.Columns = append(idx.Columns, name)
- if cardinality == 0 {
- cardinality = uint64(len(idx.Cardinality) + 1)
- }
- idx.Cardinality = append(idx.Cardinality, cardinality)
-}
-
-func (idx *Index) FindColumn(name string) int {
- for i, colName := range idx.Columns {
- if name == colName {
- return i
- }
- }
- return -1
-}
-
-func IsTableExist(conn mysql.Executer, schema string, name string) (bool, error) {
- query := fmt.Sprintf("SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '%s' and TABLE_NAME = '%s' LIMIT 1", schema, name)
- r, err := conn.Execute(query)
- if err != nil {
- return false, errors.Trace(err)
- }
-
- return r.RowNumber() == 1, nil
-}
-
-func NewTableFromSqlDB(conn *sql.DB, schema string, name string) (*Table, error) {
- ta := &Table{
- Schema: schema,
- Name: name,
- Columns: make([]TableColumn, 0, 16),
- Indexes: make([]*Index, 0, 8),
- }
-
- if err := ta.fetchColumnsViaSqlDB(conn); err != nil {
- return nil, errors.Trace(err)
- }
-
- if err := ta.fetchIndexesViaSqlDB(conn); err != nil {
- return nil, errors.Trace(err)
- }
-
- return ta, nil
-}
-
-func NewTable(conn mysql.Executer, schema string, name string) (*Table, error) {
- ta := &Table{
- Schema: schema,
- Name: name,
- Columns: make([]TableColumn, 0, 16),
- Indexes: make([]*Index, 0, 8),
- }
-
- if err := ta.fetchColumns(conn); err != nil {
- return nil, errors.Trace(err)
- }
-
- if err := ta.fetchIndexes(conn); err != nil {
- return nil, errors.Trace(err)
- }
-
- return ta, nil
-}
-
-func (ta *Table) fetchColumns(conn mysql.Executer) error {
- r, err := conn.Execute(fmt.Sprintf("show full columns from `%s`.`%s`", ta.Schema, ta.Name))
- if err != nil {
- return errors.Trace(err)
- }
-
- for i := 0; i < r.RowNumber(); i++ {
- name, _ := r.GetString(i, 0)
- colType, _ := r.GetString(i, 1)
- collation, _ := r.GetString(i, 2)
- extra, _ := r.GetString(i, 6)
-
- ta.AddColumn(name, colType, collation, extra)
- }
-
- return nil
-}
-
-func (ta *Table) fetchColumnsViaSqlDB(conn *sql.DB) error {
- r, err := conn.Query(fmt.Sprintf("show full columns from `%s`.`%s`", ta.Schema, ta.Name))
- if err != nil {
- return errors.Trace(err)
- }
-
- defer r.Close()
-
- var unusedVal interface{}
- unused := &unusedVal
-
- for r.Next() {
- var name, colType, extra string
- var collation sql.NullString
- err := r.Scan(&name, &colType, &collation, &unused, &unused, &unused, &extra, &unused, &unused)
- if err != nil {
- return errors.Trace(err)
- }
- ta.AddColumn(name, colType, collation.String, extra)
- }
-
- return r.Err()
-}
-
-func (ta *Table) fetchIndexes(conn mysql.Executer) error {
- r, err := conn.Execute(fmt.Sprintf("show index from `%s`.`%s`", ta.Schema, ta.Name))
- if err != nil {
- return errors.Trace(err)
- }
- var currentIndex *Index
- currentName := ""
-
- for i := 0; i < r.RowNumber(); i++ {
- indexName, _ := r.GetString(i, 2)
- if currentName != indexName {
- currentIndex = ta.AddIndex(indexName)
- currentName = indexName
- }
- cardinality, _ := r.GetUint(i, 6)
- colName, _ := r.GetString(i, 4)
- currentIndex.AddColumn(colName, cardinality)
- }
-
- return ta.fetchPrimaryKeyColumns()
-
-}
-
-func (ta *Table) fetchIndexesViaSqlDB(conn *sql.DB) error {
- r, err := conn.Query(fmt.Sprintf("show index from `%s`.`%s`", ta.Schema, ta.Name))
- if err != nil {
- return errors.Trace(err)
- }
-
- defer r.Close()
-
- var currentIndex *Index
- currentName := ""
-
- var unusedVal interface{}
- unused := &unusedVal
-
- for r.Next() {
- var indexName, colName string
- var cardinality interface{}
-
- err := r.Scan(
- &unused,
- &unused,
- &indexName,
- &unused,
- &colName,
- &unused,
- &cardinality,
- &unused,
- &unused,
- &unused,
- &unused,
- &unused,
- &unused,
- )
- if err != nil {
- return errors.Trace(err)
- }
-
- if currentName != indexName {
- currentIndex = ta.AddIndex(indexName)
- currentName = indexName
- }
-
- c := toUint64(cardinality)
- currentIndex.AddColumn(colName, c)
- }
-
- return ta.fetchPrimaryKeyColumns()
-}
-
-func toUint64(i interface{}) uint64 {
- switch i := i.(type) {
- case int:
- return uint64(i)
- case int8:
- return uint64(i)
- case int16:
- return uint64(i)
- case int32:
- return uint64(i)
- case int64:
- return uint64(i)
- case uint:
- return uint64(i)
- case uint8:
- return uint64(i)
- case uint16:
- return uint64(i)
- case uint32:
- return uint64(i)
- case uint64:
- return uint64(i)
- }
-
- return 0
-}
-
-func (ta *Table) fetchPrimaryKeyColumns() error {
- if len(ta.Indexes) == 0 {
- return nil
- }
-
- pkIndex := ta.Indexes[0]
- if pkIndex.Name != "PRIMARY" {
- return nil
- }
-
- ta.PKColumns = make([]int, len(pkIndex.Columns))
- for i, pkCol := range pkIndex.Columns {
- ta.PKColumns[i] = ta.FindColumn(pkCol)
- }
-
- return nil
-}
-
-// Get primary keys in one row for a table, a table may use multi fields as the PK
-func (ta *Table) GetPKValues(row []interface{}) ([]interface{}, error) {
- indexes := ta.PKColumns
- if len(indexes) == 0 {
- return nil, errors.Errorf("table %s has no PK", ta)
- } else if len(ta.Columns) != len(row) {
- return nil, errors.Errorf("table %s has %d columns, but row data %v len is %d", ta,
- len(ta.Columns), row, len(row))
- }
-
- values := make([]interface{}, 0, len(indexes))
-
- for _, index := range indexes {
- values = append(values, row[index])
- }
-
- return values, nil
-}
-
-// Get term column's value
-func (ta *Table) GetColumnValue(column string, row []interface{}) (interface{}, error) {
- index := ta.FindColumn(column)
- if index == -1 {
- return nil, errors.Errorf("table %s has no column name %s", ta, column)
- }
-
- return row[index], nil
-}
diff --git a/vendor/github.com/siddontang/go-mysql/schema/schema_test.go b/vendor/github.com/siddontang/go-mysql/schema/schema_test.go
deleted file mode 100644
index c5bafe1..0000000
--- a/vendor/github.com/siddontang/go-mysql/schema/schema_test.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package schema
-
-import (
- "database/sql"
- "flag"
- "fmt"
- "testing"
-
- . "github.com/pingcap/check"
- "github.com/siddontang/go-mysql/client"
- _ "github.com/siddontang/go-mysql/driver"
-)
-
-// use docker mysql for test
-var host = flag.String("host", "127.0.0.1", "MySQL host")
-
-func Test(t *testing.T) {
- TestingT(t)
-}
-
-type schemaTestSuite struct {
- conn *client.Conn
- sqlDB *sql.DB
-}
-
-var _ = Suite(&schemaTestSuite{})
-
-func (s *schemaTestSuite) SetUpSuite(c *C) {
- var err error
- s.conn, err = client.Connect(fmt.Sprintf("%s:%d", *host, 3306), "root", "", "test")
- c.Assert(err, IsNil)
-
- s.sqlDB, err = sql.Open("mysql", fmt.Sprintf("root:@%s:3306", *host))
- c.Assert(err, IsNil)
-}
-
-func (s *schemaTestSuite) TearDownSuite(c *C) {
- if s.conn != nil {
- s.conn.Close()
- }
-
- if s.sqlDB != nil {
- s.sqlDB.Close()
- }
-}
-
-func (s *schemaTestSuite) TestSchema(c *C) {
- _, err := s.conn.Execute(`DROP TABLE IF EXISTS schema_test`)
- c.Assert(err, IsNil)
-
- str := `
- CREATE TABLE IF NOT EXISTS schema_test (
- id INT,
- id1 INT,
- id2 INT,
- name VARCHAR(256),
- status ENUM('appointing','serving','abnormal','stop','noaftermarket','finish','financial_audit'),
- se SET('a', 'b', 'c'),
- f FLOAT,
- d DECIMAL(2, 1),
- uint INT UNSIGNED,
- zfint INT ZEROFILL,
- name_ucs VARCHAR(256) CHARACTER SET ucs2,
- name_utf8 VARCHAR(256) CHARACTER SET utf8,
- PRIMARY KEY(id2, id),
- UNIQUE (id1),
- INDEX name_idx (name)
- ) ENGINE = INNODB;
- `
-
- _, err = s.conn.Execute(str)
- c.Assert(err, IsNil)
-
- ta, err := NewTable(s.conn, "test", "schema_test")
- c.Assert(err, IsNil)
-
- c.Assert(ta.Columns, HasLen, 12)
- c.Assert(ta.Indexes, HasLen, 3)
- c.Assert(ta.PKColumns, DeepEquals, []int{2, 0})
- c.Assert(ta.Indexes[0].Columns, HasLen, 2)
- c.Assert(ta.Indexes[0].Name, Equals, "PRIMARY")
- c.Assert(ta.Indexes[2].Name, Equals, "name_idx")
- c.Assert(ta.Columns[4].EnumValues, DeepEquals, []string{"appointing", "serving", "abnormal", "stop", "noaftermarket", "finish", "financial_audit"})
- c.Assert(ta.Columns[5].SetValues, DeepEquals, []string{"a", "b", "c"})
- c.Assert(ta.Columns[7].Type, Equals, TYPE_DECIMAL)
- c.Assert(ta.Columns[0].IsUnsigned, IsFalse)
- c.Assert(ta.Columns[8].IsUnsigned, IsTrue)
- c.Assert(ta.Columns[9].IsUnsigned, IsTrue)
- c.Assert(ta.Columns[10].Collation, Matches, "^ucs2.*")
- c.Assert(ta.Columns[11].Collation, Matches, "^utf8.*")
-
- taSqlDb, err := NewTableFromSqlDB(s.sqlDB, "test", "schema_test")
- c.Assert(err, IsNil)
-
- c.Assert(taSqlDb, DeepEquals, ta)
-}
-
-func (s *schemaTestSuite) TestQuoteSchema(c *C) {
- str := "CREATE TABLE IF NOT EXISTS `a-b_test` (`a.b` INT) ENGINE = INNODB"
-
- _, err := s.conn.Execute(str)
- c.Assert(err, IsNil)
-
- ta, err := NewTable(s.conn, "test", "a-b_test")
- c.Assert(err, IsNil)
-
- c.Assert(ta.Columns[0].Name, Equals, "a.b")
-}
diff --git a/vendor/github.com/siddontang/go-mysql/server/auth.go b/vendor/github.com/siddontang/go-mysql/server/auth.go
deleted file mode 100644
index 0eb54a6..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/auth.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package server
-
-import (
- "bytes"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/sha256"
- "crypto/tls"
- "fmt"
-
- "github.com/juju/errors"
- . "github.com/siddontang/go-mysql/mysql"
-)
-
-var ErrAccessDenied = errors.New("access denied")
-
-func (c *Conn) compareAuthData(authPluginName string, clientAuthData []byte) error {
- switch authPluginName {
- case AUTH_NATIVE_PASSWORD:
- if err := c.acquirePassword(); err != nil {
- return err
- }
- return c.compareNativePasswordAuthData(clientAuthData, c.password)
-
- case AUTH_CACHING_SHA2_PASSWORD:
- if err := c.compareCacheSha2PasswordAuthData(clientAuthData); err != nil {
- return err
- }
- if c.cachingSha2FullAuth {
- return c.handleAuthSwitchResponse()
- }
- return nil
-
- case AUTH_SHA256_PASSWORD:
- if err := c.acquirePassword(); err != nil {
- return err
- }
- cont, err := c.handlePublicKeyRetrieval(clientAuthData)
- if err != nil {
- return err
- }
- if !cont {
- return nil
- }
- return c.compareSha256PasswordAuthData(clientAuthData, c.password)
-
- default:
- return errors.Errorf("unknown authentication plugin name '%s'", authPluginName)
- }
-}
-
-func (c *Conn) acquirePassword() error {
- password, found, err := c.credentialProvider.GetCredential(c.user)
- if err != nil {
- return err
- }
- if !found {
- return NewDefaultError(ER_NO_SUCH_USER, c.user, c.RemoteAddr().String())
- }
- c.password = password
- return nil
-}
-
-func scrambleValidation(cached, nonce, scramble []byte) bool {
- // SHA256(SHA256(SHA256(STORED_PASSWORD)), NONCE)
- crypt := sha256.New()
- crypt.Write(cached)
- crypt.Write(nonce)
- message2 := crypt.Sum(nil)
- // SHA256(PASSWORD)
- if len(message2) != len(scramble) {
- return false
- }
- for i := range message2 {
- message2[i] ^= scramble[i]
- }
- // SHA256(SHA256(PASSWORD)
- crypt.Reset()
- crypt.Write(message2)
- m := crypt.Sum(nil)
- return bytes.Equal(m, cached)
-}
-
-func (c *Conn) compareNativePasswordAuthData(clientAuthData []byte, password string) error {
- if bytes.Equal(CalcPassword(c.salt, []byte(c.password)), clientAuthData) {
- return nil
- }
- return ErrAccessDenied
-}
-
-func (c *Conn) compareSha256PasswordAuthData(clientAuthData []byte, password string) error {
- // Empty passwords are not hashed, but sent as empty string
- if len(clientAuthData) == 0 {
- if password == "" {
- return nil
- }
- return ErrAccessDenied
- }
- if tlsConn, ok := c.Conn.Conn.(*tls.Conn); ok {
- if !tlsConn.ConnectionState().HandshakeComplete {
- return errors.New("incomplete TSL handshake")
- }
- // connection is SSL/TLS, client should send plain password
- // deal with the trailing \NUL added for plain text password received
- if l := len(clientAuthData); l != 0 && clientAuthData[l-1] == 0x00 {
- clientAuthData = clientAuthData[:l-1]
- }
- if bytes.Equal(clientAuthData, []byte(password)) {
- return nil
- }
- return ErrAccessDenied
- } else {
- // client should send encrypted password
- // decrypt
- dbytes, err := rsa.DecryptOAEP(sha1.New(), rand.Reader, (c.serverConf.tlsConfig.Certificates[0].PrivateKey).(*rsa.PrivateKey), clientAuthData, nil)
- if err != nil {
- return err
- }
- plain := make([]byte, len(password)+1)
- copy(plain, password)
- for i := range plain {
- j := i % len(c.salt)
- plain[i] ^= c.salt[j]
- }
- if bytes.Equal(plain, dbytes) {
- return nil
- }
- return ErrAccessDenied
- }
-}
-
-func (c *Conn) compareCacheSha2PasswordAuthData(clientAuthData []byte) error {
- // Empty passwords are not hashed, but sent as empty string
- if len(clientAuthData) == 0 {
- if err := c.acquirePassword(); err != nil {
- return err
- }
- if c.password == "" {
- return nil
- }
- return ErrAccessDenied
- }
- // the caching of 'caching_sha2_password' in MySQL, see: https://dev.mysql.com/worklog/task/?id=9591
- if _, ok := c.credentialProvider.(*InMemoryProvider); ok {
- // since we have already kept the password in memory and calculate the scramble is not that high of cost, we eliminate
- // the caching part. So our server will never ask the client to do a full authentication via RSA key exchange and it appears
- // like the auth will always hit the cache.
- if err := c.acquirePassword(); err != nil {
- return err
- }
- if bytes.Equal(CalcCachingSha2Password(c.salt, c.password), clientAuthData) {
- // 'fast' auth: write "More data" packet (first byte == 0x01) with the second byte = 0x03
- return c.writeAuthMoreDataFastAuth()
- }
- return ErrAccessDenied
- }
- // other type of credential provider, we use the cache
- cached, ok := c.serverConf.cacheShaPassword.Load(fmt.Sprintf("%s@%s", c.user, c.Conn.LocalAddr()))
- if ok {
- // Scramble validation
- if scrambleValidation(cached.([]byte), c.salt, clientAuthData) {
- // 'fast' auth: write "More data" packet (first byte == 0x01) with the second byte = 0x03
- return c.writeAuthMoreDataFastAuth()
- }
- return ErrAccessDenied
- }
- // cache miss, do full auth
- if err := c.writeAuthMoreDataFullAuth(); err != nil {
- return err
- }
- c.cachingSha2FullAuth = true
- return nil
-}
diff --git a/vendor/github.com/siddontang/go-mysql/server/auth_switch_response.go b/vendor/github.com/siddontang/go-mysql/server/auth_switch_response.go
deleted file mode 100644
index 038acff..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/auth_switch_response.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package server
-
-import (
- "bytes"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/sha256"
- "crypto/tls"
- "fmt"
-
- "github.com/juju/errors"
- . "github.com/siddontang/go-mysql/mysql"
-)
-
-func (c *Conn) handleAuthSwitchResponse() error {
- authData, err := c.readAuthSwitchRequestResponse()
- if err != nil {
- return err
- }
-
- switch c.authPluginName {
- case AUTH_NATIVE_PASSWORD:
- if err := c.acquirePassword(); err != nil {
- return err
- }
- if !bytes.Equal(CalcPassword(c.salt, []byte(c.password)), authData) {
- return ErrAccessDenied
- }
- return nil
-
- case AUTH_CACHING_SHA2_PASSWORD:
- if !c.cachingSha2FullAuth {
- // Switched auth method but no MoreData packet send yet
- if err := c.compareCacheSha2PasswordAuthData(authData); err != nil {
- return err
- } else {
- if c.cachingSha2FullAuth {
- return c.handleAuthSwitchResponse()
- }
- return nil
- }
- }
- // AuthMoreData packet already sent, do full auth
- if err := c.handleCachingSha2PasswordFullAuth(authData); err != nil {
- return err
- }
- c.writeCachingSha2Cache()
- return nil
-
- case AUTH_SHA256_PASSWORD:
- cont, err := c.handlePublicKeyRetrieval(authData)
- if err != nil {
- return err
- }
- if !cont {
- return nil
- }
- if err := c.acquirePassword(); err != nil {
- return err
- }
- return c.compareSha256PasswordAuthData(authData, c.password)
-
- default:
- return errors.Errorf("unknown authentication plugin name '%s'", c.authPluginName)
- }
-}
-
-func (c *Conn) handleCachingSha2PasswordFullAuth(authData []byte) error {
- if err := c.acquirePassword(); err != nil {
- return err
- }
- if tlsConn, ok := c.Conn.Conn.(*tls.Conn); ok {
- if !tlsConn.ConnectionState().HandshakeComplete {
- return errors.New("incomplete TSL handshake")
- }
- // connection is SSL/TLS, client should send plain password
- // deal with the trailing \NUL added for plain text password received
- if l := len(authData); l != 0 && authData[l-1] == 0x00 {
- authData = authData[:l-1]
- }
- if bytes.Equal(authData, []byte(c.password)) {
- return nil
- }
- return ErrAccessDenied
- } else {
- // client either request for the public key or send the encrypted password
- if len(authData) == 1 && authData[0] == 0x02 {
- // send the public key
- if err := c.writeAuthMoreDataPubkey(); err != nil {
- return err
- }
- // read the encrypted password
- var err error
- if authData, err = c.readAuthSwitchRequestResponse(); err != nil {
- return err
- }
- }
- // the encrypted password
- // decrypt
- dbytes, err := rsa.DecryptOAEP(sha1.New(), rand.Reader, (c.serverConf.tlsConfig.Certificates[0].PrivateKey).(*rsa.PrivateKey), authData, nil)
- if err != nil {
- return err
- }
- plain := make([]byte, len(c.password)+1)
- copy(plain, c.password)
- for i := range plain {
- j := i % len(c.salt)
- plain[i] ^= c.salt[j]
- }
- if bytes.Equal(plain, dbytes) {
- return nil
- }
- return ErrAccessDenied
- }
-}
-
-func (c *Conn) writeCachingSha2Cache() {
- // write cache
- if c.password == "" {
- return
- }
- // SHA256(PASSWORD)
- crypt := sha256.New()
- crypt.Write([]byte(c.password))
- m1 := crypt.Sum(nil)
- // SHA256(SHA256(PASSWORD))
- crypt.Reset()
- crypt.Write(m1)
- m2 := crypt.Sum(nil)
- // caching_sha2_password will maintain an in-memory hash of `user`@`host` => SHA256(SHA256(PASSWORD))
- c.serverConf.cacheShaPassword.Store(fmt.Sprintf("%s@%s", c.user, c.Conn.LocalAddr()), m2)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/server/caching_sha2_cache_test.go b/vendor/github.com/siddontang/go-mysql/server/caching_sha2_cache_test.go
deleted file mode 100644
index a8139eb..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/caching_sha2_cache_test.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package server
-
-import (
- "database/sql"
- "fmt"
- "net"
- "strings"
- "sync"
- "testing"
- "time"
-
- _ "github.com/go-sql-driver/mysql"
- "github.com/juju/errors"
- . "github.com/pingcap/check"
- "github.com/siddontang/go-log/log"
- "github.com/siddontang/go-mysql/mysql"
- "github.com/siddontang/go-mysql/test_util/test_keys"
-)
-
-var delay = 50
-
-// test caching for 'caching_sha2_password'
-// NOTE the idea here is to plugin a throttled credential provider so that the first connection (cache miss) will take longer time
-// than the second connection (cache hit). Remember to set the password for MySQL user otherwise it won't cache empty password.
-func TestCachingSha2Cache(t *testing.T) {
- log.SetLevel(log.LevelDebug)
-
- remoteProvider := &RemoteThrottleProvider{NewInMemoryProvider(), delay + 50}
- remoteProvider.AddUser(*testUser, *testPassword)
- cacheServer := NewServer("8.0.12", mysql.DEFAULT_COLLATION_ID, mysql.AUTH_CACHING_SHA2_PASSWORD, test_keys.PubPem, tlsConf)
-
- // no TLS
- Suite(&cacheTestSuite{
- server: cacheServer,
- credProvider: remoteProvider,
- tlsPara: "false",
- })
-
- TestingT(t)
-}
-
-func TestCachingSha2CacheTLS(t *testing.T) {
- log.SetLevel(log.LevelDebug)
-
- remoteProvider := &RemoteThrottleProvider{NewInMemoryProvider(), delay + 50}
- remoteProvider.AddUser(*testUser, *testPassword)
- cacheServer := NewServer("8.0.12", mysql.DEFAULT_COLLATION_ID, mysql.AUTH_CACHING_SHA2_PASSWORD, test_keys.PubPem, tlsConf)
-
- // TLS
- Suite(&cacheTestSuite{
- server: cacheServer,
- credProvider: remoteProvider,
- tlsPara: "skip-verify",
- })
-
- TestingT(t)
-}
-
-type RemoteThrottleProvider struct {
- *InMemoryProvider
- delay int // in milliseconds
-}
-
-func (m *RemoteThrottleProvider) GetCredential(username string) (password string, found bool, err error) {
- time.Sleep(time.Millisecond * time.Duration(m.delay))
- return m.InMemoryProvider.GetCredential(username)
-}
-
-type cacheTestSuite struct {
- server *Server
- credProvider CredentialProvider
- tlsPara string
-
- db *sql.DB
-
- l net.Listener
-}
-
-func (s *cacheTestSuite) SetUpSuite(c *C) {
- var err error
-
- s.l, err = net.Listen("tcp", *testAddr)
- c.Assert(err, IsNil)
-
- go s.onAccept(c)
-
- time.Sleep(30 * time.Millisecond)
-}
-
-func (s *cacheTestSuite) TearDownSuite(c *C) {
- if s.l != nil {
- s.l.Close()
- }
-}
-
-func (s *cacheTestSuite) onAccept(c *C) {
- for {
- conn, err := s.l.Accept()
- if err != nil {
- return
- }
-
- go s.onConn(conn, c)
- }
-}
-
-func (s *cacheTestSuite) onConn(conn net.Conn, c *C) {
- //co, err := NewConn(conn, *testUser, *testPassword, &testHandler{s})
- co, err := NewCustomizedConn(conn, s.server, s.credProvider, &testCacheHandler{s})
- c.Assert(err, IsNil)
- for {
- err = co.HandleCommand()
- if err != nil {
- return
- }
- }
-}
-
-func (s *cacheTestSuite) runSelect(c *C) {
- var a int64
- var b string
-
- err := s.db.QueryRow("SELECT a, b FROM tbl WHERE id=1").Scan(&a, &b)
- c.Assert(err, IsNil)
- c.Assert(a, Equals, int64(1))
- c.Assert(b, Equals, "hello world")
-}
-
-func (s *cacheTestSuite) TestCache(c *C) {
- // first connection
- t1 := time.Now()
- var err error
- s.db, err = sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s)/%s?tls=%s", *testUser, *testPassword, *testAddr, *testDB, s.tlsPara))
- c.Assert(err, IsNil)
- s.db.SetMaxIdleConns(4)
- s.runSelect(c)
- t2 := time.Now()
-
- d1 := int(t2.Sub(t1).Nanoseconds() / 1e6)
- //log.Debugf("first connection took %d milliseconds", d1)
-
- c.Assert(d1, GreaterEqual, delay)
-
- if s.db != nil {
- s.db.Close()
- }
-
- // second connection
- t3 := time.Now()
- s.db, err = sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s)/%s?tls=%s", *testUser, *testPassword, *testAddr, *testDB, s.tlsPara))
- c.Assert(err, IsNil)
- s.db.SetMaxIdleConns(4)
- s.runSelect(c)
- t4 := time.Now()
-
- d2 := int(t4.Sub(t3).Nanoseconds() / 1e6)
- //log.Debugf("second connection took %d milliseconds", d2)
-
- c.Assert(d2, Less, delay)
- if s.db != nil {
- s.db.Close()
- }
-
- s.server.cacheShaPassword = &sync.Map{}
-}
-
-type testCacheHandler struct {
- s *cacheTestSuite
-}
-
-func (h *testCacheHandler) UseDB(dbName string) error {
- return nil
-}
-
-func (h *testCacheHandler) handleQuery(query string, binary bool) (*mysql.Result, error) {
- ss := strings.Split(query, " ")
- switch strings.ToLower(ss[0]) {
- case "select":
- var r *mysql.Resultset
- var err error
- //for handle go mysql driver select @@max_allowed_packet
- if strings.Contains(strings.ToLower(query), "max_allowed_packet") {
- r, err = mysql.BuildSimpleResultset([]string{"@@max_allowed_packet"}, [][]interface{}{
- {mysql.MaxPayloadLen},
- }, binary)
- } else {
- r, err = mysql.BuildSimpleResultset([]string{"a", "b"}, [][]interface{}{
- {1, "hello world"},
- }, binary)
- }
-
- if err != nil {
- return nil, errors.Trace(err)
- } else {
- return &mysql.Result{0, 0, 0, r}, nil
- }
- case "insert":
- return &mysql.Result{0, 1, 0, nil}, nil
- case "delete":
- return &mysql.Result{0, 0, 1, nil}, nil
- case "update":
- return &mysql.Result{0, 0, 1, nil}, nil
- case "replace":
- return &mysql.Result{0, 0, 1, nil}, nil
- default:
- return nil, fmt.Errorf("invalid query %s", query)
- }
-
- return nil, nil
-}
-
-func (h *testCacheHandler) HandleQuery(query string) (*mysql.Result, error) {
- return h.handleQuery(query, false)
-}
-
-func (h *testCacheHandler) HandleFieldList(table string, fieldWildcard string) ([]*mysql.Field, error) {
- return nil, nil
-}
-func (h *testCacheHandler) HandleStmtPrepare(sql string) (params int, columns int, ctx interface{}, err error) {
- return 0, 0, nil, nil
-}
-
-func (h *testCacheHandler) HandleStmtClose(context interface{}) error {
- return nil
-}
-
-func (h *testCacheHandler) HandleStmtExecute(ctx interface{}, query string, args []interface{}) (*mysql.Result, error) {
- return h.handleQuery(query, true)
-}
-
-func (h *testCacheHandler) HandleOtherCommand(cmd byte, data []byte) error {
- return mysql.NewError(mysql.ER_UNKNOWN_ERROR, fmt.Sprintf("command %d is not supported now", cmd))
-}
diff --git a/vendor/github.com/siddontang/go-mysql/server/command.go b/vendor/github.com/siddontang/go-mysql/server/command.go
deleted file mode 100644
index 6c8d13a..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/command.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package server
-
-import (
- "bytes"
- "fmt"
-
- . "github.com/siddontang/go-mysql/mysql"
- "github.com/siddontang/go/hack"
-)
-
-type Handler interface {
- //handle COM_INIT_DB command, you can check whether the dbName is valid, or other.
- UseDB(dbName string) error
- //handle COM_QUERY command, like SELECT, INSERT, UPDATE, etc...
- //If Result has a Resultset (SELECT, SHOW, etc...), we will send this as the response, otherwise, we will send Result
- HandleQuery(query string) (*Result, error)
- //handle COM_FILED_LIST command
- HandleFieldList(table string, fieldWildcard string) ([]*Field, error)
- //handle COM_STMT_PREPARE, params is the param number for this statement, columns is the column number
- //context will be used later for statement execute
- HandleStmtPrepare(query string) (params int, columns int, context interface{}, err error)
- //handle COM_STMT_EXECUTE, context is the previous one set in prepare
- //query is the statement prepare query, and args is the params for this statement
- HandleStmtExecute(context interface{}, query string, args []interface{}) (*Result, error)
- //handle COM_STMT_CLOSE, context is the previous one set in prepare
- //this handler has no response
- HandleStmtClose(context interface{}) error
- //handle any other command that is not currently handled by the library,
- //default implementation for this method will return an ER_UNKNOWN_ERROR
- HandleOtherCommand(cmd byte, data []byte) error
-}
-
-func (c *Conn) HandleCommand() error {
- if c.Conn == nil {
- return fmt.Errorf("connection closed")
- }
-
- data, err := c.ReadPacket()
- if err != nil {
- c.Close()
- c.Conn = nil
- return err
- }
-
- v := c.dispatch(data)
-
- err = c.writeValue(v)
-
- if c.Conn != nil {
- c.ResetSequence()
- }
-
- if err != nil {
- c.Close()
- c.Conn = nil
- }
- return err
-}
-
-func (c *Conn) dispatch(data []byte) interface{} {
- cmd := data[0]
- data = data[1:]
-
- switch cmd {
- case COM_QUIT:
- c.Close()
- c.Conn = nil
- return noResponse{}
- case COM_QUERY:
- if r, err := c.h.HandleQuery(hack.String(data)); err != nil {
- return err
- } else {
- return r
- }
- case COM_PING:
- return nil
- case COM_INIT_DB:
- if err := c.h.UseDB(hack.String(data)); err != nil {
- return err
- } else {
- return nil
- }
- case COM_FIELD_LIST:
- index := bytes.IndexByte(data, 0x00)
- table := hack.String(data[0:index])
- wildcard := hack.String(data[index+1:])
-
- if fs, err := c.h.HandleFieldList(table, wildcard); err != nil {
- return err
- } else {
- return fs
- }
- case COM_STMT_PREPARE:
- c.stmtID++
- st := new(Stmt)
- st.ID = c.stmtID
- st.Query = hack.String(data)
- var err error
- if st.Params, st.Columns, st.Context, err = c.h.HandleStmtPrepare(st.Query); err != nil {
- return err
- } else {
- st.ResetParams()
- c.stmts[c.stmtID] = st
- return st
- }
- case COM_STMT_EXECUTE:
- if r, err := c.handleStmtExecute(data); err != nil {
- return err
- } else {
- return r
- }
- case COM_STMT_CLOSE:
- c.handleStmtClose(data)
- return noResponse{}
- case COM_STMT_SEND_LONG_DATA:
- c.handleStmtSendLongData(data)
- return noResponse{}
- case COM_STMT_RESET:
- if r, err := c.handleStmtReset(data); err != nil {
- return err
- } else {
- return r
- }
- default:
- return c.h.HandleOtherCommand(cmd, data)
- }
-
- return fmt.Errorf("command %d is not handled correctly", cmd)
-}
-
-type EmptyHandler struct {
-}
-
-func (h EmptyHandler) UseDB(dbName string) error {
- return nil
-}
-func (h EmptyHandler) HandleQuery(query string) (*Result, error) {
- return nil, fmt.Errorf("not supported now")
-}
-
-func (h EmptyHandler) HandleFieldList(table string, fieldWildcard string) ([]*Field, error) {
- return nil, fmt.Errorf("not supported now")
-}
-func (h EmptyHandler) HandleStmtPrepare(query string) (int, int, interface{}, error) {
- return 0, 0, nil, fmt.Errorf("not supported now")
-}
-func (h EmptyHandler) HandleStmtExecute(context interface{}, query string, args []interface{}) (*Result, error) {
- return nil, fmt.Errorf("not supported now")
-}
-
-func (h EmptyHandler) HandleStmtClose(context interface{}) error {
- return nil
-}
-
-func (h EmptyHandler) HandleOtherCommand(cmd byte, data []byte) error {
- return NewError(
- ER_UNKNOWN_ERROR,
- fmt.Sprintf("command %d is not supported now", cmd),
- )
-}
diff --git a/vendor/github.com/siddontang/go-mysql/server/command_test.go b/vendor/github.com/siddontang/go-mysql/server/command_test.go
deleted file mode 100644
index 34b034e..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/command_test.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package server
-
-// Ensure EmptyHandler implements Handler interface or cause compile time error
-var _ Handler = EmptyHandler{}
diff --git a/vendor/github.com/siddontang/go-mysql/server/conn.go b/vendor/github.com/siddontang/go-mysql/server/conn.go
deleted file mode 100644
index a279b93..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/conn.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package server
-
-import (
- "net"
- "sync/atomic"
-
- . "github.com/siddontang/go-mysql/mysql"
- "github.com/siddontang/go-mysql/packet"
- "github.com/siddontang/go/sync2"
-)
-
-/*
- Conn acts like a MySQL server connection, you can use MySQL client to communicate with it.
-*/
-type Conn struct {
- *packet.Conn
-
- serverConf *Server
- capability uint32
- authPluginName string
- connectionID uint32
- status uint16
- salt []byte // should be 8 + 12 for auth-plugin-data-part-1 and auth-plugin-data-part-2
-
- credentialProvider CredentialProvider
- user string
- password string
- cachingSha2FullAuth bool
-
- h Handler
-
- stmts map[uint32]*Stmt
- stmtID uint32
-
- closed sync2.AtomicBool
-}
-
-var baseConnID uint32 = 10000
-
-// create connection with default server settings
-func NewConn(conn net.Conn, user string, password string, h Handler) (*Conn, error) {
- p := NewInMemoryProvider()
- p.AddUser(user, password)
- salt, _ := RandomBuf(20)
- c := &Conn{
- Conn: packet.NewConn(conn),
- serverConf: defaultServer,
- credentialProvider: p,
- h: h,
- connectionID: atomic.AddUint32(&baseConnID, 1),
- stmts: make(map[uint32]*Stmt),
- salt: salt,
- }
- c.closed.Set(false)
-
- if err := c.handshake(); err != nil {
- c.Close()
- return nil, err
- }
-
- return c, nil
-}
-
-// create connection with customized server settings
-func NewCustomizedConn(conn net.Conn, serverConf *Server, p CredentialProvider, h Handler) (*Conn, error) {
- salt, _ := RandomBuf(20)
- c := &Conn{
- Conn: packet.NewConn(conn),
- serverConf: serverConf,
- credentialProvider: p,
- h: h,
- connectionID: atomic.AddUint32(&baseConnID, 1),
- stmts: make(map[uint32]*Stmt),
- salt: salt,
- }
- c.closed.Set(false)
-
- if err := c.handshake(); err != nil {
- c.Close()
- return nil, err
- }
-
- return c, nil
-}
-
-func (c *Conn) handshake() error {
- if err := c.writeInitialHandshake(); err != nil {
- return err
- }
-
- if err := c.readHandshakeResponse(); err != nil {
- if err == ErrAccessDenied {
- err = NewDefaultError(ER_ACCESS_DENIED_ERROR, c.user, c.LocalAddr().String(), "Yes")
- }
- c.writeError(err)
- return err
- }
-
- if err := c.writeOK(nil); err != nil {
- return err
- }
-
- c.ResetSequence()
-
- return nil
-}
-
-func (c *Conn) Close() {
- c.closed.Set(true)
- c.Conn.Close()
-}
-
-func (c *Conn) Closed() bool {
- return c.closed.Get()
-}
-
-func (c *Conn) GetUser() string {
- return c.user
-}
-
-func (c *Conn) ConnectionID() uint32 {
- return c.connectionID
-}
-
-func (c *Conn) IsAutoCommit() bool {
- return c.status&SERVER_STATUS_AUTOCOMMIT > 0
-}
-
-func (c *Conn) IsInTransaction() bool {
- return c.status&SERVER_STATUS_IN_TRANS > 0
-}
-
-func (c *Conn) SetInTransaction() {
- c.status |= SERVER_STATUS_IN_TRANS
-}
-
-func (c *Conn) ClearInTransaction() {
- c.status &= ^SERVER_STATUS_IN_TRANS
-}
diff --git a/vendor/github.com/siddontang/go-mysql/server/credential_provider.go b/vendor/github.com/siddontang/go-mysql/server/credential_provider.go
deleted file mode 100644
index 3d44eb0..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/credential_provider.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package server
-
-import "sync"
-
-// interface for user credential provider
-// hint: can be extended for more functionality
-// =================================IMPORTANT NOTE===============================
-// if the password in a third-party credential provider could be updated at runtime, we have to invalidate the caching
-// for 'caching_sha2_password' by calling 'func (s *Server)InvalidateCache(string, string)'.
-type CredentialProvider interface {
- // check if the user exists
- CheckUsername(username string) (bool, error)
- // get user credential
- GetCredential(username string) (password string, found bool, err error)
-}
-
-func NewInMemoryProvider() *InMemoryProvider {
- return &InMemoryProvider{
- userPool: sync.Map{},
- }
-}
-
-// implements a in memory credential provider
-type InMemoryProvider struct {
- userPool sync.Map // username -> password
-}
-
-func (m *InMemoryProvider) CheckUsername(username string) (found bool, err error) {
- _, ok := m.userPool.Load(username)
- return ok, nil
-}
-
-func (m *InMemoryProvider) GetCredential(username string) (password string, found bool, err error) {
- v, ok := m.userPool.Load(username)
- if !ok {
- return "", false, nil
- }
- return v.(string), true, nil
-}
-
-func (m *InMemoryProvider) AddUser(username, password string) {
- m.userPool.Store(username, password)
-}
-
-type Provider InMemoryProvider
diff --git a/vendor/github.com/siddontang/go-mysql/server/example/server_example.go b/vendor/github.com/siddontang/go-mysql/server/example/server_example.go
deleted file mode 100644
index 1efa1a3..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/example/server_example.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package main
-
-import (
- "net"
-
- "github.com/siddontang/go-log/log"
- "github.com/siddontang/go-mysql/mysql"
- "github.com/siddontang/go-mysql/server"
- "github.com/siddontang/go-mysql/test_util/test_keys"
-
- "crypto/tls"
- "time"
-)
-
-type RemoteThrottleProvider struct {
- *server.InMemoryProvider
- delay int // in milliseconds
-}
-
-func (m *RemoteThrottleProvider) GetCredential(username string) (password string, found bool, err error) {
- time.Sleep(time.Millisecond * time.Duration(m.delay))
- return m.InMemoryProvider.GetCredential(username)
-}
-
-func main() {
- l, _ := net.Listen("tcp", "127.0.0.1:3306")
- // user either the in-memory credential provider or the remote credential provider (you can implement your own)
- //inMemProvider := server.NewInMemoryProvider()
- //inMemProvider.AddUser("root", "123")
- remoteProvider := &RemoteThrottleProvider{server.NewInMemoryProvider(), 10 + 50}
- remoteProvider.AddUser("root", "123")
- var tlsConf = server.NewServerTLSConfig(test_keys.CaPem, test_keys.CertPem, test_keys.KeyPem, tls.VerifyClientCertIfGiven)
- for {
- c, _ := l.Accept()
- go func() {
- // Create a connection with user root and an empty password.
- // You can use your own handler to handle command here.
- svr := server.NewServer("8.0.12", mysql.DEFAULT_COLLATION_ID, mysql.AUTH_CACHING_SHA2_PASSWORD, test_keys.PubPem, tlsConf)
- conn, err := server.NewCustomizedConn(c, svr, remoteProvider, server.EmptyHandler{})
-
- if err != nil {
- log.Errorf("Connection error: %v", err)
- return
- }
-
- for {
- conn.HandleCommand()
- }
- }()
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/server/handshake_resp.go b/vendor/github.com/siddontang/go-mysql/server/handshake_resp.go
deleted file mode 100644
index 79af6f2..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/handshake_resp.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package server
-
-import (
- "bytes"
- "crypto/tls"
- "encoding/binary"
-
- "github.com/juju/errors"
- . "github.com/siddontang/go-mysql/mysql"
-)
-
-func (c *Conn) readHandshakeResponse() error {
- data, pos, err := c.readFirstPart()
- if err != nil {
- return err
- }
- if pos, err = c.readUserName(data, pos); err != nil {
- return err
- }
- authData, authLen, pos, err := c.readAuthData(data, pos)
- if err != nil {
- return err
- }
-
- pos += authLen
-
- if pos, err = c.readDb(data, pos); err != nil {
- return err
- }
-
- pos = c.readPluginName(data, pos)
-
- cont, err := c.handleAuthMatch(authData, pos)
- if err != nil {
- return err
- }
- if !cont {
- return nil
- }
-
- // ignore connect attrs for now, the proxy does not support passing attrs to actual MySQL server
-
- // try to authenticate the client
- return c.compareAuthData(c.authPluginName, authData)
-}
-
-func (c *Conn) readFirstPart() ([]byte, int, error) {
- data, err := c.ReadPacket()
- if err != nil {
- return nil, 0, err
- }
-
- pos := 0
-
- // check CLIENT_PROTOCOL_41
- if uint32(binary.LittleEndian.Uint16(data[:2]))&CLIENT_PROTOCOL_41 == 0 {
- return nil, 0, errors.New("CLIENT_PROTOCOL_41 compatible client is required")
- }
-
- //capability
- c.capability = binary.LittleEndian.Uint32(data[:4])
- if c.capability&CLIENT_SECURE_CONNECTION == 0 {
- return nil, 0, errors.New("CLIENT_SECURE_CONNECTION compatible client is required")
- }
- pos += 4
-
- //skip max packet size
- pos += 4
-
- //charset, skip, if you want to use another charset, use set names
- //c.collation = CollationId(data[pos])
- pos++
-
- //skip reserved 23[00]
- pos += 23
-
- // is this a SSLRequest packet?
- if len(data) == (4 + 4 + 1 + 23) {
- if c.serverConf.capability&CLIENT_SSL == 0 {
- return nil, 0, errors.Errorf("The host '%s' does not support SSL connections", c.RemoteAddr().String())
- }
- // switch to TLS
- tlsConn := tls.Server(c.Conn.Conn, c.serverConf.tlsConfig)
- if err := tlsConn.Handshake(); err != nil {
- return nil, 0, err
- }
- c.Conn.Conn = tlsConn
-
- // mysql handshake again
- return c.readFirstPart()
- }
- return data, pos, nil
-}
-
-func (c *Conn) readUserName(data []byte, pos int) (int, error) {
- //user name
- user := string(data[pos : pos+bytes.IndexByte(data[pos:], 0x00)])
- pos += len(user) + 1
- c.user = user
- return pos, nil
-}
-
-func (c *Conn) readDb(data []byte, pos int) (int, error) {
- if c.capability&CLIENT_CONNECT_WITH_DB != 0 {
- if len(data[pos:]) == 0 {
- return pos, nil
- }
-
- db := string(data[pos : pos+bytes.IndexByte(data[pos:], 0x00)])
- pos += len(db) + 1
-
- if err := c.h.UseDB(db); err != nil {
- return 0, err
- }
- }
- return pos, nil
-}
-
-func (c *Conn) readPluginName(data []byte, pos int) int {
- if c.capability&CLIENT_PLUGIN_AUTH != 0 {
- c.authPluginName = string(data[pos : pos+bytes.IndexByte(data[pos:], 0x00)])
- pos += len(c.authPluginName)
- } else {
- // The method used is Native Authentication if both CLIENT_PROTOCOL_41 and CLIENT_SECURE_CONNECTION are set,
- // but CLIENT_PLUGIN_AUTH is not set, so we fallback to 'mysql_native_password'
- c.authPluginName = AUTH_NATIVE_PASSWORD
- }
- return pos
-}
-
-func (c *Conn) readAuthData(data []byte, pos int) ([]byte, int, int, error) {
- // length encoded data
- var auth []byte
- var authLen int
- if c.capability&CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA != 0 {
- authData, isNULL, readBytes, err := LengthEncodedString(data[pos:])
- if err != nil {
- return nil, 0, 0, err
- }
- if isNULL {
- // no auth length and no auth data, just \NUL, considered invalid auth data, and reject connection as MySQL does
- return nil, 0, 0, NewDefaultError(ER_ACCESS_DENIED_ERROR, c.LocalAddr().String(), c.user, "Yes")
- }
- auth = authData
- authLen = readBytes
- } else {
- //auth length and auth
- authLen = int(data[pos])
- pos++
- auth = data[pos : pos+authLen]
- if authLen == 0 {
- // skip the next \NUL in case the password is empty
- pos++
- }
- }
- return auth, authLen, pos, nil
-}
-
-// Public Key Retrieval
-// See: https://dev.mysql.com/doc/internals/en/public-key-retrieval.html
-func (c *Conn) handlePublicKeyRetrieval(authData []byte) (bool, error) {
- // if the client use 'sha256_password' auth method, and request for a public key
- // we send back a keyfile with Protocol::AuthMoreData
- if c.authPluginName == AUTH_SHA256_PASSWORD && len(authData) == 1 && authData[0] == 0x01 {
- if c.serverConf.capability&CLIENT_SSL == 0 {
- return false, errors.New("server does not support SSL: CLIENT_SSL not enabled")
- }
- if err := c.writeAuthMoreDataPubkey(); err != nil {
- return false, err
- }
-
- return false, c.handleAuthSwitchResponse()
- }
- return true, nil
-}
-
-func (c *Conn) handleAuthMatch(authData []byte, pos int) (bool, error) {
- // if the client responds the handshake with a different auth method, the server will send the AuthSwitchRequest packet
- // to the client to ask the client to switch.
-
- if c.authPluginName != c.serverConf.defaultAuthMethod {
- if err := c.writeAuthSwitchRequest(c.serverConf.defaultAuthMethod); err != nil {
- return false, err
- }
- c.authPluginName = c.serverConf.defaultAuthMethod
- // handle AuthSwitchResponse
- return false, c.handleAuthSwitchResponse()
- }
- return true, nil
-}
diff --git a/vendor/github.com/siddontang/go-mysql/server/initial_handshake.go b/vendor/github.com/siddontang/go-mysql/server/initial_handshake.go
deleted file mode 100644
index 312ac2b..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/initial_handshake.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package server
-
-// see: https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_connection_phase_packets_protocol_handshake_v10.html
-func (c *Conn) writeInitialHandshake() error {
- data := make([]byte, 4)
-
- //min version 10
- data = append(data, 10)
-
- //server version[00]
- data = append(data, c.serverConf.serverVersion...)
- data = append(data, 0x00)
-
- //connection id
- data = append(data, byte(c.connectionID), byte(c.connectionID>>8), byte(c.connectionID>>16), byte(c.connectionID>>24))
-
- //auth-plugin-data-part-1
- data = append(data, c.salt[0:8]...)
-
- //filter 0x00 byte, terminating the first part of a scramble
- data = append(data, 0x00)
-
- defaultFlag := c.serverConf.capability
- //capability flag lower 2 bytes, using default capability here
- data = append(data, byte(defaultFlag), byte(defaultFlag>>8))
-
- //charset
- data = append(data, c.serverConf.collationId)
-
- //status
- data = append(data, byte(c.status), byte(c.status>>8))
-
- //capability flag upper 2 bytes, using default capability here
- data = append(data, byte(defaultFlag>>16), byte(defaultFlag>>24))
-
- // server supports CLIENT_PLUGIN_AUTH and CLIENT_SECURE_CONNECTION
- data = append(data, byte(8+12+1))
-
- //reserved 10 [00]
- data = append(data, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
-
- //auth-plugin-data-part-2
- data = append(data, c.salt[8:]...)
- // second part of the password cipher [mininum 13 bytes],
- // where len=MAX(13, length of auth-plugin-data - 8)
- // add \NUL to terminate the string
- data = append(data, 0x00)
-
- // auth plugin name
- data = append(data, c.serverConf.defaultAuthMethod...)
-
- // EOF if MySQL version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
- // \NUL otherwise, so we use \NUL
- data = append(data, 0)
-
- return c.WritePacket(data)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/server/resp.go b/vendor/github.com/siddontang/go-mysql/server/resp.go
deleted file mode 100644
index db86323..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/resp.go
+++ /dev/null
@@ -1,195 +0,0 @@
-package server
-
-import (
- "fmt"
-
- . "github.com/siddontang/go-mysql/mysql"
-)
-
-func (c *Conn) writeOK(r *Result) error {
- if r == nil {
- r = &Result{}
- }
-
- r.Status |= c.status
-
- data := make([]byte, 4, 32)
-
- data = append(data, OK_HEADER)
-
- data = append(data, PutLengthEncodedInt(r.AffectedRows)...)
- data = append(data, PutLengthEncodedInt(r.InsertId)...)
-
- if c.capability&CLIENT_PROTOCOL_41 > 0 {
- data = append(data, byte(r.Status), byte(r.Status>>8))
- data = append(data, 0, 0)
- }
-
- return c.WritePacket(data)
-}
-
-func (c *Conn) writeError(e error) error {
- var m *MyError
- var ok bool
- if m, ok = e.(*MyError); !ok {
- m = NewError(ER_UNKNOWN_ERROR, e.Error())
- }
-
- data := make([]byte, 4, 16+len(m.Message))
-
- data = append(data, ERR_HEADER)
- data = append(data, byte(m.Code), byte(m.Code>>8))
-
- if c.capability&CLIENT_PROTOCOL_41 > 0 {
- data = append(data, '#')
- data = append(data, m.State...)
- }
-
- data = append(data, m.Message...)
-
- return c.WritePacket(data)
-}
-
-func (c *Conn) writeEOF() error {
- data := make([]byte, 4, 9)
-
- data = append(data, EOF_HEADER)
- if c.capability&CLIENT_PROTOCOL_41 > 0 {
- data = append(data, 0, 0)
- data = append(data, byte(c.status), byte(c.status>>8))
- }
-
- return c.WritePacket(data)
-}
-
-// see: https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_connection_phase_packets_protocol_auth_switch_request.html
-func (c *Conn) writeAuthSwitchRequest(newAuthPluginName string) error {
- data := make([]byte, 4)
- data = append(data, EOF_HEADER)
- data = append(data, []byte(newAuthPluginName)...)
- data = append(data, 0x00)
- rnd, err := RandomBuf(20)
- if err != nil {
- return err
- }
- // new auth data
- c.salt = rnd
- data = append(data, c.salt...)
- // the online doc states it's a string.EOF, however, the actual MySQL server add a \NUL to the end, without it, the
- // official MySQL client will fail.
- data = append(data, 0x00)
- return c.WritePacket(data)
-}
-
-// see: https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_connection_phase_packets_protocol_auth_switch_response.html
-func (c *Conn) readAuthSwitchRequestResponse() ([]byte, error) {
- data, err := c.ReadPacket()
- if err != nil {
- return nil, err
- }
- if len(data) == 1 && data[0] == 0x00 {
- // \NUL
- return make([]byte, 0), nil
- }
- return data, nil
-}
-
-func (c *Conn) writeAuthMoreDataPubkey() error {
- data := make([]byte, 4)
- data = append(data, MORE_DATE_HEADER)
- data = append(data, c.serverConf.pubKey...)
- return c.WritePacket(data)
-}
-
-func (c *Conn) writeAuthMoreDataFullAuth() error {
- data := make([]byte, 4)
- data = append(data, MORE_DATE_HEADER)
- data = append(data, CACHE_SHA2_FULL_AUTH)
- return c.WritePacket(data)
-}
-
-func (c *Conn) writeAuthMoreDataFastAuth() error {
- data := make([]byte, 4)
- data = append(data, MORE_DATE_HEADER)
- data = append(data, CACHE_SHA2_FAST_AUTH)
- return c.WritePacket(data)
-}
-
-func (c *Conn) writeResultset(r *Resultset) error {
- columnLen := PutLengthEncodedInt(uint64(len(r.Fields)))
-
- data := make([]byte, 4, 1024)
-
- data = append(data, columnLen...)
- if err := c.WritePacket(data); err != nil {
- return err
- }
-
- for _, v := range r.Fields {
- data = data[0:4]
- data = append(data, v.Dump()...)
- if err := c.WritePacket(data); err != nil {
- return err
- }
- }
-
- if err := c.writeEOF(); err != nil {
- return err
- }
-
- for _, v := range r.RowDatas {
- data = data[0:4]
- data = append(data, v...)
- if err := c.WritePacket(data); err != nil {
- return err
- }
- }
-
- if err := c.writeEOF(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (c *Conn) writeFieldList(fs []*Field) error {
- data := make([]byte, 4, 1024)
-
- for _, v := range fs {
- data = data[0:4]
- data = append(data, v.Dump()...)
- if err := c.WritePacket(data); err != nil {
- return err
- }
- }
-
- if err := c.writeEOF(); err != nil {
- return err
- }
- return nil
-}
-
-type noResponse struct{}
-
-func (c *Conn) writeValue(value interface{}) error {
- switch v := value.(type) {
- case noResponse:
- return nil
- case error:
- return c.writeError(v)
- case nil:
- return c.writeOK(nil)
- case *Result:
- if v != nil && v.Resultset != nil {
- return c.writeResultset(v.Resultset)
- } else {
- return c.writeOK(v)
- }
- case []*Field:
- return c.writeFieldList(v)
- case *Stmt:
- return c.writePrepare(v)
- default:
- return fmt.Errorf("invalid response type %T", value)
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/server/server_conf.go b/vendor/github.com/siddontang/go-mysql/server/server_conf.go
deleted file mode 100644
index 353595c..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/server_conf.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package server
-
-import (
- "crypto/tls"
- "fmt"
- "sync"
-
- . "github.com/siddontang/go-mysql/mysql"
-)
-
-var defaultServer = NewDefaultServer()
-
-// Defines a basic MySQL server with configs.
-//
-// We do not aim at implementing the whole MySQL connection suite to have the best compatibilities for the clients.
-// The MySQL server can be configured to switch auth methods covering 'mysql_old_password', 'mysql_native_password',
-// 'mysql_clear_password', 'authentication_windows_client', 'sha256_password', 'caching_sha2_password', etc.
-//
-// However, since some old auth methods are considered broken with security issues. MySQL major versions like 5.7 and 8.0 default to
-// 'mysql_native_password' or 'caching_sha2_password', and most MySQL clients should have already supported at least one of the three auth
-// methods 'mysql_native_password', 'caching_sha2_password', and 'sha256_password'. Thus here we will only support these three
-// auth methods, and use 'mysql_native_password' as default for maximum compatibility with the clients and leave the other two as
-// config options.
-//
-// The MySQL doc states that 'mysql_old_password' will be used if 'CLIENT_PROTOCOL_41' or 'CLIENT_SECURE_CONNECTION' flag is not set.
-// We choose to drop the support for insecure 'mysql_old_password' auth method and require client capability 'CLIENT_PROTOCOL_41' and 'CLIENT_SECURE_CONNECTION'
-// are set. Besides, if 'CLIENT_PLUGIN_AUTH' is not set, we fallback to 'mysql_native_password' auth method.
-type Server struct {
- serverVersion string // e.g. "8.0.12"
- protocolVersion int // minimal 10
- capability uint32 // server capability flag
- collationId uint8
- defaultAuthMethod string // default authentication method, 'mysql_native_password'
- pubKey []byte
- tlsConfig *tls.Config
- cacheShaPassword *sync.Map // 'user@host' -> SHA256(SHA256(PASSWORD))
-}
-
-// New mysql server with default settings.
-//
-// NOTES:
-// TLS support will be enabled by default with auto-generated CA and server certificates (however, you can still use
-// non-TLS connection). By default, it will verify the client certificate if present. You can enable TLS support on
-// the client side without providing a client-side certificate. So only when you need the server to verify client
-// identity for maximum security, you need to set a signed certificate for the client.
-func NewDefaultServer() *Server {
- caPem, caKey := generateCA()
- certPem, keyPem := generateAndSignRSACerts(caPem, caKey)
- tlsConf := NewServerTLSConfig(caPem, certPem, keyPem, tls.VerifyClientCertIfGiven)
- return &Server{
- serverVersion: "5.7.0",
- protocolVersion: 10,
- capability: CLIENT_LONG_PASSWORD | CLIENT_LONG_FLAG | CLIENT_CONNECT_WITH_DB | CLIENT_PROTOCOL_41 |
- CLIENT_TRANSACTIONS | CLIENT_SECURE_CONNECTION | CLIENT_PLUGIN_AUTH | CLIENT_SSL | CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA,
- collationId: DEFAULT_COLLATION_ID,
- defaultAuthMethod: AUTH_NATIVE_PASSWORD,
- pubKey: getPublicKeyFromCert(certPem),
- tlsConfig: tlsConf,
- cacheShaPassword: new(sync.Map),
- }
-}
-
-// New mysql server with customized settings.
-//
-// NOTES:
-// You can control the authentication methods and TLS settings here.
-// For auth method, you can specify one of the supported methods 'mysql_native_password', 'caching_sha2_password', and 'sha256_password'.
-// The specified auth method will be enforced by the server in the connection phase. That means, client will be asked to switch auth method
-// if the supplied auth method is different from the server default.
-// And for TLS support, you can specify self-signed or CA-signed certificates and decide whether the client needs to provide
-// a signed or unsigned certificate to provide different level of security.
-func NewServer(serverVersion string, collationId uint8, defaultAuthMethod string, pubKey []byte, tlsConfig *tls.Config) *Server {
- if !isAuthMethodSupported(defaultAuthMethod) {
- panic(fmt.Sprintf("server authentication method '%s' is not supported", defaultAuthMethod))
- }
-
- //if !isAuthMethodAllowedByServer(defaultAuthMethod, allowedAuthMethods) {
- // panic(fmt.Sprintf("default auth method is not one of the allowed auth methods"))
- //}
- var capFlag = CLIENT_LONG_PASSWORD | CLIENT_LONG_FLAG | CLIENT_CONNECT_WITH_DB | CLIENT_PROTOCOL_41 |
- CLIENT_TRANSACTIONS | CLIENT_SECURE_CONNECTION | CLIENT_PLUGIN_AUTH | CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA
- if tlsConfig != nil {
- capFlag |= CLIENT_SSL
- }
- return &Server{
- serverVersion: serverVersion,
- protocolVersion: 10,
- capability: capFlag,
- collationId: collationId,
- defaultAuthMethod: defaultAuthMethod,
- pubKey: pubKey,
- tlsConfig: tlsConfig,
- cacheShaPassword: new(sync.Map),
- }
-}
-
-func isAuthMethodSupported(authMethod string) bool {
- return authMethod == AUTH_NATIVE_PASSWORD || authMethod == AUTH_CACHING_SHA2_PASSWORD || authMethod == AUTH_SHA256_PASSWORD
-}
-
-func (s *Server) InvalidateCache(username string, host string) {
- s.cacheShaPassword.Delete(fmt.Sprintf("%s@%s", username, host))
-}
diff --git a/vendor/github.com/siddontang/go-mysql/server/server_test.go b/vendor/github.com/siddontang/go-mysql/server/server_test.go
deleted file mode 100644
index 1f427fd..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/server_test.go
+++ /dev/null
@@ -1,300 +0,0 @@
-package server
-
-import (
- "crypto/tls"
- "database/sql"
- "flag"
- "fmt"
- "net"
- "strings"
- "testing"
- "time"
-
- _ "github.com/go-sql-driver/mysql"
- "github.com/juju/errors"
- . "github.com/pingcap/check"
- "github.com/siddontang/go-log/log"
- "github.com/siddontang/go-mysql/mysql"
- "github.com/siddontang/go-mysql/test_util/test_keys"
-)
-
-var testAddr = flag.String("addr", "127.0.0.1:4000", "MySQL proxy server address")
-var testUser = flag.String("user", "root", "MySQL user")
-var testPassword = flag.String("pass", "123456", "MySQL password")
-var testDB = flag.String("db", "test", "MySQL test database")
-
-var tlsConf = NewServerTLSConfig(test_keys.CaPem, test_keys.CertPem, test_keys.KeyPem, tls.VerifyClientCertIfGiven)
-
-func prepareServerConf() []*Server {
- // add default server without TLS
- var servers = []*Server{
- // with default TLS
- NewDefaultServer(),
- // for key exchange, CLIENT_SSL must be enabled for the server and if the connection is not secured with TLS
- // server permits MYSQL_NATIVE_PASSWORD only
- NewServer("8.0.12", mysql.DEFAULT_COLLATION_ID, mysql.AUTH_NATIVE_PASSWORD, test_keys.PubPem, tlsConf),
- NewServer("8.0.12", mysql.DEFAULT_COLLATION_ID, mysql.AUTH_NATIVE_PASSWORD, test_keys.PubPem, tlsConf),
- // server permits SHA256_PASSWORD only
- NewServer("8.0.12", mysql.DEFAULT_COLLATION_ID, mysql.AUTH_SHA256_PASSWORD, test_keys.PubPem, tlsConf),
- // server permits CACHING_SHA2_PASSWORD only
- NewServer("8.0.12", mysql.DEFAULT_COLLATION_ID, mysql.AUTH_CACHING_SHA2_PASSWORD, test_keys.PubPem, tlsConf),
-
- // test auth switch: server permits SHA256_PASSWORD only but sent different method MYSQL_NATIVE_PASSWORD in handshake response
- NewServer("8.0.12", mysql.DEFAULT_COLLATION_ID, mysql.AUTH_NATIVE_PASSWORD, test_keys.PubPem, tlsConf),
- // test auth switch: server permits CACHING_SHA2_PASSWORD only but sent different method MYSQL_NATIVE_PASSWORD in handshake response
- NewServer("8.0.12", mysql.DEFAULT_COLLATION_ID, mysql.AUTH_NATIVE_PASSWORD, test_keys.PubPem, tlsConf),
- // test auth switch: server permits CACHING_SHA2_PASSWORD only but sent different method SHA256_PASSWORD in handshake response
- NewServer("8.0.12", mysql.DEFAULT_COLLATION_ID, mysql.AUTH_SHA256_PASSWORD, test_keys.PubPem, tlsConf),
- // test auth switch: server permits MYSQL_NATIVE_PASSWORD only but sent different method SHA256_PASSWORD in handshake response
- NewServer("8.0.12", mysql.DEFAULT_COLLATION_ID, mysql.AUTH_SHA256_PASSWORD, test_keys.PubPem, tlsConf),
- // test auth switch: server permits SHA256_PASSWORD only but sent different method CACHING_SHA2_PASSWORD in handshake response
- NewServer("8.0.12", mysql.DEFAULT_COLLATION_ID, mysql.AUTH_CACHING_SHA2_PASSWORD, test_keys.PubPem, tlsConf),
- // test auth switch: server permits MYSQL_NATIVE_PASSWORD only but sent different method CACHING_SHA2_PASSWORD in handshake response
- NewServer("8.0.12", mysql.DEFAULT_COLLATION_ID, mysql.AUTH_CACHING_SHA2_PASSWORD, test_keys.PubPem, tlsConf),
- }
- return servers
-}
-
-func Test(t *testing.T) {
- log.SetLevel(log.LevelDebug)
-
- // general tests
- inMemProvider := NewInMemoryProvider()
- inMemProvider.AddUser(*testUser, *testPassword)
-
- servers := prepareServerConf()
- //no TLS
- for _, svr := range servers {
- Suite(&serverTestSuite{
- server: svr,
- credProvider: inMemProvider,
- tlsPara: "false",
- })
- }
-
- // TLS if server supports
- for _, svr := range servers {
- if svr.tlsConfig != nil {
- Suite(&serverTestSuite{
- server: svr,
- credProvider: inMemProvider,
- tlsPara: "skip-verify",
- })
- }
- }
-
- TestingT(t)
-}
-
-type serverTestSuite struct {
- server *Server
- credProvider CredentialProvider
-
- tlsPara string
-
- db *sql.DB
-
- l net.Listener
-}
-
-func (s *serverTestSuite) SetUpSuite(c *C) {
- var err error
-
- s.l, err = net.Listen("tcp", *testAddr)
- c.Assert(err, IsNil)
-
- go s.onAccept(c)
-
- time.Sleep(20 * time.Millisecond)
-
- s.db, err = sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s)/%s?tls=%s", *testUser, *testPassword, *testAddr, *testDB, s.tlsPara))
- c.Assert(err, IsNil)
-
- s.db.SetMaxIdleConns(4)
-}
-
-func (s *serverTestSuite) TearDownSuite(c *C) {
- if s.db != nil {
- s.db.Close()
- }
-
- if s.l != nil {
- s.l.Close()
- }
-}
-
-func (s *serverTestSuite) onAccept(c *C) {
- for {
- conn, err := s.l.Accept()
- if err != nil {
- return
- }
-
- go s.onConn(conn, c)
- }
-}
-
-func (s *serverTestSuite) onConn(conn net.Conn, c *C) {
- //co, err := NewConn(conn, *testUser, *testPassword, &testHandler{s})
- co, err := NewCustomizedConn(conn, s.server, s.credProvider, &testHandler{s})
- c.Assert(err, IsNil)
- // set SSL if defined
- for {
- err = co.HandleCommand()
- if err != nil {
- return
- }
- }
-}
-
-func (s *serverTestSuite) TestSelect(c *C) {
- var a int64
- var b string
-
- err := s.db.QueryRow("SELECT a, b FROM tbl WHERE id=1").Scan(&a, &b)
- c.Assert(err, IsNil)
- c.Assert(a, Equals, int64(1))
- c.Assert(b, Equals, "hello world")
-}
-
-func (s *serverTestSuite) TestExec(c *C) {
- r, err := s.db.Exec("INSERT INTO tbl (a, b) values (1, \"hello world\")")
- c.Assert(err, IsNil)
- i, _ := r.LastInsertId()
- c.Assert(i, Equals, int64(1))
-
- r, err = s.db.Exec("REPLACE INTO tbl (a, b) values (1, \"hello world\")")
- c.Assert(err, IsNil)
- i, _ = r.RowsAffected()
- c.Assert(i, Equals, int64(1))
-
- r, err = s.db.Exec("UPDATE tbl SET b = \"abc\" where a = 1")
- c.Assert(err, IsNil)
- i, _ = r.RowsAffected()
- c.Assert(i, Equals, int64(1))
-
- r, err = s.db.Exec("DELETE FROM tbl where a = 1")
- c.Assert(err, IsNil)
- i, _ = r.RowsAffected()
- c.Assert(i, Equals, int64(1))
-}
-
-func (s *serverTestSuite) TestStmtSelect(c *C) {
- var a int64
- var b string
-
- err := s.db.QueryRow("SELECT a, b FROM tbl WHERE id=?", 1).Scan(&a, &b)
- c.Assert(err, IsNil)
- c.Assert(a, Equals, int64(1))
- c.Assert(b, Equals, "hello world")
-}
-
-func (s *serverTestSuite) TestStmtExec(c *C) {
- r, err := s.db.Exec("INSERT INTO tbl (a, b) values (?, ?)", 1, "hello world")
- c.Assert(err, IsNil)
- i, _ := r.LastInsertId()
- c.Assert(i, Equals, int64(1))
-
- r, err = s.db.Exec("REPLACE INTO tbl (a, b) values (?, ?)", 1, "hello world")
- c.Assert(err, IsNil)
- i, _ = r.RowsAffected()
- c.Assert(i, Equals, int64(1))
-
- r, err = s.db.Exec("UPDATE tbl SET b = \"abc\" where a = ?", 1)
- c.Assert(err, IsNil)
- i, _ = r.RowsAffected()
- c.Assert(i, Equals, int64(1))
-
- r, err = s.db.Exec("DELETE FROM tbl where a = ?", 1)
- c.Assert(err, IsNil)
- i, _ = r.RowsAffected()
- c.Assert(i, Equals, int64(1))
-}
-
-type testHandler struct {
- s *serverTestSuite
-}
-
-func (h *testHandler) UseDB(dbName string) error {
- return nil
-}
-
-func (h *testHandler) handleQuery(query string, binary bool) (*mysql.Result, error) {
- ss := strings.Split(query, " ")
- switch strings.ToLower(ss[0]) {
- case "select":
- var r *mysql.Resultset
- var err error
- //for handle go mysql driver select @@max_allowed_packet
- if strings.Contains(strings.ToLower(query), "max_allowed_packet") {
- r, err = mysql.BuildSimpleResultset([]string{"@@max_allowed_packet"}, [][]interface{}{
- {mysql.MaxPayloadLen},
- }, binary)
- } else {
- r, err = mysql.BuildSimpleResultset([]string{"a", "b"}, [][]interface{}{
- {1, "hello world"},
- }, binary)
- }
-
- if err != nil {
- return nil, errors.Trace(err)
- } else {
- return &mysql.Result{0, 0, 0, r}, nil
- }
- case "insert":
- return &mysql.Result{0, 1, 0, nil}, nil
- case "delete":
- return &mysql.Result{0, 0, 1, nil}, nil
- case "update":
- return &mysql.Result{0, 0, 1, nil}, nil
- case "replace":
- return &mysql.Result{0, 0, 1, nil}, nil
- default:
- return nil, fmt.Errorf("invalid query %s", query)
- }
-
- return nil, nil
-}
-
-func (h *testHandler) HandleQuery(query string) (*mysql.Result, error) {
- return h.handleQuery(query, false)
-}
-
-func (h *testHandler) HandleFieldList(table string, fieldWildcard string) ([]*mysql.Field, error) {
- return nil, nil
-}
-func (h *testHandler) HandleStmtPrepare(sql string) (params int, columns int, ctx interface{}, err error) {
- ss := strings.Split(sql, " ")
- switch strings.ToLower(ss[0]) {
- case "select":
- params = 1
- columns = 2
- case "insert":
- params = 2
- columns = 0
- case "replace":
- params = 2
- columns = 0
- case "update":
- params = 1
- columns = 0
- case "delete":
- params = 1
- columns = 0
- default:
- err = fmt.Errorf("invalid prepare %s", sql)
- }
- return params, columns, nil, err
-}
-
-func (h *testHandler) HandleStmtClose(context interface{}) error {
- return nil
-}
-
-func (h *testHandler) HandleStmtExecute(ctx interface{}, query string, args []interface{}) (*mysql.Result, error) {
- return h.handleQuery(query, true)
-}
-
-func (h *testHandler) HandleOtherCommand(cmd byte, data []byte) error {
- return mysql.NewError(mysql.ER_UNKNOWN_ERROR, fmt.Sprintf("command %d is not supported now", cmd))
-}
\ No newline at end of file
diff --git a/vendor/github.com/siddontang/go-mysql/server/ssl.go b/vendor/github.com/siddontang/go-mysql/server/ssl.go
deleted file mode 100644
index 1f8a9ed..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/ssl.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package server
-
-import (
- "crypto/rand"
- "crypto/rsa"
- "crypto/tls"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/pem"
- "math/big"
- "time"
-)
-
-// generate TLS config for server side
-// controlling the security level by authType
-func NewServerTLSConfig(caPem, certPem, keyPem []byte, authType tls.ClientAuthType) *tls.Config {
- pool := x509.NewCertPool()
- if !pool.AppendCertsFromPEM(caPem) {
- panic("failed to add ca PEM")
- }
-
- cert, err := tls.X509KeyPair(certPem, keyPem)
- if err != nil {
- panic(err)
- }
-
- config := &tls.Config{
- ClientAuth: authType,
- Certificates: []tls.Certificate{cert},
- ClientCAs: pool,
- }
- return config
-}
-
-// extract RSA public key from certificate
-func getPublicKeyFromCert(certPem []byte) []byte {
- block, _ := pem.Decode(certPem)
- crt, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- panic(err)
- }
- pubKey, err := x509.MarshalPKIXPublicKey(crt.PublicKey.(*rsa.PublicKey))
- if err != nil {
- panic(err)
- }
- return pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: pubKey})
-}
-
-// generate and sign RSA certificates with given CA
-// see: https://fale.io/blog/2017/06/05/create-a-pki-in-golang/
-func generateAndSignRSACerts(caPem, caKey []byte) ([]byte, []byte) {
- // Load CA
- catls, err := tls.X509KeyPair(caPem, caKey)
- if err != nil {
- panic(err)
- }
- ca, err := x509.ParseCertificate(catls.Certificate[0])
- if err != nil {
- panic(err)
- }
-
- // use the CA to sign certificates
- serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
- serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
- if err != nil {
- panic(err)
- }
- cert := &x509.Certificate{
- SerialNumber: serialNumber,
- Subject: pkix.Name{
- Organization: []string{"ORGANIZATION_NAME"},
- Country: []string{"COUNTRY_CODE"},
- Province: []string{"PROVINCE"},
- Locality: []string{"CITY"},
- StreetAddress: []string{"ADDRESS"},
- PostalCode: []string{"POSTAL_CODE"},
- },
- NotBefore: time.Now(),
- NotAfter: time.Now().AddDate(10, 0, 0),
- SubjectKeyId: []byte{1, 2, 3, 4, 6},
- ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
- KeyUsage: x509.KeyUsageDigitalSignature,
- }
- priv, _ := rsa.GenerateKey(rand.Reader, 2048)
-
- // sign the certificate
- cert_b, err := x509.CreateCertificate(rand.Reader, ca, cert, &priv.PublicKey, catls.PrivateKey)
- if err != nil {
- panic(err)
- }
- certPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert_b})
- keyPem := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
-
- return certPem, keyPem
-}
-
-// generate CA in PEM
-// see: https://github.com/golang/go/blob/master/src/crypto/tls/generate_cert.go
-func generateCA() ([]byte, []byte) {
- serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
- serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
- if err != nil {
- panic(err)
- }
- template := &x509.Certificate{
- SerialNumber: serialNumber,
- Subject: pkix.Name{
- Organization: []string{"ORGANIZATION_NAME"},
- Country: []string{"COUNTRY_CODE"},
- Province: []string{"PROVINCE"},
- Locality: []string{"CITY"},
- StreetAddress: []string{"ADDRESS"},
- PostalCode: []string{"POSTAL_CODE"},
- },
- NotBefore: time.Now(),
- NotAfter: time.Now().AddDate(10, 0, 0),
- IsCA: true,
- ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
- KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign | x509.KeyUsageKeyEncipherment,
- BasicConstraintsValid: true,
- }
-
- priv, _ := rsa.GenerateKey(rand.Reader, 2048)
- derBytes, err := x509.CreateCertificate(rand.Reader, template, template, &priv.PublicKey, priv)
- if err != nil {
- panic(err)
- }
-
- caPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
- caKey := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
-
- return caPem, caKey
-}
diff --git a/vendor/github.com/siddontang/go-mysql/server/stmt.go b/vendor/github.com/siddontang/go-mysql/server/stmt.go
deleted file mode 100644
index 9bef23e..0000000
--- a/vendor/github.com/siddontang/go-mysql/server/stmt.go
+++ /dev/null
@@ -1,363 +0,0 @@
-package server
-
-import (
- "encoding/binary"
- "fmt"
- "math"
- "strconv"
-
- "github.com/juju/errors"
- . "github.com/siddontang/go-mysql/mysql"
-)
-
-var paramFieldData []byte
-var columnFieldData []byte
-
-func init() {
- var p = &Field{Name: []byte("?")}
- var c = &Field{}
-
- paramFieldData = p.Dump()
- columnFieldData = c.Dump()
-}
-
-type Stmt struct {
- ID uint32
- Query string
-
- Params int
- Columns int
-
- Args []interface{}
-
- Context interface{}
-}
-
-func (s *Stmt) Rest(params int, columns int, context interface{}) {
- s.Params = params
- s.Columns = columns
- s.Context = context
- s.ResetParams()
-}
-
-func (s *Stmt) ResetParams() {
- s.Args = make([]interface{}, s.Params)
-}
-
-func (c *Conn) writePrepare(s *Stmt) error {
- data := make([]byte, 4, 128)
-
- //status ok
- data = append(data, 0)
- //stmt id
- data = append(data, Uint32ToBytes(s.ID)...)
- //number columns
- data = append(data, Uint16ToBytes(uint16(s.Columns))...)
- //number params
- data = append(data, Uint16ToBytes(uint16(s.Params))...)
- //filter [00]
- data = append(data, 0)
- //warning count
- data = append(data, 0, 0)
-
- if err := c.WritePacket(data); err != nil {
- return err
- }
-
- if s.Params > 0 {
- for i := 0; i < s.Params; i++ {
- data = data[0:4]
- data = append(data, []byte(paramFieldData)...)
-
- if err := c.WritePacket(data); err != nil {
- return errors.Trace(err)
- }
- }
-
- if err := c.writeEOF(); err != nil {
- return err
- }
- }
-
- if s.Columns > 0 {
- for i := 0; i < s.Columns; i++ {
- data = data[0:4]
- data = append(data, []byte(columnFieldData)...)
-
- if err := c.WritePacket(data); err != nil {
- return errors.Trace(err)
- }
- }
-
- if err := c.writeEOF(); err != nil {
- return err
- }
-
- }
- return nil
-}
-
-func (c *Conn) handleStmtExecute(data []byte) (*Result, error) {
- if len(data) < 9 {
- return nil, ErrMalformPacket
- }
-
- pos := 0
- id := binary.LittleEndian.Uint32(data[0:4])
- pos += 4
-
- s, ok := c.stmts[id]
- if !ok {
- return nil, NewDefaultError(ER_UNKNOWN_STMT_HANDLER,
- strconv.FormatUint(uint64(id), 10), "stmt_execute")
- }
-
- flag := data[pos]
- pos++
- //now we only support CURSOR_TYPE_NO_CURSOR flag
- if flag != 0 {
- return nil, NewError(ER_UNKNOWN_ERROR, fmt.Sprintf("unsupported flag %d", flag))
- }
-
- //skip iteration-count, always 1
- pos += 4
-
- var nullBitmaps []byte
- var paramTypes []byte
- var paramValues []byte
-
- paramNum := s.Params
-
- if paramNum > 0 {
- nullBitmapLen := (s.Params + 7) >> 3
- if len(data) < (pos + nullBitmapLen + 1) {
- return nil, ErrMalformPacket
- }
- nullBitmaps = data[pos : pos+nullBitmapLen]
- pos += nullBitmapLen
-
- //new param bound flag
- if data[pos] == 1 {
- pos++
- if len(data) < (pos + (paramNum << 1)) {
- return nil, ErrMalformPacket
- }
-
- paramTypes = data[pos : pos+(paramNum<<1)]
- pos += paramNum << 1
-
- paramValues = data[pos:]
- }
-
- if err := c.bindStmtArgs(s, nullBitmaps, paramTypes, paramValues); err != nil {
- return nil, errors.Trace(err)
- }
- }
-
- var r *Result
- var err error
- if r, err = c.h.HandleStmtExecute(s.Context, s.Query, s.Args); err != nil {
- return nil, errors.Trace(err)
- }
-
- s.ResetParams()
-
- return r, nil
-}
-
-func (c *Conn) bindStmtArgs(s *Stmt, nullBitmap, paramTypes, paramValues []byte) error {
- args := s.Args
-
- pos := 0
-
- var v []byte
- var n int = 0
- var isNull bool
- var err error
-
- for i := 0; i < s.Params; i++ {
- if nullBitmap[i>>3]&(1<<(uint(i)%8)) > 0 {
- args[i] = nil
- continue
- }
-
- tp := paramTypes[i<<1]
- isUnsigned := (paramTypes[(i<<1)+1] & 0x80) > 0
-
- switch tp {
- case MYSQL_TYPE_NULL:
- args[i] = nil
- continue
-
- case MYSQL_TYPE_TINY:
- if len(paramValues) < (pos + 1) {
- return ErrMalformPacket
- }
-
- if isUnsigned {
- args[i] = uint8(paramValues[pos])
- } else {
- args[i] = int8(paramValues[pos])
- }
-
- pos++
- continue
-
- case MYSQL_TYPE_SHORT, MYSQL_TYPE_YEAR:
- if len(paramValues) < (pos + 2) {
- return ErrMalformPacket
- }
-
- if isUnsigned {
- args[i] = uint16(binary.LittleEndian.Uint16(paramValues[pos : pos+2]))
- } else {
- args[i] = int16(binary.LittleEndian.Uint16(paramValues[pos : pos+2]))
- }
- pos += 2
- continue
-
- case MYSQL_TYPE_INT24, MYSQL_TYPE_LONG:
- if len(paramValues) < (pos + 4) {
- return ErrMalformPacket
- }
-
- if isUnsigned {
- args[i] = uint32(binary.LittleEndian.Uint32(paramValues[pos : pos+4]))
- } else {
- args[i] = int32(binary.LittleEndian.Uint32(paramValues[pos : pos+4]))
- }
- pos += 4
- continue
-
- case MYSQL_TYPE_LONGLONG:
- if len(paramValues) < (pos + 8) {
- return ErrMalformPacket
- }
-
- if isUnsigned {
- args[i] = binary.LittleEndian.Uint64(paramValues[pos : pos+8])
- } else {
- args[i] = int64(binary.LittleEndian.Uint64(paramValues[pos : pos+8]))
- }
- pos += 8
- continue
-
- case MYSQL_TYPE_FLOAT:
- if len(paramValues) < (pos + 4) {
- return ErrMalformPacket
- }
-
- args[i] = float32(math.Float32frombits(binary.LittleEndian.Uint32(paramValues[pos : pos+4])))
- pos += 4
- continue
-
- case MYSQL_TYPE_DOUBLE:
- if len(paramValues) < (pos + 8) {
- return ErrMalformPacket
- }
-
- args[i] = math.Float64frombits(binary.LittleEndian.Uint64(paramValues[pos : pos+8]))
- pos += 8
- continue
-
- case MYSQL_TYPE_DECIMAL, MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR,
- MYSQL_TYPE_BIT, MYSQL_TYPE_ENUM, MYSQL_TYPE_SET, MYSQL_TYPE_TINY_BLOB,
- MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_BLOB,
- MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_STRING, MYSQL_TYPE_GEOMETRY,
- MYSQL_TYPE_DATE, MYSQL_TYPE_NEWDATE,
- MYSQL_TYPE_TIMESTAMP, MYSQL_TYPE_DATETIME, MYSQL_TYPE_TIME:
- if len(paramValues) < (pos + 1) {
- return ErrMalformPacket
- }
-
- v, isNull, n, err = LengthEncodedString(paramValues[pos:])
- pos += n
- if err != nil {
- return errors.Trace(err)
- }
-
- if !isNull {
- args[i] = v
- continue
- } else {
- args[i] = nil
- continue
- }
- default:
- return errors.Errorf("Stmt Unknown FieldType %d", tp)
- }
- }
- return nil
-}
-
-// stmt send long data command has no response
-func (c *Conn) handleStmtSendLongData(data []byte) error {
- if len(data) < 6 {
- return nil
- }
-
- id := binary.LittleEndian.Uint32(data[0:4])
-
- s, ok := c.stmts[id]
- if !ok {
- return nil
- }
-
- paramId := binary.LittleEndian.Uint16(data[4:6])
- if paramId >= uint16(s.Params) {
- return nil
- }
-
- if s.Args[paramId] == nil {
- s.Args[paramId] = data[6:]
- } else {
- if b, ok := s.Args[paramId].([]byte); ok {
- b = append(b, data[6:]...)
- s.Args[paramId] = b
- } else {
- return nil
- }
- }
-
- return nil
-}
-
-func (c *Conn) handleStmtReset(data []byte) (*Result, error) {
- if len(data) < 4 {
- return nil, ErrMalformPacket
- }
-
- id := binary.LittleEndian.Uint32(data[0:4])
-
- s, ok := c.stmts[id]
- if !ok {
- return nil, NewDefaultError(ER_UNKNOWN_STMT_HANDLER,
- strconv.FormatUint(uint64(id), 10), "stmt_reset")
- }
-
- s.ResetParams()
-
- return &Result{}, nil
-}
-
-// stmt close command has no response
-func (c *Conn) handleStmtClose(data []byte) error {
- if len(data) < 4 {
- return nil
- }
-
- id := binary.LittleEndian.Uint32(data[0:4])
-
- stmt, ok := c.stmts[id]
- if !ok {
- return nil
- }
-
- if err := c.h.HandleStmtClose(stmt.Context); err != nil {
- return err
- }
-
- delete(c.stmts, id)
-
- return nil
-}
diff --git a/vendor/github.com/siddontang/go-mysql/test_util/test_keys/keys.go b/vendor/github.com/siddontang/go-mysql/test_util/test_keys/keys.go
deleted file mode 100644
index c1049b6..0000000
--- a/vendor/github.com/siddontang/go-mysql/test_util/test_keys/keys.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package test_keys
-
-// here we put the testing encryption keys here
-// NOTE THIS IS FOR TESTING ONLY, DO NOT USE THEM IN PRODUCTION!
-
-var PubPem = []byte(`-----BEGIN PUBLIC KEY-----
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsraCori69OXEkA07Ykp2
-Ju+aHz33PqgKj0qbSbPm6ePh2mer2GWOxC4q1wdRwzgddwTTqSdonhM4XuVyyNqq
-gM7uv9JoWCONcKo28cPRK7gH7up7nYFllNFXUAA0/XQ+95tqtdITNplQLIceFIXz
-5Bvi9fThcpf9M6qKdNUa2Wd24rM/n6qxoUG2ksDDVXQC30RAHkGCdNi10iya8Pj/
-ZaEG86NXFpvvnLHRHiih/gXe7nby1sR6BxaEG2bLZd0cjdL5MuWOPeQ450H6mCtV
-SX4poNq9YrdP4XW9M0N7nocRU0p5aUvLWxy6XrUTSP0iRkC7ppEPG0p2Xtsq7QGT
-MwIDAQAB
------END PUBLIC KEY-----`)
-
-var CertPem = []byte(`-----BEGIN CERTIFICATE-----
-MIIDBjCCAe4CCQDg06wCf7hcuDANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB
-VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0
-cyBQdHkgTHRkMB4XDTE4MDgxOTA4NDUyNVoXDTI4MDgxNjA4NDUyNVowRTELMAkG
-A1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0
-IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-ALK2gqK4uvTlxJANO2JKdibvmh899z6oCo9Km0mz5unj4dpnq9hljsQuKtcHUcM4
-HXcE06knaJ4TOF7lcsjaqoDO7r/SaFgjjXCqNvHD0Su4B+7qe52BZZTRV1AANP10
-PvebarXSEzaZUCyHHhSF8+Qb4vX04XKX/TOqinTVGtlnduKzP5+qsaFBtpLAw1V0
-At9EQB5BgnTYtdIsmvD4/2WhBvOjVxab75yx0R4oof4F3u528tbEegcWhBtmy2Xd
-HI3S+TLljj3kOOdB+pgrVUl+KaDavWK3T+F1vTNDe56HEVNKeWlLy1scul61E0j9
-IkZAu6aRDxtKdl7bKu0BkzMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAma3yFqR7
-xkeaZBg4/1I3jSlaNe5+2JB4iybAkMOu77fG5zytLomTbzdhewsuBwpTVMJdga8T
-IdPeIFCin1U+5SkbjSMlpKf+krE+5CyrNJ5jAgO9ATIqx66oCTYXfGlNapGRLfSE
-sa0iMqCe/dr4GPU+flW2DZFWiyJVDSF1JjReQnfrWY+SD2SpP/lmlgltnY8MJngd
-xBLG5nsZCpUXGB713Q8ZyIm2ThVAMiskcxBleIZDDghLuhGvY/9eFJhZpvOkjWa6
-XGEi4E1G/SA+zVKFl41nHKCdqXdmIOnpcLlFBUVloQok5a95Kqc1TYw3f+WbdFff
-99dAgk3gWwWZQA==
------END CERTIFICATE-----`)
-
-var KeyPem = []byte(`-----BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEAsraCori69OXEkA07Ykp2Ju+aHz33PqgKj0qbSbPm6ePh2mer
-2GWOxC4q1wdRwzgddwTTqSdonhM4XuVyyNqqgM7uv9JoWCONcKo28cPRK7gH7up7
-nYFllNFXUAA0/XQ+95tqtdITNplQLIceFIXz5Bvi9fThcpf9M6qKdNUa2Wd24rM/
-n6qxoUG2ksDDVXQC30RAHkGCdNi10iya8Pj/ZaEG86NXFpvvnLHRHiih/gXe7nby
-1sR6BxaEG2bLZd0cjdL5MuWOPeQ450H6mCtVSX4poNq9YrdP4XW9M0N7nocRU0p5
-aUvLWxy6XrUTSP0iRkC7ppEPG0p2Xtsq7QGTMwIDAQABAoIBAGh1m8hHWCg7gXh9
-838RbRx3IswuKS27hWiaQEiFWmzOIb7KqDy1qAxtu+ayRY1paHegH6QY/+Kd824s
-ibpzbgQacJ04/HrAVTVMmQ8Z2VLHoAN7lcPL1bd14aZGaLLZVtDeTDJ413grhxxv
-4ho27gcgcbo4Z+rWgk7H2WRPCAGYqWYAycm3yF5vy9QaO6edU+T588YsEQOos5iy
-5pVFSGDGZkcUp1ukL3BJYR+jvygn6WPCobQ/LScUdi+ucitaI9i+UdlLokZARVRG
-M/msqcTM73thR8yVRcexU6NUDxRBfZ/f7moSAEbBmGDXuxDcIyH9KGMQ2rMtN1X3
-lK8UNwkCgYEA2STJq/IUQHjdqd3Dqh/Q7Zm8/pMWFqLJSkqpnFtFsXPyUOx9zDOy
-KqkIfGeyKwvsj9X9BcZ0FUKj9zoct1/WpPY+h7i7+z0MIujBh4AMjAcDrt4o76yK
-UHuVmG2xKTdJoAbqOdToQeX6E82Ioal5pbB2W7AbCQScNBPZ52jxgtcCgYEA0rE7
-2dFiRm0YmuszFYxft2+GP6NgP3R2TQNEooi1uCXG2xgwObie1YCHzpZ5CfSqJIxP
-XB7DXpIWi7PxJoeai2F83LnmdFz6F1BPRobwDoSFNdaSKLg4Yf856zpgYNKhL1fE
-OoOXj4VBWBZh1XDfZV44fgwlMIf7edOF1XOagwUCgYAw953O+7FbdKYwF0V3iOM5
-oZDAK/UwN5eC/GFRVDfcM5RycVJRCVtlSWcTfuLr2C2Jpiz/72fgH34QU3eEVsV1
-v94MBznFB1hESw7ReqvZq/9FoO3EVrl+OtBaZmosLD6bKtQJJJ0Xtz/01UW5hxla
-pveZ55XBK9v51nwuNjk4UwKBgHD8fJUllSchUCWb5cwzeAz98Kdl7LJ6uQo5q2/i
-EllLYOWThiEeIYdrIuklholRPIDXAaPsF2c6vn5yo+q+o6EFSZlw0+YpCjDAb5Lp
-wAh5BprFk6HkkM/0t9Guf4rMyYWC8odSlE9x7YXYkuSMYDCTI4Zs6vCoq7I8PbQn
-B4AlAoGAZ6Ee5m/ph5UVp/3+cR6jCY7aHBUU/M3pbJSkVjBW+ymEBVJ6sUdz8k3P
-x8BiPEQggNN7faWBqRWP7KXPnDYHh6shYUgPJwI5HX6NE/ZDnnXjeysHRyf0oCo5
-S6tHXwHNKB5HS1c/KDyyNGjP2oi/MF4o/MGWNWEcK6TJA3RGOYM=
------END RSA PRIVATE KEY-----`)
-
-var CaPem = []byte(`-----BEGIN CERTIFICATE-----
-MIIDtTCCAp2gAwIBAgIJANeS1FOzWXlZMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
-BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
-aWRnaXRzIFB0eSBMdGQwHhcNMTgwODE2MTUxNDE5WhcNMjEwNjA1MTUxNDE5WjBF
-MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50
-ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
-CgKCAQEAsV6xlhFxMn14Pn7XBRGLt8/HXmhVVu20IKFgIOyX7gAZr0QLsuT1fGf5
-zH9HrlgOMkfdhV847U03KPfUnBsi9lS6/xOxnH/OzTYM0WW0eNMGF7eoxrS64GSb
-PVX4pLi5+uwrrZT5HmDgZi49ANmuX6UYmH/eRRvSIoYUTV6t0aYsLyKvlpEAtRAe
-4AlKB236j5ggmJ36QUhTFTbeNbeOOgloTEdPK8Y/kgpnhiqzMdPqqIc7IeXUc456
-yX8MJUgniTM2qCNTFdEw+C2Ok0RbU6TI2SuEgVF4jtCcVEKxZ8kYbioONaePQKFR
-/EhdXO+/ag1IEdXElH9knLOfB+zCgwIDAQABo4GnMIGkMB0GA1UdDgQWBBQgHiwD
-00upIbCOunlK4HRw89DhjjB1BgNVHSMEbjBsgBQgHiwD00upIbCOunlK4HRw89Dh
-jqFJpEcwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNV
-BAoTGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZIIJANeS1FOzWXlZMAwGA1UdEwQF
-MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAFMZFQTFKU5tWIpWh8BbVZeVZcng0Kiq
-qwbhVwaTkqtfmbqw8/w+faOWylmLncQEMmgvnUltGMQlQKBwQM2byzPkz9phal3g
-uI0JWJYqtcMyIQUB9QbbhrDNC9kdt/ji/x6rrIqzaMRuiBXqH5LQ9h856yXzArqd
-cAQGzzYpbUCIv7ciSB93cKkU73fQLZVy5ZBy1+oAa1V9U4cb4G/20/PDmT+G3Gxz
-pEjeDKtz8XINoWgA2cSdfAhNZt5vqJaCIZ8qN0z6C7SUKwUBderERUMLUXdhUldC
-KTVHyEPvd0aULd5S5vEpKCnHcQmFcLdoN8t9k9pR9ZgwqXbyJHlxWFo=
------END CERTIFICATE-----`)
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/COPYING
deleted file mode 100644
index 5a8e332..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
deleted file mode 100644
index 5a8e332..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
deleted file mode 100644
index 5a8e332..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING
deleted file mode 100644
index 5a8e332..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/decode.go
deleted file mode 100644
index b0fd51d..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/decode.go
+++ /dev/null
@@ -1,509 +0,0 @@
-package toml
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "reflect"
- "strings"
- "time"
-)
-
-func e(format string, args ...interface{}) error {
- return fmt.Errorf("toml: "+format, args...)
-}
-
-// Unmarshaler is the interface implemented by objects that can unmarshal a
-// TOML description of themselves.
-type Unmarshaler interface {
- UnmarshalTOML(interface{}) error
-}
-
-// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
-func Unmarshal(p []byte, v interface{}) error {
- _, err := Decode(string(p), v)
- return err
-}
-
-// Primitive is a TOML value that hasn't been decoded into a Go value.
-// When using the various `Decode*` functions, the type `Primitive` may
-// be given to any value, and its decoding will be delayed.
-//
-// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
-//
-// The underlying representation of a `Primitive` value is subject to change.
-// Do not rely on it.
-//
-// N.B. Primitive values are still parsed, so using them will only avoid
-// the overhead of reflection. They can be useful when you don't know the
-// exact type of TOML data until run time.
-type Primitive struct {
- undecoded interface{}
- context Key
-}
-
-// DEPRECATED!
-//
-// Use MetaData.PrimitiveDecode instead.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
- md := MetaData{decoded: make(map[string]bool)}
- return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// PrimitiveDecode is just like the other `Decode*` functions, except it
-// decodes a TOML value that has already been parsed. Valid primitive values
-// can *only* be obtained from values filled by the decoder functions,
-// including this method. (i.e., `v` may contain more `Primitive`
-// values.)
-//
-// Meta data for primitive values is included in the meta data returned by
-// the `Decode*` functions with one exception: keys returned by the Undecoded
-// method will only reflect keys that were decoded. Namely, any keys hidden
-// behind a Primitive will be considered undecoded. Executing this method will
-// update the undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
- md.context = primValue.context
- defer func() { md.context = nil }()
- return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// Decode will decode the contents of `data` in TOML format into a pointer
-// `v`.
-//
-// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
-// used interchangeably.)
-//
-// TOML arrays of tables correspond to either a slice of structs or a slice
-// of maps.
-//
-// TOML datetimes correspond to Go `time.Time` values.
-//
-// All other TOML types (float, string, int, bool and array) correspond
-// to the obvious Go types.
-//
-// An exception to the above rules is if a type implements the
-// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
-// (floats, strings, integers, booleans and datetimes) will be converted to
-// a byte string and given to the value's UnmarshalText method. See the
-// Unmarshaler example for a demonstration with time duration strings.
-//
-// Key mapping
-//
-// TOML keys can map to either keys in a Go map or field names in a Go
-// struct. The special `toml` struct tag may be used to map TOML keys to
-// struct fields that don't match the key name exactly. (See the example.)
-// A case insensitive match to struct names will be tried if an exact match
-// can't be found.
-//
-// The mapping between TOML values and Go values is loose. That is, there
-// may exist TOML values that cannot be placed into your representation, and
-// there may be parts of your representation that do not correspond to
-// TOML values. This loose mapping can be made stricter by using the IsDefined
-// and/or Undecoded methods on the MetaData returned.
-//
-// This decoder will not handle cyclic types. If a cyclic type is passed,
-// `Decode` will not terminate.
-func Decode(data string, v interface{}) (MetaData, error) {
- rv := reflect.ValueOf(v)
- if rv.Kind() != reflect.Ptr {
- return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
- }
- if rv.IsNil() {
- return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
- }
- p, err := parse(data)
- if err != nil {
- return MetaData{}, err
- }
- md := MetaData{
- p.mapping, p.types, p.ordered,
- make(map[string]bool, len(p.ordered)), nil,
- }
- return md, md.unify(p.mapping, indirect(rv))
-}
-
-// DecodeFile is just like Decode, except it will automatically read the
-// contents of the file at `fpath` and decode it for you.
-func DecodeFile(fpath string, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadFile(fpath)
- if err != nil {
- return MetaData{}, err
- }
- return Decode(string(bs), v)
-}
-
-// DecodeReader is just like Decode, except it will consume all bytes
-// from the reader and decode it for you.
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadAll(r)
- if err != nil {
- return MetaData{}, err
- }
- return Decode(string(bs), v)
-}
-
-// unify performs a sort of type unification based on the structure of `rv`,
-// which is the client representation.
-//
-// Any type mismatch produces an error. Finding a type that we don't know
-// how to handle produces an unsupported type error.
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
-
- // Special case. Look for a `Primitive` value.
- if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
- // Save the undecoded data and the key context into the primitive
- // value.
- context := make(Key, len(md.context))
- copy(context, md.context)
- rv.Set(reflect.ValueOf(Primitive{
- undecoded: data,
- context: context,
- }))
- return nil
- }
-
- // Special case. Unmarshaler Interface support.
- if rv.CanAddr() {
- if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
- return v.UnmarshalTOML(data)
- }
- }
-
- // Special case. Handle time.Time values specifically.
- // TODO: Remove this code when we decide to drop support for Go 1.1.
- // This isn't necessary in Go 1.2 because time.Time satisfies the encoding
- // interfaces.
- if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
- return md.unifyDatetime(data, rv)
- }
-
- // Special case. Look for a value satisfying the TextUnmarshaler interface.
- if v, ok := rv.Interface().(TextUnmarshaler); ok {
- return md.unifyText(data, v)
- }
- // BUG(burntsushi)
- // The behavior here is incorrect whenever a Go type satisfies the
- // encoding.TextUnmarshaler interface but also corresponds to a TOML
- // hash or array. In particular, the unmarshaler should only be applied
- // to primitive TOML values. But at this point, it will be applied to
- // all kinds of values and produce an incorrect error whenever those values
- // are hashes or arrays (including arrays of tables).
-
- k := rv.Kind()
-
- // laziness
- if k >= reflect.Int && k <= reflect.Uint64 {
- return md.unifyInt(data, rv)
- }
- switch k {
- case reflect.Ptr:
- elem := reflect.New(rv.Type().Elem())
- err := md.unify(data, reflect.Indirect(elem))
- if err != nil {
- return err
- }
- rv.Set(elem)
- return nil
- case reflect.Struct:
- return md.unifyStruct(data, rv)
- case reflect.Map:
- return md.unifyMap(data, rv)
- case reflect.Array:
- return md.unifyArray(data, rv)
- case reflect.Slice:
- return md.unifySlice(data, rv)
- case reflect.String:
- return md.unifyString(data, rv)
- case reflect.Bool:
- return md.unifyBool(data, rv)
- case reflect.Interface:
- // we only support empty interfaces.
- if rv.NumMethod() > 0 {
- return e("unsupported type %s", rv.Type())
- }
- return md.unifyAnything(data, rv)
- case reflect.Float32:
- fallthrough
- case reflect.Float64:
- return md.unifyFloat64(data, rv)
- }
- return e("unsupported type %s", rv.Kind())
-}
-
-func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
- if !ok {
- if mapping == nil {
- return nil
- }
- return e("type mismatch for %s: expected table but found %T",
- rv.Type().String(), mapping)
- }
-
- for key, datum := range tmap {
- var f *field
- fields := cachedTypeFields(rv.Type())
- for i := range fields {
- ff := &fields[i]
- if ff.name == key {
- f = ff
- break
- }
- if f == nil && strings.EqualFold(ff.name, key) {
- f = ff
- }
- }
- if f != nil {
- subv := rv
- for _, i := range f.index {
- subv = indirect(subv.Field(i))
- }
- if isUnifiable(subv) {
- md.decoded[md.context.add(key).String()] = true
- md.context = append(md.context, key)
- if err := md.unify(datum, subv); err != nil {
- return err
- }
- md.context = md.context[0 : len(md.context)-1]
- } else if f.name != "" {
- // Bad user! No soup for you!
- return e("cannot write unexported field %s.%s",
- rv.Type().String(), f.name)
- }
- }
- }
- return nil
-}
-
-func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
- if !ok {
- if tmap == nil {
- return nil
- }
- return badtype("map", mapping)
- }
- if rv.IsNil() {
- rv.Set(reflect.MakeMap(rv.Type()))
- }
- for k, v := range tmap {
- md.decoded[md.context.add(k).String()] = true
- md.context = append(md.context, k)
-
- rvkey := indirect(reflect.New(rv.Type().Key()))
- rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
- if err := md.unify(v, rvval); err != nil {
- return err
- }
- md.context = md.context[0 : len(md.context)-1]
-
- rvkey.SetString(k)
- rv.SetMapIndex(rvkey, rvval)
- }
- return nil
-}
-
-func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
- datav := reflect.ValueOf(data)
- if datav.Kind() != reflect.Slice {
- if !datav.IsValid() {
- return nil
- }
- return badtype("slice", data)
- }
- sliceLen := datav.Len()
- if sliceLen != rv.Len() {
- return e("expected array length %d; got TOML array of length %d",
- rv.Len(), sliceLen)
- }
- return md.unifySliceArray(datav, rv)
-}
-
-func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
- datav := reflect.ValueOf(data)
- if datav.Kind() != reflect.Slice {
- if !datav.IsValid() {
- return nil
- }
- return badtype("slice", data)
- }
- n := datav.Len()
- if rv.IsNil() || rv.Cap() < n {
- rv.Set(reflect.MakeSlice(rv.Type(), n, n))
- }
- rv.SetLen(n)
- return md.unifySliceArray(datav, rv)
-}
-
-func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
- sliceLen := data.Len()
- for i := 0; i < sliceLen; i++ {
- v := data.Index(i).Interface()
- sliceval := indirect(rv.Index(i))
- if err := md.unify(v, sliceval); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
- if _, ok := data.(time.Time); ok {
- rv.Set(reflect.ValueOf(data))
- return nil
- }
- return badtype("time.Time", data)
-}
-
-func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
- if s, ok := data.(string); ok {
- rv.SetString(s)
- return nil
- }
- return badtype("string", data)
-}
-
-func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
- if num, ok := data.(float64); ok {
- switch rv.Kind() {
- case reflect.Float32:
- fallthrough
- case reflect.Float64:
- rv.SetFloat(num)
- default:
- panic("bug")
- }
- return nil
- }
- return badtype("float", data)
-}
-
-func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
- if num, ok := data.(int64); ok {
- if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
- switch rv.Kind() {
- case reflect.Int, reflect.Int64:
- // No bounds checking necessary.
- case reflect.Int8:
- if num < math.MinInt8 || num > math.MaxInt8 {
- return e("value %d is out of range for int8", num)
- }
- case reflect.Int16:
- if num < math.MinInt16 || num > math.MaxInt16 {
- return e("value %d is out of range for int16", num)
- }
- case reflect.Int32:
- if num < math.MinInt32 || num > math.MaxInt32 {
- return e("value %d is out of range for int32", num)
- }
- }
- rv.SetInt(num)
- } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
- unum := uint64(num)
- switch rv.Kind() {
- case reflect.Uint, reflect.Uint64:
- // No bounds checking necessary.
- case reflect.Uint8:
- if num < 0 || unum > math.MaxUint8 {
- return e("value %d is out of range for uint8", num)
- }
- case reflect.Uint16:
- if num < 0 || unum > math.MaxUint16 {
- return e("value %d is out of range for uint16", num)
- }
- case reflect.Uint32:
- if num < 0 || unum > math.MaxUint32 {
- return e("value %d is out of range for uint32", num)
- }
- }
- rv.SetUint(unum)
- } else {
- panic("unreachable")
- }
- return nil
- }
- return badtype("integer", data)
-}
-
-func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
- if b, ok := data.(bool); ok {
- rv.SetBool(b)
- return nil
- }
- return badtype("boolean", data)
-}
-
-func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
- rv.Set(reflect.ValueOf(data))
- return nil
-}
-
-func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
- var s string
- switch sdata := data.(type) {
- case TextMarshaler:
- text, err := sdata.MarshalText()
- if err != nil {
- return err
- }
- s = string(text)
- case fmt.Stringer:
- s = sdata.String()
- case string:
- s = sdata
- case bool:
- s = fmt.Sprintf("%v", sdata)
- case int64:
- s = fmt.Sprintf("%d", sdata)
- case float64:
- s = fmt.Sprintf("%f", sdata)
- default:
- return badtype("primitive (string-like)", data)
- }
- if err := v.UnmarshalText([]byte(s)); err != nil {
- return err
- }
- return nil
-}
-
-// rvalue returns a reflect.Value of `v`. All pointers are resolved.
-func rvalue(v interface{}) reflect.Value {
- return indirect(reflect.ValueOf(v))
-}
-
-// indirect returns the value pointed to by a pointer.
-// Pointers are followed until the value is not a pointer.
-// New values are allocated for each nil pointer.
-//
-// An exception to this rule is if the value satisfies an interface of
-// interest to us (like encoding.TextUnmarshaler).
-func indirect(v reflect.Value) reflect.Value {
- if v.Kind() != reflect.Ptr {
- if v.CanSet() {
- pv := v.Addr()
- if _, ok := pv.Interface().(TextUnmarshaler); ok {
- return pv
- }
- }
- return v
- }
- if v.IsNil() {
- v.Set(reflect.New(v.Type().Elem()))
- }
- return indirect(reflect.Indirect(v))
-}
-
-func isUnifiable(rv reflect.Value) bool {
- if rv.CanSet() {
- return true
- }
- if _, ok := rv.Interface().(TextUnmarshaler); ok {
- return true
- }
- return false
-}
-
-func badtype(expected string, data interface{}) error {
- return e("cannot load TOML value of type %T into a Go %s", data, expected)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/decode_meta.go
deleted file mode 100644
index b9914a6..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/decode_meta.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package toml
-
-import "strings"
-
-// MetaData allows access to meta information about TOML data that may not
-// be inferrable via reflection. In particular, whether a key has been defined
-// and the TOML type of a key.
-type MetaData struct {
- mapping map[string]interface{}
- types map[string]tomlType
- keys []Key
- decoded map[string]bool
- context Key // Used only during decoding.
-}
-
-// IsDefined returns true if the key given exists in the TOML data. The key
-// should be specified hierarchially. e.g.,
-//
-// // access the TOML key 'a.b.c'
-// IsDefined("a", "b", "c")
-//
-// IsDefined will return false if an empty key given. Keys are case sensitive.
-func (md *MetaData) IsDefined(key ...string) bool {
- if len(key) == 0 {
- return false
- }
-
- var hash map[string]interface{}
- var ok bool
- var hashOrVal interface{} = md.mapping
- for _, k := range key {
- if hash, ok = hashOrVal.(map[string]interface{}); !ok {
- return false
- }
- if hashOrVal, ok = hash[k]; !ok {
- return false
- }
- }
- return true
-}
-
-// Type returns a string representation of the type of the key specified.
-//
-// Type will return the empty string if given an empty key or a key that
-// does not exist. Keys are case sensitive.
-func (md *MetaData) Type(key ...string) string {
- fullkey := strings.Join(key, ".")
- if typ, ok := md.types[fullkey]; ok {
- return typ.typeString()
- }
- return ""
-}
-
-// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
-// to get values of this type.
-type Key []string
-
-func (k Key) String() string {
- return strings.Join(k, ".")
-}
-
-func (k Key) maybeQuotedAll() string {
- var ss []string
- for i := range k {
- ss = append(ss, k.maybeQuoted(i))
- }
- return strings.Join(ss, ".")
-}
-
-func (k Key) maybeQuoted(i int) string {
- quote := false
- for _, c := range k[i] {
- if !isBareKeyChar(c) {
- quote = true
- break
- }
- }
- if quote {
- return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
- }
- return k[i]
-}
-
-func (k Key) add(piece string) Key {
- newKey := make(Key, len(k)+1)
- copy(newKey, k)
- newKey[len(k)] = piece
- return newKey
-}
-
-// Keys returns a slice of every key in the TOML data, including key groups.
-// Each key is itself a slice, where the first element is the top of the
-// hierarchy and the last is the most specific.
-//
-// The list will have the same order as the keys appeared in the TOML data.
-//
-// All keys returned are non-empty.
-func (md *MetaData) Keys() []Key {
- return md.keys
-}
-
-// Undecoded returns all keys that have not been decoded in the order in which
-// they appear in the original TOML document.
-//
-// This includes keys that haven't been decoded because of a Primitive value.
-// Once the Primitive value is decoded, the keys will be considered decoded.
-//
-// Also note that decoding into an empty interface will result in no decoding,
-// and so no keys will be considered decoded.
-//
-// In this sense, the Undecoded keys correspond to keys in the TOML document
-// that do not have a concrete type in your representation.
-func (md *MetaData) Undecoded() []Key {
- undecoded := make([]Key, 0, len(md.keys))
- for _, key := range md.keys {
- if !md.decoded[key.String()] {
- undecoded = append(undecoded, key)
- }
- }
- return undecoded
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/doc.go
deleted file mode 100644
index b371f39..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/doc.go
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
-Package toml provides facilities for decoding and encoding TOML configuration
-files via reflection. There is also support for delaying decoding with
-the Primitive type, and querying the set of keys in a TOML document with the
-MetaData type.
-
-The specification implemented: https://github.com/toml-lang/toml
-
-The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
-whether a file is a valid TOML document. It can also be used to print the
-type of each key in a TOML document.
-
-Testing
-
-There are two important types of tests used for this package. The first is
-contained inside '*_test.go' files and uses the standard Go unit testing
-framework. These tests are primarily devoted to holistically testing the
-decoder and encoder.
-
-The second type of testing is used to verify the implementation's adherence
-to the TOML specification. These tests have been factored into their own
-project: https://github.com/BurntSushi/toml-test
-
-The reason the tests are in a separate project is so that they can be used by
-any implementation of TOML. Namely, it is language agnostic.
-*/
-package toml
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/encode.go
deleted file mode 100644
index d905c21..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/encode.go
+++ /dev/null
@@ -1,568 +0,0 @@
-package toml
-
-import (
- "bufio"
- "errors"
- "fmt"
- "io"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-type tomlEncodeError struct{ error }
-
-var (
- errArrayMixedElementTypes = errors.New(
- "toml: cannot encode array with mixed element types")
- errArrayNilElement = errors.New(
- "toml: cannot encode array with nil element")
- errNonString = errors.New(
- "toml: cannot encode a map with non-string key type")
- errAnonNonStruct = errors.New(
- "toml: cannot encode an anonymous field that is not a struct")
- errArrayNoTable = errors.New(
- "toml: TOML array element cannot contain a table")
- errNoKey = errors.New(
- "toml: top-level values must be Go maps or structs")
- errAnything = errors.New("") // used in testing
-)
-
-var quotedReplacer = strings.NewReplacer(
- "\t", "\\t",
- "\n", "\\n",
- "\r", "\\r",
- "\"", "\\\"",
- "\\", "\\\\",
-)
-
-// Encoder controls the encoding of Go values to a TOML document to some
-// io.Writer.
-//
-// The indentation level can be controlled with the Indent field.
-type Encoder struct {
- // A single indentation level. By default it is two spaces.
- Indent string
-
- // hasWritten is whether we have written any output to w yet.
- hasWritten bool
- w *bufio.Writer
-}
-
-// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
-// given. By default, a single indentation level is 2 spaces.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- w: bufio.NewWriter(w),
- Indent: " ",
- }
-}
-
-// Encode writes a TOML representation of the Go value to the underlying
-// io.Writer. If the value given cannot be encoded to a valid TOML document,
-// then an error is returned.
-//
-// The mapping between Go values and TOML values should be precisely the same
-// as for the Decode* functions. Similarly, the TextMarshaler interface is
-// supported by encoding the resulting bytes as strings. (If you want to write
-// arbitrary binary data then you will need to use something like base64 since
-// TOML does not have any binary types.)
-//
-// When encoding TOML hashes (i.e., Go maps or structs), keys without any
-// sub-hashes are encoded first.
-//
-// If a Go map is encoded, then its keys are sorted alphabetically for
-// deterministic output. More control over this behavior may be provided if
-// there is demand for it.
-//
-// Encoding Go values without a corresponding TOML representation---like map
-// types with non-string keys---will cause an error to be returned. Similarly
-// for mixed arrays/slices, arrays/slices with nil elements, embedded
-// non-struct types and nested slices containing maps or structs.
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
-// and so is []map[string][]string.)
-func (enc *Encoder) Encode(v interface{}) error {
- rv := eindirect(reflect.ValueOf(v))
- if err := enc.safeEncode(Key([]string{}), rv); err != nil {
- return err
- }
- return enc.w.Flush()
-}
-
-func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
- defer func() {
- if r := recover(); r != nil {
- if terr, ok := r.(tomlEncodeError); ok {
- err = terr.error
- return
- }
- panic(r)
- }
- }()
- enc.encode(key, rv)
- return nil
-}
-
-func (enc *Encoder) encode(key Key, rv reflect.Value) {
- // Special case. Time needs to be in ISO8601 format.
- // Special case. If we can marshal the type to text, then we used that.
- // Basically, this prevents the encoder for handling these types as
- // generic structs (or whatever the underlying type of a TextMarshaler is).
- switch rv.Interface().(type) {
- case time.Time, TextMarshaler:
- enc.keyEqElement(key, rv)
- return
- }
-
- k := rv.Kind()
- switch k {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
- reflect.Uint64,
- reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
- enc.keyEqElement(key, rv)
- case reflect.Array, reflect.Slice:
- if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
- enc.eArrayOfTables(key, rv)
- } else {
- enc.keyEqElement(key, rv)
- }
- case reflect.Interface:
- if rv.IsNil() {
- return
- }
- enc.encode(key, rv.Elem())
- case reflect.Map:
- if rv.IsNil() {
- return
- }
- enc.eTable(key, rv)
- case reflect.Ptr:
- if rv.IsNil() {
- return
- }
- enc.encode(key, rv.Elem())
- case reflect.Struct:
- enc.eTable(key, rv)
- default:
- panic(e("unsupported type for key '%s': %s", key, k))
- }
-}
-
-// eElement encodes any value that can be an array element (primitives and
-// arrays).
-func (enc *Encoder) eElement(rv reflect.Value) {
- switch v := rv.Interface().(type) {
- case time.Time:
- // Special case time.Time as a primitive. Has to come before
- // TextMarshaler below because time.Time implements
- // encoding.TextMarshaler, but we need to always use UTC.
- enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
- return
- case TextMarshaler:
- // Special case. Use text marshaler if it's available for this value.
- if s, err := v.MarshalText(); err != nil {
- encPanic(err)
- } else {
- enc.writeQuoted(string(s))
- }
- return
- }
- switch rv.Kind() {
- case reflect.Bool:
- enc.wf(strconv.FormatBool(rv.Bool()))
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64:
- enc.wf(strconv.FormatInt(rv.Int(), 10))
- case reflect.Uint, reflect.Uint8, reflect.Uint16,
- reflect.Uint32, reflect.Uint64:
- enc.wf(strconv.FormatUint(rv.Uint(), 10))
- case reflect.Float32:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
- case reflect.Float64:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
- case reflect.Array, reflect.Slice:
- enc.eArrayOrSliceElement(rv)
- case reflect.Interface:
- enc.eElement(rv.Elem())
- case reflect.String:
- enc.writeQuoted(rv.String())
- default:
- panic(e("unexpected primitive type: %s", rv.Kind()))
- }
-}
-
-// By the TOML spec, all floats must have a decimal with at least one
-// number on either side.
-func floatAddDecimal(fstr string) string {
- if !strings.Contains(fstr, ".") {
- return fstr + ".0"
- }
- return fstr
-}
-
-func (enc *Encoder) writeQuoted(s string) {
- enc.wf("\"%s\"", quotedReplacer.Replace(s))
-}
-
-func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
- length := rv.Len()
- enc.wf("[")
- for i := 0; i < length; i++ {
- elem := rv.Index(i)
- enc.eElement(elem)
- if i != length-1 {
- enc.wf(", ")
- }
- }
- enc.wf("]")
-}
-
-func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
- if len(key) == 0 {
- encPanic(errNoKey)
- }
- for i := 0; i < rv.Len(); i++ {
- trv := rv.Index(i)
- if isNil(trv) {
- continue
- }
- panicIfInvalidKey(key)
- enc.newline()
- enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
- enc.newline()
- enc.eMapOrStruct(key, trv)
- }
-}
-
-func (enc *Encoder) eTable(key Key, rv reflect.Value) {
- panicIfInvalidKey(key)
- if len(key) == 1 {
- // Output an extra newline between top-level tables.
- // (The newline isn't written if nothing else has been written though.)
- enc.newline()
- }
- if len(key) > 0 {
- enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
- enc.newline()
- }
- enc.eMapOrStruct(key, rv)
-}
-
-func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
- switch rv := eindirect(rv); rv.Kind() {
- case reflect.Map:
- enc.eMap(key, rv)
- case reflect.Struct:
- enc.eStruct(key, rv)
- default:
- panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
- }
-}
-
-func (enc *Encoder) eMap(key Key, rv reflect.Value) {
- rt := rv.Type()
- if rt.Key().Kind() != reflect.String {
- encPanic(errNonString)
- }
-
- // Sort keys so that we have deterministic output. And write keys directly
- // underneath this key first, before writing sub-structs or sub-maps.
- var mapKeysDirect, mapKeysSub []string
- for _, mapKey := range rv.MapKeys() {
- k := mapKey.String()
- if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
- mapKeysSub = append(mapKeysSub, k)
- } else {
- mapKeysDirect = append(mapKeysDirect, k)
- }
- }
-
- var writeMapKeys = func(mapKeys []string) {
- sort.Strings(mapKeys)
- for _, mapKey := range mapKeys {
- mrv := rv.MapIndex(reflect.ValueOf(mapKey))
- if isNil(mrv) {
- // Don't write anything for nil fields.
- continue
- }
- enc.encode(key.add(mapKey), mrv)
- }
- }
- writeMapKeys(mapKeysDirect)
- writeMapKeys(mapKeysSub)
-}
-
-func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
- // Write keys for fields directly under this key first, because if we write
- // a field that creates a new table, then all keys under it will be in that
- // table (not the one we're writing here).
- rt := rv.Type()
- var fieldsDirect, fieldsSub [][]int
- var addFields func(rt reflect.Type, rv reflect.Value, start []int)
- addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
- for i := 0; i < rt.NumField(); i++ {
- f := rt.Field(i)
- // skip unexported fields
- if f.PkgPath != "" && !f.Anonymous {
- continue
- }
- frv := rv.Field(i)
- if f.Anonymous {
- t := f.Type
- switch t.Kind() {
- case reflect.Struct:
- // Treat anonymous struct fields with
- // tag names as though they are not
- // anonymous, like encoding/json does.
- if getOptions(f.Tag).name == "" {
- addFields(t, frv, f.Index)
- continue
- }
- case reflect.Ptr:
- if t.Elem().Kind() == reflect.Struct &&
- getOptions(f.Tag).name == "" {
- if !frv.IsNil() {
- addFields(t.Elem(), frv.Elem(), f.Index)
- }
- continue
- }
- // Fall through to the normal field encoding logic below
- // for non-struct anonymous fields.
- }
- }
-
- if typeIsHash(tomlTypeOfGo(frv)) {
- fieldsSub = append(fieldsSub, append(start, f.Index...))
- } else {
- fieldsDirect = append(fieldsDirect, append(start, f.Index...))
- }
- }
- }
- addFields(rt, rv, nil)
-
- var writeFields = func(fields [][]int) {
- for _, fieldIndex := range fields {
- sft := rt.FieldByIndex(fieldIndex)
- sf := rv.FieldByIndex(fieldIndex)
- if isNil(sf) {
- // Don't write anything for nil fields.
- continue
- }
-
- opts := getOptions(sft.Tag)
- if opts.skip {
- continue
- }
- keyName := sft.Name
- if opts.name != "" {
- keyName = opts.name
- }
- if opts.omitempty && isEmpty(sf) {
- continue
- }
- if opts.omitzero && isZero(sf) {
- continue
- }
-
- enc.encode(key.add(keyName), sf)
- }
- }
- writeFields(fieldsDirect)
- writeFields(fieldsSub)
-}
-
-// tomlTypeName returns the TOML type name of the Go value's type. It is
-// used to determine whether the types of array elements are mixed (which is
-// forbidden). If the Go value is nil, then it is illegal for it to be an array
-// element, and valueIsNil is returned as true.
-
-// Returns the TOML type of a Go value. The type may be `nil`, which means
-// no concrete TOML type could be found.
-func tomlTypeOfGo(rv reflect.Value) tomlType {
- if isNil(rv) || !rv.IsValid() {
- return nil
- }
- switch rv.Kind() {
- case reflect.Bool:
- return tomlBool
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
- reflect.Uint64:
- return tomlInteger
- case reflect.Float32, reflect.Float64:
- return tomlFloat
- case reflect.Array, reflect.Slice:
- if typeEqual(tomlHash, tomlArrayType(rv)) {
- return tomlArrayHash
- }
- return tomlArray
- case reflect.Ptr, reflect.Interface:
- return tomlTypeOfGo(rv.Elem())
- case reflect.String:
- return tomlString
- case reflect.Map:
- return tomlHash
- case reflect.Struct:
- switch rv.Interface().(type) {
- case time.Time:
- return tomlDatetime
- case TextMarshaler:
- return tomlString
- default:
- return tomlHash
- }
- default:
- panic("unexpected reflect.Kind: " + rv.Kind().String())
- }
-}
-
-// tomlArrayType returns the element type of a TOML array. The type returned
-// may be nil if it cannot be determined (e.g., a nil slice or a zero length
-// slize). This function may also panic if it finds a type that cannot be
-// expressed in TOML (such as nil elements, heterogeneous arrays or directly
-// nested arrays of tables).
-func tomlArrayType(rv reflect.Value) tomlType {
- if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
- return nil
- }
- firstType := tomlTypeOfGo(rv.Index(0))
- if firstType == nil {
- encPanic(errArrayNilElement)
- }
-
- rvlen := rv.Len()
- for i := 1; i < rvlen; i++ {
- elem := rv.Index(i)
- switch elemType := tomlTypeOfGo(elem); {
- case elemType == nil:
- encPanic(errArrayNilElement)
- case !typeEqual(firstType, elemType):
- encPanic(errArrayMixedElementTypes)
- }
- }
- // If we have a nested array, then we must make sure that the nested
- // array contains ONLY primitives.
- // This checks arbitrarily nested arrays.
- if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
- nest := tomlArrayType(eindirect(rv.Index(0)))
- if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
- encPanic(errArrayNoTable)
- }
- }
- return firstType
-}
-
-type tagOptions struct {
- skip bool // "-"
- name string
- omitempty bool
- omitzero bool
-}
-
-func getOptions(tag reflect.StructTag) tagOptions {
- t := tag.Get("toml")
- if t == "-" {
- return tagOptions{skip: true}
- }
- var opts tagOptions
- parts := strings.Split(t, ",")
- opts.name = parts[0]
- for _, s := range parts[1:] {
- switch s {
- case "omitempty":
- opts.omitempty = true
- case "omitzero":
- opts.omitzero = true
- }
- }
- return opts
-}
-
-func isZero(rv reflect.Value) bool {
- switch rv.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return rv.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return rv.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return rv.Float() == 0.0
- }
- return false
-}
-
-func isEmpty(rv reflect.Value) bool {
- switch rv.Kind() {
- case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
- return rv.Len() == 0
- case reflect.Bool:
- return !rv.Bool()
- }
- return false
-}
-
-func (enc *Encoder) newline() {
- if enc.hasWritten {
- enc.wf("\n")
- }
-}
-
-func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
- if len(key) == 0 {
- encPanic(errNoKey)
- }
- panicIfInvalidKey(key)
- enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
- enc.eElement(val)
- enc.newline()
-}
-
-func (enc *Encoder) wf(format string, v ...interface{}) {
- if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
- encPanic(err)
- }
- enc.hasWritten = true
-}
-
-func (enc *Encoder) indentStr(key Key) string {
- return strings.Repeat(enc.Indent, len(key)-1)
-}
-
-func encPanic(err error) {
- panic(tomlEncodeError{err})
-}
-
-func eindirect(v reflect.Value) reflect.Value {
- switch v.Kind() {
- case reflect.Ptr, reflect.Interface:
- return eindirect(v.Elem())
- default:
- return v
- }
-}
-
-func isNil(rv reflect.Value) bool {
- switch rv.Kind() {
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return rv.IsNil()
- default:
- return false
- }
-}
-
-func panicIfInvalidKey(key Key) {
- for _, k := range key {
- if len(k) == 0 {
- encPanic(e("Key '%s' is not a valid table name. Key names "+
- "cannot be empty.", key.maybeQuotedAll()))
- }
- }
-}
-
-func isValidKeyName(s string) bool {
- return len(s) != 0
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/encoding_types.go
deleted file mode 100644
index d36e1dd..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/encoding_types.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build go1.2
-
-package toml
-
-// In order to support Go 1.1, we define our own TextMarshaler and
-// TextUnmarshaler types. For Go 1.2+, we just alias them with the
-// standard library interfaces.
-
-import (
- "encoding"
-)
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler encoding.TextMarshaler
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
deleted file mode 100644
index e8d503d..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !go1.2
-
-package toml
-
-// These interfaces were introduced in Go 1.2, so we add them manually when
-// compiling for Go 1.1.
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler interface {
- MarshalText() (text []byte, err error)
-}
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler interface {
- UnmarshalText(text []byte) error
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/lex.go
deleted file mode 100644
index 6dee7fc..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/lex.go
+++ /dev/null
@@ -1,953 +0,0 @@
-package toml
-
-import (
- "fmt"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-type itemType int
-
-const (
- itemError itemType = iota
- itemNIL // used in the parser to indicate no type
- itemEOF
- itemText
- itemString
- itemRawString
- itemMultilineString
- itemRawMultilineString
- itemBool
- itemInteger
- itemFloat
- itemDatetime
- itemArray // the start of an array
- itemArrayEnd
- itemTableStart
- itemTableEnd
- itemArrayTableStart
- itemArrayTableEnd
- itemKeyStart
- itemCommentStart
- itemInlineTableStart
- itemInlineTableEnd
-)
-
-const (
- eof = 0
- comma = ','
- tableStart = '['
- tableEnd = ']'
- arrayTableStart = '['
- arrayTableEnd = ']'
- tableSep = '.'
- keySep = '='
- arrayStart = '['
- arrayEnd = ']'
- commentStart = '#'
- stringStart = '"'
- stringEnd = '"'
- rawStringStart = '\''
- rawStringEnd = '\''
- inlineTableStart = '{'
- inlineTableEnd = '}'
-)
-
-type stateFn func(lx *lexer) stateFn
-
-type lexer struct {
- input string
- start int
- pos int
- line int
- state stateFn
- items chan item
-
- // Allow for backing up up to three runes.
- // This is necessary because TOML contains 3-rune tokens (""" and ''').
- prevWidths [3]int
- nprev int // how many of prevWidths are in use
- // If we emit an eof, we can still back up, but it is not OK to call
- // next again.
- atEOF bool
-
- // A stack of state functions used to maintain context.
- // The idea is to reuse parts of the state machine in various places.
- // For example, values can appear at the top level or within arbitrarily
- // nested arrays. The last state on the stack is used after a value has
- // been lexed. Similarly for comments.
- stack []stateFn
-}
-
-type item struct {
- typ itemType
- val string
- line int
-}
-
-func (lx *lexer) nextItem() item {
- for {
- select {
- case item := <-lx.items:
- return item
- default:
- lx.state = lx.state(lx)
- }
- }
-}
-
-func lex(input string) *lexer {
- lx := &lexer{
- input: input,
- state: lexTop,
- line: 1,
- items: make(chan item, 10),
- stack: make([]stateFn, 0, 10),
- }
- return lx
-}
-
-func (lx *lexer) push(state stateFn) {
- lx.stack = append(lx.stack, state)
-}
-
-func (lx *lexer) pop() stateFn {
- if len(lx.stack) == 0 {
- return lx.errorf("BUG in lexer: no states to pop")
- }
- last := lx.stack[len(lx.stack)-1]
- lx.stack = lx.stack[0 : len(lx.stack)-1]
- return last
-}
-
-func (lx *lexer) current() string {
- return lx.input[lx.start:lx.pos]
-}
-
-func (lx *lexer) emit(typ itemType) {
- lx.items <- item{typ, lx.current(), lx.line}
- lx.start = lx.pos
-}
-
-func (lx *lexer) emitTrim(typ itemType) {
- lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
- lx.start = lx.pos
-}
-
-func (lx *lexer) next() (r rune) {
- if lx.atEOF {
- panic("next called after EOF")
- }
- if lx.pos >= len(lx.input) {
- lx.atEOF = true
- return eof
- }
-
- if lx.input[lx.pos] == '\n' {
- lx.line++
- }
- lx.prevWidths[2] = lx.prevWidths[1]
- lx.prevWidths[1] = lx.prevWidths[0]
- if lx.nprev < 3 {
- lx.nprev++
- }
- r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
- lx.prevWidths[0] = w
- lx.pos += w
- return r
-}
-
-// ignore skips over the pending input before this point.
-func (lx *lexer) ignore() {
- lx.start = lx.pos
-}
-
-// backup steps back one rune. Can be called only twice between calls to next.
-func (lx *lexer) backup() {
- if lx.atEOF {
- lx.atEOF = false
- return
- }
- if lx.nprev < 1 {
- panic("backed up too far")
- }
- w := lx.prevWidths[0]
- lx.prevWidths[0] = lx.prevWidths[1]
- lx.prevWidths[1] = lx.prevWidths[2]
- lx.nprev--
- lx.pos -= w
- if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
- lx.line--
- }
-}
-
-// accept consumes the next rune if it's equal to `valid`.
-func (lx *lexer) accept(valid rune) bool {
- if lx.next() == valid {
- return true
- }
- lx.backup()
- return false
-}
-
-// peek returns but does not consume the next rune in the input.
-func (lx *lexer) peek() rune {
- r := lx.next()
- lx.backup()
- return r
-}
-
-// skip ignores all input that matches the given predicate.
-func (lx *lexer) skip(pred func(rune) bool) {
- for {
- r := lx.next()
- if pred(r) {
- continue
- }
- lx.backup()
- lx.ignore()
- return
- }
-}
-
-// errorf stops all lexing by emitting an error and returning `nil`.
-// Note that any value that is a character is escaped if it's a special
-// character (newlines, tabs, etc.).
-func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
- lx.items <- item{
- itemError,
- fmt.Sprintf(format, values...),
- lx.line,
- }
- return nil
-}
-
-// lexTop consumes elements at the top level of TOML data.
-func lexTop(lx *lexer) stateFn {
- r := lx.next()
- if isWhitespace(r) || isNL(r) {
- return lexSkip(lx, lexTop)
- }
- switch r {
- case commentStart:
- lx.push(lexTop)
- return lexCommentStart
- case tableStart:
- return lexTableStart
- case eof:
- if lx.pos > lx.start {
- return lx.errorf("unexpected EOF")
- }
- lx.emit(itemEOF)
- return nil
- }
-
- // At this point, the only valid item can be a key, so we back up
- // and let the key lexer do the rest.
- lx.backup()
- lx.push(lexTopEnd)
- return lexKeyStart
-}
-
-// lexTopEnd is entered whenever a top-level item has been consumed. (A value
-// or a table.) It must see only whitespace, and will turn back to lexTop
-// upon a newline. If it sees EOF, it will quit the lexer successfully.
-func lexTopEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == commentStart:
- // a comment will read to a newline for us.
- lx.push(lexTop)
- return lexCommentStart
- case isWhitespace(r):
- return lexTopEnd
- case isNL(r):
- lx.ignore()
- return lexTop
- case r == eof:
- lx.emit(itemEOF)
- return nil
- }
- return lx.errorf("expected a top-level item to end with a newline, "+
- "comment, or EOF, but got %q instead", r)
-}
-
-// lexTable lexes the beginning of a table. Namely, it makes sure that
-// it starts with a character other than '.' and ']'.
-// It assumes that '[' has already been consumed.
-// It also handles the case that this is an item in an array of tables.
-// e.g., '[[name]]'.
-func lexTableStart(lx *lexer) stateFn {
- if lx.peek() == arrayTableStart {
- lx.next()
- lx.emit(itemArrayTableStart)
- lx.push(lexArrayTableEnd)
- } else {
- lx.emit(itemTableStart)
- lx.push(lexTableEnd)
- }
- return lexTableNameStart
-}
-
-func lexTableEnd(lx *lexer) stateFn {
- lx.emit(itemTableEnd)
- return lexTopEnd
-}
-
-func lexArrayTableEnd(lx *lexer) stateFn {
- if r := lx.next(); r != arrayTableEnd {
- return lx.errorf("expected end of table array name delimiter %q, "+
- "but got %q instead", arrayTableEnd, r)
- }
- lx.emit(itemArrayTableEnd)
- return lexTopEnd
-}
-
-func lexTableNameStart(lx *lexer) stateFn {
- lx.skip(isWhitespace)
- switch r := lx.peek(); {
- case r == tableEnd || r == eof:
- return lx.errorf("unexpected end of table name " +
- "(table names cannot be empty)")
- case r == tableSep:
- return lx.errorf("unexpected table separator " +
- "(table names cannot be empty)")
- case r == stringStart || r == rawStringStart:
- lx.ignore()
- lx.push(lexTableNameEnd)
- return lexValue // reuse string lexing
- default:
- return lexBareTableName
- }
-}
-
-// lexBareTableName lexes the name of a table. It assumes that at least one
-// valid character for the table has already been read.
-func lexBareTableName(lx *lexer) stateFn {
- r := lx.next()
- if isBareKeyChar(r) {
- return lexBareTableName
- }
- lx.backup()
- lx.emit(itemText)
- return lexTableNameEnd
-}
-
-// lexTableNameEnd reads the end of a piece of a table name, optionally
-// consuming whitespace.
-func lexTableNameEnd(lx *lexer) stateFn {
- lx.skip(isWhitespace)
- switch r := lx.next(); {
- case isWhitespace(r):
- return lexTableNameEnd
- case r == tableSep:
- lx.ignore()
- return lexTableNameStart
- case r == tableEnd:
- return lx.pop()
- default:
- return lx.errorf("expected '.' or ']' to end table name, "+
- "but got %q instead", r)
- }
-}
-
-// lexKeyStart consumes a key name up until the first non-whitespace character.
-// lexKeyStart will ignore whitespace.
-func lexKeyStart(lx *lexer) stateFn {
- r := lx.peek()
- switch {
- case r == keySep:
- return lx.errorf("unexpected key separator %q", keySep)
- case isWhitespace(r) || isNL(r):
- lx.next()
- return lexSkip(lx, lexKeyStart)
- case r == stringStart || r == rawStringStart:
- lx.ignore()
- lx.emit(itemKeyStart)
- lx.push(lexKeyEnd)
- return lexValue // reuse string lexing
- default:
- lx.ignore()
- lx.emit(itemKeyStart)
- return lexBareKey
- }
-}
-
-// lexBareKey consumes the text of a bare key. Assumes that the first character
-// (which is not whitespace) has not yet been consumed.
-func lexBareKey(lx *lexer) stateFn {
- switch r := lx.next(); {
- case isBareKeyChar(r):
- return lexBareKey
- case isWhitespace(r):
- lx.backup()
- lx.emit(itemText)
- return lexKeyEnd
- case r == keySep:
- lx.backup()
- lx.emit(itemText)
- return lexKeyEnd
- default:
- return lx.errorf("bare keys cannot contain %q", r)
- }
-}
-
-// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
-// separator).
-func lexKeyEnd(lx *lexer) stateFn {
- switch r := lx.next(); {
- case r == keySep:
- return lexSkip(lx, lexValue)
- case isWhitespace(r):
- return lexSkip(lx, lexKeyEnd)
- default:
- return lx.errorf("expected key separator %q, but got %q instead",
- keySep, r)
- }
-}
-
-// lexValue starts the consumption of a value anywhere a value is expected.
-// lexValue will ignore whitespace.
-// After a value is lexed, the last state on the next is popped and returned.
-func lexValue(lx *lexer) stateFn {
- // We allow whitespace to precede a value, but NOT newlines.
- // In array syntax, the array states are responsible for ignoring newlines.
- r := lx.next()
- switch {
- case isWhitespace(r):
- return lexSkip(lx, lexValue)
- case isDigit(r):
- lx.backup() // avoid an extra state and use the same as above
- return lexNumberOrDateStart
- }
- switch r {
- case arrayStart:
- lx.ignore()
- lx.emit(itemArray)
- return lexArrayValue
- case inlineTableStart:
- lx.ignore()
- lx.emit(itemInlineTableStart)
- return lexInlineTableValue
- case stringStart:
- if lx.accept(stringStart) {
- if lx.accept(stringStart) {
- lx.ignore() // Ignore """
- return lexMultilineString
- }
- lx.backup()
- }
- lx.ignore() // ignore the '"'
- return lexString
- case rawStringStart:
- if lx.accept(rawStringStart) {
- if lx.accept(rawStringStart) {
- lx.ignore() // Ignore """
- return lexMultilineRawString
- }
- lx.backup()
- }
- lx.ignore() // ignore the "'"
- return lexRawString
- case '+', '-':
- return lexNumberStart
- case '.': // special error case, be kind to users
- return lx.errorf("floats must start with a digit, not '.'")
- }
- if unicode.IsLetter(r) {
- // Be permissive here; lexBool will give a nice error if the
- // user wrote something like
- // x = foo
- // (i.e. not 'true' or 'false' but is something else word-like.)
- lx.backup()
- return lexBool
- }
- return lx.errorf("expected value but found %q instead", r)
-}
-
-// lexArrayValue consumes one value in an array. It assumes that '[' or ','
-// have already been consumed. All whitespace and newlines are ignored.
-func lexArrayValue(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r) || isNL(r):
- return lexSkip(lx, lexArrayValue)
- case r == commentStart:
- lx.push(lexArrayValue)
- return lexCommentStart
- case r == comma:
- return lx.errorf("unexpected comma")
- case r == arrayEnd:
- // NOTE(caleb): The spec isn't clear about whether you can have
- // a trailing comma or not, so we'll allow it.
- return lexArrayEnd
- }
-
- lx.backup()
- lx.push(lexArrayValueEnd)
- return lexValue
-}
-
-// lexArrayValueEnd consumes everything between the end of an array value and
-// the next value (or the end of the array): it ignores whitespace and newlines
-// and expects either a ',' or a ']'.
-func lexArrayValueEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r) || isNL(r):
- return lexSkip(lx, lexArrayValueEnd)
- case r == commentStart:
- lx.push(lexArrayValueEnd)
- return lexCommentStart
- case r == comma:
- lx.ignore()
- return lexArrayValue // move on to the next value
- case r == arrayEnd:
- return lexArrayEnd
- }
- return lx.errorf(
- "expected a comma or array terminator %q, but got %q instead",
- arrayEnd, r,
- )
-}
-
-// lexArrayEnd finishes the lexing of an array.
-// It assumes that a ']' has just been consumed.
-func lexArrayEnd(lx *lexer) stateFn {
- lx.ignore()
- lx.emit(itemArrayEnd)
- return lx.pop()
-}
-
-// lexInlineTableValue consumes one key/value pair in an inline table.
-// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
-func lexInlineTableValue(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r):
- return lexSkip(lx, lexInlineTableValue)
- case isNL(r):
- return lx.errorf("newlines not allowed within inline tables")
- case r == commentStart:
- lx.push(lexInlineTableValue)
- return lexCommentStart
- case r == comma:
- return lx.errorf("unexpected comma")
- case r == inlineTableEnd:
- return lexInlineTableEnd
- }
- lx.backup()
- lx.push(lexInlineTableValueEnd)
- return lexKeyStart
-}
-
-// lexInlineTableValueEnd consumes everything between the end of an inline table
-// key/value pair and the next pair (or the end of the table):
-// it ignores whitespace and expects either a ',' or a '}'.
-func lexInlineTableValueEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r):
- return lexSkip(lx, lexInlineTableValueEnd)
- case isNL(r):
- return lx.errorf("newlines not allowed within inline tables")
- case r == commentStart:
- lx.push(lexInlineTableValueEnd)
- return lexCommentStart
- case r == comma:
- lx.ignore()
- return lexInlineTableValue
- case r == inlineTableEnd:
- return lexInlineTableEnd
- }
- return lx.errorf("expected a comma or an inline table terminator %q, "+
- "but got %q instead", inlineTableEnd, r)
-}
-
-// lexInlineTableEnd finishes the lexing of an inline table.
-// It assumes that a '}' has just been consumed.
-func lexInlineTableEnd(lx *lexer) stateFn {
- lx.ignore()
- lx.emit(itemInlineTableEnd)
- return lx.pop()
-}
-
-// lexString consumes the inner contents of a string. It assumes that the
-// beginning '"' has already been consumed and ignored.
-func lexString(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == eof:
- return lx.errorf("unexpected EOF")
- case isNL(r):
- return lx.errorf("strings cannot contain newlines")
- case r == '\\':
- lx.push(lexString)
- return lexStringEscape
- case r == stringEnd:
- lx.backup()
- lx.emit(itemString)
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- return lexString
-}
-
-// lexMultilineString consumes the inner contents of a string. It assumes that
-// the beginning '"""' has already been consumed and ignored.
-func lexMultilineString(lx *lexer) stateFn {
- switch lx.next() {
- case eof:
- return lx.errorf("unexpected EOF")
- case '\\':
- return lexMultilineStringEscape
- case stringEnd:
- if lx.accept(stringEnd) {
- if lx.accept(stringEnd) {
- lx.backup()
- lx.backup()
- lx.backup()
- lx.emit(itemMultilineString)
- lx.next()
- lx.next()
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- lx.backup()
- }
- }
- return lexMultilineString
-}
-
-// lexRawString consumes a raw string. Nothing can be escaped in such a string.
-// It assumes that the beginning "'" has already been consumed and ignored.
-func lexRawString(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == eof:
- return lx.errorf("unexpected EOF")
- case isNL(r):
- return lx.errorf("strings cannot contain newlines")
- case r == rawStringEnd:
- lx.backup()
- lx.emit(itemRawString)
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- return lexRawString
-}
-
-// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning "'''" has already been consumed and
-// ignored.
-func lexMultilineRawString(lx *lexer) stateFn {
- switch lx.next() {
- case eof:
- return lx.errorf("unexpected EOF")
- case rawStringEnd:
- if lx.accept(rawStringEnd) {
- if lx.accept(rawStringEnd) {
- lx.backup()
- lx.backup()
- lx.backup()
- lx.emit(itemRawMultilineString)
- lx.next()
- lx.next()
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- lx.backup()
- }
- }
- return lexMultilineRawString
-}
-
-// lexMultilineStringEscape consumes an escaped character. It assumes that the
-// preceding '\\' has already been consumed.
-func lexMultilineStringEscape(lx *lexer) stateFn {
- // Handle the special case first:
- if isNL(lx.next()) {
- return lexMultilineString
- }
- lx.backup()
- lx.push(lexMultilineString)
- return lexStringEscape(lx)
-}
-
-func lexStringEscape(lx *lexer) stateFn {
- r := lx.next()
- switch r {
- case 'b':
- fallthrough
- case 't':
- fallthrough
- case 'n':
- fallthrough
- case 'f':
- fallthrough
- case 'r':
- fallthrough
- case '"':
- fallthrough
- case '\\':
- return lx.pop()
- case 'u':
- return lexShortUnicodeEscape
- case 'U':
- return lexLongUnicodeEscape
- }
- return lx.errorf("invalid escape character %q; only the following "+
- "escape characters are allowed: "+
- `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
-}
-
-func lexShortUnicodeEscape(lx *lexer) stateFn {
- var r rune
- for i := 0; i < 4; i++ {
- r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(`expected four hexadecimal digits after '\u', `+
- "but got %q instead", lx.current())
- }
- }
- return lx.pop()
-}
-
-func lexLongUnicodeEscape(lx *lexer) stateFn {
- var r rune
- for i := 0; i < 8; i++ {
- r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(`expected eight hexadecimal digits after '\U', `+
- "but got %q instead", lx.current())
- }
- }
- return lx.pop()
-}
-
-// lexNumberOrDateStart consumes either an integer, a float, or datetime.
-func lexNumberOrDateStart(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexNumberOrDate
- }
- switch r {
- case '_':
- return lexNumber
- case 'e', 'E':
- return lexFloat
- case '.':
- return lx.errorf("floats must start with a digit, not '.'")
- }
- return lx.errorf("expected a digit but got %q", r)
-}
-
-// lexNumberOrDate consumes either an integer, float or datetime.
-func lexNumberOrDate(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexNumberOrDate
- }
- switch r {
- case '-':
- return lexDatetime
- case '_':
- return lexNumber
- case '.', 'e', 'E':
- return lexFloat
- }
-
- lx.backup()
- lx.emit(itemInteger)
- return lx.pop()
-}
-
-// lexDatetime consumes a Datetime, to a first approximation.
-// The parser validates that it matches one of the accepted formats.
-func lexDatetime(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexDatetime
- }
- switch r {
- case '-', 'T', ':', '.', 'Z':
- return lexDatetime
- }
-
- lx.backup()
- lx.emit(itemDatetime)
- return lx.pop()
-}
-
-// lexNumberStart consumes either an integer or a float. It assumes that a sign
-// has already been read, but that *no* digits have been consumed.
-// lexNumberStart will move to the appropriate integer or float states.
-func lexNumberStart(lx *lexer) stateFn {
- // We MUST see a digit. Even floats have to start with a digit.
- r := lx.next()
- if !isDigit(r) {
- if r == '.' {
- return lx.errorf("floats must start with a digit, not '.'")
- }
- return lx.errorf("expected a digit but got %q", r)
- }
- return lexNumber
-}
-
-// lexNumber consumes an integer or a float after seeing the first digit.
-func lexNumber(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexNumber
- }
- switch r {
- case '_':
- return lexNumber
- case '.', 'e', 'E':
- return lexFloat
- }
-
- lx.backup()
- lx.emit(itemInteger)
- return lx.pop()
-}
-
-// lexFloat consumes the elements of a float. It allows any sequence of
-// float-like characters, so floats emitted by the lexer are only a first
-// approximation and must be validated by the parser.
-func lexFloat(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexFloat
- }
- switch r {
- case '_', '.', '-', '+', 'e', 'E':
- return lexFloat
- }
-
- lx.backup()
- lx.emit(itemFloat)
- return lx.pop()
-}
-
-// lexBool consumes a bool string: 'true' or 'false.
-func lexBool(lx *lexer) stateFn {
- var rs []rune
- for {
- r := lx.next()
- if !unicode.IsLetter(r) {
- lx.backup()
- break
- }
- rs = append(rs, r)
- }
- s := string(rs)
- switch s {
- case "true", "false":
- lx.emit(itemBool)
- return lx.pop()
- }
- return lx.errorf("expected value but found %q instead", s)
-}
-
-// lexCommentStart begins the lexing of a comment. It will emit
-// itemCommentStart and consume no characters, passing control to lexComment.
-func lexCommentStart(lx *lexer) stateFn {
- lx.ignore()
- lx.emit(itemCommentStart)
- return lexComment
-}
-
-// lexComment lexes an entire comment. It assumes that '#' has been consumed.
-// It will consume *up to* the first newline character, and pass control
-// back to the last state on the stack.
-func lexComment(lx *lexer) stateFn {
- r := lx.peek()
- if isNL(r) || r == eof {
- lx.emit(itemText)
- return lx.pop()
- }
- lx.next()
- return lexComment
-}
-
-// lexSkip ignores all slurped input and moves on to the next state.
-func lexSkip(lx *lexer, nextState stateFn) stateFn {
- return func(lx *lexer) stateFn {
- lx.ignore()
- return nextState
- }
-}
-
-// isWhitespace returns true if `r` is a whitespace character according
-// to the spec.
-func isWhitespace(r rune) bool {
- return r == '\t' || r == ' '
-}
-
-func isNL(r rune) bool {
- return r == '\n' || r == '\r'
-}
-
-func isDigit(r rune) bool {
- return r >= '0' && r <= '9'
-}
-
-func isHexadecimal(r rune) bool {
- return (r >= '0' && r <= '9') ||
- (r >= 'a' && r <= 'f') ||
- (r >= 'A' && r <= 'F')
-}
-
-func isBareKeyChar(r rune) bool {
- return (r >= 'A' && r <= 'Z') ||
- (r >= 'a' && r <= 'z') ||
- (r >= '0' && r <= '9') ||
- r == '_' ||
- r == '-'
-}
-
-func (itype itemType) String() string {
- switch itype {
- case itemError:
- return "Error"
- case itemNIL:
- return "NIL"
- case itemEOF:
- return "EOF"
- case itemText:
- return "Text"
- case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
- return "String"
- case itemBool:
- return "Bool"
- case itemInteger:
- return "Integer"
- case itemFloat:
- return "Float"
- case itemDatetime:
- return "DateTime"
- case itemTableStart:
- return "TableStart"
- case itemTableEnd:
- return "TableEnd"
- case itemKeyStart:
- return "KeyStart"
- case itemArray:
- return "Array"
- case itemArrayEnd:
- return "ArrayEnd"
- case itemCommentStart:
- return "CommentStart"
- }
- panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
-}
-
-func (item item) String() string {
- return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/parse.go
deleted file mode 100644
index 50869ef..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/parse.go
+++ /dev/null
@@ -1,592 +0,0 @@
-package toml
-
-import (
- "fmt"
- "strconv"
- "strings"
- "time"
- "unicode"
- "unicode/utf8"
-)
-
-type parser struct {
- mapping map[string]interface{}
- types map[string]tomlType
- lx *lexer
-
- // A list of keys in the order that they appear in the TOML data.
- ordered []Key
-
- // the full key for the current hash in scope
- context Key
-
- // the base key name for everything except hashes
- currentKey string
-
- // rough approximation of line number
- approxLine int
-
- // A map of 'key.group.names' to whether they were created implicitly.
- implicits map[string]bool
-}
-
-type parseError string
-
-func (pe parseError) Error() string {
- return string(pe)
-}
-
-func parse(data string) (p *parser, err error) {
- defer func() {
- if r := recover(); r != nil {
- var ok bool
- if err, ok = r.(parseError); ok {
- return
- }
- panic(r)
- }
- }()
-
- p = &parser{
- mapping: make(map[string]interface{}),
- types: make(map[string]tomlType),
- lx: lex(data),
- ordered: make([]Key, 0),
- implicits: make(map[string]bool),
- }
- for {
- item := p.next()
- if item.typ == itemEOF {
- break
- }
- p.topLevel(item)
- }
-
- return p, nil
-}
-
-func (p *parser) panicf(format string, v ...interface{}) {
- msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
- p.approxLine, p.current(), fmt.Sprintf(format, v...))
- panic(parseError(msg))
-}
-
-func (p *parser) next() item {
- it := p.lx.nextItem()
- if it.typ == itemError {
- p.panicf("%s", it.val)
- }
- return it
-}
-
-func (p *parser) bug(format string, v ...interface{}) {
- panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
-}
-
-func (p *parser) expect(typ itemType) item {
- it := p.next()
- p.assertEqual(typ, it.typ)
- return it
-}
-
-func (p *parser) assertEqual(expected, got itemType) {
- if expected != got {
- p.bug("Expected '%s' but got '%s'.", expected, got)
- }
-}
-
-func (p *parser) topLevel(item item) {
- switch item.typ {
- case itemCommentStart:
- p.approxLine = item.line
- p.expect(itemText)
- case itemTableStart:
- kg := p.next()
- p.approxLine = kg.line
-
- var key Key
- for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
- key = append(key, p.keyString(kg))
- }
- p.assertEqual(itemTableEnd, kg.typ)
-
- p.establishContext(key, false)
- p.setType("", tomlHash)
- p.ordered = append(p.ordered, key)
- case itemArrayTableStart:
- kg := p.next()
- p.approxLine = kg.line
-
- var key Key
- for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
- key = append(key, p.keyString(kg))
- }
- p.assertEqual(itemArrayTableEnd, kg.typ)
-
- p.establishContext(key, true)
- p.setType("", tomlArrayHash)
- p.ordered = append(p.ordered, key)
- case itemKeyStart:
- kname := p.next()
- p.approxLine = kname.line
- p.currentKey = p.keyString(kname)
-
- val, typ := p.value(p.next())
- p.setValue(p.currentKey, val)
- p.setType(p.currentKey, typ)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
- p.currentKey = ""
- default:
- p.bug("Unexpected type at top level: %s", item.typ)
- }
-}
-
-// Gets a string for a key (or part of a key in a table name).
-func (p *parser) keyString(it item) string {
- switch it.typ {
- case itemText:
- return it.val
- case itemString, itemMultilineString,
- itemRawString, itemRawMultilineString:
- s, _ := p.value(it)
- return s.(string)
- default:
- p.bug("Unexpected key type: %s", it.typ)
- panic("unreachable")
- }
-}
-
-// value translates an expected value from the lexer into a Go value wrapped
-// as an empty interface.
-func (p *parser) value(it item) (interface{}, tomlType) {
- switch it.typ {
- case itemString:
- return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
- case itemMultilineString:
- trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
- return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
- case itemRawString:
- return it.val, p.typeOfPrimitive(it)
- case itemRawMultilineString:
- return stripFirstNewline(it.val), p.typeOfPrimitive(it)
- case itemBool:
- switch it.val {
- case "true":
- return true, p.typeOfPrimitive(it)
- case "false":
- return false, p.typeOfPrimitive(it)
- }
- p.bug("Expected boolean value, but got '%s'.", it.val)
- case itemInteger:
- if !numUnderscoresOK(it.val) {
- p.panicf("Invalid integer %q: underscores must be surrounded by digits",
- it.val)
- }
- val := strings.Replace(it.val, "_", "", -1)
- num, err := strconv.ParseInt(val, 10, 64)
- if err != nil {
- // Distinguish integer values. Normally, it'd be a bug if the lexer
- // provides an invalid integer, but it's possible that the number is
- // out of range of valid values (which the lexer cannot determine).
- // So mark the former as a bug but the latter as a legitimate user
- // error.
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Integer '%s' is out of the range of 64-bit "+
- "signed integers.", it.val)
- } else {
- p.bug("Expected integer value, but got '%s'.", it.val)
- }
- }
- return num, p.typeOfPrimitive(it)
- case itemFloat:
- parts := strings.FieldsFunc(it.val, func(r rune) bool {
- switch r {
- case '.', 'e', 'E':
- return true
- }
- return false
- })
- for _, part := range parts {
- if !numUnderscoresOK(part) {
- p.panicf("Invalid float %q: underscores must be "+
- "surrounded by digits", it.val)
- }
- }
- if !numPeriodsOK(it.val) {
- // As a special case, numbers like '123.' or '1.e2',
- // which are valid as far as Go/strconv are concerned,
- // must be rejected because TOML says that a fractional
- // part consists of '.' followed by 1+ digits.
- p.panicf("Invalid float %q: '.' must be followed "+
- "by one or more digits", it.val)
- }
- val := strings.Replace(it.val, "_", "", -1)
- num, err := strconv.ParseFloat(val, 64)
- if err != nil {
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Float '%s' is out of the range of 64-bit "+
- "IEEE-754 floating-point numbers.", it.val)
- } else {
- p.panicf("Invalid float value: %q", it.val)
- }
- }
- return num, p.typeOfPrimitive(it)
- case itemDatetime:
- var t time.Time
- var ok bool
- var err error
- for _, format := range []string{
- "2006-01-02T15:04:05Z07:00",
- "2006-01-02T15:04:05",
- "2006-01-02",
- } {
- t, err = time.ParseInLocation(format, it.val, time.Local)
- if err == nil {
- ok = true
- break
- }
- }
- if !ok {
- p.panicf("Invalid TOML Datetime: %q.", it.val)
- }
- return t, p.typeOfPrimitive(it)
- case itemArray:
- array := make([]interface{}, 0)
- types := make([]tomlType, 0)
-
- for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
- if it.typ == itemCommentStart {
- p.expect(itemText)
- continue
- }
-
- val, typ := p.value(it)
- array = append(array, val)
- types = append(types, typ)
- }
- return array, p.typeOfArray(types)
- case itemInlineTableStart:
- var (
- hash = make(map[string]interface{})
- outerContext = p.context
- outerKey = p.currentKey
- )
-
- p.context = append(p.context, p.currentKey)
- p.currentKey = ""
- for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
- if it.typ != itemKeyStart {
- p.bug("Expected key start but instead found %q, around line %d",
- it.val, p.approxLine)
- }
- if it.typ == itemCommentStart {
- p.expect(itemText)
- continue
- }
-
- // retrieve key
- k := p.next()
- p.approxLine = k.line
- kname := p.keyString(k)
-
- // retrieve value
- p.currentKey = kname
- val, typ := p.value(p.next())
- // make sure we keep metadata up to date
- p.setType(kname, typ)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
- hash[kname] = val
- }
- p.context = outerContext
- p.currentKey = outerKey
- return hash, tomlHash
- }
- p.bug("Unexpected value type: %s", it.typ)
- panic("unreachable")
-}
-
-// numUnderscoresOK checks whether each underscore in s is surrounded by
-// characters that are not underscores.
-func numUnderscoresOK(s string) bool {
- accept := false
- for _, r := range s {
- if r == '_' {
- if !accept {
- return false
- }
- accept = false
- continue
- }
- accept = true
- }
- return accept
-}
-
-// numPeriodsOK checks whether every period in s is followed by a digit.
-func numPeriodsOK(s string) bool {
- period := false
- for _, r := range s {
- if period && !isDigit(r) {
- return false
- }
- period = r == '.'
- }
- return !period
-}
-
-// establishContext sets the current context of the parser,
-// where the context is either a hash or an array of hashes. Which one is
-// set depends on the value of the `array` parameter.
-//
-// Establishing the context also makes sure that the key isn't a duplicate, and
-// will create implicit hashes automatically.
-func (p *parser) establishContext(key Key, array bool) {
- var ok bool
-
- // Always start at the top level and drill down for our context.
- hashContext := p.mapping
- keyContext := make(Key, 0)
-
- // We only need implicit hashes for key[0:-1]
- for _, k := range key[0 : len(key)-1] {
- _, ok = hashContext[k]
- keyContext = append(keyContext, k)
-
- // No key? Make an implicit hash and move on.
- if !ok {
- p.addImplicit(keyContext)
- hashContext[k] = make(map[string]interface{})
- }
-
- // If the hash context is actually an array of tables, then set
- // the hash context to the last element in that array.
- //
- // Otherwise, it better be a table, since this MUST be a key group (by
- // virtue of it not being the last element in a key).
- switch t := hashContext[k].(type) {
- case []map[string]interface{}:
- hashContext = t[len(t)-1]
- case map[string]interface{}:
- hashContext = t
- default:
- p.panicf("Key '%s' was already created as a hash.", keyContext)
- }
- }
-
- p.context = keyContext
- if array {
- // If this is the first element for this array, then allocate a new
- // list of tables for it.
- k := key[len(key)-1]
- if _, ok := hashContext[k]; !ok {
- hashContext[k] = make([]map[string]interface{}, 0, 5)
- }
-
- // Add a new table. But make sure the key hasn't already been used
- // for something else.
- if hash, ok := hashContext[k].([]map[string]interface{}); ok {
- hashContext[k] = append(hash, make(map[string]interface{}))
- } else {
- p.panicf("Key '%s' was already created and cannot be used as "+
- "an array.", keyContext)
- }
- } else {
- p.setValue(key[len(key)-1], make(map[string]interface{}))
- }
- p.context = append(p.context, key[len(key)-1])
-}
-
-// setValue sets the given key to the given value in the current context.
-// It will make sure that the key hasn't already been defined, account for
-// implicit key groups.
-func (p *parser) setValue(key string, value interface{}) {
- var tmpHash interface{}
- var ok bool
-
- hash := p.mapping
- keyContext := make(Key, 0)
- for _, k := range p.context {
- keyContext = append(keyContext, k)
- if tmpHash, ok = hash[k]; !ok {
- p.bug("Context for key '%s' has not been established.", keyContext)
- }
- switch t := tmpHash.(type) {
- case []map[string]interface{}:
- // The context is a table of hashes. Pick the most recent table
- // defined as the current hash.
- hash = t[len(t)-1]
- case map[string]interface{}:
- hash = t
- default:
- p.bug("Expected hash to have type 'map[string]interface{}', but "+
- "it has '%T' instead.", tmpHash)
- }
- }
- keyContext = append(keyContext, key)
-
- if _, ok := hash[key]; ok {
- // Typically, if the given key has already been set, then we have
- // to raise an error since duplicate keys are disallowed. However,
- // it's possible that a key was previously defined implicitly. In this
- // case, it is allowed to be redefined concretely. (See the
- // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
- //
- // But we have to make sure to stop marking it as an implicit. (So that
- // another redefinition provokes an error.)
- //
- // Note that since it has already been defined (as a hash), we don't
- // want to overwrite it. So our business is done.
- if p.isImplicit(keyContext) {
- p.removeImplicit(keyContext)
- return
- }
-
- // Otherwise, we have a concrete key trying to override a previous
- // key, which is *always* wrong.
- p.panicf("Key '%s' has already been defined.", keyContext)
- }
- hash[key] = value
-}
-
-// setType sets the type of a particular value at a given key.
-// It should be called immediately AFTER setValue.
-//
-// Note that if `key` is empty, then the type given will be applied to the
-// current context (which is either a table or an array of tables).
-func (p *parser) setType(key string, typ tomlType) {
- keyContext := make(Key, 0, len(p.context)+1)
- for _, k := range p.context {
- keyContext = append(keyContext, k)
- }
- if len(key) > 0 { // allow type setting for hashes
- keyContext = append(keyContext, key)
- }
- p.types[keyContext.String()] = typ
-}
-
-// addImplicit sets the given Key as having been created implicitly.
-func (p *parser) addImplicit(key Key) {
- p.implicits[key.String()] = true
-}
-
-// removeImplicit stops tagging the given key as having been implicitly
-// created.
-func (p *parser) removeImplicit(key Key) {
- p.implicits[key.String()] = false
-}
-
-// isImplicit returns true if the key group pointed to by the key was created
-// implicitly.
-func (p *parser) isImplicit(key Key) bool {
- return p.implicits[key.String()]
-}
-
-// current returns the full key name of the current context.
-func (p *parser) current() string {
- if len(p.currentKey) == 0 {
- return p.context.String()
- }
- if len(p.context) == 0 {
- return p.currentKey
- }
- return fmt.Sprintf("%s.%s", p.context, p.currentKey)
-}
-
-func stripFirstNewline(s string) string {
- if len(s) == 0 || s[0] != '\n' {
- return s
- }
- return s[1:]
-}
-
-func stripEscapedWhitespace(s string) string {
- esc := strings.Split(s, "\\\n")
- if len(esc) > 1 {
- for i := 1; i < len(esc); i++ {
- esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
- }
- }
- return strings.Join(esc, "")
-}
-
-func (p *parser) replaceEscapes(str string) string {
- var replaced []rune
- s := []byte(str)
- r := 0
- for r < len(s) {
- if s[r] != '\\' {
- c, size := utf8.DecodeRune(s[r:])
- r += size
- replaced = append(replaced, c)
- continue
- }
- r += 1
- if r >= len(s) {
- p.bug("Escape sequence at end of string.")
- return ""
- }
- switch s[r] {
- default:
- p.bug("Expected valid escape code after \\, but got %q.", s[r])
- return ""
- case 'b':
- replaced = append(replaced, rune(0x0008))
- r += 1
- case 't':
- replaced = append(replaced, rune(0x0009))
- r += 1
- case 'n':
- replaced = append(replaced, rune(0x000A))
- r += 1
- case 'f':
- replaced = append(replaced, rune(0x000C))
- r += 1
- case 'r':
- replaced = append(replaced, rune(0x000D))
- r += 1
- case '"':
- replaced = append(replaced, rune(0x0022))
- r += 1
- case '\\':
- replaced = append(replaced, rune(0x005C))
- r += 1
- case 'u':
- // At this point, we know we have a Unicode escape of the form
- // `uXXXX` at [r, r+5). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
- replaced = append(replaced, escaped)
- r += 5
- case 'U':
- // At this point, we know we have a Unicode escape of the form
- // `uXXXX` at [r, r+9). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
- replaced = append(replaced, escaped)
- r += 9
- }
- }
- return string(replaced)
-}
-
-func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
- s := string(bs)
- hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
- if err != nil {
- p.bug("Could not parse '%s' as a hexadecimal number, but the "+
- "lexer claims it's OK: %s", s, err)
- }
- if !utf8.ValidRune(rune(hex)) {
- p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
- }
- return rune(hex)
-}
-
-func isStringType(ty itemType) bool {
- return ty == itemString || ty == itemMultilineString ||
- ty == itemRawString || ty == itemRawMultilineString
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/type_check.go
deleted file mode 100644
index c73f8af..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/type_check.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package toml
-
-// tomlType represents any Go type that corresponds to a TOML type.
-// While the first draft of the TOML spec has a simplistic type system that
-// probably doesn't need this level of sophistication, we seem to be militating
-// toward adding real composite types.
-type tomlType interface {
- typeString() string
-}
-
-// typeEqual accepts any two types and returns true if they are equal.
-func typeEqual(t1, t2 tomlType) bool {
- if t1 == nil || t2 == nil {
- return false
- }
- return t1.typeString() == t2.typeString()
-}
-
-func typeIsHash(t tomlType) bool {
- return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
-}
-
-type tomlBaseType string
-
-func (btype tomlBaseType) typeString() string {
- return string(btype)
-}
-
-func (btype tomlBaseType) String() string {
- return btype.typeString()
-}
-
-var (
- tomlInteger tomlBaseType = "Integer"
- tomlFloat tomlBaseType = "Float"
- tomlDatetime tomlBaseType = "Datetime"
- tomlString tomlBaseType = "String"
- tomlBool tomlBaseType = "Bool"
- tomlArray tomlBaseType = "Array"
- tomlHash tomlBaseType = "Hash"
- tomlArrayHash tomlBaseType = "ArrayHash"
-)
-
-// typeOfPrimitive returns a tomlType of any primitive value in TOML.
-// Primitive values are: Integer, Float, Datetime, String and Bool.
-//
-// Passing a lexer item other than the following will cause a BUG message
-// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
-func (p *parser) typeOfPrimitive(lexItem item) tomlType {
- switch lexItem.typ {
- case itemInteger:
- return tomlInteger
- case itemFloat:
- return tomlFloat
- case itemDatetime:
- return tomlDatetime
- case itemString:
- return tomlString
- case itemMultilineString:
- return tomlString
- case itemRawString:
- return tomlString
- case itemRawMultilineString:
- return tomlString
- case itemBool:
- return tomlBool
- }
- p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
- panic("unreachable")
-}
-
-// typeOfArray returns a tomlType for an array given a list of types of its
-// values.
-//
-// In the current spec, if an array is homogeneous, then its type is always
-// "Array". If the array is not homogeneous, an error is generated.
-func (p *parser) typeOfArray(types []tomlType) tomlType {
- // Empty arrays are cool.
- if len(types) == 0 {
- return tomlArray
- }
-
- theType := types[0]
- for _, t := range types[1:] {
- if !typeEqual(theType, t) {
- p.panicf("Array contains values of type '%s' and '%s', but "+
- "arrays must be homogeneous.", theType, t)
- }
- }
- return tomlArray
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/type_fields.go
deleted file mode 100644
index 608997c..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/BurntSushi/toml/type_fields.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package toml
-
-// Struct field handling is adapted from code in encoding/json:
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the Go distribution.
-
-import (
- "reflect"
- "sort"
- "sync"
-)
-
-// A field represents a single field found in a struct.
-type field struct {
- name string // the name of the field (`toml` tag included)
- tag bool // whether field has a `toml` tag
- index []int // represents the depth of an anonymous field
- typ reflect.Type // the type of the field
-}
-
-// byName sorts field by name, breaking ties with depth,
-// then breaking ties with "name came from toml tag", then
-// breaking ties with index sequence.
-type byName []field
-
-func (x byName) Len() int { return len(x) }
-
-func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byName) Less(i, j int) bool {
- if x[i].name != x[j].name {
- return x[i].name < x[j].name
- }
- if len(x[i].index) != len(x[j].index) {
- return len(x[i].index) < len(x[j].index)
- }
- if x[i].tag != x[j].tag {
- return x[i].tag
- }
- return byIndex(x).Less(i, j)
-}
-
-// byIndex sorts field by index sequence.
-type byIndex []field
-
-func (x byIndex) Len() int { return len(x) }
-
-func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byIndex) Less(i, j int) bool {
- for k, xik := range x[i].index {
- if k >= len(x[j].index) {
- return false
- }
- if xik != x[j].index[k] {
- return xik < x[j].index[k]
- }
- }
- return len(x[i].index) < len(x[j].index)
-}
-
-// typeFields returns a list of fields that TOML should recognize for the given
-// type. The algorithm is breadth-first search over the set of structs to
-// include - the top struct and then any reachable anonymous structs.
-func typeFields(t reflect.Type) []field {
- // Anonymous fields to explore at the current level and the next.
- current := []field{}
- next := []field{{typ: t}}
-
- // Count of queued names for current level and the next.
- count := map[reflect.Type]int{}
- nextCount := map[reflect.Type]int{}
-
- // Types already visited at an earlier level.
- visited := map[reflect.Type]bool{}
-
- // Fields found.
- var fields []field
-
- for len(next) > 0 {
- current, next = next, current[:0]
- count, nextCount = nextCount, map[reflect.Type]int{}
-
- for _, f := range current {
- if visited[f.typ] {
- continue
- }
- visited[f.typ] = true
-
- // Scan f.typ for fields to include.
- for i := 0; i < f.typ.NumField(); i++ {
- sf := f.typ.Field(i)
- if sf.PkgPath != "" && !sf.Anonymous { // unexported
- continue
- }
- opts := getOptions(sf.Tag)
- if opts.skip {
- continue
- }
- index := make([]int, len(f.index)+1)
- copy(index, f.index)
- index[len(f.index)] = i
-
- ft := sf.Type
- if ft.Name() == "" && ft.Kind() == reflect.Ptr {
- // Follow pointer.
- ft = ft.Elem()
- }
-
- // Record found field and index sequence.
- if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
- tagged := opts.name != ""
- name := opts.name
- if name == "" {
- name = sf.Name
- }
- fields = append(fields, field{name, tagged, index, ft})
- if count[f.typ] > 1 {
- // If there were multiple instances, add a second,
- // so that the annihilation code will see a duplicate.
- // It only cares about the distinction between 1 or 2,
- // so don't bother generating any more copies.
- fields = append(fields, fields[len(fields)-1])
- }
- continue
- }
-
- // Record new anonymous struct to explore in next round.
- nextCount[ft]++
- if nextCount[ft] == 1 {
- f := field{name: ft.Name(), index: index, typ: ft}
- next = append(next, f)
- }
- }
- }
- }
-
- sort.Sort(byName(fields))
-
- // Delete all fields that are hidden by the Go rules for embedded fields,
- // except that fields with TOML tags are promoted.
-
- // The fields are sorted in primary order of name, secondary order
- // of field index length. Loop over names; for each name, delete
- // hidden fields by choosing the one dominant field that survives.
- out := fields[:0]
- for advance, i := 0, 0; i < len(fields); i += advance {
- // One iteration per name.
- // Find the sequence of fields with the name of this first field.
- fi := fields[i]
- name := fi.name
- for advance = 1; i+advance < len(fields); advance++ {
- fj := fields[i+advance]
- if fj.name != name {
- break
- }
- }
- if advance == 1 { // Only one field with this name
- out = append(out, fi)
- continue
- }
- dominant, ok := dominantField(fields[i : i+advance])
- if ok {
- out = append(out, dominant)
- }
- }
-
- fields = out
- sort.Sort(byIndex(fields))
-
- return fields
-}
-
-// dominantField looks through the fields, all of which are known to
-// have the same name, to find the single field that dominates the
-// others using Go's embedding rules, modified by the presence of
-// TOML tags. If there are multiple top-level fields, the boolean
-// will be false: This condition is an error in Go and we skip all
-// the fields.
-func dominantField(fields []field) (field, bool) {
- // The fields are sorted in increasing index-length order. The winner
- // must therefore be one with the shortest index length. Drop all
- // longer entries, which is easy: just truncate the slice.
- length := len(fields[0].index)
- tagged := -1 // Index of first tagged field.
- for i, f := range fields {
- if len(f.index) > length {
- fields = fields[:i]
- break
- }
- if f.tag {
- if tagged >= 0 {
- // Multiple tagged fields at the same level: conflict.
- // Return no field.
- return field{}, false
- }
- tagged = i
- }
- }
- if tagged >= 0 {
- return fields[tagged], true
- }
- // All remaining fields have the same length. If there's more than one,
- // we have a conflict (two fields named "X" at the same level) and we
- // return no field.
- if len(fields) > 1 {
- return field{}, false
- }
- return fields[0], true
-}
-
-var fieldCache struct {
- sync.RWMutex
- m map[reflect.Type][]field
-}
-
-// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
-func cachedTypeFields(t reflect.Type) []field {
- fieldCache.RLock()
- f := fieldCache.m[t]
- fieldCache.RUnlock()
- if f != nil {
- return f
- }
-
- // Compute fields without lock.
- // Might duplicate effort but won't hold other computations back.
- f = typeFields(t)
- if f == nil {
- f = []field{}
- }
-
- fieldCache.Lock()
- if fieldCache.m == nil {
- fieldCache.m = map[reflect.Type][]field{}
- }
- fieldCache.m[t] = f
- fieldCache.Unlock()
- return f
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index d9771f1..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,21 +0,0 @@
-### Issue description
-Tell us what should happen and what happens instead
-
-### Example code
-```go
-If possible, please enter some example code here to reproduce the issue.
-```
-
-### Error log
-```
-If you have an error log, please paste it here.
-```
-
-### Configuration
-*Driver version (or git SHA):*
-
-*Go version:* run `go version` in your console
-
-*Server version:* E.g. MySQL 5.6, MariaDB 10.0.20
-
-*Server OS:* E.g. Debian 8.1 (Jessie), Windows 10
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 6f5c7eb..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,9 +0,0 @@
-### Description
-Please explain the changes you made here.
-
-### Checklist
-- [ ] Code compiles correctly
-- [ ] Created tests which fail without the change (if possible)
-- [ ] All tests passing
-- [ ] Extended the README / documentation, if necessary
-- [ ] Added myself / the copyright holder to the AUTHORS file
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.gitignore
deleted file mode 100644
index 2de28da..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.gitignore
+++ /dev/null
@@ -1,9 +0,0 @@
-.DS_Store
-.DS_Store?
-._*
-.Spotlight-V100
-.Trashes
-Icon?
-ehthumbs.db
-Thumbs.db
-.idea
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.travis.yml b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.travis.yml
deleted file mode 100644
index eae311b..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.travis.yml
+++ /dev/null
@@ -1,128 +0,0 @@
-sudo: false
-language: go
-go:
- - 1.9.x
- - 1.10.x
- - 1.11.x
- - 1.12.x
- - master
-
-before_install:
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
-
-before_script:
- - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB" | sudo tee -a /etc/mysql/my.cnf
- - sudo service mysql restart
- - .travis/wait_mysql.sh
- - mysql -e 'create database gotest;'
-
-matrix:
- include:
- - env: DB=MYSQL8
- sudo: required
- dist: trusty
- go: 1.10.x
- services:
- - docker
- before_install:
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
- - docker pull mysql:8.0
- - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
- mysql:8.0 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
- - cp .travis/docker.cnf ~/.my.cnf
- - .travis/wait_mysql.sh
- before_script:
- - export MYSQL_TEST_USER=gotest
- - export MYSQL_TEST_PASS=secret
- - export MYSQL_TEST_ADDR=127.0.0.1:3307
- - export MYSQL_TEST_CONCURRENT=1
-
- - env: DB=MYSQL57
- sudo: required
- dist: trusty
- go: 1.10.x
- services:
- - docker
- before_install:
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
- - docker pull mysql:5.7
- - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
- mysql:5.7 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
- - cp .travis/docker.cnf ~/.my.cnf
- - .travis/wait_mysql.sh
- before_script:
- - export MYSQL_TEST_USER=gotest
- - export MYSQL_TEST_PASS=secret
- - export MYSQL_TEST_ADDR=127.0.0.1:3307
- - export MYSQL_TEST_CONCURRENT=1
-
- - env: DB=MARIA55
- sudo: required
- dist: trusty
- go: 1.10.x
- services:
- - docker
- before_install:
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
- - docker pull mariadb:5.5
- - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
- mariadb:5.5 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
- - cp .travis/docker.cnf ~/.my.cnf
- - .travis/wait_mysql.sh
- before_script:
- - export MYSQL_TEST_USER=gotest
- - export MYSQL_TEST_PASS=secret
- - export MYSQL_TEST_ADDR=127.0.0.1:3307
- - export MYSQL_TEST_CONCURRENT=1
-
- - env: DB=MARIA10_1
- sudo: required
- dist: trusty
- go: 1.10.x
- services:
- - docker
- before_install:
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
- - docker pull mariadb:10.1
- - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
- mariadb:10.1 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
- - cp .travis/docker.cnf ~/.my.cnf
- - .travis/wait_mysql.sh
- before_script:
- - export MYSQL_TEST_USER=gotest
- - export MYSQL_TEST_PASS=secret
- - export MYSQL_TEST_ADDR=127.0.0.1:3307
- - export MYSQL_TEST_CONCURRENT=1
-
- - os: osx
- osx_image: xcode10.1
- addons:
- homebrew:
- packages:
- - mysql
- go: 1.12.x
- before_install:
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
- before_script:
- - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB\nlocal_infile=1" >> /usr/local/etc/my.cnf
- - mysql.server start
- - mysql -uroot -e 'CREATE USER gotest IDENTIFIED BY "secret"'
- - mysql -uroot -e 'GRANT ALL ON *.* TO gotest'
- - mysql -uroot -e 'create database gotest;'
- - export MYSQL_TEST_USER=gotest
- - export MYSQL_TEST_PASS=secret
- - export MYSQL_TEST_ADDR=127.0.0.1:3306
- - export MYSQL_TEST_CONCURRENT=1
-
-script:
- - go test -v -covermode=count -coverprofile=coverage.out
- - go vet ./...
- - .travis/gofmt.sh
-after_script:
- - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf
deleted file mode 100644
index e57754e..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf
+++ /dev/null
@@ -1,5 +0,0 @@
-[client]
-user = gotest
-password = secret
-host = 127.0.0.1
-port = 3307
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh
deleted file mode 100755
index 9bf0d16..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-set -ev
-
-# Only check for go1.10+ since the gofmt style changed
-if [[ $(go version) =~ go1\.([0-9]+) ]] && ((${BASH_REMATCH[1]} >= 10)); then
- test -z "$(gofmt -d -s . | tee /dev/stderr)"
-fi
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh
deleted file mode 100755
index e87993e..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-while :
-do
- if mysql -e 'select version()' 2>&1 | grep 'version()\|ERROR 2059 (HY000):'; then
- break
- fi
- sleep 3
-done
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/AUTHORS
deleted file mode 100644
index bfe74c4..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/AUTHORS
+++ /dev/null
@@ -1,101 +0,0 @@
-# This is the official list of Go-MySQL-Driver authors for copyright purposes.
-
-# If you are submitting a patch, please add your name or the name of the
-# organization which holds the copyright to this list in alphabetical order.
-
-# Names should be added to this file as
-# Name
-# The email address is not required for organizations.
-# Please keep the list sorted.
-
-
-# Individual Persons
-
-Aaron Hopkins
-Achille Roussel
-Alexey Palazhchenko
-Andrew Reid
-Arne Hormann
-Asta Xie
-Bulat Gaifullin
-Carlos Nieto
-Chris Moos
-Craig Wilson
-Daniel Montoya
-Daniel Nichter
-Daniël van Eeden
-Dave Protasowski
-DisposaBoy
-Egor Smolyakov
-Erwan Martin
-Evan Shaw
-Frederick Mayle
-Gustavo Kristic
-Hajime Nakagami
-Hanno Braun
-Henri Yandell
-Hirotaka Yamamoto
-Huyiguang
-ICHINOSE Shogo
-Ilia Cimpoes
-INADA Naoki
-Jacek Szwec
-James Harr
-Jeff Hodges
-Jeffrey Charles
-Jerome Meyer
-Jian Zhen
-Joshua Prunier
-Julien Lefevre
-Julien Schmidt
-Justin Li
-Justin Nuß
-Kamil Dziedzic
-Kevin Malachowski
-Kieron Woodhouse
-Lennart Rudolph
-Leonardo YongUk Kim
-Linh Tran Tuan
-Lion Yang
-Luca Looz
-Lucas Liu
-Luke Scott
-Maciej Zimnoch
-Michael Woolnough
-Nicola Peduzzi
-Olivier Mengué
-oscarzhao
-Paul Bonser
-Peter Schultz
-Rebecca Chin
-Reed Allman
-Richard Wilkes
-Robert Russell
-Runrioter Wung
-Shuode Li
-Simon J Mudd
-Soroush Pour
-Stan Putrya
-Stanley Gunawan
-Steven Hartland
-Thomas Wodarek
-Tim Ruffles
-Tom Jenkinson
-Xiangyu Hu
-Xiaobing Jiang
-Xiuming Chen
-Zhenye Xie
-
-# Organizations
-
-Barracuda Networks, Inc.
-Counting Ltd.
-Facebook Inc.
-GitHub Inc.
-Google Inc.
-InfoSum Ltd.
-Keybase Inc.
-Multiplay Ltd.
-Percona LLC
-Pivotal Inc.
-Stripe Inc.
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
deleted file mode 100644
index 2d87d74..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
+++ /dev/null
@@ -1,167 +0,0 @@
-## Version 1.4 (2018-06-03)
-
-Changes:
-
- - Documentation fixes (#530, #535, #567)
- - Refactoring (#575, #579, #580, #581, #603, #615, #704)
- - Cache column names (#444)
- - Sort the DSN parameters in DSNs generated from a config (#637)
- - Allow native password authentication by default (#644)
- - Use the default port if it is missing in the DSN (#668)
- - Removed the `strict` mode (#676)
- - Do not query `max_allowed_packet` by default (#680)
- - Dropped support Go 1.6 and lower (#696)
- - Updated `ConvertValue()` to match the database/sql/driver implementation (#760)
- - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783)
- - Improved the compatibility of the authentication system (#807)
-
-New Features:
-
- - Multi-Results support (#537)
- - `rejectReadOnly` DSN option (#604)
- - `context.Context` support (#608, #612, #627, #761)
- - Transaction isolation level support (#619, #744)
- - Read-Only transactions support (#618, #634)
- - `NewConfig` function which initializes a config with default values (#679)
- - Implemented the `ColumnType` interfaces (#667, #724)
- - Support for custom string types in `ConvertValue` (#623)
- - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710)
- - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802)
- - Implemented `driver.SessionResetter` (#779)
- - `sha256_password` authentication plugin support (#808)
-
-Bugfixes:
-
- - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718)
- - Fixed LOAD LOCAL DATA INFILE for empty files (#590)
- - Removed columns definition cache since it sometimes cached invalid data (#592)
- - Don't mutate registered TLS configs (#600)
- - Make RegisterTLSConfig concurrency-safe (#613)
- - Handle missing auth data in the handshake packet correctly (#646)
- - Do not retry queries when data was written to avoid data corruption (#302, #736)
- - Cache the connection pointer for error handling before invalidating it (#678)
- - Fixed imports for appengine/cloudsql (#700)
- - Fix sending STMT_LONG_DATA for 0 byte data (#734)
- - Set correct capacity for []bytes read from length-encoded strings (#766)
- - Make RegisterDial concurrency-safe (#773)
-
-
-## Version 1.3 (2016-12-01)
-
-Changes:
-
- - Go 1.1 is no longer supported
- - Use decimals fields in MySQL to format time types (#249)
- - Buffer optimizations (#269)
- - TLS ServerName defaults to the host (#283)
- - Refactoring (#400, #410, #437)
- - Adjusted documentation for second generation CloudSQL (#485)
- - Documented DSN system var quoting rules (#502)
- - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
-
-New Features:
-
- - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- - Support for returning table alias on Columns() (#289, #359, #382)
- - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490)
- - Support for uint64 parameters with high bit set (#332, #345)
- - Cleartext authentication plugin support (#327)
- - Exported ParseDSN function and the Config struct (#403, #419, #429)
- - Read / Write timeouts (#401)
- - Support for JSON field type (#414)
- - Support for multi-statements and multi-results (#411, #431)
- - DSN parameter to set the driver-side max_allowed_packet value manually (#489)
- - Native password authentication plugin support (#494, #524)
-
-Bugfixes:
-
- - Fixed handling of queries without columns and rows (#255)
- - Fixed a panic when SetKeepAlive() failed (#298)
- - Handle ERR packets while reading rows (#321)
- - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
- - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
- - Actually zero out bytes in handshake response (#378)
- - Fixed race condition in registering LOAD DATA INFILE handler (#383)
- - Fixed tests with MySQL 5.7.9+ (#380)
- - QueryUnescape TLS config names (#397)
- - Fixed "broken pipe" error by writing to closed socket (#390)
- - Fixed LOAD LOCAL DATA INFILE buffering (#424)
- - Fixed parsing of floats into float64 when placeholders are used (#434)
- - Fixed DSN tests with Go 1.7+ (#459)
- - Handle ERR packets while waiting for EOF (#473)
- - Invalidate connection on error while discarding additional results (#513)
- - Allow terminating packets of length 0 (#516)
-
-
-## Version 1.2 (2014-06-03)
-
-Changes:
-
- - We switched back to a "rolling release". `go get` installs the current master branch again
- - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
- - Exported errors to allow easy checking from application code
- - Enabled TCP Keepalives on TCP connections
- - Optimized INFILE handling (better buffer size calculation, lazy init, ...)
- - The DSN parser also checks for a missing separating slash
- - Faster binary date / datetime to string formatting
- - Also exported the MySQLWarning type
- - mysqlConn.Close returns the first error encountered instead of ignoring all errors
- - writePacket() automatically writes the packet size to the header
- - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
-
-New Features:
-
- - `RegisterDial` allows the usage of a custom dial function to establish the network connection
- - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
- - Logging of critical errors is configurable with `SetLogger`
- - Google CloudSQL support
-
-Bugfixes:
-
- - Allow more than 32 parameters in prepared statements
- - Various old_password fixes
- - Fixed TestConcurrent test to pass Go's race detection
- - Fixed appendLengthEncodedInteger for large numbers
- - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
-
-
-## Version 1.1 (2013-11-02)
-
-Changes:
-
- - Go-MySQL-Driver now requires Go 1.1
- - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
- - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
- - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
- - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
- - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
- - Optimized the buffer for reading
- - stmt.Query now caches column metadata
- - New Logo
- - Changed the copyright header to include all contributors
- - Improved the LOAD INFILE documentation
- - The driver struct is now exported to make the driver directly accessible
- - Refactored the driver tests
- - Added more benchmarks and moved all to a separate file
- - Other small refactoring
-
-New Features:
-
- - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
- - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
- - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
-
-Bugfixes:
-
- - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
- - Convert to DB timezone when inserting `time.Time`
- - Splitted packets (more than 16MB) are now merged correctly
- - Fixed false positive `io.EOF` errors when the data was fully read
- - Avoid panics on reuse of closed connections
- - Fixed empty string producing false nil values
- - Fixed sign byte for positive TIME fields
-
-
-## Version 1.0 (2013-05-14)
-
-Initial Release
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
deleted file mode 100644
index 8fe16bc..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Contributing Guidelines
-
-## Reporting Issues
-
-Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).
-
-## Contributing Code
-
-By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
-Don't forget to add yourself to the AUTHORS file.
-
-### Code Review
-
-Everyone is invited to review and comment on pull requests.
-If it looks fine to you, comment with "LGTM" (Looks good to me).
-
-If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes.
-
-Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM".
-
-## Development Ideas
-
-If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page.
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/LICENSE
deleted file mode 100644
index 14e2f77..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/LICENSE
+++ /dev/null
@@ -1,373 +0,0 @@
-Mozilla Public License Version 2.0
-==================================
-
-1. Definitions
---------------
-
-1.1. "Contributor"
- means each individual or legal entity that creates, contributes to
- the creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
- means the combination of the Contributions of others (if any) used
- by a Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
- means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
- means Source Code Form to which the initial Contributor has attached
- the notice in Exhibit A, the Executable Form of such Source Code
- Form, and Modifications of such Source Code Form, in each case
- including portions thereof.
-
-1.5. "Incompatible With Secondary Licenses"
- means
-
- (a) that the initial Contributor has attached the notice described
- in Exhibit B to the Covered Software; or
-
- (b) that the Covered Software was made available under the terms of
- version 1.1 or earlier of the License, but not also under the
- terms of a Secondary License.
-
-1.6. "Executable Form"
- means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
- means a work that combines Covered Software with other material, in
- a separate file or files, that is not Covered Software.
-
-1.8. "License"
- means this document.
-
-1.9. "Licensable"
- means having the right to grant, to the maximum extent possible,
- whether at the time of the initial grant or subsequently, any and
- all of the rights conveyed by this License.
-
-1.10. "Modifications"
- means any of the following:
-
- (a) any file in Source Code Form that results from an addition to,
- deletion from, or modification of the contents of Covered
- Software; or
-
- (b) any new file in Source Code Form that contains any Covered
- Software.
-
-1.11. "Patent Claims" of a Contributor
- means any patent claim(s), including without limitation, method,
- process, and apparatus claims, in any patent Licensable by such
- Contributor that would be infringed, but for the grant of the
- License, by the making, using, selling, offering for sale, having
- made, import, or transfer of either its Contributions or its
- Contributor Version.
-
-1.12. "Secondary License"
- means either the GNU General Public License, Version 2.0, the GNU
- Lesser General Public License, Version 2.1, the GNU Affero General
- Public License, Version 3.0, or any later versions of those
- licenses.
-
-1.13. "Source Code Form"
- means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
- means an individual or a legal entity exercising rights under this
- License. For legal entities, "You" includes any entity that
- controls, is controlled by, or is under common control with You. For
- purposes of this definition, "control" means (a) the power, direct
- or indirect, to cause the direction or management of such entity,
- whether by contract or otherwise, or (b) ownership of more than
- fifty percent (50%) of the outstanding shares or beneficial
- ownership of such entity.
-
-2. License Grants and Conditions
---------------------------------
-
-2.1. Grants
-
-Each Contributor hereby grants You a world-wide, royalty-free,
-non-exclusive license:
-
-(a) under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or
- as part of a Larger Work; and
-
-(b) under Patent Claims of such Contributor to make, use, sell, offer
- for sale, have made, import, and otherwise transfer either its
- Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-The licenses granted in Section 2.1 with respect to any Contribution
-become effective for each Contribution on the date the Contributor first
-distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-The licenses granted in this Section 2 are the only rights granted under
-this License. No additional rights or licenses will be implied from the
-distribution or licensing of Covered Software under this License.
-Notwithstanding Section 2.1(b) above, no patent license is granted by a
-Contributor:
-
-(a) for any code that a Contributor has removed from Covered Software;
- or
-
-(b) for infringements caused by: (i) Your and any other third party's
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
-(c) under Patent Claims infringed by Covered Software in the absence of
- its Contributions.
-
-This License does not grant any rights in the trademarks, service marks,
-or logos of any Contributor (except as may be necessary to comply with
-the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-No Contributor makes additional grants as a result of Your choice to
-distribute the Covered Software under a subsequent version of this
-License (see Section 10.2) or under the terms of a Secondary License (if
-permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-Each Contributor represents that the Contributor believes its
-Contributions are its original creation(s) or it has sufficient rights
-to grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-This License is not intended to limit any rights You have under
-applicable copyright doctrines of fair use, fair dealing, or other
-equivalents.
-
-2.7. Conditions
-
-Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
-in Section 2.1.
-
-3. Responsibilities
--------------------
-
-3.1. Distribution of Source Form
-
-All distribution of Covered Software in Source Code Form, including any
-Modifications that You create or to which You contribute, must be under
-the terms of this License. You must inform recipients that the Source
-Code Form of the Covered Software is governed by the terms of this
-License, and how they can obtain a copy of this License. You may not
-attempt to alter or restrict the recipients' rights in the Source Code
-Form.
-
-3.2. Distribution of Executable Form
-
-If You distribute Covered Software in Executable Form then:
-
-(a) such Covered Software must also be made available in Source Code
- Form, as described in Section 3.1, and You must inform recipients of
- the Executable Form how they can obtain a copy of such Source Code
- Form by reasonable means in a timely manner, at a charge no more
- than the cost of distribution to the recipient; and
-
-(b) You may distribute such Executable Form under the terms of this
- License, or sublicense it under different terms, provided that the
- license for the Executable Form does not attempt to limit or alter
- the recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-You may create and distribute a Larger Work under terms of Your choice,
-provided that You also comply with the requirements of this License for
-the Covered Software. If the Larger Work is a combination of Covered
-Software with a work governed by one or more Secondary Licenses, and the
-Covered Software is not Incompatible With Secondary Licenses, this
-License permits You to additionally distribute such Covered Software
-under the terms of such Secondary License(s), so that the recipient of
-the Larger Work may, at their option, further distribute the Covered
-Software under the terms of either this License or such Secondary
-License(s).
-
-3.4. Notices
-
-You may not remove or alter the substance of any license notices
-(including copyright notices, patent notices, disclaimers of warranty,
-or limitations of liability) contained within the Source Code Form of
-the Covered Software, except that You may alter any license notices to
-the extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-You may choose to offer, and to charge a fee for, warranty, support,
-indemnity or liability obligations to one or more recipients of Covered
-Software. However, You may do so only on Your own behalf, and not on
-behalf of any Contributor. You must make it absolutely clear that any
-such warranty, support, indemnity, or liability obligation is offered by
-You alone, and You hereby agree to indemnify every Contributor for any
-liability incurred by such Contributor as a result of warranty, support,
-indemnity or liability terms You offer. You may include additional
-disclaimers of warranty and limitations of liability specific to any
-jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
----------------------------------------------------
-
-If it is impossible for You to comply with any of the terms of this
-License with respect to some or all of the Covered Software due to
-statute, judicial order, or regulation then You must: (a) comply with
-the terms of this License to the maximum extent possible; and (b)
-describe the limitations and the code they affect. Such description must
-be placed in a text file included with all distributions of the Covered
-Software under this License. Except to the extent prohibited by statute
-or regulation, such description must be sufficiently detailed for a
-recipient of ordinary skill to be able to understand it.
-
-5. Termination
---------------
-
-5.1. The rights granted under this License will terminate automatically
-if You fail to comply with any of its terms. However, if You become
-compliant, then the rights granted under this License from a particular
-Contributor are reinstated (a) provisionally, unless and until such
-Contributor explicitly and finally terminates Your grants, and (b) on an
-ongoing basis, if such Contributor fails to notify You of the
-non-compliance by some reasonable means prior to 60 days after You have
-come back into compliance. Moreover, Your grants from a particular
-Contributor are reinstated on an ongoing basis if such Contributor
-notifies You of the non-compliance by some reasonable means, this is the
-first time You have received notice of non-compliance with this License
-from such Contributor, and You become compliant prior to 30 days after
-Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-infringement claim (excluding declaratory judgment actions,
-counter-claims, and cross-claims) alleging that a Contributor Version
-directly or indirectly infringes any patent, then the rights granted to
-You by any and all Contributors for the Covered Software under Section
-2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all
-end user license agreements (excluding distributors and resellers) which
-have been validly granted by You or Your distributors under this License
-prior to termination shall survive termination.
-
-************************************************************************
-* *
-* 6. Disclaimer of Warranty *
-* ------------------------- *
-* *
-* Covered Software is provided under this License on an "as is" *
-* basis, without warranty of any kind, either expressed, implied, or *
-* statutory, including, without limitation, warranties that the *
-* Covered Software is free of defects, merchantable, fit for a *
-* particular purpose or non-infringing. The entire risk as to the *
-* quality and performance of the Covered Software is with You. *
-* Should any Covered Software prove defective in any respect, You *
-* (not any Contributor) assume the cost of any necessary servicing, *
-* repair, or correction. This disclaimer of warranty constitutes an *
-* essential part of this License. No use of any Covered Software is *
-* authorized under this License except under this disclaimer. *
-* *
-************************************************************************
-
-************************************************************************
-* *
-* 7. Limitation of Liability *
-* -------------------------- *
-* *
-* Under no circumstances and under no legal theory, whether tort *
-* (including negligence), contract, or otherwise, shall any *
-* Contributor, or anyone who distributes Covered Software as *
-* permitted above, be liable to You for any direct, indirect, *
-* special, incidental, or consequential damages of any character *
-* including, without limitation, damages for lost profits, loss of *
-* goodwill, work stoppage, computer failure or malfunction, or any *
-* and all other commercial damages or losses, even if such party *
-* shall have been informed of the possibility of such damages. This *
-* limitation of liability shall not apply to liability for death or *
-* personal injury resulting from such party's negligence to the *
-* extent applicable law prohibits such limitation. Some *
-* jurisdictions do not allow the exclusion or limitation of *
-* incidental or consequential damages, so this exclusion and *
-* limitation may not apply to You. *
-* *
-************************************************************************
-
-8. Litigation
--------------
-
-Any litigation relating to this License may be brought only in the
-courts of a jurisdiction where the defendant maintains its principal
-place of business and such litigation shall be governed by laws of that
-jurisdiction, without reference to its conflict-of-law provisions.
-Nothing in this Section shall prevent a party's ability to bring
-cross-claims or counter-claims.
-
-9. Miscellaneous
-----------------
-
-This License represents the complete agreement concerning the subject
-matter hereof. If any provision of this License is held to be
-unenforceable, such provision shall be reformed only to the extent
-necessary to make it enforceable. Any law or regulation which provides
-that the language of a contract shall be construed against the drafter
-shall not be used to construe this License against a Contributor.
-
-10. Versions of the License
----------------------------
-
-10.1. New Versions
-
-Mozilla Foundation is the license steward. Except as provided in Section
-10.3, no one other than the license steward has the right to modify or
-publish new versions of this License. Each version will be given a
-distinguishing version number.
-
-10.2. Effect of New Versions
-
-You may distribute the Covered Software under the terms of the version
-of the License under which You originally received the Covered Software,
-or under the terms of any subsequent version published by the license
-steward.
-
-10.3. Modified Versions
-
-If you create software not governed by this License, and you want to
-create a new license for such software, you may create and use a
-modified version of this License if you rename the license and remove
-any references to the name of the license steward (except to note that
-such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-Licenses
-
-If You choose to distribute Source Code Form that is Incompatible With
-Secondary Licenses under the terms of this version of the License, the
-notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
--------------------------------------------
-
- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular
-file, then You may include the notice in a location (such as a LICENSE
-file in a relevant directory) where a recipient would be likely to look
-for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
----------------------------------------------------------
-
- This Source Code Form is "Incompatible With Secondary Licenses", as
- defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/README.md
deleted file mode 100644
index c6adf1d..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/README.md
+++ /dev/null
@@ -1,495 +0,0 @@
-# Go-MySQL-Driver
-
-A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
-
-![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
-
----------------------------------------
- * [Features](#features)
- * [Requirements](#requirements)
- * [Installation](#installation)
- * [Usage](#usage)
- * [DSN (Data Source Name)](#dsn-data-source-name)
- * [Password](#password)
- * [Protocol](#protocol)
- * [Address](#address)
- * [Parameters](#parameters)
- * [Examples](#examples)
- * [Connection pool and timeouts](#connection-pool-and-timeouts)
- * [context.Context Support](#contextcontext-support)
- * [ColumnType Support](#columntype-support)
- * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
- * [time.Time support](#timetime-support)
- * [Unicode support](#unicode-support)
- * [Testing / Development](#testing--development)
- * [License](#license)
-
----------------------------------------
-
-## Features
- * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
- * Native Go implementation. No C-bindings, just pure Go
- * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
- * Automatic handling of broken connections
- * Automatic Connection Pooling *(by database/sql package)*
- * Supports queries larger than 16MB
- * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
- * Intelligent `LONG DATA` handling in prepared statements
- * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
- * Optional `time.Time` parsing
- * Optional placeholder interpolation
-
-## Requirements
- * Go 1.9 or higher. We aim to support the 3 latest versions of Go.
- * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
-
----------------------------------------
-
-## Installation
-Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
-```bash
-$ go get -u github.com/go-sql-driver/mysql
-```
-Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
-
-## Usage
-_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](https://golang.org/pkg/database/sql/) API then.
-
-Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
-```go
-import "database/sql"
-import _ "github.com/go-sql-driver/mysql"
-
-db, err := sql.Open("mysql", "user:password@/dbname")
-```
-
-[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
-
-
-### DSN (Data Source Name)
-
-The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets):
-```
-[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]
-```
-
-A DSN in its fullest form:
-```
-username:password@protocol(address)/dbname?param=value
-```
-
-Except for the databasename, all values are optional. So the minimal DSN is:
-```
-/dbname
-```
-
-If you do not want to preselect a database, leave `dbname` empty:
-```
-/
-```
-This has the same effect as an empty DSN string:
-```
-
-```
-
-Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
-
-#### Password
-Passwords can consist of any character. Escaping is **not** necessary.
-
-#### Protocol
-See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
-In general you should use an Unix domain socket if available and TCP otherwise for best performance.
-
-#### Address
-For TCP and UDP networks, addresses have the form `host[:port]`.
-If `port` is omitted, the default port will be used.
-If `host` is a literal IPv6 address, it must be enclosed in square brackets.
-The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
-
-For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
-
-#### Parameters
-*Parameters are case-sensitive!*
-
-Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
-
-##### `allowAllFiles`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
-[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
-
-##### `allowCleartextPasswords`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
-
-##### `allowNativePasswords`
-
-```
-Type: bool
-Valid Values: true, false
-Default: true
-```
-`allowNativePasswords=false` disallows the usage of MySQL native password method.
-
-##### `allowOldPasswords`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
-
-##### `charset`
-
-```
-Type: string
-Valid Values:
-Default: none
-```
-
-Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
-
-Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
-Unless you need the fallback behavior, please use `collation` instead.
-
-##### `collation`
-
-```
-Type: string
-Valid Values:
-Default: utf8mb4_general_ci
-```
-
-Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
-
-A list of valid charsets for a server is retrievable with `SHOW COLLATION`.
-
-The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You should use an older collation (e.g. `utf8_general_ci`) for older MySQL.
-
-Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
-
-
-##### `clientFoundRows`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
-
-##### `columnsWithAlias`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
-
-```
-SELECT u.id FROM users as u
-```
-
-will return `u.id` instead of just `id` if `columnsWithAlias=true`.
-
-##### `interpolateParams`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`.
-
-*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
-
-##### `loc`
-
-```
-Type: string
-Valid Values:
-Default: UTC
-```
-
-Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
-
-Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
-
-Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
-
-##### `maxAllowedPacket`
-```
-Type: decimal number
-Default: 4194304
-```
-
-Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
-
-##### `multiStatements`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
-
-When `multiStatements` is used, `?` parameters must only be used in the first statement.
-
-##### `parseTime`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`
-The date or datetime like `0000-00-00 00:00:00` is converted into zero value of `time.Time`.
-
-
-##### `readTimeout`
-
-```
-Type: duration
-Default: 0
-```
-
-I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-
-##### `rejectReadOnly`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-
-`rejectReadOnly=true` causes the driver to reject read-only connections. This
-is for a possible race condition during an automatic failover, where the mysql
-client gets connected to a read-only replica after the failover.
-
-Note that this should be a fairly rare case, as an automatic failover normally
-happens when the primary is down, and the race condition shouldn't happen
-unless it comes back up online as soon as the failover is kicked off. On the
-other hand, when this happens, a MySQL application can get stuck on a
-read-only connection until restarted. It is however fairly easy to reproduce,
-for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
-
-If you are not relying on read-only transactions to reject writes that aren't
-supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
-is safer for failovers.
-
-Note that ERROR 1290 can be returned for a `read-only` server and this option will
-cause a retry for that error. However the same error number is used for some
-other cases. You should ensure your application will never cause an ERROR 1290
-except for `read-only` mode when enabling this option.
-
-
-##### `serverPubKey`
-
-```
-Type: string
-Valid Values:
-Default: none
-```
-
-Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN.
-Public keys are used to transmit encrypted data, e.g. for authentication.
-If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
-
-
-##### `timeout`
-
-```
-Type: duration
-Default: OS default
-```
-
-Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-
-
-##### `tls`
-
-```
-Type: bool / string
-Valid Values: true, false, skip-verify, preferred,
-Default: false
-```
-
-`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side) or use `preferred` to use TLS only when advertised by the server. This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
-
-
-##### `writeTimeout`
-
-```
-Type: duration
-Default: 0
-```
-
-I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-
-
-##### System Variables
-
-Any other parameters are interpreted as system variables:
- * `=`: `SET =`
- * `=`: `SET =`
- * `=%27%27`: `SET =''`
-
-Rules:
-* The values for string variables must be quoted with `'`.
-* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
- (which implies values of string variables must be wrapped with `%27`).
-
-Examples:
- * `autocommit=1`: `SET autocommit=1`
- * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
- * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
-
-
-#### Examples
-```
-user@unix(/path/to/socket)/dbname
-```
-
-```
-root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
-```
-
-```
-user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
-```
-
-Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
-```
-user:password@/dbname?sql_mode=TRADITIONAL
-```
-
-TCP via IPv6:
-```
-user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
-```
-
-TCP on a remote host, e.g. Amazon RDS:
-```
-id:password@tcp(your-amazonaws-uri.com:3306)/dbname
-```
-
-Google Cloud SQL on App Engine (First Generation MySQL Server):
-```
-user@cloudsql(project-id:instance-name)/dbname
-```
-
-Google Cloud SQL on App Engine (Second Generation MySQL Server):
-```
-user@cloudsql(project-id:regionname:instance-name)/dbname
-```
-
-TCP using default port (3306) on localhost:
-```
-user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
-```
-
-Use the default protocol (tcp) and host (localhost:3306):
-```
-user:password@/dbname
-```
-
-No Database preselected:
-```
-user:password@/
-```
-
-
-### Connection pool and timeouts
-The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
-
-## `ColumnType` Support
-This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
-
-## `context.Context` Support
-Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
-See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
-
-
-### `LOAD DATA LOCAL INFILE` support
-For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
-```go
-import "github.com/go-sql-driver/mysql"
-```
-
-Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
-
-To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
-
-See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
-
-
-### `time.Time` support
-The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
-
-However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
-
-**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
-
-Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
-
-
-### Unicode support
-Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default.
-
-Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
-
-Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
-
-See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
-
-## Testing / Development
-To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
-
-Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated.
-If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
-
-See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
-
----------------------------------------
-
-## License
-Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
-
-Mozilla summarizes the license scope as follows:
-> MPL: The copyleft applies to any files containing MPLed code.
-
-
-That means:
- * You can **use** the **unchanged** source code both in private and commercially.
- * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
- * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
-
-Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
-
-You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
-
-![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
-
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/appengine.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/appengine.go
deleted file mode 100644
index 914e662..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/appengine.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// +build appengine
-
-package mysql
-
-import (
- "context"
- "net"
-
- "google.golang.org/appengine/cloudsql"
-)
-
-func init() {
- RegisterDialContext("cloudsql", func(_ context.Context, instance string) (net.Conn, error) {
- // XXX: the cloudsql driver still does not export a Context-aware dialer.
- return cloudsql.Dial(instance)
- })
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/auth.go
deleted file mode 100644
index fec7040..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/auth.go
+++ /dev/null
@@ -1,422 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/sha256"
- "crypto/x509"
- "encoding/pem"
- "sync"
-)
-
-// server pub keys registry
-var (
- serverPubKeyLock sync.RWMutex
- serverPubKeyRegistry map[string]*rsa.PublicKey
-)
-
-// RegisterServerPubKey registers a server RSA public key which can be used to
-// send data in a secure manner to the server without receiving the public key
-// in a potentially insecure way from the server first.
-// Registered keys can afterwards be used adding serverPubKey= to the DSN.
-//
-// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
-// after registering it and may not be modified.
-//
-// data, err := ioutil.ReadFile("mykey.pem")
-// if err != nil {
-// log.Fatal(err)
-// }
-//
-// block, _ := pem.Decode(data)
-// if block == nil || block.Type != "PUBLIC KEY" {
-// log.Fatal("failed to decode PEM block containing public key")
-// }
-//
-// pub, err := x509.ParsePKIXPublicKey(block.Bytes)
-// if err != nil {
-// log.Fatal(err)
-// }
-//
-// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok {
-// mysql.RegisterServerPubKey("mykey", rsaPubKey)
-// } else {
-// log.Fatal("not a RSA public key")
-// }
-//
-func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) {
- serverPubKeyLock.Lock()
- if serverPubKeyRegistry == nil {
- serverPubKeyRegistry = make(map[string]*rsa.PublicKey)
- }
-
- serverPubKeyRegistry[name] = pubKey
- serverPubKeyLock.Unlock()
-}
-
-// DeregisterServerPubKey removes the public key registered with the given name.
-func DeregisterServerPubKey(name string) {
- serverPubKeyLock.Lock()
- if serverPubKeyRegistry != nil {
- delete(serverPubKeyRegistry, name)
- }
- serverPubKeyLock.Unlock()
-}
-
-func getServerPubKey(name string) (pubKey *rsa.PublicKey) {
- serverPubKeyLock.RLock()
- if v, ok := serverPubKeyRegistry[name]; ok {
- pubKey = v
- }
- serverPubKeyLock.RUnlock()
- return
-}
-
-// Hash password using pre 4.1 (old password) method
-// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
-type myRnd struct {
- seed1, seed2 uint32
-}
-
-const myRndMaxVal = 0x3FFFFFFF
-
-// Pseudo random number generator
-func newMyRnd(seed1, seed2 uint32) *myRnd {
- return &myRnd{
- seed1: seed1 % myRndMaxVal,
- seed2: seed2 % myRndMaxVal,
- }
-}
-
-// Tested to be equivalent to MariaDB's floating point variant
-// http://play.golang.org/p/QHvhd4qved
-// http://play.golang.org/p/RG0q4ElWDx
-func (r *myRnd) NextByte() byte {
- r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
- r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
-
- return byte(uint64(r.seed1) * 31 / myRndMaxVal)
-}
-
-// Generate binary hash from byte string using insecure pre 4.1 method
-func pwHash(password []byte) (result [2]uint32) {
- var add uint32 = 7
- var tmp uint32
-
- result[0] = 1345345333
- result[1] = 0x12345671
-
- for _, c := range password {
- // skip spaces and tabs in password
- if c == ' ' || c == '\t' {
- continue
- }
-
- tmp = uint32(c)
- result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
- result[1] += (result[1] << 8) ^ result[0]
- add += tmp
- }
-
- // Remove sign bit (1<<31)-1)
- result[0] &= 0x7FFFFFFF
- result[1] &= 0x7FFFFFFF
-
- return
-}
-
-// Hash password using insecure pre 4.1 method
-func scrambleOldPassword(scramble []byte, password string) []byte {
- if len(password) == 0 {
- return nil
- }
-
- scramble = scramble[:8]
-
- hashPw := pwHash([]byte(password))
- hashSc := pwHash(scramble)
-
- r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
-
- var out [8]byte
- for i := range out {
- out[i] = r.NextByte() + 64
- }
-
- mask := r.NextByte()
- for i := range out {
- out[i] ^= mask
- }
-
- return out[:]
-}
-
-// Hash password using 4.1+ method (SHA1)
-func scramblePassword(scramble []byte, password string) []byte {
- if len(password) == 0 {
- return nil
- }
-
- // stage1Hash = SHA1(password)
- crypt := sha1.New()
- crypt.Write([]byte(password))
- stage1 := crypt.Sum(nil)
-
- // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
- // inner Hash
- crypt.Reset()
- crypt.Write(stage1)
- hash := crypt.Sum(nil)
-
- // outer Hash
- crypt.Reset()
- crypt.Write(scramble)
- crypt.Write(hash)
- scramble = crypt.Sum(nil)
-
- // token = scrambleHash XOR stage1Hash
- for i := range scramble {
- scramble[i] ^= stage1[i]
- }
- return scramble
-}
-
-// Hash password using MySQL 8+ method (SHA256)
-func scrambleSHA256Password(scramble []byte, password string) []byte {
- if len(password) == 0 {
- return nil
- }
-
- // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble))
-
- crypt := sha256.New()
- crypt.Write([]byte(password))
- message1 := crypt.Sum(nil)
-
- crypt.Reset()
- crypt.Write(message1)
- message1Hash := crypt.Sum(nil)
-
- crypt.Reset()
- crypt.Write(message1Hash)
- crypt.Write(scramble)
- message2 := crypt.Sum(nil)
-
- for i := range message1 {
- message1[i] ^= message2[i]
- }
-
- return message1
-}
-
-func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
- plain := make([]byte, len(password)+1)
- copy(plain, password)
- for i := range plain {
- j := i % len(seed)
- plain[i] ^= seed[j]
- }
- sha1 := sha1.New()
- return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
-}
-
-func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
- enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
- if err != nil {
- return err
- }
- return mc.writeAuthSwitchPacket(enc)
-}
-
-func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
- switch plugin {
- case "caching_sha2_password":
- authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
- return authResp, nil
-
- case "mysql_old_password":
- if !mc.cfg.AllowOldPasswords {
- return nil, ErrOldPassword
- }
- // Note: there are edge cases where this should work but doesn't;
- // this is currently "wontfix":
- // https://github.com/go-sql-driver/mysql/issues/184
- authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0)
- return authResp, nil
-
- case "mysql_clear_password":
- if !mc.cfg.AllowCleartextPasswords {
- return nil, ErrCleartextPassword
- }
- // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
- // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
- return append([]byte(mc.cfg.Passwd), 0), nil
-
- case "mysql_native_password":
- if !mc.cfg.AllowNativePasswords {
- return nil, ErrNativePassword
- }
- // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
- // Native password authentication only need and will need 20-byte challenge.
- authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
- return authResp, nil
-
- case "sha256_password":
- if len(mc.cfg.Passwd) == 0 {
- return []byte{0}, nil
- }
- if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
- // write cleartext auth packet
- return append([]byte(mc.cfg.Passwd), 0), nil
- }
-
- pubKey := mc.cfg.pubKey
- if pubKey == nil {
- // request public key from server
- return []byte{1}, nil
- }
-
- // encrypted password
- enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
- return enc, err
-
- default:
- errLog.Print("unknown auth plugin:", plugin)
- return nil, ErrUnknownPlugin
- }
-}
-
-func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
- // Read Result Packet
- authData, newPlugin, err := mc.readAuthResult()
- if err != nil {
- return err
- }
-
- // handle auth plugin switch, if requested
- if newPlugin != "" {
- // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
- // sent and we have to keep using the cipher sent in the init packet.
- if authData == nil {
- authData = oldAuthData
- } else {
- // copy data from read buffer to owned slice
- copy(oldAuthData, authData)
- }
-
- plugin = newPlugin
-
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- return err
- }
- if err = mc.writeAuthSwitchPacket(authResp); err != nil {
- return err
- }
-
- // Read Result Packet
- authData, newPlugin, err = mc.readAuthResult()
- if err != nil {
- return err
- }
-
- // Do not allow to change the auth plugin more than once
- if newPlugin != "" {
- return ErrMalformPkt
- }
- }
-
- switch plugin {
-
- // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
- case "caching_sha2_password":
- switch len(authData) {
- case 0:
- return nil // auth successful
- case 1:
- switch authData[0] {
- case cachingSha2PasswordFastAuthSuccess:
- if err = mc.readResultOK(); err == nil {
- return nil // auth successful
- }
-
- case cachingSha2PasswordPerformFullAuthentication:
- if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
- // write cleartext auth packet
- err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0))
- if err != nil {
- return err
- }
- } else {
- pubKey := mc.cfg.pubKey
- if pubKey == nil {
- // request public key from server
- data, err := mc.buf.takeSmallBuffer(4 + 1)
- if err != nil {
- return err
- }
- data[4] = cachingSha2PasswordRequestPublicKey
- mc.writePacket(data)
-
- // parse public key
- if data, err = mc.readPacket(); err != nil {
- return err
- }
-
- block, _ := pem.Decode(data[1:])
- pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
- if err != nil {
- return err
- }
- pubKey = pkix.(*rsa.PublicKey)
- }
-
- // send encrypted password
- err = mc.sendEncryptedPassword(oldAuthData, pubKey)
- if err != nil {
- return err
- }
- }
- return mc.readResultOK()
-
- default:
- return ErrMalformPkt
- }
- default:
- return ErrMalformPkt
- }
-
- case "sha256_password":
- switch len(authData) {
- case 0:
- return nil // auth successful
- default:
- block, _ := pem.Decode(authData)
- pub, err := x509.ParsePKIXPublicKey(block.Bytes)
- if err != nil {
- return err
- }
-
- // send encrypted password
- err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey))
- if err != nil {
- return err
- }
- return mc.readResultOK()
- }
-
- default:
- return nil // auth successful
- }
-
- return err
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/auth_test.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/auth_test.go
deleted file mode 100644
index 1920ef3..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/auth_test.go
+++ /dev/null
@@ -1,1330 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "crypto/rsa"
- "crypto/tls"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "testing"
-)
-
-var testPubKey = []byte("-----BEGIN PUBLIC KEY-----\n" +
- "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAol0Z8G8U+25Btxk/g/fm\n" +
- "UAW/wEKjQCTjkibDE4B+qkuWeiumg6miIRhtilU6m9BFmLQSy1ltYQuu4k17A4tQ\n" +
- "rIPpOQYZges/qsDFkZh3wyK5jL5WEFVdOasf6wsfszExnPmcZS4axxoYJfiuilrN\n" +
- "hnwinBAqfi3S0sw5MpSI4Zl1AbOrHG4zDI62Gti2PKiMGyYDZTS9xPrBLbN95Kby\n" +
- "FFclQLEzA9RJcS1nHFsWtRgHjGPhhjCQxEm9NQ1nePFhCfBfApyfH1VM2VCOQum6\n" +
- "Ci9bMuHWjTjckC84mzF99kOxOWVU7mwS6gnJqBzpuz8t3zq8/iQ2y7QrmZV+jTJP\n" +
- "WQIDAQAB\n" +
- "-----END PUBLIC KEY-----\n")
-
-var testPubKeyRSA *rsa.PublicKey
-
-func init() {
- block, _ := pem.Decode(testPubKey)
- pub, err := x509.ParsePKIXPublicKey(block.Bytes)
- if err != nil {
- panic(err)
- }
- testPubKeyRSA = pub.(*rsa.PublicKey)
-}
-
-func TestScrambleOldPass(t *testing.T) {
- scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2}
- vectors := []struct {
- pass string
- out string
- }{
- {" pass", "47575c5a435b4251"},
- {"pass ", "47575c5a435b4251"},
- {"123\t456", "575c47505b5b5559"},
- {"C0mpl!ca ted#PASS123", "5d5d554849584a45"},
- }
- for _, tuple := range vectors {
- ours := scrambleOldPassword(scramble, tuple.pass)
- if tuple.out != fmt.Sprintf("%x", ours) {
- t.Errorf("Failed old password %q", tuple.pass)
- }
- }
-}
-
-func TestScrambleSHA256Pass(t *testing.T) {
- scramble := []byte{10, 47, 74, 111, 75, 73, 34, 48, 88, 76, 114, 74, 37, 13, 3, 80, 82, 2, 23, 21}
- vectors := []struct {
- pass string
- out string
- }{
- {"secret", "f490e76f66d9d86665ce54d98c78d0acfe2fb0b08b423da807144873d30b312c"},
- {"secret2", "abc3934a012cf342e876071c8ee202de51785b430258a7a0138bc79c4d800bc6"},
- }
- for _, tuple := range vectors {
- ours := scrambleSHA256Password(scramble, tuple.pass)
- if tuple.out != fmt.Sprintf("%x", ours) {
- t.Errorf("Failed SHA256 password %q", tuple.pass)
- }
- }
-}
-
-func TestAuthFastCachingSHA256PasswordCached(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
-
- authData := []byte{90, 105, 74, 126, 30, 48, 37, 56, 3, 23, 115, 127, 69,
- 22, 41, 84, 32, 123, 43, 118}
- plugin := "caching_sha2_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{102, 32, 5, 35, 143, 161, 140, 241, 171, 232, 56,
- 139, 43, 14, 107, 196, 249, 170, 147, 60, 220, 204, 120, 178, 214, 15,
- 184, 150, 26, 61, 57, 235}
- if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 2, 0, 0, 2, 1, 3, // Fast Auth Success
- 7, 0, 0, 3, 0, 0, 0, 2, 0, 0, 0, // OK
- }
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-}
-
-func TestAuthFastCachingSHA256PasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = ""
-
- authData := []byte{90, 105, 74, 126, 30, 48, 37, 56, 3, 23, 115, 127, 69,
- 22, 41, 84, 32, 123, 43, 118}
- plugin := "caching_sha2_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- if writtenAuthRespLen != 0 {
- t.Fatalf("unexpected written auth response (%d bytes): %v",
- writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
- }
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-}
-
-func TestAuthFastCachingSHA256PasswordFullRSA(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
-
- authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
- 62, 94, 83, 80, 52, 85}
- plugin := "caching_sha2_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{171, 201, 138, 146, 89, 159, 11, 170, 0, 67, 165,
- 49, 175, 94, 218, 68, 177, 109, 110, 86, 34, 33, 44, 190, 67, 240, 70,
- 110, 40, 139, 124, 41}
- if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 2, 0, 0, 2, 1, 4, // Perform Full Authentication
- }
- conn.queuedReplies = [][]byte{
- // pub key response
- append([]byte{byte(1 + len(testPubKey)), 1, 0, 4, 1}, testPubKey...),
-
- // OK
- {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 3
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- if !bytes.HasPrefix(conn.written, []byte{1, 0, 0, 3, 2, 0, 1, 0, 5}) {
- t.Errorf("unexpected written data: %v", conn.written)
- }
-}
-
-func TestAuthFastCachingSHA256PasswordFullRSAWithKey(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
- mc.cfg.pubKey = testPubKeyRSA
-
- authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
- 62, 94, 83, 80, 52, 85}
- plugin := "caching_sha2_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{171, 201, 138, 146, 89, 159, 11, 170, 0, 67, 165,
- 49, 175, 94, 218, 68, 177, 109, 110, 86, 34, 33, 44, 190, 67, 240, 70,
- 110, 40, 139, 124, 41}
- if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 2, 0, 0, 2, 1, 4, // Perform Full Authentication
- }
- conn.queuedReplies = [][]byte{
- // OK
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 2
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- if !bytes.HasPrefix(conn.written, []byte{0, 1, 0, 3}) {
- t.Errorf("unexpected written data: %v", conn.written)
- }
-}
-
-func TestAuthFastCachingSHA256PasswordFullSecure(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
-
- authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
- 62, 94, 83, 80, 52, 85}
- plugin := "caching_sha2_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // Hack to make the caching_sha2_password plugin believe that the connection
- // is secure
- mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{171, 201, 138, 146, 89, 159, 11, 170, 0, 67, 165,
- 49, 175, 94, 218, 68, 177, 109, 110, 86, 34, 33, 44, 190, 67, 240, 70,
- 110, 40, 139, 124, 41}
- if writtenAuthRespLen != 32 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 2, 0, 0, 2, 1, 4, // Perform Full Authentication
- }
- conn.queuedReplies = [][]byte{
- // OK
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 3
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- if !bytes.Equal(conn.written, []byte{7, 0, 0, 3, 115, 101, 99, 114, 101, 116, 0}) {
- t.Errorf("unexpected written data: %v", conn.written)
- }
-}
-
-func TestAuthFastCleartextPasswordNotAllowed(t *testing.T) {
- _, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
-
- authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
- 103, 26, 95, 81, 17, 24, 21}
- plugin := "mysql_clear_password"
-
- // Send Client Authentication Packet
- _, err := mc.auth(authData, plugin)
- if err != ErrCleartextPassword {
- t.Errorf("expected ErrCleartextPassword, got %v", err)
- }
-}
-
-func TestAuthFastCleartextPassword(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
- mc.cfg.AllowCleartextPasswords = true
-
- authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
- 103, 26, 95, 81, 17, 24, 21}
- plugin := "mysql_clear_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{115, 101, 99, 114, 101, 116, 0}
- if writtenAuthRespLen != 7 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
- }
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-}
-
-func TestAuthFastCleartextPasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = ""
- mc.cfg.AllowCleartextPasswords = true
-
- authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
- 103, 26, 95, 81, 17, 24, 21}
- plugin := "mysql_clear_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{0}
- if writtenAuthRespLen != 1 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
- }
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-}
-
-func TestAuthFastNativePasswordNotAllowed(t *testing.T) {
- _, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
- mc.cfg.AllowNativePasswords = false
-
- authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
- 103, 26, 95, 81, 17, 24, 21}
- plugin := "mysql_native_password"
-
- // Send Client Authentication Packet
- _, err := mc.auth(authData, plugin)
- if err != ErrNativePassword {
- t.Errorf("expected ErrNativePassword, got %v", err)
- }
-}
-
-func TestAuthFastNativePassword(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
-
- authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
- 103, 26, 95, 81, 17, 24, 21}
- plugin := "mysql_native_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{53, 177, 140, 159, 251, 189, 127, 53, 109, 252,
- 172, 50, 211, 192, 240, 164, 26, 48, 207, 45}
- if writtenAuthRespLen != 20 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
- }
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-}
-
-func TestAuthFastNativePasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = ""
-
- authData := []byte{70, 114, 92, 94, 1, 38, 11, 116, 63, 114, 23, 101, 126,
- 103, 26, 95, 81, 17, 24, 21}
- plugin := "mysql_native_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- if writtenAuthRespLen != 0 {
- t.Fatalf("unexpected written auth response (%d bytes): %v",
- writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response
- conn.data = []byte{
- 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, // OK
- }
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-}
-
-func TestAuthFastSHA256PasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = ""
-
- authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
- 62, 94, 83, 80, 52, 85}
- plugin := "sha256_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{0}
- if writtenAuthRespLen != 1 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response (pub key response)
- conn.data = append([]byte{byte(1 + len(testPubKey)), 1, 0, 2, 1}, testPubKey...)
- conn.queuedReplies = [][]byte{
- // OK
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 2
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- if !bytes.HasPrefix(conn.written, []byte{0, 1, 0, 3}) {
- t.Errorf("unexpected written data: %v", conn.written)
- }
-}
-
-func TestAuthFastSHA256PasswordRSA(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
-
- authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
- 62, 94, 83, 80, 52, 85}
- plugin := "sha256_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{1}
- if writtenAuthRespLen != 1 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response (pub key response)
- conn.data = append([]byte{byte(1 + len(testPubKey)), 1, 0, 2, 1}, testPubKey...)
- conn.queuedReplies = [][]byte{
- // OK
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 2
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- if !bytes.HasPrefix(conn.written, []byte{0, 1, 0, 3}) {
- t.Errorf("unexpected written data: %v", conn.written)
- }
-}
-
-func TestAuthFastSHA256PasswordRSAWithKey(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
- mc.cfg.pubKey = testPubKeyRSA
-
- authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
- 62, 94, 83, 80, 52, 85}
- plugin := "sha256_password"
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // auth response (OK)
- conn.data = []byte{7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0}
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-}
-
-func TestAuthFastSHA256PasswordSecure(t *testing.T) {
- conn, mc := newRWMockConn(1)
- mc.cfg.User = "root"
- mc.cfg.Passwd = "secret"
-
- // hack to make the caching_sha2_password plugin believe that the connection
- // is secure
- mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
-
- authData := []byte{6, 81, 96, 114, 14, 42, 50, 30, 76, 47, 1, 95, 126, 81,
- 62, 94, 83, 80, 52, 85}
- plugin := "sha256_password"
-
- // send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // unset TLS config to prevent the actual establishment of a TLS wrapper
- mc.cfg.tls = nil
-
- err = mc.writeHandshakeResponsePacket(authResp, plugin)
- if err != nil {
- t.Fatal(err)
- }
-
- // check written auth response
- authRespStart := 4 + 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1
- authRespEnd := authRespStart + 1 + len(authResp)
- writtenAuthRespLen := conn.written[authRespStart]
- writtenAuthResp := conn.written[authRespStart+1 : authRespEnd]
- expectedAuthResp := []byte{115, 101, 99, 114, 101, 116, 0}
- if writtenAuthRespLen != 7 || !bytes.Equal(writtenAuthResp, expectedAuthResp) {
- t.Fatalf("unexpected written auth response (%d bytes): %v", writtenAuthRespLen, writtenAuthResp)
- }
- conn.written = nil
-
- // auth response (OK)
- conn.data = []byte{7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0}
- conn.maxReads = 1
-
- // Handle response to auth packet
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- if !bytes.Equal(conn.written, []byte{}) {
- t.Errorf("unexpected written data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchCachingSHA256PasswordCached(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = "secret"
-
- // auth switch request
- conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
- 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
- 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
- 50, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}, // OK
- }
- conn.maxReads = 3
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{
- // 1. Packet: Hash
- 32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
- 54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
- 153, 9, 130,
- }
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchCachingSHA256PasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = ""
-
- // auth switch request
- conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
- 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
- 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
- 50, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{0, 0, 0, 3}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchCachingSHA256PasswordFullRSA(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = "secret"
-
- // auth switch request
- conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
- 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
- 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
- 50, 0}
-
- conn.queuedReplies = [][]byte{
- // Perform Full Authentication
- {2, 0, 0, 4, 1, 4},
-
- // Pub Key Response
- append([]byte{byte(1 + len(testPubKey)), 1, 0, 6, 1}, testPubKey...),
-
- // OK
- {7, 0, 0, 8, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 4
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReplyPrefix := []byte{
- // 1. Packet: Hash
- 32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
- 54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
- 153, 9, 130,
-
- // 2. Packet: Pub Key Request
- 1, 0, 0, 5, 2,
-
- // 3. Packet: Encrypted Password
- 0, 1, 0, 7, // [changing bytes]
- }
- if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchCachingSHA256PasswordFullRSAWithKey(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = "secret"
- mc.cfg.pubKey = testPubKeyRSA
-
- // auth switch request
- conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
- 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
- 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
- 50, 0}
-
- conn.queuedReplies = [][]byte{
- // Perform Full Authentication
- {2, 0, 0, 4, 1, 4},
-
- // OK
- {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 3
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReplyPrefix := []byte{
- // 1. Packet: Hash
- 32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
- 54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
- 153, 9, 130,
-
- // 2. Packet: Encrypted Password
- 0, 1, 0, 5, // [changing bytes]
- }
- if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchCachingSHA256PasswordFullSecure(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = "secret"
-
- // Hack to make the caching_sha2_password plugin believe that the connection
- // is secure
- mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
-
- // auth switch request
- conn.data = []byte{44, 0, 0, 2, 254, 99, 97, 99, 104, 105, 110, 103, 95,
- 115, 104, 97, 50, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 101,
- 11, 26, 18, 94, 97, 22, 72, 2, 46, 70, 106, 29, 55, 45, 94, 76, 90, 84,
- 50, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{
- {2, 0, 0, 4, 1, 4}, // Perform Full Authentication
- {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0}, // OK
- }
- conn.maxReads = 3
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{
- // 1. Packet: Hash
- 32, 0, 0, 3, 129, 93, 132, 95, 114, 48, 79, 215, 128, 62, 193, 118, 128,
- 54, 75, 208, 159, 252, 227, 215, 129, 15, 242, 97, 19, 159, 31, 20, 58,
- 153, 9, 130,
-
- // 2. Packet: Cleartext password
- 7, 0, 0, 5, 115, 101, 99, 114, 101, 116, 0,
- }
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchCleartextPasswordNotAllowed(t *testing.T) {
- conn, mc := newRWMockConn(2)
-
- conn.data = []byte{22, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 99, 108,
- 101, 97, 114, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0}
- conn.maxReads = 1
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
- err := mc.handleAuthResult(authData, plugin)
- if err != ErrCleartextPassword {
- t.Errorf("expected ErrCleartextPassword, got %v", err)
- }
-}
-
-func TestAuthSwitchCleartextPassword(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowCleartextPasswords = true
- mc.cfg.Passwd = "secret"
-
- // auth switch request
- conn.data = []byte{22, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 99, 108,
- 101, 97, 114, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{7, 0, 0, 3, 115, 101, 99, 114, 101, 116, 0}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchCleartextPasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowCleartextPasswords = true
- mc.cfg.Passwd = ""
-
- // auth switch request
- conn.data = []byte{22, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 99, 108,
- 101, 97, 114, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{1, 0, 0, 3, 0}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchNativePasswordNotAllowed(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowNativePasswords = false
-
- conn.data = []byte{44, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 110, 97,
- 116, 105, 118, 101, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 96,
- 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31, 48, 31, 89, 39, 55,
- 31, 0}
- conn.maxReads = 1
- authData := []byte{96, 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31,
- 48, 31, 89, 39, 55, 31}
- plugin := "caching_sha2_password"
- err := mc.handleAuthResult(authData, plugin)
- if err != ErrNativePassword {
- t.Errorf("expected ErrNativePassword, got %v", err)
- }
-}
-
-func TestAuthSwitchNativePassword(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowNativePasswords = true
- mc.cfg.Passwd = "secret"
-
- // auth switch request
- conn.data = []byte{44, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 110, 97,
- 116, 105, 118, 101, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 96,
- 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31, 48, 31, 89, 39, 55,
- 31, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{96, 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31,
- 48, 31, 89, 39, 55, 31}
- plugin := "caching_sha2_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{20, 0, 0, 3, 202, 41, 195, 164, 34, 226, 49, 103,
- 21, 211, 167, 199, 227, 116, 8, 48, 57, 71, 149, 146}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchNativePasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowNativePasswords = true
- mc.cfg.Passwd = ""
-
- // auth switch request
- conn.data = []byte{44, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 110, 97,
- 116, 105, 118, 101, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 96,
- 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31, 48, 31, 89, 39, 55,
- 31, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{{7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{96, 71, 63, 8, 1, 58, 75, 12, 69, 95, 66, 60, 117, 31,
- 48, 31, 89, 39, 55, 31}
- plugin := "caching_sha2_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{0, 0, 0, 3}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchOldPasswordNotAllowed(t *testing.T) {
- conn, mc := newRWMockConn(2)
-
- conn.data = []byte{41, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 111, 108,
- 100, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 95, 84, 103, 43, 61,
- 49, 123, 61, 91, 50, 40, 113, 35, 84, 96, 101, 92, 123, 121, 107, 0}
- conn.maxReads = 1
- authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
- 84, 96, 101, 92, 123, 121, 107}
- plugin := "mysql_native_password"
- err := mc.handleAuthResult(authData, plugin)
- if err != ErrOldPassword {
- t.Errorf("expected ErrOldPassword, got %v", err)
- }
-}
-
-// Same to TestAuthSwitchOldPasswordNotAllowed, but use OldAuthSwitch request.
-func TestOldAuthSwitchNotAllowed(t *testing.T) {
- conn, mc := newRWMockConn(2)
-
- // OldAuthSwitch request
- conn.data = []byte{1, 0, 0, 2, 0xfe}
- conn.maxReads = 1
- authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
- 84, 96, 101, 92, 123, 121, 107}
- plugin := "mysql_native_password"
- err := mc.handleAuthResult(authData, plugin)
- if err != ErrOldPassword {
- t.Errorf("expected ErrOldPassword, got %v", err)
- }
-}
-
-func TestAuthSwitchOldPassword(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowOldPasswords = true
- mc.cfg.Passwd = "secret"
-
- // auth switch request
- conn.data = []byte{41, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 111, 108,
- 100, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 95, 84, 103, 43, 61,
- 49, 123, 61, 91, 50, 40, 113, 35, 84, 96, 101, 92, 123, 121, 107, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{{8, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
- 84, 96, 101, 92, 123, 121, 107}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{9, 0, 0, 3, 86, 83, 83, 79, 74, 78, 65, 66, 0}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-// Same to TestAuthSwitchOldPassword, but use OldAuthSwitch request.
-func TestOldAuthSwitch(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowOldPasswords = true
- mc.cfg.Passwd = "secret"
-
- // OldAuthSwitch request
- conn.data = []byte{1, 0, 0, 2, 0xfe}
-
- // auth response
- conn.queuedReplies = [][]byte{{8, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
- 84, 96, 101, 92, 123, 121, 107}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{9, 0, 0, 3, 86, 83, 83, 79, 74, 78, 65, 66, 0}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-func TestAuthSwitchOldPasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowOldPasswords = true
- mc.cfg.Passwd = ""
-
- // auth switch request
- conn.data = []byte{41, 0, 0, 2, 254, 109, 121, 115, 113, 108, 95, 111, 108,
- 100, 95, 112, 97, 115, 115, 119, 111, 114, 100, 0, 95, 84, 103, 43, 61,
- 49, 123, 61, 91, 50, 40, 113, 35, 84, 96, 101, 92, 123, 121, 107, 0}
-
- // auth response
- conn.queuedReplies = [][]byte{{8, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
- 84, 96, 101, 92, 123, 121, 107}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{1, 0, 0, 3, 0}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-// Same to TestAuthSwitchOldPasswordEmpty, but use OldAuthSwitch request.
-func TestOldAuthSwitchPasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.AllowOldPasswords = true
- mc.cfg.Passwd = ""
-
- // OldAuthSwitch request.
- conn.data = []byte{1, 0, 0, 2, 0xfe}
-
- // auth response
- conn.queuedReplies = [][]byte{{8, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 0}}
- conn.maxReads = 2
-
- authData := []byte{95, 84, 103, 43, 61, 49, 123, 61, 91, 50, 40, 113, 35,
- 84, 96, 101, 92, 123, 121, 107}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReply := []byte{1, 0, 0, 3, 0}
- if !bytes.Equal(conn.written, expectedReply) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchSHA256PasswordEmpty(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = ""
-
- // auth switch request
- conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
- 115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
- 33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
-
- conn.queuedReplies = [][]byte{
- // OK
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 3
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReplyPrefix := []byte{
- // 1. Packet: Empty Password
- 1, 0, 0, 3, 0,
- }
- if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchSHA256PasswordRSA(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = "secret"
-
- // auth switch request
- conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
- 115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
- 33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
-
- conn.queuedReplies = [][]byte{
- // Pub Key Response
- append([]byte{byte(1 + len(testPubKey)), 1, 0, 4, 1}, testPubKey...),
-
- // OK
- {7, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 3
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReplyPrefix := []byte{
- // 1. Packet: Pub Key Request
- 1, 0, 0, 3, 1,
-
- // 2. Packet: Encrypted Password
- 0, 1, 0, 5, // [changing bytes]
- }
- if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchSHA256PasswordRSAWithKey(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = "secret"
- mc.cfg.pubKey = testPubKeyRSA
-
- // auth switch request
- conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
- 115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
- 33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
-
- conn.queuedReplies = [][]byte{
- // OK
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 2
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReplyPrefix := []byte{
- // 1. Packet: Encrypted Password
- 0, 1, 0, 3, // [changing bytes]
- }
- if !bytes.HasPrefix(conn.written, expectedReplyPrefix) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
-
-func TestAuthSwitchSHA256PasswordSecure(t *testing.T) {
- conn, mc := newRWMockConn(2)
- mc.cfg.Passwd = "secret"
-
- // Hack to make the caching_sha2_password plugin believe that the connection
- // is secure
- mc.cfg.tls = &tls.Config{InsecureSkipVerify: true}
-
- // auth switch request
- conn.data = []byte{38, 0, 0, 2, 254, 115, 104, 97, 50, 53, 54, 95, 112, 97,
- 115, 115, 119, 111, 114, 100, 0, 78, 82, 62, 40, 100, 1, 59, 31, 44, 69,
- 33, 112, 8, 81, 51, 96, 65, 82, 16, 114, 0}
-
- conn.queuedReplies = [][]byte{
- // OK
- {7, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0},
- }
- conn.maxReads = 2
-
- authData := []byte{123, 87, 15, 84, 20, 58, 37, 121, 91, 117, 51, 24, 19,
- 47, 43, 9, 41, 112, 67, 110}
- plugin := "mysql_native_password"
-
- if err := mc.handleAuthResult(authData, plugin); err != nil {
- t.Errorf("got error: %v", err)
- }
-
- expectedReplyPrefix := []byte{
- // 1. Packet: Cleartext Password
- 7, 0, 0, 3, 115, 101, 99, 114, 101, 116, 0,
- }
- if !bytes.Equal(conn.written, expectedReplyPrefix) {
- t.Errorf("got unexpected data: %v", conn.written)
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/benchmark_test.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/benchmark_test.go
deleted file mode 100644
index 3e25a3b..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/benchmark_test.go
+++ /dev/null
@@ -1,373 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "context"
- "database/sql"
- "database/sql/driver"
- "fmt"
- "math"
- "runtime"
- "strings"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-)
-
-type TB testing.B
-
-func (tb *TB) check(err error) {
- if err != nil {
- tb.Fatal(err)
- }
-}
-
-func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB {
- tb.check(err)
- return db
-}
-
-func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows {
- tb.check(err)
- return rows
-}
-
-func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt {
- tb.check(err)
- return stmt
-}
-
-func initDB(b *testing.B, queries ...string) *sql.DB {
- tb := (*TB)(b)
- db := tb.checkDB(sql.Open("mysql", dsn))
- for _, query := range queries {
- if _, err := db.Exec(query); err != nil {
- b.Fatalf("error on %q: %v", query, err)
- }
- }
- return db
-}
-
-const concurrencyLevel = 10
-
-func BenchmarkQuery(b *testing.B) {
- tb := (*TB)(b)
- b.StopTimer()
- b.ReportAllocs()
- db := initDB(b,
- "DROP TABLE IF EXISTS foo",
- "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
- `INSERT INTO foo VALUES (1, "one")`,
- `INSERT INTO foo VALUES (2, "two")`,
- )
- db.SetMaxIdleConns(concurrencyLevel)
- defer db.Close()
-
- stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?"))
- defer stmt.Close()
-
- remain := int64(b.N)
- var wg sync.WaitGroup
- wg.Add(concurrencyLevel)
- defer wg.Wait()
- b.StartTimer()
-
- for i := 0; i < concurrencyLevel; i++ {
- go func() {
- for {
- if atomic.AddInt64(&remain, -1) < 0 {
- wg.Done()
- return
- }
-
- var got string
- tb.check(stmt.QueryRow(1).Scan(&got))
- if got != "one" {
- b.Errorf("query = %q; want one", got)
- wg.Done()
- return
- }
- }
- }()
- }
-}
-
-func BenchmarkExec(b *testing.B) {
- tb := (*TB)(b)
- b.StopTimer()
- b.ReportAllocs()
- db := tb.checkDB(sql.Open("mysql", dsn))
- db.SetMaxIdleConns(concurrencyLevel)
- defer db.Close()
-
- stmt := tb.checkStmt(db.Prepare("DO 1"))
- defer stmt.Close()
-
- remain := int64(b.N)
- var wg sync.WaitGroup
- wg.Add(concurrencyLevel)
- defer wg.Wait()
- b.StartTimer()
-
- for i := 0; i < concurrencyLevel; i++ {
- go func() {
- for {
- if atomic.AddInt64(&remain, -1) < 0 {
- wg.Done()
- return
- }
-
- if _, err := stmt.Exec(); err != nil {
- b.Fatal(err.Error())
- }
- }
- }()
- }
-}
-
-// data, but no db writes
-var roundtripSample []byte
-
-func initRoundtripBenchmarks() ([]byte, int, int) {
- if roundtripSample == nil {
- roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024))
- }
- return roundtripSample, 16, len(roundtripSample)
-}
-
-func BenchmarkRoundtripTxt(b *testing.B) {
- b.StopTimer()
- sample, min, max := initRoundtripBenchmarks()
- sampleString := string(sample)
- b.ReportAllocs()
- tb := (*TB)(b)
- db := tb.checkDB(sql.Open("mysql", dsn))
- defer db.Close()
- b.StartTimer()
- var result string
- for i := 0; i < b.N; i++ {
- length := min + i
- if length > max {
- length = max
- }
- test := sampleString[0:length]
- rows := tb.checkRows(db.Query(`SELECT "` + test + `"`))
- if !rows.Next() {
- rows.Close()
- b.Fatalf("crashed")
- }
- err := rows.Scan(&result)
- if err != nil {
- rows.Close()
- b.Fatalf("crashed")
- }
- if result != test {
- rows.Close()
- b.Errorf("mismatch")
- }
- rows.Close()
- }
-}
-
-func BenchmarkRoundtripBin(b *testing.B) {
- b.StopTimer()
- sample, min, max := initRoundtripBenchmarks()
- b.ReportAllocs()
- tb := (*TB)(b)
- db := tb.checkDB(sql.Open("mysql", dsn))
- defer db.Close()
- stmt := tb.checkStmt(db.Prepare("SELECT ?"))
- defer stmt.Close()
- b.StartTimer()
- var result sql.RawBytes
- for i := 0; i < b.N; i++ {
- length := min + i
- if length > max {
- length = max
- }
- test := sample[0:length]
- rows := tb.checkRows(stmt.Query(test))
- if !rows.Next() {
- rows.Close()
- b.Fatalf("crashed")
- }
- err := rows.Scan(&result)
- if err != nil {
- rows.Close()
- b.Fatalf("crashed")
- }
- if !bytes.Equal(result, test) {
- rows.Close()
- b.Errorf("mismatch")
- }
- rows.Close()
- }
-}
-
-func BenchmarkInterpolation(b *testing.B) {
- mc := &mysqlConn{
- cfg: &Config{
- InterpolateParams: true,
- Loc: time.UTC,
- },
- maxAllowedPacket: maxPacketSize,
- maxWriteSize: maxPacketSize - 1,
- buf: newBuffer(nil),
- }
-
- args := []driver.Value{
- int64(42424242),
- float64(math.Pi),
- false,
- time.Unix(1423411542, 807015000),
- []byte("bytes containing special chars ' \" \a \x00"),
- "string containing special chars ' \" \a \x00",
- }
- q := "SELECT ?, ?, ?, ?, ?, ?"
-
- b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- _, err := mc.interpolateParams(q, args)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func benchmarkQueryContext(b *testing.B, db *sql.DB, p int) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
-
- tb := (*TB)(b)
- stmt := tb.checkStmt(db.PrepareContext(ctx, "SELECT val FROM foo WHERE id=?"))
- defer stmt.Close()
-
- b.SetParallelism(p)
- b.ReportAllocs()
- b.ResetTimer()
- b.RunParallel(func(pb *testing.PB) {
- var got string
- for pb.Next() {
- tb.check(stmt.QueryRow(1).Scan(&got))
- if got != "one" {
- b.Fatalf("query = %q; want one", got)
- }
- }
- })
-}
-
-func BenchmarkQueryContext(b *testing.B) {
- db := initDB(b,
- "DROP TABLE IF EXISTS foo",
- "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
- `INSERT INTO foo VALUES (1, "one")`,
- `INSERT INTO foo VALUES (2, "two")`,
- )
- defer db.Close()
- for _, p := range []int{1, 2, 3, 4} {
- b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
- benchmarkQueryContext(b, db, p)
- })
- }
-}
-
-func benchmarkExecContext(b *testing.B, db *sql.DB, p int) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
-
- tb := (*TB)(b)
- stmt := tb.checkStmt(db.PrepareContext(ctx, "DO 1"))
- defer stmt.Close()
-
- b.SetParallelism(p)
- b.ReportAllocs()
- b.ResetTimer()
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- if _, err := stmt.ExecContext(ctx); err != nil {
- b.Fatal(err)
- }
- }
- })
-}
-
-func BenchmarkExecContext(b *testing.B) {
- db := initDB(b,
- "DROP TABLE IF EXISTS foo",
- "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
- `INSERT INTO foo VALUES (1, "one")`,
- `INSERT INTO foo VALUES (2, "two")`,
- )
- defer db.Close()
- for _, p := range []int{1, 2, 3, 4} {
- b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
- benchmarkQueryContext(b, db, p)
- })
- }
-}
-
-// BenchmarkQueryRawBytes benchmarks fetching 100 blobs using sql.RawBytes.
-// "size=" means size of each blobs.
-func BenchmarkQueryRawBytes(b *testing.B) {
- var sizes []int = []int{100, 1000, 2000, 4000, 8000, 12000, 16000, 32000, 64000, 256000}
- db := initDB(b,
- "DROP TABLE IF EXISTS bench_rawbytes",
- "CREATE TABLE bench_rawbytes (id INT PRIMARY KEY, val LONGBLOB)",
- )
- defer db.Close()
-
- blob := make([]byte, sizes[len(sizes)-1])
- for i := range blob {
- blob[i] = 42
- }
- for i := 0; i < 100; i++ {
- _, err := db.Exec("INSERT INTO bench_rawbytes VALUES (?, ?)", i, blob)
- if err != nil {
- b.Fatal(err)
- }
- }
-
- for _, s := range sizes {
- b.Run(fmt.Sprintf("size=%v", s), func(b *testing.B) {
- db.SetMaxIdleConns(0)
- db.SetMaxIdleConns(1)
- b.ReportAllocs()
- b.ResetTimer()
-
- for j := 0; j < b.N; j++ {
- rows, err := db.Query("SELECT LEFT(val, ?) as v FROM bench_rawbytes", s)
- if err != nil {
- b.Fatal(err)
- }
- nrows := 0
- for rows.Next() {
- var buf sql.RawBytes
- err := rows.Scan(&buf)
- if err != nil {
- b.Fatal(err)
- }
- if len(buf) != s {
- b.Fatalf("size mismatch: expected %v, got %v", s, len(buf))
- }
- nrows++
- }
- rows.Close()
- if nrows != 100 {
- b.Fatalf("numbers of rows mismatch: expected %v, got %v", 100, nrows)
- }
- }
- })
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/buffer.go
deleted file mode 100644
index 0774c5c..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/buffer.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "io"
- "net"
- "time"
-)
-
-const defaultBufSize = 4096
-const maxCachedBufSize = 256 * 1024
-
-// A buffer which is used for both reading and writing.
-// This is possible since communication on each connection is synchronous.
-// In other words, we can't write and read simultaneously on the same connection.
-// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
-// Also highly optimized for this particular use case.
-// This buffer is backed by two byte slices in a double-buffering scheme
-type buffer struct {
- buf []byte // buf is a byte buffer who's length and capacity are equal.
- nc net.Conn
- idx int
- length int
- timeout time.Duration
- dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer
- flipcnt uint // flipccnt is the current buffer counter for double-buffering
-}
-
-// newBuffer allocates and returns a new buffer.
-func newBuffer(nc net.Conn) buffer {
- fg := make([]byte, defaultBufSize)
- return buffer{
- buf: fg,
- nc: nc,
- dbuf: [2][]byte{fg, nil},
- }
-}
-
-// flip replaces the active buffer with the background buffer
-// this is a delayed flip that simply increases the buffer counter;
-// the actual flip will be performed the next time we call `buffer.fill`
-func (b *buffer) flip() {
- b.flipcnt += 1
-}
-
-// fill reads into the buffer until at least _need_ bytes are in it
-func (b *buffer) fill(need int) error {
- n := b.length
- // fill data into its double-buffering target: if we've called
- // flip on this buffer, we'll be copying to the background buffer,
- // and then filling it with network data; otherwise we'll just move
- // the contents of the current buffer to the front before filling it
- dest := b.dbuf[b.flipcnt&1]
-
- // grow buffer if necessary to fit the whole packet.
- if need > len(dest) {
- // Round up to the next multiple of the default size
- dest = make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
-
- // if the allocated buffer is not too large, move it to backing storage
- // to prevent extra allocations on applications that perform large reads
- if len(dest) <= maxCachedBufSize {
- b.dbuf[b.flipcnt&1] = dest
- }
- }
-
- // if we're filling the fg buffer, move the existing data to the start of it.
- // if we're filling the bg buffer, copy over the data
- if n > 0 {
- copy(dest[:n], b.buf[b.idx:])
- }
-
- b.buf = dest
- b.idx = 0
-
- for {
- if b.timeout > 0 {
- if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
- return err
- }
- }
-
- nn, err := b.nc.Read(b.buf[n:])
- n += nn
-
- switch err {
- case nil:
- if n < need {
- continue
- }
- b.length = n
- return nil
-
- case io.EOF:
- if n >= need {
- b.length = n
- return nil
- }
- return io.ErrUnexpectedEOF
-
- default:
- return err
- }
- }
-}
-
-// returns next N bytes from buffer.
-// The returned slice is only guaranteed to be valid until the next read
-func (b *buffer) readNext(need int) ([]byte, error) {
- if b.length < need {
- // refill
- if err := b.fill(need); err != nil {
- return nil, err
- }
- }
-
- offset := b.idx
- b.idx += need
- b.length -= need
- return b.buf[offset:b.idx], nil
-}
-
-// takeBuffer returns a buffer with the requested size.
-// If possible, a slice from the existing buffer is returned.
-// Otherwise a bigger buffer is made.
-// Only one buffer (total) can be used at a time.
-func (b *buffer) takeBuffer(length int) ([]byte, error) {
- if b.length > 0 {
- return nil, ErrBusyBuffer
- }
-
- // test (cheap) general case first
- if length <= cap(b.buf) {
- return b.buf[:length], nil
- }
-
- if length < maxPacketSize {
- b.buf = make([]byte, length)
- return b.buf, nil
- }
-
- // buffer is larger than we want to store.
- return make([]byte, length), nil
-}
-
-// takeSmallBuffer is shortcut which can be used if length is
-// known to be smaller than defaultBufSize.
-// Only one buffer (total) can be used at a time.
-func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
- if b.length > 0 {
- return nil, ErrBusyBuffer
- }
- return b.buf[:length], nil
-}
-
-// takeCompleteBuffer returns the complete existing buffer.
-// This can be used if the necessary buffer size is unknown.
-// cap and len of the returned buffer will be equal.
-// Only one buffer (total) can be used at a time.
-func (b *buffer) takeCompleteBuffer() ([]byte, error) {
- if b.length > 0 {
- return nil, ErrBusyBuffer
- }
- return b.buf, nil
-}
-
-// store stores buf, an updated buffer, if its suitable to do so.
-func (b *buffer) store(buf []byte) error {
- if b.length > 0 {
- return ErrBusyBuffer
- } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
- b.buf = buf[:cap(buf)]
- }
- return nil
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/collations.go
deleted file mode 100644
index 8d2b556..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/collations.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-const defaultCollation = "utf8mb4_general_ci"
-const binaryCollation = "binary"
-
-// A list of available collations mapped to the internal ID.
-// To update this map use the following MySQL query:
-// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS WHERE ID<256 ORDER BY ID
-//
-// Handshake packet have only 1 byte for collation_id. So we can't use collations with ID > 255.
-//
-// ucs2, utf16, and utf32 can't be used for connection charset.
-// https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset
-// They are commented out to reduce this map.
-var collations = map[string]byte{
- "big5_chinese_ci": 1,
- "latin2_czech_cs": 2,
- "dec8_swedish_ci": 3,
- "cp850_general_ci": 4,
- "latin1_german1_ci": 5,
- "hp8_english_ci": 6,
- "koi8r_general_ci": 7,
- "latin1_swedish_ci": 8,
- "latin2_general_ci": 9,
- "swe7_swedish_ci": 10,
- "ascii_general_ci": 11,
- "ujis_japanese_ci": 12,
- "sjis_japanese_ci": 13,
- "cp1251_bulgarian_ci": 14,
- "latin1_danish_ci": 15,
- "hebrew_general_ci": 16,
- "tis620_thai_ci": 18,
- "euckr_korean_ci": 19,
- "latin7_estonian_cs": 20,
- "latin2_hungarian_ci": 21,
- "koi8u_general_ci": 22,
- "cp1251_ukrainian_ci": 23,
- "gb2312_chinese_ci": 24,
- "greek_general_ci": 25,
- "cp1250_general_ci": 26,
- "latin2_croatian_ci": 27,
- "gbk_chinese_ci": 28,
- "cp1257_lithuanian_ci": 29,
- "latin5_turkish_ci": 30,
- "latin1_german2_ci": 31,
- "armscii8_general_ci": 32,
- "utf8_general_ci": 33,
- "cp1250_czech_cs": 34,
- //"ucs2_general_ci": 35,
- "cp866_general_ci": 36,
- "keybcs2_general_ci": 37,
- "macce_general_ci": 38,
- "macroman_general_ci": 39,
- "cp852_general_ci": 40,
- "latin7_general_ci": 41,
- "latin7_general_cs": 42,
- "macce_bin": 43,
- "cp1250_croatian_ci": 44,
- "utf8mb4_general_ci": 45,
- "utf8mb4_bin": 46,
- "latin1_bin": 47,
- "latin1_general_ci": 48,
- "latin1_general_cs": 49,
- "cp1251_bin": 50,
- "cp1251_general_ci": 51,
- "cp1251_general_cs": 52,
- "macroman_bin": 53,
- //"utf16_general_ci": 54,
- //"utf16_bin": 55,
- //"utf16le_general_ci": 56,
- "cp1256_general_ci": 57,
- "cp1257_bin": 58,
- "cp1257_general_ci": 59,
- //"utf32_general_ci": 60,
- //"utf32_bin": 61,
- //"utf16le_bin": 62,
- "binary": 63,
- "armscii8_bin": 64,
- "ascii_bin": 65,
- "cp1250_bin": 66,
- "cp1256_bin": 67,
- "cp866_bin": 68,
- "dec8_bin": 69,
- "greek_bin": 70,
- "hebrew_bin": 71,
- "hp8_bin": 72,
- "keybcs2_bin": 73,
- "koi8r_bin": 74,
- "koi8u_bin": 75,
- "utf8_tolower_ci": 76,
- "latin2_bin": 77,
- "latin5_bin": 78,
- "latin7_bin": 79,
- "cp850_bin": 80,
- "cp852_bin": 81,
- "swe7_bin": 82,
- "utf8_bin": 83,
- "big5_bin": 84,
- "euckr_bin": 85,
- "gb2312_bin": 86,
- "gbk_bin": 87,
- "sjis_bin": 88,
- "tis620_bin": 89,
- //"ucs2_bin": 90,
- "ujis_bin": 91,
- "geostd8_general_ci": 92,
- "geostd8_bin": 93,
- "latin1_spanish_ci": 94,
- "cp932_japanese_ci": 95,
- "cp932_bin": 96,
- "eucjpms_japanese_ci": 97,
- "eucjpms_bin": 98,
- "cp1250_polish_ci": 99,
- //"utf16_unicode_ci": 101,
- //"utf16_icelandic_ci": 102,
- //"utf16_latvian_ci": 103,
- //"utf16_romanian_ci": 104,
- //"utf16_slovenian_ci": 105,
- //"utf16_polish_ci": 106,
- //"utf16_estonian_ci": 107,
- //"utf16_spanish_ci": 108,
- //"utf16_swedish_ci": 109,
- //"utf16_turkish_ci": 110,
- //"utf16_czech_ci": 111,
- //"utf16_danish_ci": 112,
- //"utf16_lithuanian_ci": 113,
- //"utf16_slovak_ci": 114,
- //"utf16_spanish2_ci": 115,
- //"utf16_roman_ci": 116,
- //"utf16_persian_ci": 117,
- //"utf16_esperanto_ci": 118,
- //"utf16_hungarian_ci": 119,
- //"utf16_sinhala_ci": 120,
- //"utf16_german2_ci": 121,
- //"utf16_croatian_ci": 122,
- //"utf16_unicode_520_ci": 123,
- //"utf16_vietnamese_ci": 124,
- //"ucs2_unicode_ci": 128,
- //"ucs2_icelandic_ci": 129,
- //"ucs2_latvian_ci": 130,
- //"ucs2_romanian_ci": 131,
- //"ucs2_slovenian_ci": 132,
- //"ucs2_polish_ci": 133,
- //"ucs2_estonian_ci": 134,
- //"ucs2_spanish_ci": 135,
- //"ucs2_swedish_ci": 136,
- //"ucs2_turkish_ci": 137,
- //"ucs2_czech_ci": 138,
- //"ucs2_danish_ci": 139,
- //"ucs2_lithuanian_ci": 140,
- //"ucs2_slovak_ci": 141,
- //"ucs2_spanish2_ci": 142,
- //"ucs2_roman_ci": 143,
- //"ucs2_persian_ci": 144,
- //"ucs2_esperanto_ci": 145,
- //"ucs2_hungarian_ci": 146,
- //"ucs2_sinhala_ci": 147,
- //"ucs2_german2_ci": 148,
- //"ucs2_croatian_ci": 149,
- //"ucs2_unicode_520_ci": 150,
- //"ucs2_vietnamese_ci": 151,
- //"ucs2_general_mysql500_ci": 159,
- //"utf32_unicode_ci": 160,
- //"utf32_icelandic_ci": 161,
- //"utf32_latvian_ci": 162,
- //"utf32_romanian_ci": 163,
- //"utf32_slovenian_ci": 164,
- //"utf32_polish_ci": 165,
- //"utf32_estonian_ci": 166,
- //"utf32_spanish_ci": 167,
- //"utf32_swedish_ci": 168,
- //"utf32_turkish_ci": 169,
- //"utf32_czech_ci": 170,
- //"utf32_danish_ci": 171,
- //"utf32_lithuanian_ci": 172,
- //"utf32_slovak_ci": 173,
- //"utf32_spanish2_ci": 174,
- //"utf32_roman_ci": 175,
- //"utf32_persian_ci": 176,
- //"utf32_esperanto_ci": 177,
- //"utf32_hungarian_ci": 178,
- //"utf32_sinhala_ci": 179,
- //"utf32_german2_ci": 180,
- //"utf32_croatian_ci": 181,
- //"utf32_unicode_520_ci": 182,
- //"utf32_vietnamese_ci": 183,
- "utf8_unicode_ci": 192,
- "utf8_icelandic_ci": 193,
- "utf8_latvian_ci": 194,
- "utf8_romanian_ci": 195,
- "utf8_slovenian_ci": 196,
- "utf8_polish_ci": 197,
- "utf8_estonian_ci": 198,
- "utf8_spanish_ci": 199,
- "utf8_swedish_ci": 200,
- "utf8_turkish_ci": 201,
- "utf8_czech_ci": 202,
- "utf8_danish_ci": 203,
- "utf8_lithuanian_ci": 204,
- "utf8_slovak_ci": 205,
- "utf8_spanish2_ci": 206,
- "utf8_roman_ci": 207,
- "utf8_persian_ci": 208,
- "utf8_esperanto_ci": 209,
- "utf8_hungarian_ci": 210,
- "utf8_sinhala_ci": 211,
- "utf8_german2_ci": 212,
- "utf8_croatian_ci": 213,
- "utf8_unicode_520_ci": 214,
- "utf8_vietnamese_ci": 215,
- "utf8_general_mysql500_ci": 223,
- "utf8mb4_unicode_ci": 224,
- "utf8mb4_icelandic_ci": 225,
- "utf8mb4_latvian_ci": 226,
- "utf8mb4_romanian_ci": 227,
- "utf8mb4_slovenian_ci": 228,
- "utf8mb4_polish_ci": 229,
- "utf8mb4_estonian_ci": 230,
- "utf8mb4_spanish_ci": 231,
- "utf8mb4_swedish_ci": 232,
- "utf8mb4_turkish_ci": 233,
- "utf8mb4_czech_ci": 234,
- "utf8mb4_danish_ci": 235,
- "utf8mb4_lithuanian_ci": 236,
- "utf8mb4_slovak_ci": 237,
- "utf8mb4_spanish2_ci": 238,
- "utf8mb4_roman_ci": 239,
- "utf8mb4_persian_ci": 240,
- "utf8mb4_esperanto_ci": 241,
- "utf8mb4_hungarian_ci": 242,
- "utf8mb4_sinhala_ci": 243,
- "utf8mb4_german2_ci": 244,
- "utf8mb4_croatian_ci": 245,
- "utf8mb4_unicode_520_ci": 246,
- "utf8mb4_vietnamese_ci": 247,
- "gb18030_chinese_ci": 248,
- "gb18030_bin": 249,
- "gb18030_unicode_520_ci": 250,
- "utf8mb4_0900_ai_ci": 255,
-}
-
-// A blacklist of collations which is unsafe to interpolate parameters.
-// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes.
-var unsafeCollations = map[string]bool{
- "big5_chinese_ci": true,
- "sjis_japanese_ci": true,
- "gbk_chinese_ci": true,
- "big5_bin": true,
- "gb2312_bin": true,
- "gbk_bin": true,
- "sjis_bin": true,
- "cp932_japanese_ci": true,
- "cp932_bin": true,
- "gb18030_chinese_ci": true,
- "gb18030_bin": true,
- "gb18030_unicode_520_ci": true,
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/conncheck.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/conncheck.go
deleted file mode 100644
index cc47aa5..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/conncheck.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// +build !windows,!appengine
-
-package mysql
-
-import (
- "errors"
- "io"
- "net"
- "syscall"
-)
-
-var errUnexpectedRead = errors.New("unexpected read from socket")
-
-func connCheck(c net.Conn) error {
- var (
- n int
- err error
- buff [1]byte
- )
-
- sconn, ok := c.(syscall.Conn)
- if !ok {
- return nil
- }
- rc, err := sconn.SyscallConn()
- if err != nil {
- return err
- }
- rerr := rc.Read(func(fd uintptr) bool {
- n, err = syscall.Read(int(fd), buff[:])
- return true
- })
- switch {
- case rerr != nil:
- return rerr
- case n == 0 && err == nil:
- return io.EOF
- case n > 0:
- return errUnexpectedRead
- case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK:
- return nil
- default:
- return err
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/conncheck_test.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/conncheck_test.go
deleted file mode 100644
index b7234b0..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/conncheck_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// +build go1.10,!windows
-
-package mysql
-
-import (
- "testing"
- "time"
-)
-
-func TestStaleConnectionChecks(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("SET @@SESSION.wait_timeout = 2")
-
- if err := dbt.db.Ping(); err != nil {
- dbt.Fatal(err)
- }
-
- // wait for MySQL to close our connection
- time.Sleep(3 * time.Second)
-
- tx, err := dbt.db.Begin()
- if err != nil {
- dbt.Fatal(err)
- }
-
- if err := tx.Rollback(); err != nil {
- dbt.Fatal(err)
- }
- })
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/connection.go
deleted file mode 100644
index 565a548..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/connection.go
+++ /dev/null
@@ -1,649 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "context"
- "database/sql"
- "database/sql/driver"
- "io"
- "net"
- "strconv"
- "strings"
- "time"
-)
-
-type mysqlConn struct {
- buf buffer
- netConn net.Conn
- rawConn net.Conn // underlying connection when netConn is TLS connection.
- affectedRows uint64
- insertId uint64
- cfg *Config
- maxAllowedPacket int
- maxWriteSize int
- writeTimeout time.Duration
- flags clientFlag
- status statusFlag
- sequence uint8
- parseTime bool
- reset bool // set when the Go SQL package calls ResetSession
-
- // for context support (Go 1.8+)
- watching bool
- watcher chan<- context.Context
- closech chan struct{}
- finished chan<- struct{}
- canceled atomicError // set non-nil if conn is canceled
- closed atomicBool // set when conn is closed, before closech is closed
-}
-
-// Handles parameters set in DSN after the connection is established
-func (mc *mysqlConn) handleParams() (err error) {
- for param, val := range mc.cfg.Params {
- switch param {
- // Charset
- case "charset":
- charsets := strings.Split(val, ",")
- for i := range charsets {
- // ignore errors here - a charset may not exist
- err = mc.exec("SET NAMES " + charsets[i])
- if err == nil {
- break
- }
- }
- if err != nil {
- return
- }
-
- // System Vars
- default:
- err = mc.exec("SET " + param + "=" + val + "")
- if err != nil {
- return
- }
- }
- }
-
- return
-}
-
-func (mc *mysqlConn) markBadConn(err error) error {
- if mc == nil {
- return err
- }
- if err != errBadConnNoWrite {
- return err
- }
- return driver.ErrBadConn
-}
-
-func (mc *mysqlConn) Begin() (driver.Tx, error) {
- return mc.begin(false)
-}
-
-func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
- if mc.closed.IsSet() {
- errLog.Print(ErrInvalidConn)
- return nil, driver.ErrBadConn
- }
- var q string
- if readOnly {
- q = "START TRANSACTION READ ONLY"
- } else {
- q = "START TRANSACTION"
- }
- err := mc.exec(q)
- if err == nil {
- return &mysqlTx{mc}, err
- }
- return nil, mc.markBadConn(err)
-}
-
-func (mc *mysqlConn) Close() (err error) {
- // Makes Close idempotent
- if !mc.closed.IsSet() {
- err = mc.writeCommandPacket(comQuit)
- }
-
- mc.cleanup()
-
- return
-}
-
-// Closes the network connection and unsets internal variables. Do not call this
-// function after successfully authentication, call Close instead. This function
-// is called before auth or on auth failure because MySQL will have already
-// closed the network connection.
-func (mc *mysqlConn) cleanup() {
- if !mc.closed.TrySet(true) {
- return
- }
-
- // Makes cleanup idempotent
- close(mc.closech)
- if mc.netConn == nil {
- return
- }
- if err := mc.netConn.Close(); err != nil {
- errLog.Print(err)
- }
-}
-
-func (mc *mysqlConn) error() error {
- if mc.closed.IsSet() {
- if err := mc.canceled.Value(); err != nil {
- return err
- }
- return ErrInvalidConn
- }
- return nil
-}
-
-func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
- if mc.closed.IsSet() {
- errLog.Print(ErrInvalidConn)
- return nil, driver.ErrBadConn
- }
- // Send command
- err := mc.writeCommandPacketStr(comStmtPrepare, query)
- if err != nil {
- return nil, mc.markBadConn(err)
- }
-
- stmt := &mysqlStmt{
- mc: mc,
- }
-
- // Read Result
- columnCount, err := stmt.readPrepareResultPacket()
- if err == nil {
- if stmt.paramCount > 0 {
- if err = mc.readUntilEOF(); err != nil {
- return nil, err
- }
- }
-
- if columnCount > 0 {
- err = mc.readUntilEOF()
- }
- }
-
- return stmt, err
-}
-
-func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
- // Number of ? should be same to len(args)
- if strings.Count(query, "?") != len(args) {
- return "", driver.ErrSkip
- }
-
- buf, err := mc.buf.takeCompleteBuffer()
- if err != nil {
- // can not take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return "", ErrInvalidConn
- }
- buf = buf[:0]
- argPos := 0
-
- for i := 0; i < len(query); i++ {
- q := strings.IndexByte(query[i:], '?')
- if q == -1 {
- buf = append(buf, query[i:]...)
- break
- }
- buf = append(buf, query[i:i+q]...)
- i += q
-
- arg := args[argPos]
- argPos++
-
- if arg == nil {
- buf = append(buf, "NULL"...)
- continue
- }
-
- switch v := arg.(type) {
- case int64:
- buf = strconv.AppendInt(buf, v, 10)
- case uint64:
- // Handle uint64 explicitly because our custom ConvertValue emits unsigned values
- buf = strconv.AppendUint(buf, v, 10)
- case float64:
- buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
- case bool:
- if v {
- buf = append(buf, '1')
- } else {
- buf = append(buf, '0')
- }
- case time.Time:
- if v.IsZero() {
- buf = append(buf, "'0000-00-00'"...)
- } else {
- v := v.In(mc.cfg.Loc)
- v = v.Add(time.Nanosecond * 500) // To round under microsecond
- year := v.Year()
- year100 := year / 100
- year1 := year % 100
- month := v.Month()
- day := v.Day()
- hour := v.Hour()
- minute := v.Minute()
- second := v.Second()
- micro := v.Nanosecond() / 1000
-
- buf = append(buf, []byte{
- '\'',
- digits10[year100], digits01[year100],
- digits10[year1], digits01[year1],
- '-',
- digits10[month], digits01[month],
- '-',
- digits10[day], digits01[day],
- ' ',
- digits10[hour], digits01[hour],
- ':',
- digits10[minute], digits01[minute],
- ':',
- digits10[second], digits01[second],
- }...)
-
- if micro != 0 {
- micro10000 := micro / 10000
- micro100 := micro / 100 % 100
- micro1 := micro % 100
- buf = append(buf, []byte{
- '.',
- digits10[micro10000], digits01[micro10000],
- digits10[micro100], digits01[micro100],
- digits10[micro1], digits01[micro1],
- }...)
- }
- buf = append(buf, '\'')
- }
- case []byte:
- if v == nil {
- buf = append(buf, "NULL"...)
- } else {
- buf = append(buf, "_binary'"...)
- if mc.status&statusNoBackslashEscapes == 0 {
- buf = escapeBytesBackslash(buf, v)
- } else {
- buf = escapeBytesQuotes(buf, v)
- }
- buf = append(buf, '\'')
- }
- case string:
- buf = append(buf, '\'')
- if mc.status&statusNoBackslashEscapes == 0 {
- buf = escapeStringBackslash(buf, v)
- } else {
- buf = escapeStringQuotes(buf, v)
- }
- buf = append(buf, '\'')
- default:
- return "", driver.ErrSkip
- }
-
- if len(buf)+4 > mc.maxAllowedPacket {
- return "", driver.ErrSkip
- }
- }
- if argPos != len(args) {
- return "", driver.ErrSkip
- }
- return string(buf), nil
-}
-
-func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
- if mc.closed.IsSet() {
- errLog.Print(ErrInvalidConn)
- return nil, driver.ErrBadConn
- }
- if len(args) != 0 {
- if !mc.cfg.InterpolateParams {
- return nil, driver.ErrSkip
- }
- // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
- prepared, err := mc.interpolateParams(query, args)
- if err != nil {
- return nil, err
- }
- query = prepared
- }
- mc.affectedRows = 0
- mc.insertId = 0
-
- err := mc.exec(query)
- if err == nil {
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, err
- }
- return nil, mc.markBadConn(err)
-}
-
-// Internal function to execute commands
-func (mc *mysqlConn) exec(query string) error {
- // Send command
- if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
- return mc.markBadConn(err)
- }
-
- // Read Result
- resLen, err := mc.readResultSetHeaderPacket()
- if err != nil {
- return err
- }
-
- if resLen > 0 {
- // columns
- if err := mc.readUntilEOF(); err != nil {
- return err
- }
-
- // rows
- if err := mc.readUntilEOF(); err != nil {
- return err
- }
- }
-
- return mc.discardResults()
-}
-
-func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
- return mc.query(query, args)
-}
-
-func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
- if mc.closed.IsSet() {
- errLog.Print(ErrInvalidConn)
- return nil, driver.ErrBadConn
- }
- if len(args) != 0 {
- if !mc.cfg.InterpolateParams {
- return nil, driver.ErrSkip
- }
- // try client-side prepare to reduce roundtrip
- prepared, err := mc.interpolateParams(query, args)
- if err != nil {
- return nil, err
- }
- query = prepared
- }
- // Send command
- err := mc.writeCommandPacketStr(comQuery, query)
- if err == nil {
- // Read Result
- var resLen int
- resLen, err = mc.readResultSetHeaderPacket()
- if err == nil {
- rows := new(textRows)
- rows.mc = mc
-
- if resLen == 0 {
- rows.rs.done = true
-
- switch err := rows.NextResultSet(); err {
- case nil, io.EOF:
- return rows, nil
- default:
- return nil, err
- }
- }
-
- // Columns
- rows.rs.columns, err = mc.readColumns(resLen)
- return rows, err
- }
- }
- return nil, mc.markBadConn(err)
-}
-
-// Gets the value of the given MySQL System Variable
-// The returned byte slice is only valid until the next read
-func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
- // Send command
- if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
- return nil, err
- }
-
- // Read Result
- resLen, err := mc.readResultSetHeaderPacket()
- if err == nil {
- rows := new(textRows)
- rows.mc = mc
- rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
-
- if resLen > 0 {
- // Columns
- if err := mc.readUntilEOF(); err != nil {
- return nil, err
- }
- }
-
- dest := make([]driver.Value, resLen)
- if err = rows.readRow(dest); err == nil {
- return dest[0].([]byte), mc.readUntilEOF()
- }
- }
- return nil, err
-}
-
-// finish is called when the query has canceled.
-func (mc *mysqlConn) cancel(err error) {
- mc.canceled.Set(err)
- mc.cleanup()
-}
-
-// finish is called when the query has succeeded.
-func (mc *mysqlConn) finish() {
- if !mc.watching || mc.finished == nil {
- return
- }
- select {
- case mc.finished <- struct{}{}:
- mc.watching = false
- case <-mc.closech:
- }
-}
-
-// Ping implements driver.Pinger interface
-func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
- if mc.closed.IsSet() {
- errLog.Print(ErrInvalidConn)
- return driver.ErrBadConn
- }
-
- if err = mc.watchCancel(ctx); err != nil {
- return
- }
- defer mc.finish()
-
- if err = mc.writeCommandPacket(comPing); err != nil {
- return mc.markBadConn(err)
- }
-
- return mc.readResultOK()
-}
-
-// BeginTx implements driver.ConnBeginTx interface
-func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
- if err := mc.watchCancel(ctx); err != nil {
- return nil, err
- }
- defer mc.finish()
-
- if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
- level, err := mapIsolationLevel(opts.Isolation)
- if err != nil {
- return nil, err
- }
- err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
- if err != nil {
- return nil, err
- }
- }
-
- return mc.begin(opts.ReadOnly)
-}
-
-func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
- dargs, err := namedValueToValue(args)
- if err != nil {
- return nil, err
- }
-
- if err := mc.watchCancel(ctx); err != nil {
- return nil, err
- }
-
- rows, err := mc.query(query, dargs)
- if err != nil {
- mc.finish()
- return nil, err
- }
- rows.finish = mc.finish
- return rows, err
-}
-
-func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
- dargs, err := namedValueToValue(args)
- if err != nil {
- return nil, err
- }
-
- if err := mc.watchCancel(ctx); err != nil {
- return nil, err
- }
- defer mc.finish()
-
- return mc.Exec(query, dargs)
-}
-
-func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
- if err := mc.watchCancel(ctx); err != nil {
- return nil, err
- }
-
- stmt, err := mc.Prepare(query)
- mc.finish()
- if err != nil {
- return nil, err
- }
-
- select {
- default:
- case <-ctx.Done():
- stmt.Close()
- return nil, ctx.Err()
- }
- return stmt, nil
-}
-
-func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
- dargs, err := namedValueToValue(args)
- if err != nil {
- return nil, err
- }
-
- if err := stmt.mc.watchCancel(ctx); err != nil {
- return nil, err
- }
-
- rows, err := stmt.query(dargs)
- if err != nil {
- stmt.mc.finish()
- return nil, err
- }
- rows.finish = stmt.mc.finish
- return rows, err
-}
-
-func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
- dargs, err := namedValueToValue(args)
- if err != nil {
- return nil, err
- }
-
- if err := stmt.mc.watchCancel(ctx); err != nil {
- return nil, err
- }
- defer stmt.mc.finish()
-
- return stmt.Exec(dargs)
-}
-
-func (mc *mysqlConn) watchCancel(ctx context.Context) error {
- if mc.watching {
- // Reach here if canceled,
- // so the connection is already invalid
- mc.cleanup()
- return nil
- }
- // When ctx is already cancelled, don't watch it.
- if err := ctx.Err(); err != nil {
- return err
- }
- // When ctx is not cancellable, don't watch it.
- if ctx.Done() == nil {
- return nil
- }
- // When watcher is not alive, can't watch it.
- if mc.watcher == nil {
- return nil
- }
-
- mc.watching = true
- mc.watcher <- ctx
- return nil
-}
-
-func (mc *mysqlConn) startWatcher() {
- watcher := make(chan context.Context, 1)
- mc.watcher = watcher
- finished := make(chan struct{})
- mc.finished = finished
- go func() {
- for {
- var ctx context.Context
- select {
- case ctx = <-watcher:
- case <-mc.closech:
- return
- }
-
- select {
- case <-ctx.Done():
- mc.cancel(ctx.Err())
- case <-finished:
- case <-mc.closech:
- return
- }
- }
- }()
-}
-
-func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
- nv.Value, err = converter{}.ConvertValue(nv.Value)
- return
-}
-
-// ResetSession implements driver.SessionResetter.
-// (From Go 1.10)
-func (mc *mysqlConn) ResetSession(ctx context.Context) error {
- if mc.closed.IsSet() {
- return driver.ErrBadConn
- }
- mc.reset = true
- return nil
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/connection_test.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/connection_test.go
deleted file mode 100644
index 19c17ff..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/connection_test.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "context"
- "database/sql/driver"
- "errors"
- "net"
- "testing"
-)
-
-func TestInterpolateParams(t *testing.T) {
- mc := &mysqlConn{
- buf: newBuffer(nil),
- maxAllowedPacket: maxPacketSize,
- cfg: &Config{
- InterpolateParams: true,
- },
- }
-
- q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42), "gopher"})
- if err != nil {
- t.Errorf("Expected err=nil, got %#v", err)
- return
- }
- expected := `SELECT 42+'gopher'`
- if q != expected {
- t.Errorf("Expected: %q\nGot: %q", expected, q)
- }
-}
-
-func TestInterpolateParamsTooManyPlaceholders(t *testing.T) {
- mc := &mysqlConn{
- buf: newBuffer(nil),
- maxAllowedPacket: maxPacketSize,
- cfg: &Config{
- InterpolateParams: true,
- },
- }
-
- q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42)})
- if err != driver.ErrSkip {
- t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
- }
-}
-
-// We don't support placeholder in string literal for now.
-// https://github.com/go-sql-driver/mysql/pull/490
-func TestInterpolateParamsPlaceholderInString(t *testing.T) {
- mc := &mysqlConn{
- buf: newBuffer(nil),
- maxAllowedPacket: maxPacketSize,
- cfg: &Config{
- InterpolateParams: true,
- },
- }
-
- q, err := mc.interpolateParams("SELECT 'abc?xyz',?", []driver.Value{int64(42)})
- // When InterpolateParams support string literal, this should return `"SELECT 'abc?xyz', 42`
- if err != driver.ErrSkip {
- t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
- }
-}
-
-func TestInterpolateParamsUint64(t *testing.T) {
- mc := &mysqlConn{
- buf: newBuffer(nil),
- maxAllowedPacket: maxPacketSize,
- cfg: &Config{
- InterpolateParams: true,
- },
- }
-
- q, err := mc.interpolateParams("SELECT ?", []driver.Value{uint64(42)})
- if err != nil {
- t.Errorf("Expected err=nil, got err=%#v, q=%#v", err, q)
- }
- if q != "SELECT 42" {
- t.Errorf("Expected uint64 interpolation to work, got q=%#v", q)
- }
-}
-
-func TestCheckNamedValue(t *testing.T) {
- value := driver.NamedValue{Value: ^uint64(0)}
- x := &mysqlConn{}
- err := x.CheckNamedValue(&value)
-
- if err != nil {
- t.Fatal("uint64 high-bit not convertible", err)
- }
-
- if value.Value != ^uint64(0) {
- t.Fatalf("uint64 high-bit converted, got %#v %T", value.Value, value.Value)
- }
-}
-
-// TestCleanCancel tests passed context is cancelled at start.
-// No packet should be sent. Connection should keep current status.
-func TestCleanCancel(t *testing.T) {
- mc := &mysqlConn{
- closech: make(chan struct{}),
- }
- mc.startWatcher()
- defer mc.cleanup()
-
- ctx, cancel := context.WithCancel(context.Background())
- cancel()
-
- for i := 0; i < 3; i++ { // Repeat same behavior
- err := mc.Ping(ctx)
- if err != context.Canceled {
- t.Errorf("expected context.Canceled, got %#v", err)
- }
-
- if mc.closed.IsSet() {
- t.Error("expected mc is not closed, closed actually")
- }
-
- if mc.watching {
- t.Error("expected watching is false, but true")
- }
- }
-}
-
-func TestPingMarkBadConnection(t *testing.T) {
- nc := badConnection{err: errors.New("boom")}
- ms := &mysqlConn{
- netConn: nc,
- buf: newBuffer(nc),
- maxAllowedPacket: defaultMaxAllowedPacket,
- }
-
- err := ms.Ping(context.Background())
-
- if err != driver.ErrBadConn {
- t.Errorf("expected driver.ErrBadConn, got %#v", err)
- }
-}
-
-func TestPingErrInvalidConn(t *testing.T) {
- nc := badConnection{err: errors.New("failed to write"), n: 10}
- ms := &mysqlConn{
- netConn: nc,
- buf: newBuffer(nc),
- maxAllowedPacket: defaultMaxAllowedPacket,
- closech: make(chan struct{}),
- }
-
- err := ms.Ping(context.Background())
-
- if err != ErrInvalidConn {
- t.Errorf("expected ErrInvalidConn, got %#v", err)
- }
-}
-
-type badConnection struct {
- n int
- err error
- net.Conn
-}
-
-func (bc badConnection) Write(b []byte) (n int, err error) {
- return bc.n, bc.err
-}
-
-func (bc badConnection) Close() error {
- return nil
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/connector.go
deleted file mode 100644
index 5aaaba4..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/connector.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "context"
- "database/sql/driver"
- "net"
-)
-
-type connector struct {
- cfg *Config // immutable private copy.
-}
-
-// Connect implements driver.Connector interface.
-// Connect returns a connection to the database.
-func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
- var err error
-
- // New mysqlConn
- mc := &mysqlConn{
- maxAllowedPacket: maxPacketSize,
- maxWriteSize: maxPacketSize - 1,
- closech: make(chan struct{}),
- cfg: c.cfg,
- }
- mc.parseTime = mc.cfg.ParseTime
-
- // Connect to Server
- dialsLock.RLock()
- dial, ok := dials[mc.cfg.Net]
- dialsLock.RUnlock()
- if ok {
- mc.netConn, err = dial(ctx, mc.cfg.Addr)
- } else {
- nd := net.Dialer{Timeout: mc.cfg.Timeout}
- mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
- }
-
- if err != nil {
- if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
- errLog.Print("net.Error from Dial()': ", nerr.Error())
- return nil, driver.ErrBadConn
- }
- return nil, err
- }
-
- // Enable TCP Keepalives on TCP connections
- if tc, ok := mc.netConn.(*net.TCPConn); ok {
- if err := tc.SetKeepAlive(true); err != nil {
- // Don't send COM_QUIT before handshake.
- mc.netConn.Close()
- mc.netConn = nil
- return nil, err
- }
- }
-
- // Call startWatcher for context support (From Go 1.8)
- mc.startWatcher()
- if err := mc.watchCancel(ctx); err != nil {
- return nil, err
- }
- defer mc.finish()
-
- mc.buf = newBuffer(mc.netConn)
-
- // Set I/O timeouts
- mc.buf.timeout = mc.cfg.ReadTimeout
- mc.writeTimeout = mc.cfg.WriteTimeout
-
- // Reading Handshake Initialization Packet
- authData, plugin, err := mc.readHandshakePacket()
- if err != nil {
- mc.cleanup()
- return nil, err
- }
-
- if plugin == "" {
- plugin = defaultAuthPlugin
- }
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- // try the default auth plugin, if using the requested plugin failed
- errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
- plugin = defaultAuthPlugin
- authResp, err = mc.auth(authData, plugin)
- if err != nil {
- mc.cleanup()
- return nil, err
- }
- }
- if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil {
- mc.cleanup()
- return nil, err
- }
-
- // Handle response to auth packet, switch methods if possible
- if err = mc.handleAuthResult(authData, plugin); err != nil {
- // Authentication failed and MySQL has already closed the connection
- // (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
- // Do not send COM_QUIT, just cleanup and return the error.
- mc.cleanup()
- return nil, err
- }
-
- if mc.cfg.MaxAllowedPacket > 0 {
- mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
- } else {
- // Get max allowed packet size
- maxap, err := mc.getSystemVar("max_allowed_packet")
- if err != nil {
- mc.Close()
- return nil, err
- }
- mc.maxAllowedPacket = stringToInt(maxap) - 1
- }
- if mc.maxAllowedPacket < maxPacketSize {
- mc.maxWriteSize = mc.maxAllowedPacket
- }
-
- // Handle DSN Params
- err = mc.handleParams()
- if err != nil {
- mc.Close()
- return nil, err
- }
-
- return mc, nil
-}
-
-// Driver implements driver.Connector interface.
-// Driver returns &MySQLDriver{}.
-func (c *connector) Driver() driver.Driver {
- return &MySQLDriver{}
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/const.go
deleted file mode 100644
index b1e6b85..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/const.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-const (
- defaultAuthPlugin = "mysql_native_password"
- defaultMaxAllowedPacket = 4 << 20 // 4 MiB
- minProtocolVersion = 10
- maxPacketSize = 1<<24 - 1
- timeFormat = "2006-01-02 15:04:05.999999"
-)
-
-// MySQL constants documentation:
-// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
-
-const (
- iOK byte = 0x00
- iAuthMoreData byte = 0x01
- iLocalInFile byte = 0xfb
- iEOF byte = 0xfe
- iERR byte = 0xff
-)
-
-// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
-type clientFlag uint32
-
-const (
- clientLongPassword clientFlag = 1 << iota
- clientFoundRows
- clientLongFlag
- clientConnectWithDB
- clientNoSchema
- clientCompress
- clientODBC
- clientLocalFiles
- clientIgnoreSpace
- clientProtocol41
- clientInteractive
- clientSSL
- clientIgnoreSIGPIPE
- clientTransactions
- clientReserved
- clientSecureConn
- clientMultiStatements
- clientMultiResults
- clientPSMultiResults
- clientPluginAuth
- clientConnectAttrs
- clientPluginAuthLenEncClientData
- clientCanHandleExpiredPasswords
- clientSessionTrack
- clientDeprecateEOF
-)
-
-const (
- comQuit byte = iota + 1
- comInitDB
- comQuery
- comFieldList
- comCreateDB
- comDropDB
- comRefresh
- comShutdown
- comStatistics
- comProcessInfo
- comConnect
- comProcessKill
- comDebug
- comPing
- comTime
- comDelayedInsert
- comChangeUser
- comBinlogDump
- comTableDump
- comConnectOut
- comRegisterSlave
- comStmtPrepare
- comStmtExecute
- comStmtSendLongData
- comStmtClose
- comStmtReset
- comSetOption
- comStmtFetch
-)
-
-// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
-type fieldType byte
-
-const (
- fieldTypeDecimal fieldType = iota
- fieldTypeTiny
- fieldTypeShort
- fieldTypeLong
- fieldTypeFloat
- fieldTypeDouble
- fieldTypeNULL
- fieldTypeTimestamp
- fieldTypeLongLong
- fieldTypeInt24
- fieldTypeDate
- fieldTypeTime
- fieldTypeDateTime
- fieldTypeYear
- fieldTypeNewDate
- fieldTypeVarChar
- fieldTypeBit
-)
-const (
- fieldTypeJSON fieldType = iota + 0xf5
- fieldTypeNewDecimal
- fieldTypeEnum
- fieldTypeSet
- fieldTypeTinyBLOB
- fieldTypeMediumBLOB
- fieldTypeLongBLOB
- fieldTypeBLOB
- fieldTypeVarString
- fieldTypeString
- fieldTypeGeometry
-)
-
-type fieldFlag uint16
-
-const (
- flagNotNULL fieldFlag = 1 << iota
- flagPriKey
- flagUniqueKey
- flagMultipleKey
- flagBLOB
- flagUnsigned
- flagZeroFill
- flagBinary
- flagEnum
- flagAutoIncrement
- flagTimestamp
- flagSet
- flagUnknown1
- flagUnknown2
- flagUnknown3
- flagUnknown4
-)
-
-// http://dev.mysql.com/doc/internals/en/status-flags.html
-type statusFlag uint16
-
-const (
- statusInTrans statusFlag = 1 << iota
- statusInAutocommit
- statusReserved // Not in documentation
- statusMoreResultsExists
- statusNoGoodIndexUsed
- statusNoIndexUsed
- statusCursorExists
- statusLastRowSent
- statusDbDropped
- statusNoBackslashEscapes
- statusMetadataChanged
- statusQueryWasSlow
- statusPsOutParams
- statusInTransReadonly
- statusSessionStateChanged
-)
-
-const (
- cachingSha2PasswordRequestPublicKey = 2
- cachingSha2PasswordFastAuthSuccess = 3
- cachingSha2PasswordPerformFullAuthentication = 4
-)
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/driver.go
deleted file mode 100644
index 1f9decf..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/driver.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// Package mysql provides a MySQL driver for Go's database/sql package.
-//
-// The driver should be used via the database/sql package:
-//
-// import "database/sql"
-// import _ "github.com/go-sql-driver/mysql"
-//
-// db, err := sql.Open("mysql", "user:password@/dbname")
-//
-// See https://github.com/go-sql-driver/mysql#usage for details
-package mysql
-
-import (
- "context"
- "database/sql"
- "database/sql/driver"
- "net"
- "sync"
-)
-
-// MySQLDriver is exported to make the driver directly accessible.
-// In general the driver is used via the database/sql package.
-type MySQLDriver struct{}
-
-// DialFunc is a function which can be used to establish the network connection.
-// Custom dial functions must be registered with RegisterDial
-//
-// Deprecated: users should register a DialContextFunc instead
-type DialFunc func(addr string) (net.Conn, error)
-
-// DialContextFunc is a function which can be used to establish the network connection.
-// Custom dial functions must be registered with RegisterDialContext
-type DialContextFunc func(ctx context.Context, addr string) (net.Conn, error)
-
-var (
- dialsLock sync.RWMutex
- dials map[string]DialContextFunc
-)
-
-// RegisterDialContext registers a custom dial function. It can then be used by the
-// network address mynet(addr), where mynet is the registered new network.
-// The current context for the connection and its address is passed to the dial function.
-func RegisterDialContext(net string, dial DialContextFunc) {
- dialsLock.Lock()
- defer dialsLock.Unlock()
- if dials == nil {
- dials = make(map[string]DialContextFunc)
- }
- dials[net] = dial
-}
-
-// RegisterDial registers a custom dial function. It can then be used by the
-// network address mynet(addr), where mynet is the registered new network.
-// addr is passed as a parameter to the dial function.
-//
-// Deprecated: users should call RegisterDialContext instead
-func RegisterDial(network string, dial DialFunc) {
- RegisterDialContext(network, func(_ context.Context, addr string) (net.Conn, error) {
- return dial(addr)
- })
-}
-
-// Open new Connection.
-// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
-// the DSN string is formatted
-func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
- cfg, err := ParseDSN(dsn)
- if err != nil {
- return nil, err
- }
- c := &connector{
- cfg: cfg,
- }
- return c.Connect(context.Background())
-}
-
-func init() {
- sql.Register("mysql", &MySQLDriver{})
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/driver_go110.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/driver_go110.go
deleted file mode 100644
index eb5a8fe..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/driver_go110.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// +build go1.10
-
-package mysql
-
-import (
- "database/sql/driver"
-)
-
-// NewConnector returns new driver.Connector.
-func NewConnector(cfg *Config) (driver.Connector, error) {
- cfg = cfg.Clone()
- // normalize the contents of cfg so calls to NewConnector have the same
- // behavior as MySQLDriver.OpenConnector
- if err := cfg.normalize(); err != nil {
- return nil, err
- }
- return &connector{cfg: cfg}, nil
-}
-
-// OpenConnector implements driver.DriverContext.
-func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) {
- cfg, err := ParseDSN(dsn)
- if err != nil {
- return nil, err
- }
- return &connector{
- cfg: cfg,
- }, nil
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/driver_go110_test.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/driver_go110_test.go
deleted file mode 100644
index 19a0e59..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/driver_go110_test.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// +build go1.10
-
-package mysql
-
-import (
- "context"
- "database/sql"
- "database/sql/driver"
- "fmt"
- "net"
- "testing"
- "time"
-)
-
-var _ driver.DriverContext = &MySQLDriver{}
-
-type dialCtxKey struct{}
-
-func TestConnectorObeysDialTimeouts(t *testing.T) {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- RegisterDialContext("dialctxtest", func(ctx context.Context, addr string) (net.Conn, error) {
- var d net.Dialer
- if !ctx.Value(dialCtxKey{}).(bool) {
- return nil, fmt.Errorf("test error: query context is not propagated to our dialer")
- }
- return d.DialContext(ctx, prot, addr)
- })
-
- db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@dialctxtest(%s)/%s?timeout=30s", user, pass, addr, dbname))
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db.Close()
-
- ctx := context.WithValue(context.Background(), dialCtxKey{}, true)
-
- _, err = db.ExecContext(ctx, "DO 1")
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func configForTests(t *testing.T) *Config {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- mycnf := NewConfig()
- mycnf.User = user
- mycnf.Passwd = pass
- mycnf.Addr = addr
- mycnf.Net = prot
- mycnf.DBName = dbname
- return mycnf
-}
-
-func TestNewConnector(t *testing.T) {
- mycnf := configForTests(t)
- conn, err := NewConnector(mycnf)
- if err != nil {
- t.Fatal(err)
- }
-
- db := sql.OpenDB(conn)
- defer db.Close()
-
- if err := db.Ping(); err != nil {
- t.Fatal(err)
- }
-}
-
-type slowConnection struct {
- net.Conn
- slowdown time.Duration
-}
-
-func (sc *slowConnection) Read(b []byte) (int, error) {
- time.Sleep(sc.slowdown)
- return sc.Conn.Read(b)
-}
-
-type connectorHijack struct {
- driver.Connector
- connErr error
-}
-
-func (cw *connectorHijack) Connect(ctx context.Context) (driver.Conn, error) {
- var conn driver.Conn
- conn, cw.connErr = cw.Connector.Connect(ctx)
- return conn, cw.connErr
-}
-
-func TestConnectorTimeoutsDuringOpen(t *testing.T) {
- RegisterDialContext("slowconn", func(ctx context.Context, addr string) (net.Conn, error) {
- var d net.Dialer
- conn, err := d.DialContext(ctx, prot, addr)
- if err != nil {
- return nil, err
- }
- return &slowConnection{Conn: conn, slowdown: 100 * time.Millisecond}, nil
- })
-
- mycnf := configForTests(t)
- mycnf.Net = "slowconn"
-
- conn, err := NewConnector(mycnf)
- if err != nil {
- t.Fatal(err)
- }
-
- hijack := &connectorHijack{Connector: conn}
-
- db := sql.OpenDB(hijack)
- defer db.Close()
-
- ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
- defer cancel()
-
- _, err = db.ExecContext(ctx, "DO 1")
- if err != context.DeadlineExceeded {
- t.Fatalf("ExecContext should have timed out")
- }
- if hijack.connErr != context.DeadlineExceeded {
- t.Fatalf("(*Connector).Connect should have timed out")
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/driver_test.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/driver_test.go
deleted file mode 100644
index 3dee1ba..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/driver_test.go
+++ /dev/null
@@ -1,2996 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "context"
- "crypto/tls"
- "database/sql"
- "database/sql/driver"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "math"
- "net"
- "net/url"
- "os"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-)
-
-// Ensure that all the driver interfaces are implemented
-var (
- _ driver.Rows = &binaryRows{}
- _ driver.Rows = &textRows{}
-)
-
-var (
- user string
- pass string
- prot string
- addr string
- dbname string
- dsn string
- netAddr string
- available bool
-)
-
-var (
- tDate = time.Date(2012, 6, 14, 0, 0, 0, 0, time.UTC)
- sDate = "2012-06-14"
- tDateTime = time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)
- sDateTime = "2011-11-20 21:27:37"
- tDate0 = time.Time{}
- sDate0 = "0000-00-00"
- sDateTime0 = "0000-00-00 00:00:00"
-)
-
-// See https://github.com/go-sql-driver/mysql/wiki/Testing
-func init() {
- // get environment variables
- env := func(key, defaultValue string) string {
- if value := os.Getenv(key); value != "" {
- return value
- }
- return defaultValue
- }
- user = env("MYSQL_TEST_USER", "root")
- pass = env("MYSQL_TEST_PASS", "")
- prot = env("MYSQL_TEST_PROT", "tcp")
- addr = env("MYSQL_TEST_ADDR", "localhost:3306")
- dbname = env("MYSQL_TEST_DBNAME", "gotest")
- netAddr = fmt.Sprintf("%s(%s)", prot, addr)
- dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s", user, pass, netAddr, dbname)
- c, err := net.Dial(prot, addr)
- if err == nil {
- available = true
- c.Close()
- }
-}
-
-type DBTest struct {
- *testing.T
- db *sql.DB
-}
-
-type netErrorMock struct {
- temporary bool
- timeout bool
-}
-
-func (e netErrorMock) Temporary() bool {
- return e.temporary
-}
-
-func (e netErrorMock) Timeout() bool {
- return e.timeout
-}
-
-func (e netErrorMock) Error() string {
- return fmt.Sprintf("mock net error. Temporary: %v, Timeout %v", e.temporary, e.timeout)
-}
-
-func runTestsWithMultiStatement(t *testing.T, dsn string, tests ...func(dbt *DBTest)) {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- dsn += "&multiStatements=true"
- var db *sql.DB
- if _, err := ParseDSN(dsn); err != errInvalidDSNUnsafeCollation {
- db, err = sql.Open("mysql", dsn)
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db.Close()
- }
-
- dbt := &DBTest{t, db}
- for _, test := range tests {
- test(dbt)
- dbt.db.Exec("DROP TABLE IF EXISTS test")
- }
-}
-
-func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- db, err := sql.Open("mysql", dsn)
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db.Close()
-
- db.Exec("DROP TABLE IF EXISTS test")
-
- dsn2 := dsn + "&interpolateParams=true"
- var db2 *sql.DB
- if _, err := ParseDSN(dsn2); err != errInvalidDSNUnsafeCollation {
- db2, err = sql.Open("mysql", dsn2)
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db2.Close()
- }
-
- dsn3 := dsn + "&multiStatements=true"
- var db3 *sql.DB
- if _, err := ParseDSN(dsn3); err != errInvalidDSNUnsafeCollation {
- db3, err = sql.Open("mysql", dsn3)
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db3.Close()
- }
-
- dbt := &DBTest{t, db}
- dbt2 := &DBTest{t, db2}
- dbt3 := &DBTest{t, db3}
- for _, test := range tests {
- test(dbt)
- dbt.db.Exec("DROP TABLE IF EXISTS test")
- if db2 != nil {
- test(dbt2)
- dbt2.db.Exec("DROP TABLE IF EXISTS test")
- }
- if db3 != nil {
- test(dbt3)
- dbt3.db.Exec("DROP TABLE IF EXISTS test")
- }
- }
-}
-
-func (dbt *DBTest) fail(method, query string, err error) {
- if len(query) > 300 {
- query = "[query too large to print]"
- }
- dbt.Fatalf("error on %s %s: %s", method, query, err.Error())
-}
-
-func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) {
- res, err := dbt.db.Exec(query, args...)
- if err != nil {
- dbt.fail("exec", query, err)
- }
- return res
-}
-
-func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) {
- rows, err := dbt.db.Query(query, args...)
- if err != nil {
- dbt.fail("query", query, err)
- }
- return rows
-}
-
-func maybeSkip(t *testing.T, err error, skipErrno uint16) {
- mySQLErr, ok := err.(*MySQLError)
- if !ok {
- return
- }
-
- if mySQLErr.Number == skipErrno {
- t.Skipf("skipping test for error: %v", err)
- }
-}
-
-func TestEmptyQuery(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- // just a comment, no query
- rows := dbt.mustQuery("--")
- defer rows.Close()
- // will hang before #255
- if rows.Next() {
- dbt.Errorf("next on rows must be false")
- }
- })
-}
-
-func TestCRUD(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- // Create Table
- dbt.mustExec("CREATE TABLE test (value BOOL)")
-
- // Test for unexpected data
- var out bool
- rows := dbt.mustQuery("SELECT * FROM test")
- if rows.Next() {
- dbt.Error("unexpected data in empty table")
- }
- rows.Close()
-
- // Create Data
- res := dbt.mustExec("INSERT INTO test VALUES (1)")
- count, err := res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 1 {
- dbt.Fatalf("expected 1 affected row, got %d", count)
- }
-
- id, err := res.LastInsertId()
- if err != nil {
- dbt.Fatalf("res.LastInsertId() returned error: %s", err.Error())
- }
- if id != 0 {
- dbt.Fatalf("expected InsertId 0, got %d", id)
- }
-
- // Read
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if true != out {
- dbt.Errorf("true != %t", out)
- }
-
- if rows.Next() {
- dbt.Error("unexpected data")
- }
- } else {
- dbt.Error("no data")
- }
- rows.Close()
-
- // Update
- res = dbt.mustExec("UPDATE test SET value = ? WHERE value = ?", false, true)
- count, err = res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 1 {
- dbt.Fatalf("expected 1 affected row, got %d", count)
- }
-
- // Check Update
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if false != out {
- dbt.Errorf("false != %t", out)
- }
-
- if rows.Next() {
- dbt.Error("unexpected data")
- }
- } else {
- dbt.Error("no data")
- }
- rows.Close()
-
- // Delete
- res = dbt.mustExec("DELETE FROM test WHERE value = ?", false)
- count, err = res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 1 {
- dbt.Fatalf("expected 1 affected row, got %d", count)
- }
-
- // Check for unexpected rows
- res = dbt.mustExec("DELETE FROM test")
- count, err = res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 0 {
- dbt.Fatalf("expected 0 affected row, got %d", count)
- }
- })
-}
-
-func TestMultiQuery(t *testing.T) {
- runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
- // Create Table
- dbt.mustExec("CREATE TABLE `test` (`id` int(11) NOT NULL, `value` int(11) NOT NULL) ")
-
- // Create Data
- res := dbt.mustExec("INSERT INTO test VALUES (1, 1)")
- count, err := res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 1 {
- dbt.Fatalf("expected 1 affected row, got %d", count)
- }
-
- // Update
- res = dbt.mustExec("UPDATE test SET value = 3 WHERE id = 1; UPDATE test SET value = 4 WHERE id = 1; UPDATE test SET value = 5 WHERE id = 1;")
- count, err = res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 1 {
- dbt.Fatalf("expected 1 affected row, got %d", count)
- }
-
- // Read
- var out int
- rows := dbt.mustQuery("SELECT value FROM test WHERE id=1;")
- if rows.Next() {
- rows.Scan(&out)
- if 5 != out {
- dbt.Errorf("5 != %d", out)
- }
-
- if rows.Next() {
- dbt.Error("unexpected data")
- }
- } else {
- dbt.Error("no data")
- }
- rows.Close()
-
- })
-}
-
-func TestInt(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"}
- in := int64(42)
- var out int64
- var rows *sql.Rows
-
- // SIGNED
- for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ")")
-
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
-
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if in != out {
- dbt.Errorf("%s: %d != %d", v, in, out)
- }
- } else {
- dbt.Errorf("%s: no data", v)
- }
- rows.Close()
-
- dbt.mustExec("DROP TABLE IF EXISTS test")
- }
-
- // UNSIGNED ZEROFILL
- for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + " ZEROFILL)")
-
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
-
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if in != out {
- dbt.Errorf("%s ZEROFILL: %d != %d", v, in, out)
- }
- } else {
- dbt.Errorf("%s ZEROFILL: no data", v)
- }
- rows.Close()
-
- dbt.mustExec("DROP TABLE IF EXISTS test")
- }
- })
-}
-
-func TestFloat32(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- types := [2]string{"FLOAT", "DOUBLE"}
- in := float32(42.23)
- var out float32
- var rows *sql.Rows
- for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ")")
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if in != out {
- dbt.Errorf("%s: %g != %g", v, in, out)
- }
- } else {
- dbt.Errorf("%s: no data", v)
- }
- rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
- }
- })
-}
-
-func TestFloat64(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- types := [2]string{"FLOAT", "DOUBLE"}
- var expected float64 = 42.23
- var out float64
- var rows *sql.Rows
- for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ")")
- dbt.mustExec("INSERT INTO test VALUES (42.23)")
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if expected != out {
- dbt.Errorf("%s: %g != %g", v, expected, out)
- }
- } else {
- dbt.Errorf("%s: no data", v)
- }
- rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
- }
- })
-}
-
-func TestFloat64Placeholder(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- types := [2]string{"FLOAT", "DOUBLE"}
- var expected float64 = 42.23
- var out float64
- var rows *sql.Rows
- for _, v := range types {
- dbt.mustExec("CREATE TABLE test (id int, value " + v + ")")
- dbt.mustExec("INSERT INTO test VALUES (1, 42.23)")
- rows = dbt.mustQuery("SELECT value FROM test WHERE id = ?", 1)
- if rows.Next() {
- rows.Scan(&out)
- if expected != out {
- dbt.Errorf("%s: %g != %g", v, expected, out)
- }
- } else {
- dbt.Errorf("%s: no data", v)
- }
- rows.Close()
- dbt.mustExec("DROP TABLE IF EXISTS test")
- }
- })
-}
-
-func TestString(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"}
- in := "κόσμε üöäßñóùéàâÿœ'îë Árvíztűrő いろはにほへとちりぬるを イロハニホヘト דג סקרן чащах น่าฟังเอย"
- var out string
- var rows *sql.Rows
-
- for _, v := range types {
- dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8")
-
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
-
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if in != out {
- dbt.Errorf("%s: %s != %s", v, in, out)
- }
- } else {
- dbt.Errorf("%s: no data", v)
- }
- rows.Close()
-
- dbt.mustExec("DROP TABLE IF EXISTS test")
- }
-
- // BLOB
- dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8")
-
- id := 2
- in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " +
- "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " +
- "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " +
- "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. " +
- "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " +
- "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " +
- "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " +
- "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet."
- dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in)
-
- err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out)
- if err != nil {
- dbt.Fatalf("Error on BLOB-Query: %s", err.Error())
- } else if out != in {
- dbt.Errorf("BLOB: %s != %s", in, out)
- }
- })
-}
-
-func TestRawBytes(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- v1 := []byte("aaa")
- v2 := []byte("bbb")
- rows := dbt.mustQuery("SELECT ?, ?", v1, v2)
- defer rows.Close()
- if rows.Next() {
- var o1, o2 sql.RawBytes
- if err := rows.Scan(&o1, &o2); err != nil {
- dbt.Errorf("Got error: %v", err)
- }
- if !bytes.Equal(v1, o1) {
- dbt.Errorf("expected %v, got %v", v1, o1)
- }
- if !bytes.Equal(v2, o2) {
- dbt.Errorf("expected %v, got %v", v2, o2)
- }
- // https://github.com/go-sql-driver/mysql/issues/765
- // Appending to RawBytes shouldn't overwrite next RawBytes.
- o1 = append(o1, "xyzzy"...)
- if !bytes.Equal(v2, o2) {
- dbt.Errorf("expected %v, got %v", v2, o2)
- }
- } else {
- dbt.Errorf("no data")
- }
- })
-}
-
-type testValuer struct {
- value string
-}
-
-func (tv testValuer) Value() (driver.Value, error) {
- return tv.value, nil
-}
-
-func TestValuer(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- in := testValuer{"a_value"}
- var out string
- var rows *sql.Rows
-
- dbt.mustExec("CREATE TABLE test (value VARCHAR(255)) CHARACTER SET utf8")
- dbt.mustExec("INSERT INTO test VALUES (?)", in)
- rows = dbt.mustQuery("SELECT value FROM test")
- if rows.Next() {
- rows.Scan(&out)
- if in.value != out {
- dbt.Errorf("Valuer: %v != %s", in, out)
- }
- } else {
- dbt.Errorf("Valuer: no data")
- }
- rows.Close()
-
- dbt.mustExec("DROP TABLE IF EXISTS test")
- })
-}
-
-type testValuerWithValidation struct {
- value string
-}
-
-func (tv testValuerWithValidation) Value() (driver.Value, error) {
- if len(tv.value) == 0 {
- return nil, fmt.Errorf("Invalid string valuer. Value must not be empty")
- }
-
- return tv.value, nil
-}
-
-func TestValuerWithValidation(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- in := testValuerWithValidation{"a_value"}
- var out string
- var rows *sql.Rows
-
- dbt.mustExec("CREATE TABLE testValuer (value VARCHAR(255)) CHARACTER SET utf8")
- dbt.mustExec("INSERT INTO testValuer VALUES (?)", in)
-
- rows = dbt.mustQuery("SELECT value FROM testValuer")
- defer rows.Close()
-
- if rows.Next() {
- rows.Scan(&out)
- if in.value != out {
- dbt.Errorf("Valuer: %v != %s", in, out)
- }
- } else {
- dbt.Errorf("Valuer: no data")
- }
-
- if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", testValuerWithValidation{""}); err == nil {
- dbt.Errorf("Failed to check valuer error")
- }
-
- if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", nil); err != nil {
- dbt.Errorf("Failed to check nil")
- }
-
- if _, err := dbt.db.Exec("INSERT INTO testValuer VALUES (?)", map[string]bool{}); err == nil {
- dbt.Errorf("Failed to check not valuer")
- }
-
- dbt.mustExec("DROP TABLE IF EXISTS testValuer")
- })
-}
-
-type timeTests struct {
- dbtype string
- tlayout string
- tests []timeTest
-}
-
-type timeTest struct {
- s string // leading "!": do not use t as value in queries
- t time.Time
-}
-
-type timeMode byte
-
-func (t timeMode) String() string {
- switch t {
- case binaryString:
- return "binary:string"
- case binaryTime:
- return "binary:time.Time"
- case textString:
- return "text:string"
- }
- panic("unsupported timeMode")
-}
-
-func (t timeMode) Binary() bool {
- switch t {
- case binaryString, binaryTime:
- return true
- }
- return false
-}
-
-const (
- binaryString timeMode = iota
- binaryTime
- textString
-)
-
-func (t timeTest) genQuery(dbtype string, mode timeMode) string {
- var inner string
- if mode.Binary() {
- inner = "?"
- } else {
- inner = `"%s"`
- }
- return `SELECT cast(` + inner + ` as ` + dbtype + `)`
-}
-
-func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) {
- var rows *sql.Rows
- query := t.genQuery(dbtype, mode)
- switch mode {
- case binaryString:
- rows = dbt.mustQuery(query, t.s)
- case binaryTime:
- rows = dbt.mustQuery(query, t.t)
- case textString:
- query = fmt.Sprintf(query, t.s)
- rows = dbt.mustQuery(query)
- default:
- panic("unsupported mode")
- }
- defer rows.Close()
- var err error
- if !rows.Next() {
- err = rows.Err()
- if err == nil {
- err = fmt.Errorf("no data")
- }
- dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
- return
- }
- var dst interface{}
- err = rows.Scan(&dst)
- if err != nil {
- dbt.Errorf("%s [%s]: %s", dbtype, mode, err)
- return
- }
- switch val := dst.(type) {
- case []uint8:
- str := string(val)
- if str == t.s {
- return
- }
- if mode.Binary() && dbtype == "DATETIME" && len(str) == 26 && str[:19] == t.s {
- // a fix mainly for TravisCI:
- // accept full microsecond resolution in result for DATETIME columns
- // where the binary protocol was used
- return
- }
- dbt.Errorf("%s [%s] to string: expected %q, got %q",
- dbtype, mode,
- t.s, str,
- )
- case time.Time:
- if val == t.t {
- return
- }
- dbt.Errorf("%s [%s] to string: expected %q, got %q",
- dbtype, mode,
- t.s, val.Format(tlayout),
- )
- default:
- fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t})
- dbt.Errorf("%s [%s]: unhandled type %T (is '%v')",
- dbtype, mode,
- val, val,
- )
- }
-}
-
-func TestDateTime(t *testing.T) {
- afterTime := func(t time.Time, d string) time.Time {
- dur, err := time.ParseDuration(d)
- if err != nil {
- panic(err)
- }
- return t.Add(dur)
- }
- // NOTE: MySQL rounds DATETIME(x) up - but that's not included in the tests
- format := "2006-01-02 15:04:05.999999"
- t0 := time.Time{}
- tstr0 := "0000-00-00 00:00:00.000000"
- testcases := []timeTests{
- {"DATE", format[:10], []timeTest{
- {t: time.Date(2011, 11, 20, 0, 0, 0, 0, time.UTC)},
- {t: t0, s: tstr0[:10]},
- }},
- {"DATETIME", format[:19], []timeTest{
- {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)},
- {t: t0, s: tstr0[:19]},
- }},
- {"DATETIME(0)", format[:21], []timeTest{
- {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)},
- {t: t0, s: tstr0[:19]},
- }},
- {"DATETIME(1)", format[:21], []timeTest{
- {t: time.Date(2011, 11, 20, 21, 27, 37, 100000000, time.UTC)},
- {t: t0, s: tstr0[:21]},
- }},
- {"DATETIME(6)", format, []timeTest{
- {t: time.Date(2011, 11, 20, 21, 27, 37, 123456000, time.UTC)},
- {t: t0, s: tstr0},
- }},
- {"TIME", format[11:19], []timeTest{
- {t: afterTime(t0, "12345s")},
- {s: "!-12:34:56"},
- {s: "!-838:59:59"},
- {s: "!838:59:59"},
- {t: t0, s: tstr0[11:19]},
- }},
- {"TIME(0)", format[11:19], []timeTest{
- {t: afterTime(t0, "12345s")},
- {s: "!-12:34:56"},
- {s: "!-838:59:59"},
- {s: "!838:59:59"},
- {t: t0, s: tstr0[11:19]},
- }},
- {"TIME(1)", format[11:21], []timeTest{
- {t: afterTime(t0, "12345600ms")},
- {s: "!-12:34:56.7"},
- {s: "!-838:59:58.9"},
- {s: "!838:59:58.9"},
- {t: t0, s: tstr0[11:21]},
- }},
- {"TIME(6)", format[11:], []timeTest{
- {t: afterTime(t0, "1234567890123000ns")},
- {s: "!-12:34:56.789012"},
- {s: "!-838:59:58.999999"},
- {s: "!838:59:58.999999"},
- {t: t0, s: tstr0[11:]},
- }},
- }
- dsns := []string{
- dsn + "&parseTime=true",
- dsn + "&parseTime=false",
- }
- for _, testdsn := range dsns {
- runTests(t, testdsn, func(dbt *DBTest) {
- microsecsSupported := false
- zeroDateSupported := false
- var rows *sql.Rows
- var err error
- rows, err = dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`)
- if err == nil {
- rows.Scan(µsecsSupported)
- rows.Close()
- }
- rows, err = dbt.db.Query(`SELECT cast("0000-00-00" as DATE) = "0000-00-00"`)
- if err == nil {
- rows.Scan(&zeroDateSupported)
- rows.Close()
- }
- for _, setups := range testcases {
- if t := setups.dbtype; !microsecsSupported && t[len(t)-1:] == ")" {
- // skip fractional second tests if unsupported by server
- continue
- }
- for _, setup := range setups.tests {
- allowBinTime := true
- if setup.s == "" {
- // fill time string wherever Go can reliable produce it
- setup.s = setup.t.Format(setups.tlayout)
- } else if setup.s[0] == '!' {
- // skip tests using setup.t as source in queries
- allowBinTime = false
- // fix setup.s - remove the "!"
- setup.s = setup.s[1:]
- }
- if !zeroDateSupported && setup.s == tstr0[:len(setup.s)] {
- // skip disallowed 0000-00-00 date
- continue
- }
- setup.run(dbt, setups.dbtype, setups.tlayout, textString)
- setup.run(dbt, setups.dbtype, setups.tlayout, binaryString)
- if allowBinTime {
- setup.run(dbt, setups.dbtype, setups.tlayout, binaryTime)
- }
- }
- }
- })
- }
-}
-
-func TestTimestampMicros(t *testing.T) {
- format := "2006-01-02 15:04:05.999999"
- f0 := format[:19]
- f1 := format[:21]
- f6 := format[:26]
- runTests(t, dsn, func(dbt *DBTest) {
- // check if microseconds are supported.
- // Do not use timestamp(x) for that check - before 5.5.6, x would mean display width
- // and not precision.
- // Se last paragraph at http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html
- microsecsSupported := false
- if rows, err := dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`); err == nil {
- rows.Scan(µsecsSupported)
- rows.Close()
- }
- if !microsecsSupported {
- // skip test
- return
- }
- _, err := dbt.db.Exec(`
- CREATE TABLE test (
- value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `',
- value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `',
- value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `'
- )`,
- )
- if err != nil {
- dbt.Error(err)
- }
- defer dbt.mustExec("DROP TABLE IF EXISTS test")
- dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6)
- var res0, res1, res6 string
- rows := dbt.mustQuery("SELECT * FROM test")
- defer rows.Close()
- if !rows.Next() {
- dbt.Errorf("test contained no selectable values")
- }
- err = rows.Scan(&res0, &res1, &res6)
- if err != nil {
- dbt.Error(err)
- }
- if res0 != f0 {
- dbt.Errorf("expected %q, got %q", f0, res0)
- }
- if res1 != f1 {
- dbt.Errorf("expected %q, got %q", f1, res1)
- }
- if res6 != f6 {
- dbt.Errorf("expected %q, got %q", f6, res6)
- }
- })
-}
-
-func TestNULL(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- nullStmt, err := dbt.db.Prepare("SELECT NULL")
- if err != nil {
- dbt.Fatal(err)
- }
- defer nullStmt.Close()
-
- nonNullStmt, err := dbt.db.Prepare("SELECT 1")
- if err != nil {
- dbt.Fatal(err)
- }
- defer nonNullStmt.Close()
-
- // NullBool
- var nb sql.NullBool
- // Invalid
- if err = nullStmt.QueryRow().Scan(&nb); err != nil {
- dbt.Fatal(err)
- }
- if nb.Valid {
- dbt.Error("valid NullBool which should be invalid")
- }
- // Valid
- if err = nonNullStmt.QueryRow().Scan(&nb); err != nil {
- dbt.Fatal(err)
- }
- if !nb.Valid {
- dbt.Error("invalid NullBool which should be valid")
- } else if nb.Bool != true {
- dbt.Errorf("Unexpected NullBool value: %t (should be true)", nb.Bool)
- }
-
- // NullFloat64
- var nf sql.NullFloat64
- // Invalid
- if err = nullStmt.QueryRow().Scan(&nf); err != nil {
- dbt.Fatal(err)
- }
- if nf.Valid {
- dbt.Error("valid NullFloat64 which should be invalid")
- }
- // Valid
- if err = nonNullStmt.QueryRow().Scan(&nf); err != nil {
- dbt.Fatal(err)
- }
- if !nf.Valid {
- dbt.Error("invalid NullFloat64 which should be valid")
- } else if nf.Float64 != float64(1) {
- dbt.Errorf("unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64)
- }
-
- // NullInt64
- var ni sql.NullInt64
- // Invalid
- if err = nullStmt.QueryRow().Scan(&ni); err != nil {
- dbt.Fatal(err)
- }
- if ni.Valid {
- dbt.Error("valid NullInt64 which should be invalid")
- }
- // Valid
- if err = nonNullStmt.QueryRow().Scan(&ni); err != nil {
- dbt.Fatal(err)
- }
- if !ni.Valid {
- dbt.Error("invalid NullInt64 which should be valid")
- } else if ni.Int64 != int64(1) {
- dbt.Errorf("unexpected NullInt64 value: %d (should be 1)", ni.Int64)
- }
-
- // NullString
- var ns sql.NullString
- // Invalid
- if err = nullStmt.QueryRow().Scan(&ns); err != nil {
- dbt.Fatal(err)
- }
- if ns.Valid {
- dbt.Error("valid NullString which should be invalid")
- }
- // Valid
- if err = nonNullStmt.QueryRow().Scan(&ns); err != nil {
- dbt.Fatal(err)
- }
- if !ns.Valid {
- dbt.Error("invalid NullString which should be valid")
- } else if ns.String != `1` {
- dbt.Error("unexpected NullString value:" + ns.String + " (should be `1`)")
- }
-
- // nil-bytes
- var b []byte
- // Read nil
- if err = nullStmt.QueryRow().Scan(&b); err != nil {
- dbt.Fatal(err)
- }
- if b != nil {
- dbt.Error("non-nil []byte which should be nil")
- }
- // Read non-nil
- if err = nonNullStmt.QueryRow().Scan(&b); err != nil {
- dbt.Fatal(err)
- }
- if b == nil {
- dbt.Error("nil []byte which should be non-nil")
- }
- // Insert nil
- b = nil
- success := false
- if err = dbt.db.QueryRow("SELECT ? IS NULL", b).Scan(&success); err != nil {
- dbt.Fatal(err)
- }
- if !success {
- dbt.Error("inserting []byte(nil) as NULL failed")
- }
- // Check input==output with input==nil
- b = nil
- if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil {
- dbt.Fatal(err)
- }
- if b != nil {
- dbt.Error("non-nil echo from nil input")
- }
- // Check input==output with input!=nil
- b = []byte("")
- if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil {
- dbt.Fatal(err)
- }
- if b == nil {
- dbt.Error("nil echo from non-nil input")
- }
-
- // Insert NULL
- dbt.mustExec("CREATE TABLE test (dummmy1 int, value int, dummy2 int)")
-
- dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2)
-
- var out interface{}
- rows := dbt.mustQuery("SELECT * FROM test")
- defer rows.Close()
- if rows.Next() {
- rows.Scan(&out)
- if out != nil {
- dbt.Errorf("%v != nil", out)
- }
- } else {
- dbt.Error("no data")
- }
- })
-}
-
-func TestUint64(t *testing.T) {
- const (
- u0 = uint64(0)
- uall = ^u0
- uhigh = uall >> 1
- utop = ^uhigh
- s0 = int64(0)
- sall = ^s0
- shigh = int64(uhigh)
- stop = ^shigh
- )
- runTests(t, dsn, func(dbt *DBTest) {
- stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? ,?, ?, ?, ?, ?`)
- if err != nil {
- dbt.Fatal(err)
- }
- defer stmt.Close()
- row := stmt.QueryRow(
- u0, uhigh, utop, uall,
- s0, shigh, stop, sall,
- )
-
- var ua, ub, uc, ud uint64
- var sa, sb, sc, sd int64
-
- err = row.Scan(&ua, &ub, &uc, &ud, &sa, &sb, &sc, &sd)
- if err != nil {
- dbt.Fatal(err)
- }
- switch {
- case ua != u0,
- ub != uhigh,
- uc != utop,
- ud != uall,
- sa != s0,
- sb != shigh,
- sc != stop,
- sd != sall:
- dbt.Fatal("unexpected result value")
- }
- })
-}
-
-func TestLongData(t *testing.T) {
- runTests(t, dsn+"&maxAllowedPacket=0", func(dbt *DBTest) {
- var maxAllowedPacketSize int
- err := dbt.db.QueryRow("select @@max_allowed_packet").Scan(&maxAllowedPacketSize)
- if err != nil {
- dbt.Fatal(err)
- }
- maxAllowedPacketSize--
-
- // don't get too ambitious
- if maxAllowedPacketSize > 1<<25 {
- maxAllowedPacketSize = 1 << 25
- }
-
- dbt.mustExec("CREATE TABLE test (value LONGBLOB)")
-
- in := strings.Repeat(`a`, maxAllowedPacketSize+1)
- var out string
- var rows *sql.Rows
-
- // Long text data
- const nonDataQueryLen = 28 // length query w/o value
- inS := in[:maxAllowedPacketSize-nonDataQueryLen]
- dbt.mustExec("INSERT INTO test VALUES('" + inS + "')")
- rows = dbt.mustQuery("SELECT value FROM test")
- defer rows.Close()
- if rows.Next() {
- rows.Scan(&out)
- if inS != out {
- dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out))
- }
- if rows.Next() {
- dbt.Error("LONGBLOB: unexpexted row")
- }
- } else {
- dbt.Fatalf("LONGBLOB: no data")
- }
-
- // Empty table
- dbt.mustExec("TRUNCATE TABLE test")
-
- // Long binary data
- dbt.mustExec("INSERT INTO test VALUES(?)", in)
- rows = dbt.mustQuery("SELECT value FROM test WHERE 1=?", 1)
- defer rows.Close()
- if rows.Next() {
- rows.Scan(&out)
- if in != out {
- dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out))
- }
- if rows.Next() {
- dbt.Error("LONGBLOB: unexpexted row")
- }
- } else {
- if err = rows.Err(); err != nil {
- dbt.Fatalf("LONGBLOB: no data (err: %s)", err.Error())
- } else {
- dbt.Fatal("LONGBLOB: no data (err: )")
- }
- }
- })
-}
-
-func TestLoadData(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- verifyLoadDataResult := func() {
- rows, err := dbt.db.Query("SELECT * FROM test")
- if err != nil {
- dbt.Fatal(err.Error())
- }
-
- i := 0
- values := [4]string{
- "a string",
- "a string containing a \t",
- "a string containing a \n",
- "a string containing both \t\n",
- }
-
- var id int
- var value string
-
- for rows.Next() {
- i++
- err = rows.Scan(&id, &value)
- if err != nil {
- dbt.Fatal(err.Error())
- }
- if i != id {
- dbt.Fatalf("%d != %d", i, id)
- }
- if values[i-1] != value {
- dbt.Fatalf("%q != %q", values[i-1], value)
- }
- }
- err = rows.Err()
- if err != nil {
- dbt.Fatal(err.Error())
- }
-
- if i != 4 {
- dbt.Fatalf("rows count mismatch. Got %d, want 4", i)
- }
- }
-
- dbt.db.Exec("DROP TABLE IF EXISTS test")
- dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8")
-
- // Local File
- file, err := ioutil.TempFile("", "gotest")
- defer os.Remove(file.Name())
- if err != nil {
- dbt.Fatal(err)
- }
- RegisterLocalFile(file.Name())
-
- // Try first with empty file
- dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
- var count int
- err = dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&count)
- if err != nil {
- dbt.Fatal(err.Error())
- }
- if count != 0 {
- dbt.Fatalf("unexpected row count: got %d, want 0", count)
- }
-
- // Then fille File with data and try to load it
- file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n")
- file.Close()
- dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name()))
- verifyLoadDataResult()
-
- // Try with non-existing file
- _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test")
- if err == nil {
- dbt.Fatal("load non-existent file didn't fail")
- } else if err.Error() != "local file 'doesnotexist' is not registered" {
- dbt.Fatal(err.Error())
- }
-
- // Empty table
- dbt.mustExec("TRUNCATE TABLE test")
-
- // Reader
- RegisterReaderHandler("test", func() io.Reader {
- file, err = os.Open(file.Name())
- if err != nil {
- dbt.Fatal(err)
- }
- return file
- })
- dbt.mustExec("LOAD DATA LOCAL INFILE 'Reader::test' INTO TABLE test")
- verifyLoadDataResult()
- // negative test
- _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test")
- if err == nil {
- dbt.Fatal("load non-existent Reader didn't fail")
- } else if err.Error() != "Reader 'doesnotexist' is not registered" {
- dbt.Fatal(err.Error())
- }
- })
-}
-
-func TestFoundRows(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)")
- dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
-
- res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
- count, err := res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 2 {
- dbt.Fatalf("Expected 2 affected rows, got %d", count)
- }
- res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
- count, err = res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 2 {
- dbt.Fatalf("Expected 2 affected rows, got %d", count)
- }
- })
- runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)")
- dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)")
-
- res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0")
- count, err := res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 2 {
- dbt.Fatalf("Expected 2 matched rows, got %d", count)
- }
- res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1")
- count, err = res.RowsAffected()
- if err != nil {
- dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error())
- }
- if count != 3 {
- dbt.Fatalf("Expected 3 matched rows, got %d", count)
- }
- })
-}
-
-func TestTLS(t *testing.T) {
- tlsTestReq := func(dbt *DBTest) {
- if err := dbt.db.Ping(); err != nil {
- if err == ErrNoTLS {
- dbt.Skip("server does not support TLS")
- } else {
- dbt.Fatalf("error on Ping: %s", err.Error())
- }
- }
-
- rows := dbt.mustQuery("SHOW STATUS LIKE 'Ssl_cipher'")
- defer rows.Close()
-
- var variable, value *sql.RawBytes
- for rows.Next() {
- if err := rows.Scan(&variable, &value); err != nil {
- dbt.Fatal(err.Error())
- }
-
- if (*value == nil) || (len(*value) == 0) {
- dbt.Fatalf("no Cipher")
- } else {
- dbt.Logf("Cipher: %s", *value)
- }
- }
- }
- tlsTestOpt := func(dbt *DBTest) {
- if err := dbt.db.Ping(); err != nil {
- dbt.Fatalf("error on Ping: %s", err.Error())
- }
- }
-
- runTests(t, dsn+"&tls=preferred", tlsTestOpt)
- runTests(t, dsn+"&tls=skip-verify", tlsTestReq)
-
- // Verify that registering / using a custom cfg works
- RegisterTLSConfig("custom-skip-verify", &tls.Config{
- InsecureSkipVerify: true,
- })
- runTests(t, dsn+"&tls=custom-skip-verify", tlsTestReq)
-}
-
-func TestReuseClosedConnection(t *testing.T) {
- // this test does not use sql.database, it uses the driver directly
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- md := &MySQLDriver{}
- conn, err := md.Open(dsn)
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- stmt, err := conn.Prepare("DO 1")
- if err != nil {
- t.Fatalf("error preparing statement: %s", err.Error())
- }
- _, err = stmt.Exec(nil)
- if err != nil {
- t.Fatalf("error executing statement: %s", err.Error())
- }
- err = conn.Close()
- if err != nil {
- t.Fatalf("error closing connection: %s", err.Error())
- }
-
- defer func() {
- if err := recover(); err != nil {
- t.Errorf("panic after reusing a closed connection: %v", err)
- }
- }()
- _, err = stmt.Exec(nil)
- if err != nil && err != driver.ErrBadConn {
- t.Errorf("unexpected error '%s', expected '%s'",
- err.Error(), driver.ErrBadConn.Error())
- }
-}
-
-func TestCharset(t *testing.T) {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- mustSetCharset := func(charsetParam, expected string) {
- runTests(t, dsn+"&"+charsetParam, func(dbt *DBTest) {
- rows := dbt.mustQuery("SELECT @@character_set_connection")
- defer rows.Close()
-
- if !rows.Next() {
- dbt.Fatalf("error getting connection charset: %s", rows.Err())
- }
-
- var got string
- rows.Scan(&got)
-
- if got != expected {
- dbt.Fatalf("expected connection charset %s but got %s", expected, got)
- }
- })
- }
-
- // non utf8 test
- mustSetCharset("charset=ascii", "ascii")
-
- // when the first charset is invalid, use the second
- mustSetCharset("charset=none,utf8", "utf8")
-
- // when the first charset is valid, use it
- mustSetCharset("charset=ascii,utf8", "ascii")
- mustSetCharset("charset=utf8,ascii", "utf8")
-}
-
-func TestFailingCharset(t *testing.T) {
- runTests(t, dsn+"&charset=none", func(dbt *DBTest) {
- // run query to really establish connection...
- _, err := dbt.db.Exec("SELECT 1")
- if err == nil {
- dbt.db.Close()
- t.Fatalf("connection must not succeed without a valid charset")
- }
- })
-}
-
-func TestCollation(t *testing.T) {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- defaultCollation := "utf8mb4_general_ci"
- testCollations := []string{
- "", // do not set
- defaultCollation, // driver default
- "latin1_general_ci",
- "binary",
- "utf8_unicode_ci",
- "cp1257_bin",
- }
-
- for _, collation := range testCollations {
- var expected, tdsn string
- if collation != "" {
- tdsn = dsn + "&collation=" + collation
- expected = collation
- } else {
- tdsn = dsn
- expected = defaultCollation
- }
-
- runTests(t, tdsn, func(dbt *DBTest) {
- var got string
- if err := dbt.db.QueryRow("SELECT @@collation_connection").Scan(&got); err != nil {
- dbt.Fatal(err)
- }
-
- if got != expected {
- dbt.Fatalf("expected connection collation %s but got %s", expected, got)
- }
- })
- }
-}
-
-func TestColumnsWithAlias(t *testing.T) {
- runTests(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest) {
- rows := dbt.mustQuery("SELECT 1 AS A")
- defer rows.Close()
- cols, _ := rows.Columns()
- if len(cols) != 1 {
- t.Fatalf("expected 1 column, got %d", len(cols))
- }
- if cols[0] != "A" {
- t.Fatalf("expected column name \"A\", got \"%s\"", cols[0])
- }
-
- rows = dbt.mustQuery("SELECT * FROM (SELECT 1 AS one) AS A")
- defer rows.Close()
- cols, _ = rows.Columns()
- if len(cols) != 1 {
- t.Fatalf("expected 1 column, got %d", len(cols))
- }
- if cols[0] != "A.one" {
- t.Fatalf("expected column name \"A.one\", got \"%s\"", cols[0])
- }
- })
-}
-
-func TestRawBytesResultExceedsBuffer(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- // defaultBufSize from buffer.go
- expected := strings.Repeat("abc", defaultBufSize)
-
- rows := dbt.mustQuery("SELECT '" + expected + "'")
- defer rows.Close()
- if !rows.Next() {
- dbt.Error("expected result, got none")
- }
- var result sql.RawBytes
- rows.Scan(&result)
- if expected != string(result) {
- dbt.Error("result did not match expected value")
- }
- })
-}
-
-func TestTimezoneConversion(t *testing.T) {
- zones := []string{"UTC", "US/Central", "US/Pacific", "Local"}
-
- // Regression test for timezone handling
- tzTest := func(dbt *DBTest) {
- // Create table
- dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)")
-
- // Insert local time into database (should be converted)
- usCentral, _ := time.LoadLocation("US/Central")
- reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(usCentral)
- dbt.mustExec("INSERT INTO test VALUE (?)", reftime)
-
- // Retrieve time from DB
- rows := dbt.mustQuery("SELECT ts FROM test")
- defer rows.Close()
- if !rows.Next() {
- dbt.Fatal("did not get any rows out")
- }
-
- var dbTime time.Time
- err := rows.Scan(&dbTime)
- if err != nil {
- dbt.Fatal("Err", err)
- }
-
- // Check that dates match
- if reftime.Unix() != dbTime.Unix() {
- dbt.Errorf("times do not match.\n")
- dbt.Errorf(" Now(%v)=%v\n", usCentral, reftime)
- dbt.Errorf(" Now(UTC)=%v\n", dbTime)
- }
- }
-
- for _, tz := range zones {
- runTests(t, dsn+"&parseTime=true&loc="+url.QueryEscape(tz), tzTest)
- }
-}
-
-// Special cases
-
-func TestRowsClose(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- rows, err := dbt.db.Query("SELECT 1")
- if err != nil {
- dbt.Fatal(err)
- }
-
- err = rows.Close()
- if err != nil {
- dbt.Fatal(err)
- }
-
- if rows.Next() {
- dbt.Fatal("unexpected row after rows.Close()")
- }
-
- err = rows.Err()
- if err != nil {
- dbt.Fatal(err)
- }
- })
-}
-
-// dangling statements
-// http://code.google.com/p/go/issues/detail?id=3865
-func TestCloseStmtBeforeRows(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- stmt, err := dbt.db.Prepare("SELECT 1")
- if err != nil {
- dbt.Fatal(err)
- }
-
- rows, err := stmt.Query()
- if err != nil {
- stmt.Close()
- dbt.Fatal(err)
- }
- defer rows.Close()
-
- err = stmt.Close()
- if err != nil {
- dbt.Fatal(err)
- }
-
- if !rows.Next() {
- dbt.Fatal("getting row failed")
- } else {
- err = rows.Err()
- if err != nil {
- dbt.Fatal(err)
- }
-
- var out bool
- err = rows.Scan(&out)
- if err != nil {
- dbt.Fatalf("error on rows.Scan(): %s", err.Error())
- }
- if out != true {
- dbt.Errorf("true != %t", out)
- }
- }
- })
-}
-
-// It is valid to have multiple Rows for the same Stmt
-// http://code.google.com/p/go/issues/detail?id=3734
-func TestStmtMultiRows(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0")
- if err != nil {
- dbt.Fatal(err)
- }
-
- rows1, err := stmt.Query()
- if err != nil {
- stmt.Close()
- dbt.Fatal(err)
- }
- defer rows1.Close()
-
- rows2, err := stmt.Query()
- if err != nil {
- stmt.Close()
- dbt.Fatal(err)
- }
- defer rows2.Close()
-
- var out bool
-
- // 1
- if !rows1.Next() {
- dbt.Fatal("first rows1.Next failed")
- } else {
- err = rows1.Err()
- if err != nil {
- dbt.Fatal(err)
- }
-
- err = rows1.Scan(&out)
- if err != nil {
- dbt.Fatalf("error on rows.Scan(): %s", err.Error())
- }
- if out != true {
- dbt.Errorf("true != %t", out)
- }
- }
-
- if !rows2.Next() {
- dbt.Fatal("first rows2.Next failed")
- } else {
- err = rows2.Err()
- if err != nil {
- dbt.Fatal(err)
- }
-
- err = rows2.Scan(&out)
- if err != nil {
- dbt.Fatalf("error on rows.Scan(): %s", err.Error())
- }
- if out != true {
- dbt.Errorf("true != %t", out)
- }
- }
-
- // 2
- if !rows1.Next() {
- dbt.Fatal("second rows1.Next failed")
- } else {
- err = rows1.Err()
- if err != nil {
- dbt.Fatal(err)
- }
-
- err = rows1.Scan(&out)
- if err != nil {
- dbt.Fatalf("error on rows.Scan(): %s", err.Error())
- }
- if out != false {
- dbt.Errorf("false != %t", out)
- }
-
- if rows1.Next() {
- dbt.Fatal("unexpected row on rows1")
- }
- err = rows1.Close()
- if err != nil {
- dbt.Fatal(err)
- }
- }
-
- if !rows2.Next() {
- dbt.Fatal("second rows2.Next failed")
- } else {
- err = rows2.Err()
- if err != nil {
- dbt.Fatal(err)
- }
-
- err = rows2.Scan(&out)
- if err != nil {
- dbt.Fatalf("error on rows.Scan(): %s", err.Error())
- }
- if out != false {
- dbt.Errorf("false != %t", out)
- }
-
- if rows2.Next() {
- dbt.Fatal("unexpected row on rows2")
- }
- err = rows2.Close()
- if err != nil {
- dbt.Fatal(err)
- }
- }
- })
-}
-
-// Regression test for
-// * more than 32 NULL parameters (issue 209)
-// * more parameters than fit into the buffer (issue 201)
-// * parameters * 64 > max_allowed_packet (issue 734)
-func TestPreparedManyCols(t *testing.T) {
- numParams := 65535
- runTests(t, dsn, func(dbt *DBTest) {
- query := "SELECT ?" + strings.Repeat(",?", numParams-1)
- stmt, err := dbt.db.Prepare(query)
- if err != nil {
- dbt.Fatal(err)
- }
- defer stmt.Close()
-
- // create more parameters than fit into the buffer
- // which will take nil-values
- params := make([]interface{}, numParams)
- rows, err := stmt.Query(params...)
- if err != nil {
- dbt.Fatal(err)
- }
- rows.Close()
-
- // Create 0byte string which we can't send via STMT_LONG_DATA.
- for i := 0; i < numParams; i++ {
- params[i] = ""
- }
- rows, err = stmt.Query(params...)
- if err != nil {
- dbt.Fatal(err)
- }
- rows.Close()
- })
-}
-
-func TestConcurrent(t *testing.T) {
- if enabled, _ := readBool(os.Getenv("MYSQL_TEST_CONCURRENT")); !enabled {
- t.Skip("MYSQL_TEST_CONCURRENT env var not set")
- }
-
- runTests(t, dsn, func(dbt *DBTest) {
- var max int
- err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max)
- if err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- dbt.Logf("testing up to %d concurrent connections \r\n", max)
-
- var remaining, succeeded int32 = int32(max), 0
-
- var wg sync.WaitGroup
- wg.Add(max)
-
- var fatalError string
- var once sync.Once
- fatalf := func(s string, vals ...interface{}) {
- once.Do(func() {
- fatalError = fmt.Sprintf(s, vals...)
- })
- }
-
- for i := 0; i < max; i++ {
- go func(id int) {
- defer wg.Done()
-
- tx, err := dbt.db.Begin()
- atomic.AddInt32(&remaining, -1)
-
- if err != nil {
- if err.Error() != "Error 1040: Too many connections" {
- fatalf("error on conn %d: %s", id, err.Error())
- }
- return
- }
-
- // keep the connection busy until all connections are open
- for remaining > 0 {
- if _, err = tx.Exec("DO 1"); err != nil {
- fatalf("error on conn %d: %s", id, err.Error())
- return
- }
- }
-
- if err = tx.Commit(); err != nil {
- fatalf("error on conn %d: %s", id, err.Error())
- return
- }
-
- // everything went fine with this connection
- atomic.AddInt32(&succeeded, 1)
- }(i)
- }
-
- // wait until all conections are open
- wg.Wait()
-
- if fatalError != "" {
- dbt.Fatal(fatalError)
- }
-
- dbt.Logf("reached %d concurrent connections\r\n", succeeded)
- })
-}
-
-func testDialError(t *testing.T, dialErr error, expectErr error) {
- RegisterDialContext("mydial", func(ctx context.Context, addr string) (net.Conn, error) {
- return nil, dialErr
- })
-
- db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db.Close()
-
- _, err = db.Exec("DO 1")
- if err != expectErr {
- t.Fatalf("was expecting %s. Got: %s", dialErr, err)
- }
-}
-
-func TestDialUnknownError(t *testing.T) {
- testErr := fmt.Errorf("test")
- testDialError(t, testErr, testErr)
-}
-
-func TestDialNonRetryableNetErr(t *testing.T) {
- testErr := netErrorMock{}
- testDialError(t, testErr, testErr)
-}
-
-func TestDialTemporaryNetErr(t *testing.T) {
- testErr := netErrorMock{temporary: true}
- testDialError(t, testErr, driver.ErrBadConn)
-}
-
-// Tests custom dial functions
-func TestCustomDial(t *testing.T) {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- // our custom dial function which justs wraps net.Dial here
- RegisterDialContext("mydial", func(ctx context.Context, addr string) (net.Conn, error) {
- var d net.Dialer
- return d.DialContext(ctx, prot, addr)
- })
-
- db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s", user, pass, addr, dbname))
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db.Close()
-
- if _, err = db.Exec("DO 1"); err != nil {
- t.Fatalf("connection failed: %s", err.Error())
- }
-}
-
-func TestSQLInjection(t *testing.T) {
- createTest := func(arg string) func(dbt *DBTest) {
- return func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- dbt.mustExec("INSERT INTO test VALUES (?)", 1)
-
- var v int
- // NULL can't be equal to anything, the idea here is to inject query so it returns row
- // This test verifies that escapeQuotes and escapeBackslash are working properly
- err := dbt.db.QueryRow("SELECT v FROM test WHERE NULL = ?", arg).Scan(&v)
- if err == sql.ErrNoRows {
- return // success, sql injection failed
- } else if err == nil {
- dbt.Errorf("sql injection successful with arg: %s", arg)
- } else {
- dbt.Errorf("error running query with arg: %s; err: %s", arg, err.Error())
- }
- }
- }
-
- dsns := []string{
- dsn,
- dsn + "&sql_mode='NO_BACKSLASH_ESCAPES'",
- }
- for _, testdsn := range dsns {
- runTests(t, testdsn, createTest("1 OR 1=1"))
- runTests(t, testdsn, createTest("' OR '1'='1"))
- }
-}
-
-// Test if inserted data is correctly retrieved after being escaped
-func TestInsertRetrieveEscapedData(t *testing.T) {
- testData := func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v VARCHAR(255))")
-
- // All sequences that are escaped by escapeQuotes and escapeBackslash
- v := "foo \x00\n\r\x1a\"'\\"
- dbt.mustExec("INSERT INTO test VALUES (?)", v)
-
- var out string
- err := dbt.db.QueryRow("SELECT v FROM test").Scan(&out)
- if err != nil {
- dbt.Fatalf("%s", err.Error())
- }
-
- if out != v {
- dbt.Errorf("%q != %q", out, v)
- }
- }
-
- dsns := []string{
- dsn,
- dsn + "&sql_mode='NO_BACKSLASH_ESCAPES'",
- }
- for _, testdsn := range dsns {
- runTests(t, testdsn, testData)
- }
-}
-
-func TestUnixSocketAuthFail(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- // Save the current logger so we can restore it.
- oldLogger := errLog
-
- // Set a new logger so we can capture its output.
- buffer := bytes.NewBuffer(make([]byte, 0, 64))
- newLogger := log.New(buffer, "prefix: ", 0)
- SetLogger(newLogger)
-
- // Restore the logger.
- defer SetLogger(oldLogger)
-
- // Make a new DSN that uses the MySQL socket file and a bad password, which
- // we can make by simply appending any character to the real password.
- badPass := pass + "x"
- socket := ""
- if prot == "unix" {
- socket = addr
- } else {
- // Get socket file from MySQL.
- err := dbt.db.QueryRow("SELECT @@socket").Scan(&socket)
- if err != nil {
- t.Fatalf("error on SELECT @@socket: %s", err.Error())
- }
- }
- t.Logf("socket: %s", socket)
- badDSN := fmt.Sprintf("%s:%s@unix(%s)/%s?timeout=30s", user, badPass, socket, dbname)
- db, err := sql.Open("mysql", badDSN)
- if err != nil {
- t.Fatalf("error connecting: %s", err.Error())
- }
- defer db.Close()
-
- // Connect to MySQL for real. This will cause an auth failure.
- err = db.Ping()
- if err == nil {
- t.Error("expected Ping() to return an error")
- }
-
- // The driver should not log anything.
- if actual := buffer.String(); actual != "" {
- t.Errorf("expected no output, got %q", actual)
- }
- })
-}
-
-// See Issue #422
-func TestInterruptBySignal(t *testing.T) {
- runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
- dbt.mustExec(`
- DROP PROCEDURE IF EXISTS test_signal;
- CREATE PROCEDURE test_signal(ret INT)
- BEGIN
- SELECT ret;
- SIGNAL SQLSTATE
- '45001'
- SET
- MESSAGE_TEXT = "an error",
- MYSQL_ERRNO = 45001;
- END
- `)
- defer dbt.mustExec("DROP PROCEDURE test_signal")
-
- var val int
-
- // text protocol
- rows, err := dbt.db.Query("CALL test_signal(42)")
- if err != nil {
- dbt.Fatalf("error on text query: %s", err.Error())
- }
- for rows.Next() {
- if err := rows.Scan(&val); err != nil {
- dbt.Error(err)
- } else if val != 42 {
- dbt.Errorf("expected val to be 42")
- }
- }
- rows.Close()
-
- // binary protocol
- rows, err = dbt.db.Query("CALL test_signal(?)", 42)
- if err != nil {
- dbt.Fatalf("error on binary query: %s", err.Error())
- }
- for rows.Next() {
- if err := rows.Scan(&val); err != nil {
- dbt.Error(err)
- } else if val != 42 {
- dbt.Errorf("expected val to be 42")
- }
- }
- rows.Close()
- })
-}
-
-func TestColumnsReusesSlice(t *testing.T) {
- rows := mysqlRows{
- rs: resultSet{
- columns: []mysqlField{
- {
- tableName: "test",
- name: "A",
- },
- {
- tableName: "test",
- name: "B",
- },
- },
- },
- }
-
- allocs := testing.AllocsPerRun(1, func() {
- cols := rows.Columns()
-
- if len(cols) != 2 {
- t.Fatalf("expected 2 columns, got %d", len(cols))
- }
- })
-
- if allocs != 0 {
- t.Fatalf("expected 0 allocations, got %d", int(allocs))
- }
-
- if rows.rs.columnNames == nil {
- t.Fatalf("expected columnNames to be set, got nil")
- }
-}
-
-func TestRejectReadOnly(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- // Create Table
- dbt.mustExec("CREATE TABLE test (value BOOL)")
- // Set the session to read-only. We didn't set the `rejectReadOnly`
- // option, so any writes after this should fail.
- _, err := dbt.db.Exec("SET SESSION TRANSACTION READ ONLY")
- // Error 1193: Unknown system variable 'TRANSACTION' => skip test,
- // MySQL server version is too old
- maybeSkip(t, err, 1193)
- if _, err := dbt.db.Exec("DROP TABLE test"); err == nil {
- t.Fatalf("writing to DB in read-only session without " +
- "rejectReadOnly did not error")
- }
- // Set the session back to read-write so runTests() can properly clean
- // up the table `test`.
- dbt.mustExec("SET SESSION TRANSACTION READ WRITE")
- })
-
- // Enable the `rejectReadOnly` option.
- runTests(t, dsn+"&rejectReadOnly=true", func(dbt *DBTest) {
- // Create Table
- dbt.mustExec("CREATE TABLE test (value BOOL)")
- // Set the session to read only. Any writes after this should error on
- // a driver.ErrBadConn, and cause `database/sql` to initiate a new
- // connection.
- dbt.mustExec("SET SESSION TRANSACTION READ ONLY")
- // This would error, but `database/sql` should automatically retry on a
- // new connection which is not read-only, and eventually succeed.
- dbt.mustExec("DROP TABLE test")
- })
-}
-
-func TestPing(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- if err := dbt.db.Ping(); err != nil {
- dbt.fail("Ping", "Ping", err)
- }
- })
-}
-
-// See Issue #799
-func TestEmptyPassword(t *testing.T) {
- if !available {
- t.Skipf("MySQL server not running on %s", netAddr)
- }
-
- dsn := fmt.Sprintf("%s:%s@%s/%s?timeout=30s", user, "", netAddr, dbname)
- db, err := sql.Open("mysql", dsn)
- if err == nil {
- defer db.Close()
- err = db.Ping()
- }
-
- if pass == "" {
- if err != nil {
- t.Fatal(err.Error())
- }
- } else {
- if err == nil {
- t.Fatal("expected authentication error")
- }
- if !strings.HasPrefix(err.Error(), "Error 1045") {
- t.Fatal(err.Error())
- }
- }
-}
-
-// static interface implementation checks of mysqlConn
-var (
- _ driver.ConnBeginTx = &mysqlConn{}
- _ driver.ConnPrepareContext = &mysqlConn{}
- _ driver.ExecerContext = &mysqlConn{}
- _ driver.Pinger = &mysqlConn{}
- _ driver.QueryerContext = &mysqlConn{}
-)
-
-// static interface implementation checks of mysqlStmt
-var (
- _ driver.StmtExecContext = &mysqlStmt{}
- _ driver.StmtQueryContext = &mysqlStmt{}
-)
-
-// Ensure that all the driver interfaces are implemented
-var (
- // _ driver.RowsColumnTypeLength = &binaryRows{}
- // _ driver.RowsColumnTypeLength = &textRows{}
- _ driver.RowsColumnTypeDatabaseTypeName = &binaryRows{}
- _ driver.RowsColumnTypeDatabaseTypeName = &textRows{}
- _ driver.RowsColumnTypeNullable = &binaryRows{}
- _ driver.RowsColumnTypeNullable = &textRows{}
- _ driver.RowsColumnTypePrecisionScale = &binaryRows{}
- _ driver.RowsColumnTypePrecisionScale = &textRows{}
- _ driver.RowsColumnTypeScanType = &binaryRows{}
- _ driver.RowsColumnTypeScanType = &textRows{}
- _ driver.RowsNextResultSet = &binaryRows{}
- _ driver.RowsNextResultSet = &textRows{}
-)
-
-func TestMultiResultSet(t *testing.T) {
- type result struct {
- values [][]int
- columns []string
- }
-
- // checkRows is a helper test function to validate rows containing 3 result
- // sets with specific values and columns. The basic query would look like this:
- //
- // SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
- // SELECT 0 UNION SELECT 1;
- // SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
- //
- // to distinguish test cases the first string argument is put in front of
- // every error or fatal message.
- checkRows := func(desc string, rows *sql.Rows, dbt *DBTest) {
- expected := []result{
- {
- values: [][]int{{1, 2}, {3, 4}},
- columns: []string{"col1", "col2"},
- },
- {
- values: [][]int{{1, 2, 3}, {4, 5, 6}},
- columns: []string{"col1", "col2", "col3"},
- },
- }
-
- var res1 result
- for rows.Next() {
- var res [2]int
- if err := rows.Scan(&res[0], &res[1]); err != nil {
- dbt.Fatal(err)
- }
- res1.values = append(res1.values, res[:])
- }
-
- cols, err := rows.Columns()
- if err != nil {
- dbt.Fatal(desc, err)
- }
- res1.columns = cols
-
- if !reflect.DeepEqual(expected[0], res1) {
- dbt.Error(desc, "want =", expected[0], "got =", res1)
- }
-
- if !rows.NextResultSet() {
- dbt.Fatal(desc, "expected next result set")
- }
-
- // ignoring one result set
-
- if !rows.NextResultSet() {
- dbt.Fatal(desc, "expected next result set")
- }
-
- var res2 result
- cols, err = rows.Columns()
- if err != nil {
- dbt.Fatal(desc, err)
- }
- res2.columns = cols
-
- for rows.Next() {
- var res [3]int
- if err := rows.Scan(&res[0], &res[1], &res[2]); err != nil {
- dbt.Fatal(desc, err)
- }
- res2.values = append(res2.values, res[:])
- }
-
- if !reflect.DeepEqual(expected[1], res2) {
- dbt.Error(desc, "want =", expected[1], "got =", res2)
- }
-
- if rows.NextResultSet() {
- dbt.Error(desc, "unexpected next result set")
- }
-
- if err := rows.Err(); err != nil {
- dbt.Error(desc, err)
- }
- }
-
- runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
- rows := dbt.mustQuery(`DO 1;
- SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
- DO 1;
- SELECT 0 UNION SELECT 1;
- SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;`)
- defer rows.Close()
- checkRows("query: ", rows, dbt)
- })
-
- runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
- queries := []string{
- `
- DROP PROCEDURE IF EXISTS test_mrss;
- CREATE PROCEDURE test_mrss()
- BEGIN
- DO 1;
- SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
- DO 1;
- SELECT 0 UNION SELECT 1;
- SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
- END
- `,
- `
- DROP PROCEDURE IF EXISTS test_mrss;
- CREATE PROCEDURE test_mrss()
- BEGIN
- SELECT 1 AS col1, 2 AS col2 UNION SELECT 3, 4;
- SELECT 0 UNION SELECT 1;
- SELECT 1 AS col1, 2 AS col2, 3 AS col3 UNION SELECT 4, 5, 6;
- END
- `,
- }
-
- defer dbt.mustExec("DROP PROCEDURE IF EXISTS test_mrss")
-
- for i, query := range queries {
- dbt.mustExec(query)
-
- stmt, err := dbt.db.Prepare("CALL test_mrss()")
- if err != nil {
- dbt.Fatalf("%v (i=%d)", err, i)
- }
- defer stmt.Close()
-
- for j := 0; j < 2; j++ {
- rows, err := stmt.Query()
- if err != nil {
- dbt.Fatalf("%v (i=%d) (j=%d)", err, i, j)
- }
- checkRows(fmt.Sprintf("prepared stmt query (i=%d) (j=%d): ", i, j), rows, dbt)
- }
- }
- })
-}
-
-func TestMultiResultSetNoSelect(t *testing.T) {
- runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) {
- rows := dbt.mustQuery("DO 1; DO 2;")
- defer rows.Close()
-
- if rows.Next() {
- dbt.Error("unexpected row")
- }
-
- if rows.NextResultSet() {
- dbt.Error("unexpected next result set")
- }
-
- if err := rows.Err(); err != nil {
- dbt.Error("expected nil; got ", err)
- }
- })
-}
-
-// tests if rows are set in a proper state if some results were ignored before
-// calling rows.NextResultSet.
-func TestSkipResults(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- rows := dbt.mustQuery("SELECT 1, 2")
- defer rows.Close()
-
- if !rows.Next() {
- dbt.Error("expected row")
- }
-
- if rows.NextResultSet() {
- dbt.Error("unexpected next result set")
- }
-
- if err := rows.Err(); err != nil {
- dbt.Error("expected nil; got ", err)
- }
- })
-}
-
-func TestPingContext(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- ctx, cancel := context.WithCancel(context.Background())
- cancel()
- if err := dbt.db.PingContext(ctx); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- })
-}
-
-func TestContextCancelExec(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- ctx, cancel := context.WithCancel(context.Background())
-
- // Delay execution for just a bit until db.ExecContext has begun.
- defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
-
- // This query will be canceled.
- startTime := time.Now()
- if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- if d := time.Since(startTime); d > 500*time.Millisecond {
- dbt.Errorf("too long execution time: %s", d)
- }
-
- // Wait for the INSERT query to be done.
- time.Sleep(time.Second)
-
- // Check how many times the query is executed.
- var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- if v != 1 { // TODO: need to kill the query, and v should be 0.
- dbt.Skipf("[WARN] expected val to be 1, got %d", v)
- }
-
- // Context is already canceled, so error should come before execution.
- if _, err := dbt.db.ExecContext(ctx, "INSERT INTO test VALUES (1)"); err == nil {
- dbt.Error("expected error")
- } else if err.Error() != "context canceled" {
- dbt.Fatalf("unexpected error: %s", err)
- }
-
- // The second insert query will fail, so the table has no changes.
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- if v != 1 {
- dbt.Skipf("[WARN] expected val to be 1, got %d", v)
- }
- })
-}
-
-func TestContextCancelQuery(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- ctx, cancel := context.WithCancel(context.Background())
-
- // Delay execution for just a bit until db.ExecContext has begun.
- defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
-
- // This query will be canceled.
- startTime := time.Now()
- if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- if d := time.Since(startTime); d > 500*time.Millisecond {
- dbt.Errorf("too long execution time: %s", d)
- }
-
- // Wait for the INSERT query to be done.
- time.Sleep(time.Second)
-
- // Check how many times the query is executed.
- var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- if v != 1 { // TODO: need to kill the query, and v should be 0.
- dbt.Skipf("[WARN] expected val to be 1, got %d", v)
- }
-
- // Context is already canceled, so error should come before execution.
- if _, err := dbt.db.QueryContext(ctx, "INSERT INTO test VALUES (1)"); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
-
- // The second insert query will fail, so the table has no changes.
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- if v != 1 {
- dbt.Skipf("[WARN] expected val to be 1, got %d", v)
- }
- })
-}
-
-func TestContextCancelQueryRow(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- dbt.mustExec("INSERT INTO test VALUES (1), (2), (3)")
- ctx, cancel := context.WithCancel(context.Background())
-
- rows, err := dbt.db.QueryContext(ctx, "SELECT v FROM test")
- if err != nil {
- dbt.Fatalf("%s", err.Error())
- }
-
- // the first row will be succeed.
- var v int
- if !rows.Next() {
- dbt.Fatalf("unexpected end")
- }
- if err := rows.Scan(&v); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
-
- cancel()
- // make sure the driver receives the cancel request.
- time.Sleep(100 * time.Millisecond)
-
- if rows.Next() {
- dbt.Errorf("expected end, but not")
- }
- if err := rows.Err(); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- })
-}
-
-func TestContextCancelPrepare(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- ctx, cancel := context.WithCancel(context.Background())
- cancel()
- if _, err := dbt.db.PrepareContext(ctx, "SELECT 1"); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- })
-}
-
-func TestContextCancelStmtExec(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- ctx, cancel := context.WithCancel(context.Background())
- stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
- if err != nil {
- dbt.Fatalf("unexpected error: %v", err)
- }
-
- // Delay execution for just a bit until db.ExecContext has begun.
- defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
-
- // This query will be canceled.
- startTime := time.Now()
- if _, err := stmt.ExecContext(ctx); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- if d := time.Since(startTime); d > 500*time.Millisecond {
- dbt.Errorf("too long execution time: %s", d)
- }
-
- // Wait for the INSERT query to be done.
- time.Sleep(time.Second)
-
- // Check how many times the query is executed.
- var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- if v != 1 { // TODO: need to kill the query, and v should be 0.
- dbt.Skipf("[WARN] expected val to be 1, got %d", v)
- }
- })
-}
-
-func TestContextCancelStmtQuery(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- ctx, cancel := context.WithCancel(context.Background())
- stmt, err := dbt.db.PrepareContext(ctx, "INSERT INTO test VALUES (SLEEP(1))")
- if err != nil {
- dbt.Fatalf("unexpected error: %v", err)
- }
-
- // Delay execution for just a bit until db.ExecContext has begun.
- defer time.AfterFunc(250*time.Millisecond, cancel).Stop()
-
- // This query will be canceled.
- startTime := time.Now()
- if _, err := stmt.QueryContext(ctx); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- if d := time.Since(startTime); d > 500*time.Millisecond {
- dbt.Errorf("too long execution time: %s", d)
- }
-
- // Wait for the INSERT query has done.
- time.Sleep(time.Second)
-
- // Check how many times the query is executed.
- var v int
- if err := dbt.db.QueryRow("SELECT COUNT(*) FROM test").Scan(&v); err != nil {
- dbt.Fatalf("%s", err.Error())
- }
- if v != 1 { // TODO: need to kill the query, and v should be 0.
- dbt.Skipf("[WARN] expected val to be 1, got %d", v)
- }
- })
-}
-
-func TestContextCancelBegin(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- ctx, cancel := context.WithCancel(context.Background())
- tx, err := dbt.db.BeginTx(ctx, nil)
- if err != nil {
- dbt.Fatal(err)
- }
-
- // Delay execution for just a bit until db.ExecContext has begun.
- defer time.AfterFunc(100*time.Millisecond, cancel).Stop()
-
- // This query will be canceled.
- startTime := time.Now()
- if _, err := tx.ExecContext(ctx, "INSERT INTO test VALUES (SLEEP(1))"); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- if d := time.Since(startTime); d > 500*time.Millisecond {
- dbt.Errorf("too long execution time: %s", d)
- }
-
- // Transaction is canceled, so expect an error.
- switch err := tx.Commit(); err {
- case sql.ErrTxDone:
- // because the transaction has already been rollbacked.
- // the database/sql package watches ctx
- // and rollbacks when ctx is canceled.
- case context.Canceled:
- // the database/sql package rollbacks on another goroutine,
- // so the transaction may not be rollbacked depending on goroutine scheduling.
- default:
- dbt.Errorf("expected sql.ErrTxDone or context.Canceled, got %v", err)
- }
-
- // Context is canceled, so cannot begin a transaction.
- if _, err := dbt.db.BeginTx(ctx, nil); err != context.Canceled {
- dbt.Errorf("expected context.Canceled, got %v", err)
- }
- })
-}
-
-func TestContextBeginIsolationLevel(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- tx1, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
- Isolation: sql.LevelRepeatableRead,
- })
- if err != nil {
- dbt.Fatal(err)
- }
-
- tx2, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
- Isolation: sql.LevelReadCommitted,
- })
- if err != nil {
- dbt.Fatal(err)
- }
-
- _, err = tx1.ExecContext(ctx, "INSERT INTO test VALUES (1)")
- if err != nil {
- dbt.Fatal(err)
- }
-
- var v int
- row := tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
- if err := row.Scan(&v); err != nil {
- dbt.Fatal(err)
- }
- // Because writer transaction wasn't commited yet, it should be available
- if v != 0 {
- dbt.Errorf("expected val to be 0, got %d", v)
- }
-
- err = tx1.Commit()
- if err != nil {
- dbt.Fatal(err)
- }
-
- row = tx2.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
- if err := row.Scan(&v); err != nil {
- dbt.Fatal(err)
- }
- // Data written by writer transaction is already commited, it should be selectable
- if v != 1 {
- dbt.Errorf("expected val to be 1, got %d", v)
- }
- tx2.Commit()
- })
-}
-
-func TestContextBeginReadOnly(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (v INTEGER)")
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- tx, err := dbt.db.BeginTx(ctx, &sql.TxOptions{
- ReadOnly: true,
- })
- if _, ok := err.(*MySQLError); ok {
- dbt.Skip("It seems that your MySQL does not support READ ONLY transactions")
- return
- } else if err != nil {
- dbt.Fatal(err)
- }
-
- // INSERT queries fail in a READ ONLY transaction.
- _, err = tx.ExecContext(ctx, "INSERT INTO test VALUES (1)")
- if _, ok := err.(*MySQLError); !ok {
- dbt.Errorf("expected MySQLError, got %v", err)
- }
-
- // SELECT queries can be executed.
- var v int
- row := tx.QueryRowContext(ctx, "SELECT COUNT(*) FROM test")
- if err := row.Scan(&v); err != nil {
- dbt.Fatal(err)
- }
- if v != 0 {
- dbt.Errorf("expected val to be 0, got %d", v)
- }
-
- if err := tx.Commit(); err != nil {
- dbt.Fatal(err)
- }
- })
-}
-
-func TestRowsColumnTypes(t *testing.T) {
- niNULL := sql.NullInt64{Int64: 0, Valid: false}
- ni0 := sql.NullInt64{Int64: 0, Valid: true}
- ni1 := sql.NullInt64{Int64: 1, Valid: true}
- ni42 := sql.NullInt64{Int64: 42, Valid: true}
- nfNULL := sql.NullFloat64{Float64: 0.0, Valid: false}
- nf0 := sql.NullFloat64{Float64: 0.0, Valid: true}
- nf1337 := sql.NullFloat64{Float64: 13.37, Valid: true}
- nt0 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC), Valid: true}
- nt1 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 100000000, time.UTC), Valid: true}
- nt2 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 110000000, time.UTC), Valid: true}
- nt6 := NullTime{Time: time.Date(2006, 01, 02, 15, 04, 05, 111111000, time.UTC), Valid: true}
- nd1 := NullTime{Time: time.Date(2006, 01, 02, 0, 0, 0, 0, time.UTC), Valid: true}
- nd2 := NullTime{Time: time.Date(2006, 03, 04, 0, 0, 0, 0, time.UTC), Valid: true}
- ndNULL := NullTime{Time: time.Time{}, Valid: false}
- rbNULL := sql.RawBytes(nil)
- rb0 := sql.RawBytes("0")
- rb42 := sql.RawBytes("42")
- rbTest := sql.RawBytes("Test")
- rb0pad4 := sql.RawBytes("0\x00\x00\x00") // BINARY right-pads values with 0x00
- rbx0 := sql.RawBytes("\x00")
- rbx42 := sql.RawBytes("\x42")
-
- var columns = []struct {
- name string
- fieldType string // type used when creating table schema
- databaseTypeName string // actual type used by MySQL
- scanType reflect.Type
- nullable bool
- precision int64 // 0 if not ok
- scale int64
- valuesIn [3]string
- valuesOut [3]interface{}
- }{
- {"bit8null", "BIT(8)", "BIT", scanTypeRawBytes, true, 0, 0, [3]string{"0x0", "NULL", "0x42"}, [3]interface{}{rbx0, rbNULL, rbx42}},
- {"boolnull", "BOOL", "TINYINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "true", "0"}, [3]interface{}{niNULL, ni1, ni0}},
- {"bool", "BOOL NOT NULL", "TINYINT", scanTypeInt8, false, 0, 0, [3]string{"1", "0", "FALSE"}, [3]interface{}{int8(1), int8(0), int8(0)}},
- {"intnull", "INTEGER", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
- {"smallint", "SMALLINT NOT NULL", "SMALLINT", scanTypeInt16, false, 0, 0, [3]string{"0", "-32768", "32767"}, [3]interface{}{int16(0), int16(-32768), int16(32767)}},
- {"smallintnull", "SMALLINT", "SMALLINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
- {"int3null", "INT(3)", "INT", scanTypeNullInt, true, 0, 0, [3]string{"0", "NULL", "42"}, [3]interface{}{ni0, niNULL, ni42}},
- {"int7", "INT(7) NOT NULL", "INT", scanTypeInt32, false, 0, 0, [3]string{"0", "-1337", "42"}, [3]interface{}{int32(0), int32(-1337), int32(42)}},
- {"mediumintnull", "MEDIUMINT", "MEDIUMINT", scanTypeNullInt, true, 0, 0, [3]string{"0", "42", "NULL"}, [3]interface{}{ni0, ni42, niNULL}},
- {"bigint", "BIGINT NOT NULL", "BIGINT", scanTypeInt64, false, 0, 0, [3]string{"0", "65535", "-42"}, [3]interface{}{int64(0), int64(65535), int64(-42)}},
- {"bigintnull", "BIGINT", "BIGINT", scanTypeNullInt, true, 0, 0, [3]string{"NULL", "1", "42"}, [3]interface{}{niNULL, ni1, ni42}},
- {"tinyuint", "TINYINT UNSIGNED NOT NULL", "TINYINT", scanTypeUint8, false, 0, 0, [3]string{"0", "255", "42"}, [3]interface{}{uint8(0), uint8(255), uint8(42)}},
- {"smalluint", "SMALLINT UNSIGNED NOT NULL", "SMALLINT", scanTypeUint16, false, 0, 0, [3]string{"0", "65535", "42"}, [3]interface{}{uint16(0), uint16(65535), uint16(42)}},
- {"biguint", "BIGINT UNSIGNED NOT NULL", "BIGINT", scanTypeUint64, false, 0, 0, [3]string{"0", "65535", "42"}, [3]interface{}{uint64(0), uint64(65535), uint64(42)}},
- {"uint13", "INT(13) UNSIGNED NOT NULL", "INT", scanTypeUint32, false, 0, 0, [3]string{"0", "1337", "42"}, [3]interface{}{uint32(0), uint32(1337), uint32(42)}},
- {"float", "FLOAT NOT NULL", "FLOAT", scanTypeFloat32, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]interface{}{float32(0), float32(42), float32(13.37)}},
- {"floatnull", "FLOAT", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
- {"float74null", "FLOAT(7,4)", "FLOAT", scanTypeNullFloat, true, math.MaxInt64, 4, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
- {"double", "DOUBLE NOT NULL", "DOUBLE", scanTypeFloat64, false, math.MaxInt64, math.MaxInt64, [3]string{"0", "42", "13.37"}, [3]interface{}{float64(0), float64(42), float64(13.37)}},
- {"doublenull", "DOUBLE", "DOUBLE", scanTypeNullFloat, true, math.MaxInt64, math.MaxInt64, [3]string{"0", "NULL", "13.37"}, [3]interface{}{nf0, nfNULL, nf1337}},
- {"decimal1", "DECIMAL(10,6) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 10, 6, [3]string{"0", "13.37", "1234.123456"}, [3]interface{}{sql.RawBytes("0.000000"), sql.RawBytes("13.370000"), sql.RawBytes("1234.123456")}},
- {"decimal1null", "DECIMAL(10,6)", "DECIMAL", scanTypeRawBytes, true, 10, 6, [3]string{"0", "NULL", "1234.123456"}, [3]interface{}{sql.RawBytes("0.000000"), rbNULL, sql.RawBytes("1234.123456")}},
- {"decimal2", "DECIMAL(8,4) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 8, 4, [3]string{"0", "13.37", "1234.123456"}, [3]interface{}{sql.RawBytes("0.0000"), sql.RawBytes("13.3700"), sql.RawBytes("1234.1235")}},
- {"decimal2null", "DECIMAL(8,4)", "DECIMAL", scanTypeRawBytes, true, 8, 4, [3]string{"0", "NULL", "1234.123456"}, [3]interface{}{sql.RawBytes("0.0000"), rbNULL, sql.RawBytes("1234.1235")}},
- {"decimal3", "DECIMAL(5,0) NOT NULL", "DECIMAL", scanTypeRawBytes, false, 5, 0, [3]string{"0", "13.37", "-12345.123456"}, [3]interface{}{rb0, sql.RawBytes("13"), sql.RawBytes("-12345")}},
- {"decimal3null", "DECIMAL(5,0)", "DECIMAL", scanTypeRawBytes, true, 5, 0, [3]string{"0", "NULL", "-12345.123456"}, [3]interface{}{rb0, rbNULL, sql.RawBytes("-12345")}},
- {"char25null", "CHAR(25)", "CHAR", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"varchar42", "VARCHAR(42) NOT NULL", "VARCHAR", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"binary4null", "BINARY(4)", "BINARY", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0pad4, rbNULL, rbTest}},
- {"varbinary42", "VARBINARY(42) NOT NULL", "VARBINARY", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"tinyblobnull", "TINYBLOB", "BLOB", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"tinytextnull", "TINYTEXT", "TEXT", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"blobnull", "BLOB", "BLOB", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"textnull", "TEXT", "TEXT", scanTypeRawBytes, true, 0, 0, [3]string{"0", "NULL", "'Test'"}, [3]interface{}{rb0, rbNULL, rbTest}},
- {"mediumblob", "MEDIUMBLOB NOT NULL", "BLOB", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"mediumtext", "MEDIUMTEXT NOT NULL", "TEXT", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"longblob", "LONGBLOB NOT NULL", "BLOB", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"longtext", "LONGTEXT NOT NULL", "TEXT", scanTypeRawBytes, false, 0, 0, [3]string{"0", "'Test'", "42"}, [3]interface{}{rb0, rbTest, rb42}},
- {"datetime", "DATETIME", "DATETIME", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt0, nt0}},
- {"datetime2", "DATETIME(2)", "DATETIME", scanTypeNullTime, true, 2, 2, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt1, nt2}},
- {"datetime6", "DATETIME(6)", "DATETIME", scanTypeNullTime, true, 6, 6, [3]string{"'2006-01-02 15:04:05'", "'2006-01-02 15:04:05.1'", "'2006-01-02 15:04:05.111111'"}, [3]interface{}{nt0, nt1, nt6}},
- {"date", "DATE", "DATE", scanTypeNullTime, true, 0, 0, [3]string{"'2006-01-02'", "NULL", "'2006-03-04'"}, [3]interface{}{nd1, ndNULL, nd2}},
- {"year", "YEAR NOT NULL", "YEAR", scanTypeUint16, false, 0, 0, [3]string{"2006", "2000", "1994"}, [3]interface{}{uint16(2006), uint16(2000), uint16(1994)}},
- }
-
- schema := ""
- values1 := ""
- values2 := ""
- values3 := ""
- for _, column := range columns {
- schema += fmt.Sprintf("`%s` %s, ", column.name, column.fieldType)
- values1 += column.valuesIn[0] + ", "
- values2 += column.valuesIn[1] + ", "
- values3 += column.valuesIn[2] + ", "
- }
- schema = schema[:len(schema)-2]
- values1 = values1[:len(values1)-2]
- values2 = values2[:len(values2)-2]
- values3 = values3[:len(values3)-2]
-
- dsns := []string{
- dsn + "&parseTime=true",
- dsn + "&parseTime=false",
- }
- for _, testdsn := range dsns {
- runTests(t, testdsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (" + schema + ")")
- dbt.mustExec("INSERT INTO test VALUES (" + values1 + "), (" + values2 + "), (" + values3 + ")")
-
- rows, err := dbt.db.Query("SELECT * FROM test")
- if err != nil {
- t.Fatalf("Query: %v", err)
- }
-
- tt, err := rows.ColumnTypes()
- if err != nil {
- t.Fatalf("ColumnTypes: %v", err)
- }
-
- if len(tt) != len(columns) {
- t.Fatalf("unexpected number of columns: expected %d, got %d", len(columns), len(tt))
- }
-
- types := make([]reflect.Type, len(tt))
- for i, tp := range tt {
- column := columns[i]
-
- // Name
- name := tp.Name()
- if name != column.name {
- t.Errorf("column name mismatch %s != %s", name, column.name)
- continue
- }
-
- // DatabaseTypeName
- databaseTypeName := tp.DatabaseTypeName()
- if databaseTypeName != column.databaseTypeName {
- t.Errorf("databasetypename name mismatch for column %q: %s != %s", name, databaseTypeName, column.databaseTypeName)
- continue
- }
-
- // ScanType
- scanType := tp.ScanType()
- if scanType != column.scanType {
- if scanType == nil {
- t.Errorf("scantype is null for column %q", name)
- } else {
- t.Errorf("scantype mismatch for column %q: %s != %s", name, scanType.Name(), column.scanType.Name())
- }
- continue
- }
- types[i] = scanType
-
- // Nullable
- nullable, ok := tp.Nullable()
- if !ok {
- t.Errorf("nullable not ok %q", name)
- continue
- }
- if nullable != column.nullable {
- t.Errorf("nullable mismatch for column %q: %t != %t", name, nullable, column.nullable)
- }
-
- // Length
- // length, ok := tp.Length()
- // if length != column.length {
- // if !ok {
- // t.Errorf("length not ok for column %q", name)
- // } else {
- // t.Errorf("length mismatch for column %q: %d != %d", name, length, column.length)
- // }
- // continue
- // }
-
- // Precision and Scale
- precision, scale, ok := tp.DecimalSize()
- if precision != column.precision {
- if !ok {
- t.Errorf("precision not ok for column %q", name)
- } else {
- t.Errorf("precision mismatch for column %q: %d != %d", name, precision, column.precision)
- }
- continue
- }
- if scale != column.scale {
- if !ok {
- t.Errorf("scale not ok for column %q", name)
- } else {
- t.Errorf("scale mismatch for column %q: %d != %d", name, scale, column.scale)
- }
- continue
- }
- }
-
- values := make([]interface{}, len(tt))
- for i := range values {
- values[i] = reflect.New(types[i]).Interface()
- }
- i := 0
- for rows.Next() {
- err = rows.Scan(values...)
- if err != nil {
- t.Fatalf("failed to scan values in %v", err)
- }
- for j := range values {
- value := reflect.ValueOf(values[j]).Elem().Interface()
- if !reflect.DeepEqual(value, columns[j].valuesOut[i]) {
- if columns[j].scanType == scanTypeRawBytes {
- t.Errorf("row %d, column %d: %v != %v", i, j, string(value.(sql.RawBytes)), string(columns[j].valuesOut[i].(sql.RawBytes)))
- } else {
- t.Errorf("row %d, column %d: %v != %v", i, j, value, columns[j].valuesOut[i])
- }
- }
- }
- i++
- }
- if i != 3 {
- t.Errorf("expected 3 rows, got %d", i)
- }
-
- if err := rows.Close(); err != nil {
- t.Errorf("error closing rows: %s", err)
- }
- })
- }
-}
-
-func TestValuerWithValueReceiverGivenNilValue(t *testing.T) {
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (value VARCHAR(255))")
- dbt.db.Exec("INSERT INTO test VALUES (?)", (*testValuer)(nil))
- // This test will panic on the INSERT if ConvertValue() does not check for typed nil before calling Value()
- })
-}
-
-// TestRawBytesAreNotModified checks for a race condition that arises when a query context
-// is canceled while a user is calling rows.Scan. This is a more stringent test than the one
-// proposed in https://github.com/golang/go/issues/23519. Here we're explicitly using
-// `sql.RawBytes` to check the contents of our internal buffers are not modified after an implicit
-// call to `Rows.Close`, so Context cancellation should **not** invalidate the backing buffers.
-func TestRawBytesAreNotModified(t *testing.T) {
- const blob = "abcdefghijklmnop"
- const contextRaceIterations = 20
- const blobSize = defaultBufSize * 3 / 4 // Second row overwrites first row.
- const insertRows = 4
-
- var sqlBlobs = [2]string{
- strings.Repeat(blob, blobSize/len(blob)),
- strings.Repeat(strings.ToUpper(blob), blobSize/len(blob)),
- }
-
- runTests(t, dsn, func(dbt *DBTest) {
- dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8")
- for i := 0; i < insertRows; i++ {
- dbt.mustExec("INSERT INTO test VALUES (?, ?)", i+1, sqlBlobs[i&1])
- }
-
- for i := 0; i < contextRaceIterations; i++ {
- func() {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- rows, err := dbt.db.QueryContext(ctx, `SELECT id, value FROM test`)
- if err != nil {
- t.Fatal(err)
- }
-
- var b int
- var raw sql.RawBytes
- for rows.Next() {
- if err := rows.Scan(&b, &raw); err != nil {
- t.Fatal(err)
- }
-
- before := string(raw)
- // Ensure cancelling the query does not corrupt the contents of `raw`
- cancel()
- time.Sleep(time.Microsecond * 100)
- after := string(raw)
-
- if before != after {
- t.Fatalf("the backing storage for sql.RawBytes has been modified (i=%v)", i)
- }
- }
- rows.Close()
- }()
- }
- })
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/dsn.go
deleted file mode 100644
index 1d9b4ab..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/dsn.go
+++ /dev/null
@@ -1,636 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "crypto/rsa"
- "crypto/tls"
- "errors"
- "fmt"
- "math/big"
- "net"
- "net/url"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-var (
- errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?")
- errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)")
- errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name")
- errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
-)
-
-// Config is a configuration parsed from a DSN string.
-// If a new Config is created instead of being parsed from a DSN string,
-// the NewConfig function should be used, which sets default values.
-type Config struct {
- User string // Username
- Passwd string // Password (requires User)
- Net string // Network type
- Addr string // Network address (requires Net)
- DBName string // Database name
- Params map[string]string // Connection parameters
- Collation string // Connection collation
- Loc *time.Location // Location for time.Time values
- MaxAllowedPacket int // Max packet size allowed
- ServerPubKey string // Server public key name
- pubKey *rsa.PublicKey // Server public key
- TLSConfig string // TLS configuration name
- tls *tls.Config // TLS configuration
- Timeout time.Duration // Dial timeout
- ReadTimeout time.Duration // I/O read timeout
- WriteTimeout time.Duration // I/O write timeout
-
- AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
- AllowCleartextPasswords bool // Allows the cleartext client side plugin
- AllowNativePasswords bool // Allows the native password authentication method
- AllowOldPasswords bool // Allows the old insecure password method
- ClientFoundRows bool // Return number of matching rows instead of rows changed
- ColumnsWithAlias bool // Prepend table alias to column names
- InterpolateParams bool // Interpolate placeholders into query string
- MultiStatements bool // Allow multiple statements in one query
- ParseTime bool // Parse time values to time.Time
- RejectReadOnly bool // Reject read-only connections
-}
-
-// NewConfig creates a new Config and sets default values.
-func NewConfig() *Config {
- return &Config{
- Collation: defaultCollation,
- Loc: time.UTC,
- MaxAllowedPacket: defaultMaxAllowedPacket,
- AllowNativePasswords: true,
- }
-}
-
-func (cfg *Config) Clone() *Config {
- cp := *cfg
- if cp.tls != nil {
- cp.tls = cfg.tls.Clone()
- }
- if len(cp.Params) > 0 {
- cp.Params = make(map[string]string, len(cfg.Params))
- for k, v := range cfg.Params {
- cp.Params[k] = v
- }
- }
- if cfg.pubKey != nil {
- cp.pubKey = &rsa.PublicKey{
- N: new(big.Int).Set(cfg.pubKey.N),
- E: cfg.pubKey.E,
- }
- }
- return &cp
-}
-
-func (cfg *Config) normalize() error {
- if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
- return errInvalidDSNUnsafeCollation
- }
-
- // Set default network if empty
- if cfg.Net == "" {
- cfg.Net = "tcp"
- }
-
- // Set default address if empty
- if cfg.Addr == "" {
- switch cfg.Net {
- case "tcp":
- cfg.Addr = "127.0.0.1:3306"
- case "unix":
- cfg.Addr = "/tmp/mysql.sock"
- default:
- return errors.New("default addr for network '" + cfg.Net + "' unknown")
- }
- } else if cfg.Net == "tcp" {
- cfg.Addr = ensureHavePort(cfg.Addr)
- }
-
- switch cfg.TLSConfig {
- case "false", "":
- // don't set anything
- case "true":
- cfg.tls = &tls.Config{}
- case "skip-verify", "preferred":
- cfg.tls = &tls.Config{InsecureSkipVerify: true}
- default:
- cfg.tls = getTLSConfigClone(cfg.TLSConfig)
- if cfg.tls == nil {
- return errors.New("invalid value / unknown config name: " + cfg.TLSConfig)
- }
- }
-
- if cfg.tls != nil && cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify {
- host, _, err := net.SplitHostPort(cfg.Addr)
- if err == nil {
- cfg.tls.ServerName = host
- }
- }
-
- if cfg.ServerPubKey != "" {
- cfg.pubKey = getServerPubKey(cfg.ServerPubKey)
- if cfg.pubKey == nil {
- return errors.New("invalid value / unknown server pub key name: " + cfg.ServerPubKey)
- }
- }
-
- return nil
-}
-
-// FormatDSN formats the given Config into a DSN string which can be passed to
-// the driver.
-func (cfg *Config) FormatDSN() string {
- var buf bytes.Buffer
-
- // [username[:password]@]
- if len(cfg.User) > 0 {
- buf.WriteString(cfg.User)
- if len(cfg.Passwd) > 0 {
- buf.WriteByte(':')
- buf.WriteString(cfg.Passwd)
- }
- buf.WriteByte('@')
- }
-
- // [protocol[(address)]]
- if len(cfg.Net) > 0 {
- buf.WriteString(cfg.Net)
- if len(cfg.Addr) > 0 {
- buf.WriteByte('(')
- buf.WriteString(cfg.Addr)
- buf.WriteByte(')')
- }
- }
-
- // /dbname
- buf.WriteByte('/')
- buf.WriteString(cfg.DBName)
-
- // [?param1=value1&...¶mN=valueN]
- hasParam := false
-
- if cfg.AllowAllFiles {
- hasParam = true
- buf.WriteString("?allowAllFiles=true")
- }
-
- if cfg.AllowCleartextPasswords {
- if hasParam {
- buf.WriteString("&allowCleartextPasswords=true")
- } else {
- hasParam = true
- buf.WriteString("?allowCleartextPasswords=true")
- }
- }
-
- if !cfg.AllowNativePasswords {
- if hasParam {
- buf.WriteString("&allowNativePasswords=false")
- } else {
- hasParam = true
- buf.WriteString("?allowNativePasswords=false")
- }
- }
-
- if cfg.AllowOldPasswords {
- if hasParam {
- buf.WriteString("&allowOldPasswords=true")
- } else {
- hasParam = true
- buf.WriteString("?allowOldPasswords=true")
- }
- }
-
- if cfg.ClientFoundRows {
- if hasParam {
- buf.WriteString("&clientFoundRows=true")
- } else {
- hasParam = true
- buf.WriteString("?clientFoundRows=true")
- }
- }
-
- if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
- if hasParam {
- buf.WriteString("&collation=")
- } else {
- hasParam = true
- buf.WriteString("?collation=")
- }
- buf.WriteString(col)
- }
-
- if cfg.ColumnsWithAlias {
- if hasParam {
- buf.WriteString("&columnsWithAlias=true")
- } else {
- hasParam = true
- buf.WriteString("?columnsWithAlias=true")
- }
- }
-
- if cfg.InterpolateParams {
- if hasParam {
- buf.WriteString("&interpolateParams=true")
- } else {
- hasParam = true
- buf.WriteString("?interpolateParams=true")
- }
- }
-
- if cfg.Loc != time.UTC && cfg.Loc != nil {
- if hasParam {
- buf.WriteString("&loc=")
- } else {
- hasParam = true
- buf.WriteString("?loc=")
- }
- buf.WriteString(url.QueryEscape(cfg.Loc.String()))
- }
-
- if cfg.MultiStatements {
- if hasParam {
- buf.WriteString("&multiStatements=true")
- } else {
- hasParam = true
- buf.WriteString("?multiStatements=true")
- }
- }
-
- if cfg.ParseTime {
- if hasParam {
- buf.WriteString("&parseTime=true")
- } else {
- hasParam = true
- buf.WriteString("?parseTime=true")
- }
- }
-
- if cfg.ReadTimeout > 0 {
- if hasParam {
- buf.WriteString("&readTimeout=")
- } else {
- hasParam = true
- buf.WriteString("?readTimeout=")
- }
- buf.WriteString(cfg.ReadTimeout.String())
- }
-
- if cfg.RejectReadOnly {
- if hasParam {
- buf.WriteString("&rejectReadOnly=true")
- } else {
- hasParam = true
- buf.WriteString("?rejectReadOnly=true")
- }
- }
-
- if len(cfg.ServerPubKey) > 0 {
- if hasParam {
- buf.WriteString("&serverPubKey=")
- } else {
- hasParam = true
- buf.WriteString("?serverPubKey=")
- }
- buf.WriteString(url.QueryEscape(cfg.ServerPubKey))
- }
-
- if cfg.Timeout > 0 {
- if hasParam {
- buf.WriteString("&timeout=")
- } else {
- hasParam = true
- buf.WriteString("?timeout=")
- }
- buf.WriteString(cfg.Timeout.String())
- }
-
- if len(cfg.TLSConfig) > 0 {
- if hasParam {
- buf.WriteString("&tls=")
- } else {
- hasParam = true
- buf.WriteString("?tls=")
- }
- buf.WriteString(url.QueryEscape(cfg.TLSConfig))
- }
-
- if cfg.WriteTimeout > 0 {
- if hasParam {
- buf.WriteString("&writeTimeout=")
- } else {
- hasParam = true
- buf.WriteString("?writeTimeout=")
- }
- buf.WriteString(cfg.WriteTimeout.String())
- }
-
- if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
- if hasParam {
- buf.WriteString("&maxAllowedPacket=")
- } else {
- hasParam = true
- buf.WriteString("?maxAllowedPacket=")
- }
- buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket))
-
- }
-
- // other params
- if cfg.Params != nil {
- var params []string
- for param := range cfg.Params {
- params = append(params, param)
- }
- sort.Strings(params)
- for _, param := range params {
- if hasParam {
- buf.WriteByte('&')
- } else {
- hasParam = true
- buf.WriteByte('?')
- }
-
- buf.WriteString(param)
- buf.WriteByte('=')
- buf.WriteString(url.QueryEscape(cfg.Params[param]))
- }
- }
-
- return buf.String()
-}
-
-// ParseDSN parses the DSN string to a Config
-func ParseDSN(dsn string) (cfg *Config, err error) {
- // New config with some default values
- cfg = NewConfig()
-
- // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
- // Find the last '/' (since the password or the net addr might contain a '/')
- foundSlash := false
- for i := len(dsn) - 1; i >= 0; i-- {
- if dsn[i] == '/' {
- foundSlash = true
- var j, k int
-
- // left part is empty if i <= 0
- if i > 0 {
- // [username[:password]@][protocol[(address)]]
- // Find the last '@' in dsn[:i]
- for j = i; j >= 0; j-- {
- if dsn[j] == '@' {
- // username[:password]
- // Find the first ':' in dsn[:j]
- for k = 0; k < j; k++ {
- if dsn[k] == ':' {
- cfg.Passwd = dsn[k+1 : j]
- break
- }
- }
- cfg.User = dsn[:k]
-
- break
- }
- }
-
- // [protocol[(address)]]
- // Find the first '(' in dsn[j+1:i]
- for k = j + 1; k < i; k++ {
- if dsn[k] == '(' {
- // dsn[i-1] must be == ')' if an address is specified
- if dsn[i-1] != ')' {
- if strings.ContainsRune(dsn[k+1:i], ')') {
- return nil, errInvalidDSNUnescaped
- }
- return nil, errInvalidDSNAddr
- }
- cfg.Addr = dsn[k+1 : i-1]
- break
- }
- }
- cfg.Net = dsn[j+1 : k]
- }
-
- // dbname[?param1=value1&...¶mN=valueN]
- // Find the first '?' in dsn[i+1:]
- for j = i + 1; j < len(dsn); j++ {
- if dsn[j] == '?' {
- if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
- return
- }
- break
- }
- }
- cfg.DBName = dsn[i+1 : j]
-
- break
- }
- }
-
- if !foundSlash && len(dsn) > 0 {
- return nil, errInvalidDSNNoSlash
- }
-
- if err = cfg.normalize(); err != nil {
- return nil, err
- }
- return
-}
-
-// parseDSNParams parses the DSN "query string"
-// Values must be url.QueryEscape'ed
-func parseDSNParams(cfg *Config, params string) (err error) {
- for _, v := range strings.Split(params, "&") {
- param := strings.SplitN(v, "=", 2)
- if len(param) != 2 {
- continue
- }
-
- // cfg params
- switch value := param[1]; param[0] {
- // Disable INFILE whitelist / enable all files
- case "allowAllFiles":
- var isBool bool
- cfg.AllowAllFiles, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Use cleartext authentication mode (MySQL 5.5.10+)
- case "allowCleartextPasswords":
- var isBool bool
- cfg.AllowCleartextPasswords, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Use native password authentication
- case "allowNativePasswords":
- var isBool bool
- cfg.AllowNativePasswords, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Use old authentication mode (pre MySQL 4.1)
- case "allowOldPasswords":
- var isBool bool
- cfg.AllowOldPasswords, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Switch "rowsAffected" mode
- case "clientFoundRows":
- var isBool bool
- cfg.ClientFoundRows, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Collation
- case "collation":
- cfg.Collation = value
- break
-
- case "columnsWithAlias":
- var isBool bool
- cfg.ColumnsWithAlias, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Compression
- case "compress":
- return errors.New("compression not implemented yet")
-
- // Enable client side placeholder substitution
- case "interpolateParams":
- var isBool bool
- cfg.InterpolateParams, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Time Location
- case "loc":
- if value, err = url.QueryUnescape(value); err != nil {
- return
- }
- cfg.Loc, err = time.LoadLocation(value)
- if err != nil {
- return
- }
-
- // multiple statements in one query
- case "multiStatements":
- var isBool bool
- cfg.MultiStatements, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // time.Time parsing
- case "parseTime":
- var isBool bool
- cfg.ParseTime, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // I/O read Timeout
- case "readTimeout":
- cfg.ReadTimeout, err = time.ParseDuration(value)
- if err != nil {
- return
- }
-
- // Reject read-only connections
- case "rejectReadOnly":
- var isBool bool
- cfg.RejectReadOnly, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Server public key
- case "serverPubKey":
- name, err := url.QueryUnescape(value)
- if err != nil {
- return fmt.Errorf("invalid value for server pub key name: %v", err)
- }
- cfg.ServerPubKey = name
-
- // Strict mode
- case "strict":
- panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
-
- // Dial Timeout
- case "timeout":
- cfg.Timeout, err = time.ParseDuration(value)
- if err != nil {
- return
- }
-
- // TLS-Encryption
- case "tls":
- boolValue, isBool := readBool(value)
- if isBool {
- if boolValue {
- cfg.TLSConfig = "true"
- } else {
- cfg.TLSConfig = "false"
- }
- } else if vl := strings.ToLower(value); vl == "skip-verify" || vl == "preferred" {
- cfg.TLSConfig = vl
- } else {
- name, err := url.QueryUnescape(value)
- if err != nil {
- return fmt.Errorf("invalid value for TLS config name: %v", err)
- }
- cfg.TLSConfig = name
- }
-
- // I/O write Timeout
- case "writeTimeout":
- cfg.WriteTimeout, err = time.ParseDuration(value)
- if err != nil {
- return
- }
- case "maxAllowedPacket":
- cfg.MaxAllowedPacket, err = strconv.Atoi(value)
- if err != nil {
- return
- }
- default:
- // lazy init
- if cfg.Params == nil {
- cfg.Params = make(map[string]string)
- }
-
- if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
- return
- }
- }
- }
-
- return
-}
-
-func ensureHavePort(addr string) string {
- if _, _, err := net.SplitHostPort(addr); err != nil {
- return net.JoinHostPort(addr, "3306")
- }
- return addr
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/dsn_test.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/dsn_test.go
deleted file mode 100644
index 50dc293..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/dsn_test.go
+++ /dev/null
@@ -1,415 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "crypto/tls"
- "fmt"
- "net/url"
- "reflect"
- "testing"
- "time"
-)
-
-var testDSNs = []struct {
- in string
- out *Config
-}{{
- "username:password@protocol(address)/dbname?param=value",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-}, {
- "username:password@protocol(address)/dbname?param=value&columnsWithAlias=true",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, ColumnsWithAlias: true},
-}, {
- "username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true",
- &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, ColumnsWithAlias: true, MultiStatements: true},
-}, {
- "user@unix(/path/to/socket)/dbname?charset=utf8",
- &Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-}, {
- "user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, TLSConfig: "true"},
-}, {
- "user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, TLSConfig: "skip-verify"},
-}, {
- "user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci&maxAllowedPacket=16777216&tls=false&allowCleartextPasswords=true&parseTime=true&rejectReadOnly=true",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, TLSConfig: "false", AllowCleartextPasswords: true, AllowNativePasswords: true, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, ClientFoundRows: true, MaxAllowedPacket: 16777216, ParseTime: true, RejectReadOnly: true},
-}, {
- "user:password@/dbname?allowNativePasswords=false&maxAllowedPacket=0",
- &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: 0, AllowNativePasswords: false},
-}, {
- "user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local",
- &Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.Local, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-}, {
- "/dbname",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-}, {
- "@/",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-}, {
- "/",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-}, {
- "",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-}, {
- "user:p@/ssword@/",
- &Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-}, {
- "unix/?arg=%2Fsome%2Fpath.ext",
- &Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-}, {
- "tcp(127.0.0.1)/dbname",
- &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-}, {
- "tcp(de:ad:be:ef::ca:fe)/dbname",
- &Config{Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
-},
-}
-
-func TestDSNParser(t *testing.T) {
- for i, tst := range testDSNs {
- cfg, err := ParseDSN(tst.in)
- if err != nil {
- t.Error(err.Error())
- }
-
- // pointer not static
- cfg.tls = nil
-
- if !reflect.DeepEqual(cfg, tst.out) {
- t.Errorf("%d. ParseDSN(%q) mismatch:\ngot %+v\nwant %+v", i, tst.in, cfg, tst.out)
- }
- }
-}
-
-func TestDSNParserInvalid(t *testing.T) {
- var invalidDSNs = []string{
- "@net(addr/", // no closing brace
- "@tcp(/", // no closing brace
- "tcp(/", // no closing brace
- "(/", // no closing brace
- "net(addr)//", // unescaped
- "User:pass@tcp(1.2.3.4:3306)", // no trailing slash
- "net()/", // unknown default addr
- //"/dbname?arg=/some/unescaped/path",
- }
-
- for i, tst := range invalidDSNs {
- if _, err := ParseDSN(tst); err == nil {
- t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst)
- }
- }
-}
-
-func TestDSNReformat(t *testing.T) {
- for i, tst := range testDSNs {
- dsn1 := tst.in
- cfg1, err := ParseDSN(dsn1)
- if err != nil {
- t.Error(err.Error())
- continue
- }
- cfg1.tls = nil // pointer not static
- res1 := fmt.Sprintf("%+v", cfg1)
-
- dsn2 := cfg1.FormatDSN()
- cfg2, err := ParseDSN(dsn2)
- if err != nil {
- t.Error(err.Error())
- continue
- }
- cfg2.tls = nil // pointer not static
- res2 := fmt.Sprintf("%+v", cfg2)
-
- if res1 != res2 {
- t.Errorf("%d. %q does not match %q", i, res2, res1)
- }
- }
-}
-
-func TestDSNServerPubKey(t *testing.T) {
- baseDSN := "User:password@tcp(localhost:5555)/dbname?serverPubKey="
-
- RegisterServerPubKey("testKey", testPubKeyRSA)
- defer DeregisterServerPubKey("testKey")
-
- tst := baseDSN + "testKey"
- cfg, err := ParseDSN(tst)
- if err != nil {
- t.Error(err.Error())
- }
-
- if cfg.ServerPubKey != "testKey" {
- t.Errorf("unexpected cfg.ServerPubKey value: %v", cfg.ServerPubKey)
- }
- if cfg.pubKey != testPubKeyRSA {
- t.Error("pub key pointer doesn't match")
- }
-
- // Key is missing
- tst = baseDSN + "invalid_name"
- cfg, err = ParseDSN(tst)
- if err == nil {
- t.Errorf("invalid name in DSN (%s) but did not error. Got config: %#v", tst, cfg)
- }
-}
-
-func TestDSNServerPubKeyQueryEscape(t *testing.T) {
- const name = "&%!:"
- dsn := "User:password@tcp(localhost:5555)/dbname?serverPubKey=" + url.QueryEscape(name)
-
- RegisterServerPubKey(name, testPubKeyRSA)
- defer DeregisterServerPubKey(name)
-
- cfg, err := ParseDSN(dsn)
- if err != nil {
- t.Error(err.Error())
- }
-
- if cfg.pubKey != testPubKeyRSA {
- t.Error("pub key pointer doesn't match")
- }
-}
-
-func TestDSNWithCustomTLS(t *testing.T) {
- baseDSN := "User:password@tcp(localhost:5555)/dbname?tls="
- tlsCfg := tls.Config{}
-
- RegisterTLSConfig("utils_test", &tlsCfg)
- defer DeregisterTLSConfig("utils_test")
-
- // Custom TLS is missing
- tst := baseDSN + "invalid_tls"
- cfg, err := ParseDSN(tst)
- if err == nil {
- t.Errorf("invalid custom TLS in DSN (%s) but did not error. Got config: %#v", tst, cfg)
- }
-
- tst = baseDSN + "utils_test"
-
- // Custom TLS with a server name
- name := "foohost"
- tlsCfg.ServerName = name
- cfg, err = ParseDSN(tst)
-
- if err != nil {
- t.Error(err.Error())
- } else if cfg.tls.ServerName != name {
- t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst)
- }
-
- // Custom TLS without a server name
- name = "localhost"
- tlsCfg.ServerName = ""
- cfg, err = ParseDSN(tst)
-
- if err != nil {
- t.Error(err.Error())
- } else if cfg.tls.ServerName != name {
- t.Errorf("did not get the correct ServerName (%s) parsing DSN (%s).", name, tst)
- } else if tlsCfg.ServerName != "" {
- t.Errorf("tlsCfg was mutated ServerName (%s) should be empty parsing DSN (%s).", name, tst)
- }
-}
-
-func TestDSNTLSConfig(t *testing.T) {
- expectedServerName := "example.com"
- dsn := "tcp(example.com:1234)/?tls=true"
-
- cfg, err := ParseDSN(dsn)
- if err != nil {
- t.Error(err.Error())
- }
- if cfg.tls == nil {
- t.Error("cfg.tls should not be nil")
- }
- if cfg.tls.ServerName != expectedServerName {
- t.Errorf("cfg.tls.ServerName should be %q, got %q (host with port)", expectedServerName, cfg.tls.ServerName)
- }
-
- dsn = "tcp(example.com)/?tls=true"
- cfg, err = ParseDSN(dsn)
- if err != nil {
- t.Error(err.Error())
- }
- if cfg.tls == nil {
- t.Error("cfg.tls should not be nil")
- }
- if cfg.tls.ServerName != expectedServerName {
- t.Errorf("cfg.tls.ServerName should be %q, got %q (host without port)", expectedServerName, cfg.tls.ServerName)
- }
-}
-
-func TestDSNWithCustomTLSQueryEscape(t *testing.T) {
- const configKey = "&%!:"
- dsn := "User:password@tcp(localhost:5555)/dbname?tls=" + url.QueryEscape(configKey)
- name := "foohost"
- tlsCfg := tls.Config{ServerName: name}
-
- RegisterTLSConfig(configKey, &tlsCfg)
- defer DeregisterTLSConfig(configKey)
-
- cfg, err := ParseDSN(dsn)
-
- if err != nil {
- t.Error(err.Error())
- } else if cfg.tls.ServerName != name {
- t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, dsn)
- }
-}
-
-func TestDSNUnsafeCollation(t *testing.T) {
- _, err := ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true")
- if err != errInvalidDSNUnsafeCollation {
- t.Errorf("expected %v, got %v", errInvalidDSNUnsafeCollation, err)
- }
-
- _, err = ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false")
- if err != nil {
- t.Errorf("expected %v, got %v", nil, err)
- }
-
- _, err = ParseDSN("/dbname?collation=gbk_chinese_ci")
- if err != nil {
- t.Errorf("expected %v, got %v", nil, err)
- }
-
- _, err = ParseDSN("/dbname?collation=ascii_bin&interpolateParams=true")
- if err != nil {
- t.Errorf("expected %v, got %v", nil, err)
- }
-
- _, err = ParseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true")
- if err != nil {
- t.Errorf("expected %v, got %v", nil, err)
- }
-
- _, err = ParseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true")
- if err != nil {
- t.Errorf("expected %v, got %v", nil, err)
- }
-
- _, err = ParseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true")
- if err != nil {
- t.Errorf("expected %v, got %v", nil, err)
- }
-}
-
-func TestParamsAreSorted(t *testing.T) {
- expected := "/dbname?interpolateParams=true&foobar=baz&quux=loo"
- cfg := NewConfig()
- cfg.DBName = "dbname"
- cfg.InterpolateParams = true
- cfg.Params = map[string]string{
- "quux": "loo",
- "foobar": "baz",
- }
- actual := cfg.FormatDSN()
- if actual != expected {
- t.Errorf("generic Config.Params were not sorted: want %#v, got %#v", expected, actual)
- }
-}
-
-func TestCloneConfig(t *testing.T) {
- RegisterServerPubKey("testKey", testPubKeyRSA)
- defer DeregisterServerPubKey("testKey")
-
- expectedServerName := "example.com"
- dsn := "tcp(example.com:1234)/?tls=true&foobar=baz&serverPubKey=testKey"
- cfg, err := ParseDSN(dsn)
- if err != nil {
- t.Fatal(err.Error())
- }
-
- cfg2 := cfg.Clone()
- if cfg == cfg2 {
- t.Errorf("Config.Clone did not create a separate config struct")
- }
-
- if cfg2.tls.ServerName != expectedServerName {
- t.Errorf("cfg.tls.ServerName should be %q, got %q (host with port)", expectedServerName, cfg.tls.ServerName)
- }
-
- cfg2.tls.ServerName = "example2.com"
- if cfg.tls.ServerName == cfg2.tls.ServerName {
- t.Errorf("changed cfg.tls.Server name should not propagate to original Config")
- }
-
- if _, ok := cfg2.Params["foobar"]; !ok {
- t.Errorf("cloned Config is missing custom params")
- }
-
- delete(cfg2.Params, "foobar")
-
- if _, ok := cfg.Params["foobar"]; !ok {
- t.Errorf("custom params in cloned Config should not propagate to original Config")
- }
-
- if !reflect.DeepEqual(cfg.pubKey, cfg2.pubKey) {
- t.Errorf("public key in Config should be identical")
- }
-}
-
-func TestNormalizeTLSConfig(t *testing.T) {
- tt := []struct {
- tlsConfig string
- want *tls.Config
- }{
- {"", nil},
- {"false", nil},
- {"true", &tls.Config{ServerName: "myserver"}},
- {"skip-verify", &tls.Config{InsecureSkipVerify: true}},
- {"preferred", &tls.Config{InsecureSkipVerify: true}},
- {"test_tls_config", &tls.Config{ServerName: "myServerName"}},
- }
-
- RegisterTLSConfig("test_tls_config", &tls.Config{ServerName: "myServerName"})
- defer func() { DeregisterTLSConfig("test_tls_config") }()
-
- for _, tc := range tt {
- t.Run(tc.tlsConfig, func(t *testing.T) {
- cfg := &Config{
- Addr: "myserver:3306",
- TLSConfig: tc.tlsConfig,
- }
-
- cfg.normalize()
-
- if cfg.tls == nil {
- if tc.want != nil {
- t.Fatal("wanted a tls config but got nil instead")
- }
- return
- }
-
- if cfg.tls.ServerName != tc.want.ServerName {
- t.Errorf("tls.ServerName doesn't match (want: '%s', got: '%s')",
- tc.want.ServerName, cfg.tls.ServerName)
- }
- if cfg.tls.InsecureSkipVerify != tc.want.InsecureSkipVerify {
- t.Errorf("tls.InsecureSkipVerify doesn't match (want: %T, got :%T)",
- tc.want.InsecureSkipVerify, cfg.tls.InsecureSkipVerify)
- }
- })
- }
-}
-
-func BenchmarkParseDSN(b *testing.B) {
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- for _, tst := range testDSNs {
- if _, err := ParseDSN(tst.in); err != nil {
- b.Error(err.Error())
- }
- }
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/errors.go
deleted file mode 100644
index 760782f..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/errors.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "errors"
- "fmt"
- "log"
- "os"
-)
-
-// Various errors the driver might return. Can change between driver versions.
-var (
- ErrInvalidConn = errors.New("invalid connection")
- ErrMalformPkt = errors.New("malformed packet")
- ErrNoTLS = errors.New("TLS requested but server does not support TLS")
- ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
- ErrNativePassword = errors.New("this user requires mysql native password authentication.")
- ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
- ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
- ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
- ErrPktSync = errors.New("commands out of sync. You can't run this command now")
- ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
- ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
- ErrBusyBuffer = errors.New("busy buffer")
-
- // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
- // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
- // to trigger a resend.
- // See https://github.com/go-sql-driver/mysql/pull/302
- errBadConnNoWrite = errors.New("bad connection")
-)
-
-var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
-
-// Logger is used to log critical error messages.
-type Logger interface {
- Print(v ...interface{})
-}
-
-// SetLogger is used to set the logger for critical errors.
-// The initial logger is os.Stderr.
-func SetLogger(logger Logger) error {
- if logger == nil {
- return errors.New("logger is nil")
- }
- errLog = logger
- return nil
-}
-
-// MySQLError is an error type which represents a single MySQL error
-type MySQLError struct {
- Number uint16
- Message string
-}
-
-func (me *MySQLError) Error() string {
- return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/errors_test.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/errors_test.go
deleted file mode 100644
index 96f9126..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/errors_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "log"
- "testing"
-)
-
-func TestErrorsSetLogger(t *testing.T) {
- previous := errLog
- defer func() {
- errLog = previous
- }()
-
- // set up logger
- const expected = "prefix: test\n"
- buffer := bytes.NewBuffer(make([]byte, 0, 64))
- logger := log.New(buffer, "prefix: ", 0)
-
- // print
- SetLogger(logger)
- errLog.Print("test")
-
- // check result
- if actual := buffer.String(); actual != expected {
- t.Errorf("expected %q, got %q", expected, actual)
- }
-}
-
-func TestErrorsStrictIgnoreNotes(t *testing.T) {
- runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) {
- dbt.mustExec("DROP TABLE IF EXISTS does_not_exist")
- })
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/fields.go
deleted file mode 100644
index e1e2ece..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/fields.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "database/sql"
- "reflect"
-)
-
-func (mf *mysqlField) typeDatabaseName() string {
- switch mf.fieldType {
- case fieldTypeBit:
- return "BIT"
- case fieldTypeBLOB:
- if mf.charSet != collations[binaryCollation] {
- return "TEXT"
- }
- return "BLOB"
- case fieldTypeDate:
- return "DATE"
- case fieldTypeDateTime:
- return "DATETIME"
- case fieldTypeDecimal:
- return "DECIMAL"
- case fieldTypeDouble:
- return "DOUBLE"
- case fieldTypeEnum:
- return "ENUM"
- case fieldTypeFloat:
- return "FLOAT"
- case fieldTypeGeometry:
- return "GEOMETRY"
- case fieldTypeInt24:
- return "MEDIUMINT"
- case fieldTypeJSON:
- return "JSON"
- case fieldTypeLong:
- return "INT"
- case fieldTypeLongBLOB:
- if mf.charSet != collations[binaryCollation] {
- return "LONGTEXT"
- }
- return "LONGBLOB"
- case fieldTypeLongLong:
- return "BIGINT"
- case fieldTypeMediumBLOB:
- if mf.charSet != collations[binaryCollation] {
- return "MEDIUMTEXT"
- }
- return "MEDIUMBLOB"
- case fieldTypeNewDate:
- return "DATE"
- case fieldTypeNewDecimal:
- return "DECIMAL"
- case fieldTypeNULL:
- return "NULL"
- case fieldTypeSet:
- return "SET"
- case fieldTypeShort:
- return "SMALLINT"
- case fieldTypeString:
- if mf.charSet == collations[binaryCollation] {
- return "BINARY"
- }
- return "CHAR"
- case fieldTypeTime:
- return "TIME"
- case fieldTypeTimestamp:
- return "TIMESTAMP"
- case fieldTypeTiny:
- return "TINYINT"
- case fieldTypeTinyBLOB:
- if mf.charSet != collations[binaryCollation] {
- return "TINYTEXT"
- }
- return "TINYBLOB"
- case fieldTypeVarChar:
- if mf.charSet == collations[binaryCollation] {
- return "VARBINARY"
- }
- return "VARCHAR"
- case fieldTypeVarString:
- if mf.charSet == collations[binaryCollation] {
- return "VARBINARY"
- }
- return "VARCHAR"
- case fieldTypeYear:
- return "YEAR"
- default:
- return ""
- }
-}
-
-var (
- scanTypeFloat32 = reflect.TypeOf(float32(0))
- scanTypeFloat64 = reflect.TypeOf(float64(0))
- scanTypeInt8 = reflect.TypeOf(int8(0))
- scanTypeInt16 = reflect.TypeOf(int16(0))
- scanTypeInt32 = reflect.TypeOf(int32(0))
- scanTypeInt64 = reflect.TypeOf(int64(0))
- scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
- scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
- scanTypeNullTime = reflect.TypeOf(NullTime{})
- scanTypeUint8 = reflect.TypeOf(uint8(0))
- scanTypeUint16 = reflect.TypeOf(uint16(0))
- scanTypeUint32 = reflect.TypeOf(uint32(0))
- scanTypeUint64 = reflect.TypeOf(uint64(0))
- scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
- scanTypeUnknown = reflect.TypeOf(new(interface{}))
-)
-
-type mysqlField struct {
- tableName string
- name string
- length uint32
- flags fieldFlag
- fieldType fieldType
- decimals byte
- charSet uint8
-}
-
-func (mf *mysqlField) scanType() reflect.Type {
- switch mf.fieldType {
- case fieldTypeTiny:
- if mf.flags&flagNotNULL != 0 {
- if mf.flags&flagUnsigned != 0 {
- return scanTypeUint8
- }
- return scanTypeInt8
- }
- return scanTypeNullInt
-
- case fieldTypeShort, fieldTypeYear:
- if mf.flags&flagNotNULL != 0 {
- if mf.flags&flagUnsigned != 0 {
- return scanTypeUint16
- }
- return scanTypeInt16
- }
- return scanTypeNullInt
-
- case fieldTypeInt24, fieldTypeLong:
- if mf.flags&flagNotNULL != 0 {
- if mf.flags&flagUnsigned != 0 {
- return scanTypeUint32
- }
- return scanTypeInt32
- }
- return scanTypeNullInt
-
- case fieldTypeLongLong:
- if mf.flags&flagNotNULL != 0 {
- if mf.flags&flagUnsigned != 0 {
- return scanTypeUint64
- }
- return scanTypeInt64
- }
- return scanTypeNullInt
-
- case fieldTypeFloat:
- if mf.flags&flagNotNULL != 0 {
- return scanTypeFloat32
- }
- return scanTypeNullFloat
-
- case fieldTypeDouble:
- if mf.flags&flagNotNULL != 0 {
- return scanTypeFloat64
- }
- return scanTypeNullFloat
-
- case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
- fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
- fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
- fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
- fieldTypeTime:
- return scanTypeRawBytes
-
- case fieldTypeDate, fieldTypeNewDate,
- fieldTypeTimestamp, fieldTypeDateTime:
- // NullTime is always returned for more consistent behavior as it can
- // handle both cases of parseTime regardless if the field is nullable.
- return scanTypeNullTime
-
- default:
- return scanTypeUnknown
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/infile.go
deleted file mode 100644
index 273cb0b..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/infile.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
- "sync"
-)
-
-var (
- fileRegister map[string]bool
- fileRegisterLock sync.RWMutex
- readerRegister map[string]func() io.Reader
- readerRegisterLock sync.RWMutex
-)
-
-// RegisterLocalFile adds the given file to the file whitelist,
-// so that it can be used by "LOAD DATA LOCAL INFILE ".
-// Alternatively you can allow the use of all local files with
-// the DSN parameter 'allowAllFiles=true'
-//
-// filePath := "/home/gopher/data.csv"
-// mysql.RegisterLocalFile(filePath)
-// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
-// if err != nil {
-// ...
-//
-func RegisterLocalFile(filePath string) {
- fileRegisterLock.Lock()
- // lazy map init
- if fileRegister == nil {
- fileRegister = make(map[string]bool)
- }
-
- fileRegister[strings.Trim(filePath, `"`)] = true
- fileRegisterLock.Unlock()
-}
-
-// DeregisterLocalFile removes the given filepath from the whitelist.
-func DeregisterLocalFile(filePath string) {
- fileRegisterLock.Lock()
- delete(fileRegister, strings.Trim(filePath, `"`))
- fileRegisterLock.Unlock()
-}
-
-// RegisterReaderHandler registers a handler function which is used
-// to receive a io.Reader.
-// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::".
-// If the handler returns a io.ReadCloser Close() is called when the
-// request is finished.
-//
-// mysql.RegisterReaderHandler("data", func() io.Reader {
-// var csvReader io.Reader // Some Reader that returns CSV data
-// ... // Open Reader here
-// return csvReader
-// })
-// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
-// if err != nil {
-// ...
-//
-func RegisterReaderHandler(name string, handler func() io.Reader) {
- readerRegisterLock.Lock()
- // lazy map init
- if readerRegister == nil {
- readerRegister = make(map[string]func() io.Reader)
- }
-
- readerRegister[name] = handler
- readerRegisterLock.Unlock()
-}
-
-// DeregisterReaderHandler removes the ReaderHandler function with
-// the given name from the registry.
-func DeregisterReaderHandler(name string) {
- readerRegisterLock.Lock()
- delete(readerRegister, name)
- readerRegisterLock.Unlock()
-}
-
-func deferredClose(err *error, closer io.Closer) {
- closeErr := closer.Close()
- if *err == nil {
- *err = closeErr
- }
-}
-
-func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
- var rdr io.Reader
- var data []byte
- packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
- if mc.maxWriteSize < packetSize {
- packetSize = mc.maxWriteSize
- }
-
- if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
- // The server might return an an absolute path. See issue #355.
- name = name[idx+8:]
-
- readerRegisterLock.RLock()
- handler, inMap := readerRegister[name]
- readerRegisterLock.RUnlock()
-
- if inMap {
- rdr = handler()
- if rdr != nil {
- if cl, ok := rdr.(io.Closer); ok {
- defer deferredClose(&err, cl)
- }
- } else {
- err = fmt.Errorf("Reader '%s' is ", name)
- }
- } else {
- err = fmt.Errorf("Reader '%s' is not registered", name)
- }
- } else { // File
- name = strings.Trim(name, `"`)
- fileRegisterLock.RLock()
- fr := fileRegister[name]
- fileRegisterLock.RUnlock()
- if mc.cfg.AllowAllFiles || fr {
- var file *os.File
- var fi os.FileInfo
-
- if file, err = os.Open(name); err == nil {
- defer deferredClose(&err, file)
-
- // get file size
- if fi, err = file.Stat(); err == nil {
- rdr = file
- if fileSize := int(fi.Size()); fileSize < packetSize {
- packetSize = fileSize
- }
- }
- }
- } else {
- err = fmt.Errorf("local file '%s' is not registered", name)
- }
- }
-
- // send content packets
- // if packetSize == 0, the Reader contains no data
- if err == nil && packetSize > 0 {
- data := make([]byte, 4+packetSize)
- var n int
- for err == nil {
- n, err = rdr.Read(data[4:])
- if n > 0 {
- if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
- return ioErr
- }
- }
- }
- if err == io.EOF {
- err = nil
- }
- }
-
- // send empty packet (termination)
- if data == nil {
- data = make([]byte, 4)
- }
- if ioErr := mc.writePacket(data[:4]); ioErr != nil {
- return ioErr
- }
-
- // read OK packet
- if err == nil {
- return mc.readResultOK()
- }
-
- mc.readPacket()
- return err
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/packets.go
deleted file mode 100644
index 30b3352..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/packets.go
+++ /dev/null
@@ -1,1342 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "crypto/tls"
- "database/sql/driver"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "math"
- "time"
-)
-
-// Packets documentation:
-// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
-
-// Read packet to buffer 'data'
-func (mc *mysqlConn) readPacket() ([]byte, error) {
- var prevData []byte
- for {
- // read packet header
- data, err := mc.buf.readNext(4)
- if err != nil {
- if cerr := mc.canceled.Value(); cerr != nil {
- return nil, cerr
- }
- errLog.Print(err)
- mc.Close()
- return nil, ErrInvalidConn
- }
-
- // packet length [24 bit]
- pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
-
- // check packet sync [8 bit]
- if data[3] != mc.sequence {
- if data[3] > mc.sequence {
- return nil, ErrPktSyncMul
- }
- return nil, ErrPktSync
- }
- mc.sequence++
-
- // packets with length 0 terminate a previous packet which is a
- // multiple of (2^24)-1 bytes long
- if pktLen == 0 {
- // there was no previous packet
- if prevData == nil {
- errLog.Print(ErrMalformPkt)
- mc.Close()
- return nil, ErrInvalidConn
- }
-
- return prevData, nil
- }
-
- // read packet body [pktLen bytes]
- data, err = mc.buf.readNext(pktLen)
- if err != nil {
- if cerr := mc.canceled.Value(); cerr != nil {
- return nil, cerr
- }
- errLog.Print(err)
- mc.Close()
- return nil, ErrInvalidConn
- }
-
- // return data if this was the last packet
- if pktLen < maxPacketSize {
- // zero allocations for non-split packets
- if prevData == nil {
- return data, nil
- }
-
- return append(prevData, data...), nil
- }
-
- prevData = append(prevData, data...)
- }
-}
-
-// Write packet buffer 'data'
-func (mc *mysqlConn) writePacket(data []byte) error {
- pktLen := len(data) - 4
-
- if pktLen > mc.maxAllowedPacket {
- return ErrPktTooLarge
- }
-
- // Perform a stale connection check. We only perform this check for
- // the first query on a connection that has been checked out of the
- // connection pool: a fresh connection from the pool is more likely
- // to be stale, and it has not performed any previous writes that
- // could cause data corruption, so it's safe to return ErrBadConn
- // if the check fails.
- if mc.reset {
- mc.reset = false
- conn := mc.netConn
- if mc.rawConn != nil {
- conn = mc.rawConn
- }
- var err error
- // If this connection has a ReadTimeout which we've been setting on
- // reads, reset it to its default value before we attempt a non-blocking
- // read, otherwise the scheduler will just time us out before we can read
- if mc.cfg.ReadTimeout != 0 {
- err = conn.SetReadDeadline(time.Time{})
- }
- if err == nil {
- err = connCheck(conn)
- }
- if err != nil {
- errLog.Print("closing bad idle connection: ", err)
- mc.Close()
- return driver.ErrBadConn
- }
- }
-
- for {
- var size int
- if pktLen >= maxPacketSize {
- data[0] = 0xff
- data[1] = 0xff
- data[2] = 0xff
- size = maxPacketSize
- } else {
- data[0] = byte(pktLen)
- data[1] = byte(pktLen >> 8)
- data[2] = byte(pktLen >> 16)
- size = pktLen
- }
- data[3] = mc.sequence
-
- // Write packet
- if mc.writeTimeout > 0 {
- if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
- return err
- }
- }
-
- n, err := mc.netConn.Write(data[:4+size])
- if err == nil && n == 4+size {
- mc.sequence++
- if size != maxPacketSize {
- return nil
- }
- pktLen -= size
- data = data[size:]
- continue
- }
-
- // Handle error
- if err == nil { // n != len(data)
- mc.cleanup()
- errLog.Print(ErrMalformPkt)
- } else {
- if cerr := mc.canceled.Value(); cerr != nil {
- return cerr
- }
- if n == 0 && pktLen == len(data)-4 {
- // only for the first loop iteration when nothing was written yet
- return errBadConnNoWrite
- }
- mc.cleanup()
- errLog.Print(err)
- }
- return ErrInvalidConn
- }
-}
-
-/******************************************************************************
-* Initialization Process *
-******************************************************************************/
-
-// Handshake Initialization Packet
-// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
-func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
- data, err = mc.readPacket()
- if err != nil {
- // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
- // in connection initialization we don't risk retrying non-idempotent actions.
- if err == ErrInvalidConn {
- return nil, "", driver.ErrBadConn
- }
- return
- }
-
- if data[0] == iERR {
- return nil, "", mc.handleErrorPacket(data)
- }
-
- // protocol version [1 byte]
- if data[0] < minProtocolVersion {
- return nil, "", fmt.Errorf(
- "unsupported protocol version %d. Version %d or higher is required",
- data[0],
- minProtocolVersion,
- )
- }
-
- // server version [null terminated string]
- // connection id [4 bytes]
- pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
-
- // first part of the password cipher [8 bytes]
- authData := data[pos : pos+8]
-
- // (filler) always 0x00 [1 byte]
- pos += 8 + 1
-
- // capability flags (lower 2 bytes) [2 bytes]
- mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
- if mc.flags&clientProtocol41 == 0 {
- return nil, "", ErrOldProtocol
- }
- if mc.flags&clientSSL == 0 && mc.cfg.tls != nil {
- if mc.cfg.TLSConfig == "preferred" {
- mc.cfg.tls = nil
- } else {
- return nil, "", ErrNoTLS
- }
- }
- pos += 2
-
- if len(data) > pos {
- // character set [1 byte]
- // status flags [2 bytes]
- // capability flags (upper 2 bytes) [2 bytes]
- // length of auth-plugin-data [1 byte]
- // reserved (all [00]) [10 bytes]
- pos += 1 + 2 + 2 + 1 + 10
-
- // second part of the password cipher [mininum 13 bytes],
- // where len=MAX(13, length of auth-plugin-data - 8)
- //
- // The web documentation is ambiguous about the length. However,
- // according to mysql-5.7/sql/auth/sql_authentication.cc line 538,
- // the 13th byte is "\0 byte, terminating the second part of
- // a scramble". So the second part of the password cipher is
- // a NULL terminated string that's at least 13 bytes with the
- // last byte being NULL.
- //
- // The official Python library uses the fixed length 12
- // which seems to work but technically could have a hidden bug.
- authData = append(authData, data[pos:pos+12]...)
- pos += 13
-
- // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
- // \NUL otherwise
- if end := bytes.IndexByte(data[pos:], 0x00); end != -1 {
- plugin = string(data[pos : pos+end])
- } else {
- plugin = string(data[pos:])
- }
-
- // make a memory safe copy of the cipher slice
- var b [20]byte
- copy(b[:], authData)
- return b[:], plugin, nil
- }
-
- // make a memory safe copy of the cipher slice
- var b [8]byte
- copy(b[:], authData)
- return b[:], plugin, nil
-}
-
-// Client Authentication Packet
-// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
-func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error {
- // Adjust client flags based on server support
- clientFlags := clientProtocol41 |
- clientSecureConn |
- clientLongPassword |
- clientTransactions |
- clientLocalFiles |
- clientPluginAuth |
- clientMultiResults |
- mc.flags&clientLongFlag
-
- if mc.cfg.ClientFoundRows {
- clientFlags |= clientFoundRows
- }
-
- // To enable TLS / SSL
- if mc.cfg.tls != nil {
- clientFlags |= clientSSL
- }
-
- if mc.cfg.MultiStatements {
- clientFlags |= clientMultiStatements
- }
-
- // encode length of the auth plugin data
- var authRespLEIBuf [9]byte
- authRespLen := len(authResp)
- authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen))
- if len(authRespLEI) > 1 {
- // if the length can not be written in 1 byte, it must be written as a
- // length encoded integer
- clientFlags |= clientPluginAuthLenEncClientData
- }
-
- pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1
-
- // To specify a db name
- if n := len(mc.cfg.DBName); n > 0 {
- clientFlags |= clientConnectWithDB
- pktLen += n + 1
- }
-
- // Calculate packet length and get buffer with that size
- data, err := mc.buf.takeSmallBuffer(pktLen + 4)
- if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
- }
-
- // ClientFlags [32 bit]
- data[4] = byte(clientFlags)
- data[5] = byte(clientFlags >> 8)
- data[6] = byte(clientFlags >> 16)
- data[7] = byte(clientFlags >> 24)
-
- // MaxPacketSize [32 bit] (none)
- data[8] = 0x00
- data[9] = 0x00
- data[10] = 0x00
- data[11] = 0x00
-
- // Charset [1 byte]
- var found bool
- data[12], found = collations[mc.cfg.Collation]
- if !found {
- // Note possibility for false negatives:
- // could be triggered although the collation is valid if the
- // collations map does not contain entries the server supports.
- return errors.New("unknown collation")
- }
-
- // SSL Connection Request Packet
- // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
- if mc.cfg.tls != nil {
- // Send TLS / SSL request packet
- if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil {
- return err
- }
-
- // Switch to TLS
- tlsConn := tls.Client(mc.netConn, mc.cfg.tls)
- if err := tlsConn.Handshake(); err != nil {
- return err
- }
- mc.rawConn = mc.netConn
- mc.netConn = tlsConn
- mc.buf.nc = tlsConn
- }
-
- // Filler [23 bytes] (all 0x00)
- pos := 13
- for ; pos < 13+23; pos++ {
- data[pos] = 0
- }
-
- // User [null terminated string]
- if len(mc.cfg.User) > 0 {
- pos += copy(data[pos:], mc.cfg.User)
- }
- data[pos] = 0x00
- pos++
-
- // Auth Data [length encoded integer]
- pos += copy(data[pos:], authRespLEI)
- pos += copy(data[pos:], authResp)
-
- // Databasename [null terminated string]
- if len(mc.cfg.DBName) > 0 {
- pos += copy(data[pos:], mc.cfg.DBName)
- data[pos] = 0x00
- pos++
- }
-
- pos += copy(data[pos:], plugin)
- data[pos] = 0x00
- pos++
-
- // Send Auth packet
- return mc.writePacket(data[:pos])
-}
-
-// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
-func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
- pktLen := 4 + len(authData)
- data, err := mc.buf.takeSmallBuffer(pktLen)
- if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
- }
-
- // Add the auth data [EOF]
- copy(data[4:], authData)
- return mc.writePacket(data)
-}
-
-/******************************************************************************
-* Command Packets *
-******************************************************************************/
-
-func (mc *mysqlConn) writeCommandPacket(command byte) error {
- // Reset Packet Sequence
- mc.sequence = 0
-
- data, err := mc.buf.takeSmallBuffer(4 + 1)
- if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
- }
-
- // Add command byte
- data[4] = command
-
- // Send CMD packet
- return mc.writePacket(data)
-}
-
-func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
- // Reset Packet Sequence
- mc.sequence = 0
-
- pktLen := 1 + len(arg)
- data, err := mc.buf.takeBuffer(pktLen + 4)
- if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
- }
-
- // Add command byte
- data[4] = command
-
- // Add arg
- copy(data[5:], arg)
-
- // Send CMD packet
- return mc.writePacket(data)
-}
-
-func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
- // Reset Packet Sequence
- mc.sequence = 0
-
- data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
- if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
- }
-
- // Add command byte
- data[4] = command
-
- // Add arg [32 bit]
- data[5] = byte(arg)
- data[6] = byte(arg >> 8)
- data[7] = byte(arg >> 16)
- data[8] = byte(arg >> 24)
-
- // Send CMD packet
- return mc.writePacket(data)
-}
-
-/******************************************************************************
-* Result Packets *
-******************************************************************************/
-
-func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
- data, err := mc.readPacket()
- if err != nil {
- return nil, "", err
- }
-
- // packet indicator
- switch data[0] {
-
- case iOK:
- return nil, "", mc.handleOkPacket(data)
-
- case iAuthMoreData:
- return data[1:], "", err
-
- case iEOF:
- if len(data) == 1 {
- // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
- return nil, "mysql_old_password", nil
- }
- pluginEndIndex := bytes.IndexByte(data, 0x00)
- if pluginEndIndex < 0 {
- return nil, "", ErrMalformPkt
- }
- plugin := string(data[1:pluginEndIndex])
- authData := data[pluginEndIndex+1:]
- return authData, plugin, nil
-
- default: // Error otherwise
- return nil, "", mc.handleErrorPacket(data)
- }
-}
-
-// Returns error if Packet is not an 'Result OK'-Packet
-func (mc *mysqlConn) readResultOK() error {
- data, err := mc.readPacket()
- if err != nil {
- return err
- }
-
- if data[0] == iOK {
- return mc.handleOkPacket(data)
- }
- return mc.handleErrorPacket(data)
-}
-
-// Result Set Header Packet
-// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
-func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
- data, err := mc.readPacket()
- if err == nil {
- switch data[0] {
-
- case iOK:
- return 0, mc.handleOkPacket(data)
-
- case iERR:
- return 0, mc.handleErrorPacket(data)
-
- case iLocalInFile:
- return 0, mc.handleInFileRequest(string(data[1:]))
- }
-
- // column count
- num, _, n := readLengthEncodedInteger(data)
- if n-len(data) == 0 {
- return int(num), nil
- }
-
- return 0, ErrMalformPkt
- }
- return 0, err
-}
-
-// Error Packet
-// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet
-func (mc *mysqlConn) handleErrorPacket(data []byte) error {
- if data[0] != iERR {
- return ErrMalformPkt
- }
-
- // 0xff [1 byte]
-
- // Error Number [16 bit uint]
- errno := binary.LittleEndian.Uint16(data[1:3])
-
- // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
- // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
- if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
- // Oops; we are connected to a read-only connection, and won't be able
- // to issue any write statements. Since RejectReadOnly is configured,
- // we throw away this connection hoping this one would have write
- // permission. This is specifically for a possible race condition
- // during failover (e.g. on AWS Aurora). See README.md for more.
- //
- // We explicitly close the connection before returning
- // driver.ErrBadConn to ensure that `database/sql` purges this
- // connection and initiates a new one for next statement next time.
- mc.Close()
- return driver.ErrBadConn
- }
-
- pos := 3
-
- // SQL State [optional: # + 5bytes string]
- if data[3] == 0x23 {
- //sqlstate := string(data[4 : 4+5])
- pos = 9
- }
-
- // Error Message [string]
- return &MySQLError{
- Number: errno,
- Message: string(data[pos:]),
- }
-}
-
-func readStatus(b []byte) statusFlag {
- return statusFlag(b[0]) | statusFlag(b[1])<<8
-}
-
-// Ok Packet
-// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
-func (mc *mysqlConn) handleOkPacket(data []byte) error {
- var n, m int
-
- // 0x00 [1 byte]
-
- // Affected rows [Length Coded Binary]
- mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
-
- // Insert id [Length Coded Binary]
- mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
-
- // server_status [2 bytes]
- mc.status = readStatus(data[1+n+m : 1+n+m+2])
- if mc.status&statusMoreResultsExists != 0 {
- return nil
- }
-
- // warning count [2 bytes]
-
- return nil
-}
-
-// Read Packets as Field Packets until EOF-Packet or an Error appears
-// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41
-func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
- columns := make([]mysqlField, count)
-
- for i := 0; ; i++ {
- data, err := mc.readPacket()
- if err != nil {
- return nil, err
- }
-
- // EOF Packet
- if data[0] == iEOF && (len(data) == 5 || len(data) == 1) {
- if i == count {
- return columns, nil
- }
- return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns))
- }
-
- // Catalog
- pos, err := skipLengthEncodedString(data)
- if err != nil {
- return nil, err
- }
-
- // Database [len coded string]
- n, err := skipLengthEncodedString(data[pos:])
- if err != nil {
- return nil, err
- }
- pos += n
-
- // Table [len coded string]
- if mc.cfg.ColumnsWithAlias {
- tableName, _, n, err := readLengthEncodedString(data[pos:])
- if err != nil {
- return nil, err
- }
- pos += n
- columns[i].tableName = string(tableName)
- } else {
- n, err = skipLengthEncodedString(data[pos:])
- if err != nil {
- return nil, err
- }
- pos += n
- }
-
- // Original table [len coded string]
- n, err = skipLengthEncodedString(data[pos:])
- if err != nil {
- return nil, err
- }
- pos += n
-
- // Name [len coded string]
- name, _, n, err := readLengthEncodedString(data[pos:])
- if err != nil {
- return nil, err
- }
- columns[i].name = string(name)
- pos += n
-
- // Original name [len coded string]
- n, err = skipLengthEncodedString(data[pos:])
- if err != nil {
- return nil, err
- }
- pos += n
-
- // Filler [uint8]
- pos++
-
- // Charset [charset, collation uint8]
- columns[i].charSet = data[pos]
- pos += 2
-
- // Length [uint32]
- columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
- pos += 4
-
- // Field type [uint8]
- columns[i].fieldType = fieldType(data[pos])
- pos++
-
- // Flags [uint16]
- columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
- pos += 2
-
- // Decimals [uint8]
- columns[i].decimals = data[pos]
- //pos++
-
- // Default value [len coded binary]
- //if pos < len(data) {
- // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:])
- //}
- }
-}
-
-// Read Packets as Field Packets until EOF-Packet or an Error appears
-// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow
-func (rows *textRows) readRow(dest []driver.Value) error {
- mc := rows.mc
-
- if rows.rs.done {
- return io.EOF
- }
-
- data, err := mc.readPacket()
- if err != nil {
- return err
- }
-
- // EOF Packet
- if data[0] == iEOF && len(data) == 5 {
- // server_status [2 bytes]
- rows.mc.status = readStatus(data[3:])
- rows.rs.done = true
- if !rows.HasNextResultSet() {
- rows.mc = nil
- }
- return io.EOF
- }
- if data[0] == iERR {
- rows.mc = nil
- return mc.handleErrorPacket(data)
- }
-
- // RowSet Packet
- var n int
- var isNull bool
- pos := 0
-
- for i := range dest {
- // Read bytes and convert to string
- dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
- pos += n
- if err == nil {
- if !isNull {
- if !mc.parseTime {
- continue
- } else {
- switch rows.rs.columns[i].fieldType {
- case fieldTypeTimestamp, fieldTypeDateTime,
- fieldTypeDate, fieldTypeNewDate:
- dest[i], err = parseDateTime(
- string(dest[i].([]byte)),
- mc.cfg.Loc,
- )
- if err == nil {
- continue
- }
- default:
- continue
- }
- }
-
- } else {
- dest[i] = nil
- continue
- }
- }
- return err // err != nil
- }
-
- return nil
-}
-
-// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read
-func (mc *mysqlConn) readUntilEOF() error {
- for {
- data, err := mc.readPacket()
- if err != nil {
- return err
- }
-
- switch data[0] {
- case iERR:
- return mc.handleErrorPacket(data)
- case iEOF:
- if len(data) == 5 {
- mc.status = readStatus(data[3:])
- }
- return nil
- }
- }
-}
-
-/******************************************************************************
-* Prepared Statements *
-******************************************************************************/
-
-// Prepare Result Packets
-// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html
-func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
- data, err := stmt.mc.readPacket()
- if err == nil {
- // packet indicator [1 byte]
- if data[0] != iOK {
- return 0, stmt.mc.handleErrorPacket(data)
- }
-
- // statement id [4 bytes]
- stmt.id = binary.LittleEndian.Uint32(data[1:5])
-
- // Column count [16 bit uint]
- columnCount := binary.LittleEndian.Uint16(data[5:7])
-
- // Param count [16 bit uint]
- stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9]))
-
- // Reserved [8 bit]
-
- // Warning count [16 bit uint]
-
- return columnCount, nil
- }
- return 0, err
-}
-
-// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
-func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
- maxLen := stmt.mc.maxAllowedPacket - 1
- pktLen := maxLen
-
- // After the header (bytes 0-3) follows before the data:
- // 1 byte command
- // 4 bytes stmtID
- // 2 bytes paramID
- const dataOffset = 1 + 4 + 2
-
- // Cannot use the write buffer since
- // a) the buffer is too small
- // b) it is in use
- data := make([]byte, 4+1+4+2+len(arg))
-
- copy(data[4+dataOffset:], arg)
-
- for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset {
- if dataOffset+argLen < maxLen {
- pktLen = dataOffset + argLen
- }
-
- stmt.mc.sequence = 0
- // Add command byte [1 byte]
- data[4] = comStmtSendLongData
-
- // Add stmtID [32 bit]
- data[5] = byte(stmt.id)
- data[6] = byte(stmt.id >> 8)
- data[7] = byte(stmt.id >> 16)
- data[8] = byte(stmt.id >> 24)
-
- // Add paramID [16 bit]
- data[9] = byte(paramID)
- data[10] = byte(paramID >> 8)
-
- // Send CMD packet
- err := stmt.mc.writePacket(data[:4+pktLen])
- if err == nil {
- data = data[pktLen-dataOffset:]
- continue
- }
- return err
-
- }
-
- // Reset Packet Sequence
- stmt.mc.sequence = 0
- return nil
-}
-
-// Execute Prepared Statement
-// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html
-func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
- if len(args) != stmt.paramCount {
- return fmt.Errorf(
- "argument count mismatch (got: %d; has: %d)",
- len(args),
- stmt.paramCount,
- )
- }
-
- const minPktLen = 4 + 1 + 4 + 1 + 4
- mc := stmt.mc
-
- // Determine threshold dynamically to avoid packet size shortage.
- longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1)
- if longDataSize < 64 {
- longDataSize = 64
- }
-
- // Reset packet-sequence
- mc.sequence = 0
-
- var data []byte
- var err error
-
- if len(args) == 0 {
- data, err = mc.buf.takeBuffer(minPktLen)
- } else {
- data, err = mc.buf.takeCompleteBuffer()
- // In this case the len(data) == cap(data) which is used to optimise the flow below.
- }
- if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
- return errBadConnNoWrite
- }
-
- // command [1 byte]
- data[4] = comStmtExecute
-
- // statement_id [4 bytes]
- data[5] = byte(stmt.id)
- data[6] = byte(stmt.id >> 8)
- data[7] = byte(stmt.id >> 16)
- data[8] = byte(stmt.id >> 24)
-
- // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
- data[9] = 0x00
-
- // iteration_count (uint32(1)) [4 bytes]
- data[10] = 0x01
- data[11] = 0x00
- data[12] = 0x00
- data[13] = 0x00
-
- if len(args) > 0 {
- pos := minPktLen
-
- var nullMask []byte
- if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= cap(data) {
- // buffer has to be extended but we don't know by how much so
- // we depend on append after all data with known sizes fit.
- // We stop at that because we deal with a lot of columns here
- // which makes the required allocation size hard to guess.
- tmp := make([]byte, pos+maskLen+typesLen)
- copy(tmp[:pos], data[:pos])
- data = tmp
- nullMask = data[pos : pos+maskLen]
- // No need to clean nullMask as make ensures that.
- pos += maskLen
- } else {
- nullMask = data[pos : pos+maskLen]
- for i := range nullMask {
- nullMask[i] = 0
- }
- pos += maskLen
- }
-
- // newParameterBoundFlag 1 [1 byte]
- data[pos] = 0x01
- pos++
-
- // type of each parameter [len(args)*2 bytes]
- paramTypes := data[pos:]
- pos += len(args) * 2
-
- // value of each parameter [n bytes]
- paramValues := data[pos:pos]
- valuesCap := cap(paramValues)
-
- for i, arg := range args {
- // build NULL-bitmap
- if arg == nil {
- nullMask[i/8] |= 1 << (uint(i) & 7)
- paramTypes[i+i] = byte(fieldTypeNULL)
- paramTypes[i+i+1] = 0x00
- continue
- }
-
- // cache types and values
- switch v := arg.(type) {
- case int64:
- paramTypes[i+i] = byte(fieldTypeLongLong)
- paramTypes[i+i+1] = 0x00
-
- if cap(paramValues)-len(paramValues)-8 >= 0 {
- paramValues = paramValues[:len(paramValues)+8]
- binary.LittleEndian.PutUint64(
- paramValues[len(paramValues)-8:],
- uint64(v),
- )
- } else {
- paramValues = append(paramValues,
- uint64ToBytes(uint64(v))...,
- )
- }
-
- case uint64:
- paramTypes[i+i] = byte(fieldTypeLongLong)
- paramTypes[i+i+1] = 0x80 // type is unsigned
-
- if cap(paramValues)-len(paramValues)-8 >= 0 {
- paramValues = paramValues[:len(paramValues)+8]
- binary.LittleEndian.PutUint64(
- paramValues[len(paramValues)-8:],
- uint64(v),
- )
- } else {
- paramValues = append(paramValues,
- uint64ToBytes(uint64(v))...,
- )
- }
-
- case float64:
- paramTypes[i+i] = byte(fieldTypeDouble)
- paramTypes[i+i+1] = 0x00
-
- if cap(paramValues)-len(paramValues)-8 >= 0 {
- paramValues = paramValues[:len(paramValues)+8]
- binary.LittleEndian.PutUint64(
- paramValues[len(paramValues)-8:],
- math.Float64bits(v),
- )
- } else {
- paramValues = append(paramValues,
- uint64ToBytes(math.Float64bits(v))...,
- )
- }
-
- case bool:
- paramTypes[i+i] = byte(fieldTypeTiny)
- paramTypes[i+i+1] = 0x00
-
- if v {
- paramValues = append(paramValues, 0x01)
- } else {
- paramValues = append(paramValues, 0x00)
- }
-
- case []byte:
- // Common case (non-nil value) first
- if v != nil {
- paramTypes[i+i] = byte(fieldTypeString)
- paramTypes[i+i+1] = 0x00
-
- if len(v) < longDataSize {
- paramValues = appendLengthEncodedInteger(paramValues,
- uint64(len(v)),
- )
- paramValues = append(paramValues, v...)
- } else {
- if err := stmt.writeCommandLongData(i, v); err != nil {
- return err
- }
- }
- continue
- }
-
- // Handle []byte(nil) as a NULL value
- nullMask[i/8] |= 1 << (uint(i) & 7)
- paramTypes[i+i] = byte(fieldTypeNULL)
- paramTypes[i+i+1] = 0x00
-
- case string:
- paramTypes[i+i] = byte(fieldTypeString)
- paramTypes[i+i+1] = 0x00
-
- if len(v) < longDataSize {
- paramValues = appendLengthEncodedInteger(paramValues,
- uint64(len(v)),
- )
- paramValues = append(paramValues, v...)
- } else {
- if err := stmt.writeCommandLongData(i, []byte(v)); err != nil {
- return err
- }
- }
-
- case time.Time:
- paramTypes[i+i] = byte(fieldTypeString)
- paramTypes[i+i+1] = 0x00
-
- var a [64]byte
- var b = a[:0]
-
- if v.IsZero() {
- b = append(b, "0000-00-00"...)
- } else {
- b = v.In(mc.cfg.Loc).AppendFormat(b, timeFormat)
- }
-
- paramValues = appendLengthEncodedInteger(paramValues,
- uint64(len(b)),
- )
- paramValues = append(paramValues, b...)
-
- default:
- return fmt.Errorf("cannot convert type: %T", arg)
- }
- }
-
- // Check if param values exceeded the available buffer
- // In that case we must build the data packet with the new values buffer
- if valuesCap != cap(paramValues) {
- data = append(data[:pos], paramValues...)
- if err = mc.buf.store(data); err != nil {
- errLog.Print(err)
- return errBadConnNoWrite
- }
- }
-
- pos += len(paramValues)
- data = data[:pos]
- }
-
- return mc.writePacket(data)
-}
-
-func (mc *mysqlConn) discardResults() error {
- for mc.status&statusMoreResultsExists != 0 {
- resLen, err := mc.readResultSetHeaderPacket()
- if err != nil {
- return err
- }
- if resLen > 0 {
- // columns
- if err := mc.readUntilEOF(); err != nil {
- return err
- }
- // rows
- if err := mc.readUntilEOF(); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html
-func (rows *binaryRows) readRow(dest []driver.Value) error {
- data, err := rows.mc.readPacket()
- if err != nil {
- return err
- }
-
- // packet indicator [1 byte]
- if data[0] != iOK {
- // EOF Packet
- if data[0] == iEOF && len(data) == 5 {
- rows.mc.status = readStatus(data[3:])
- rows.rs.done = true
- if !rows.HasNextResultSet() {
- rows.mc = nil
- }
- return io.EOF
- }
- mc := rows.mc
- rows.mc = nil
-
- // Error otherwise
- return mc.handleErrorPacket(data)
- }
-
- // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
- pos := 1 + (len(dest)+7+2)>>3
- nullMask := data[1:pos]
-
- for i := range dest {
- // Field is NULL
- // (byte >> bit-pos) % 2 == 1
- if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 {
- dest[i] = nil
- continue
- }
-
- // Convert to byte-coded string
- switch rows.rs.columns[i].fieldType {
- case fieldTypeNULL:
- dest[i] = nil
- continue
-
- // Numeric Types
- case fieldTypeTiny:
- if rows.rs.columns[i].flags&flagUnsigned != 0 {
- dest[i] = int64(data[pos])
- } else {
- dest[i] = int64(int8(data[pos]))
- }
- pos++
- continue
-
- case fieldTypeShort, fieldTypeYear:
- if rows.rs.columns[i].flags&flagUnsigned != 0 {
- dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
- } else {
- dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
- }
- pos += 2
- continue
-
- case fieldTypeInt24, fieldTypeLong:
- if rows.rs.columns[i].flags&flagUnsigned != 0 {
- dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
- } else {
- dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
- }
- pos += 4
- continue
-
- case fieldTypeLongLong:
- if rows.rs.columns[i].flags&flagUnsigned != 0 {
- val := binary.LittleEndian.Uint64(data[pos : pos+8])
- if val > math.MaxInt64 {
- dest[i] = uint64ToString(val)
- } else {
- dest[i] = int64(val)
- }
- } else {
- dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
- }
- pos += 8
- continue
-
- case fieldTypeFloat:
- dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
- pos += 4
- continue
-
- case fieldTypeDouble:
- dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8]))
- pos += 8
- continue
-
- // Length coded Binary Strings
- case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
- fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
- fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
- fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
- var isNull bool
- var n int
- dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
- pos += n
- if err == nil {
- if !isNull {
- continue
- } else {
- dest[i] = nil
- continue
- }
- }
- return err
-
- case
- fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD
- fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal]
- fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal]
-
- num, isNull, n := readLengthEncodedInteger(data[pos:])
- pos += n
-
- switch {
- case isNull:
- dest[i] = nil
- continue
- case rows.rs.columns[i].fieldType == fieldTypeTime:
- // database/sql does not support an equivalent to TIME, return a string
- var dstlen uint8
- switch decimals := rows.rs.columns[i].decimals; decimals {
- case 0x00, 0x1f:
- dstlen = 8
- case 1, 2, 3, 4, 5, 6:
- dstlen = 8 + 1 + decimals
- default:
- return fmt.Errorf(
- "protocol error, illegal decimals value %d",
- rows.rs.columns[i].decimals,
- )
- }
- dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen)
- case rows.mc.parseTime:
- dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
- default:
- var dstlen uint8
- if rows.rs.columns[i].fieldType == fieldTypeDate {
- dstlen = 10
- } else {
- switch decimals := rows.rs.columns[i].decimals; decimals {
- case 0x00, 0x1f:
- dstlen = 19
- case 1, 2, 3, 4, 5, 6:
- dstlen = 19 + 1 + decimals
- default:
- return fmt.Errorf(
- "protocol error, illegal decimals value %d",
- rows.rs.columns[i].decimals,
- )
- }
- }
- dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen)
- }
-
- if err == nil {
- pos += int(num)
- continue
- } else {
- return err
- }
-
- // Please report if this happens!
- default:
- return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/packets_test.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/packets_test.go
deleted file mode 100644
index b61e4db..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/packets_test.go
+++ /dev/null
@@ -1,336 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "errors"
- "net"
- "testing"
- "time"
-)
-
-var (
- errConnClosed = errors.New("connection is closed")
- errConnTooManyReads = errors.New("too many reads")
- errConnTooManyWrites = errors.New("too many writes")
-)
-
-// struct to mock a net.Conn for testing purposes
-type mockConn struct {
- laddr net.Addr
- raddr net.Addr
- data []byte
- written []byte
- queuedReplies [][]byte
- closed bool
- read int
- reads int
- writes int
- maxReads int
- maxWrites int
-}
-
-func (m *mockConn) Read(b []byte) (n int, err error) {
- if m.closed {
- return 0, errConnClosed
- }
-
- m.reads++
- if m.maxReads > 0 && m.reads > m.maxReads {
- return 0, errConnTooManyReads
- }
-
- n = copy(b, m.data)
- m.read += n
- m.data = m.data[n:]
- return
-}
-func (m *mockConn) Write(b []byte) (n int, err error) {
- if m.closed {
- return 0, errConnClosed
- }
-
- m.writes++
- if m.maxWrites > 0 && m.writes > m.maxWrites {
- return 0, errConnTooManyWrites
- }
-
- n = len(b)
- m.written = append(m.written, b...)
-
- if n > 0 && len(m.queuedReplies) > 0 {
- m.data = m.queuedReplies[0]
- m.queuedReplies = m.queuedReplies[1:]
- }
- return
-}
-func (m *mockConn) Close() error {
- m.closed = true
- return nil
-}
-func (m *mockConn) LocalAddr() net.Addr {
- return m.laddr
-}
-func (m *mockConn) RemoteAddr() net.Addr {
- return m.raddr
-}
-func (m *mockConn) SetDeadline(t time.Time) error {
- return nil
-}
-func (m *mockConn) SetReadDeadline(t time.Time) error {
- return nil
-}
-func (m *mockConn) SetWriteDeadline(t time.Time) error {
- return nil
-}
-
-// make sure mockConn implements the net.Conn interface
-var _ net.Conn = new(mockConn)
-
-func newRWMockConn(sequence uint8) (*mockConn, *mysqlConn) {
- conn := new(mockConn)
- mc := &mysqlConn{
- buf: newBuffer(conn),
- cfg: NewConfig(),
- netConn: conn,
- closech: make(chan struct{}),
- maxAllowedPacket: defaultMaxAllowedPacket,
- sequence: sequence,
- }
- return conn, mc
-}
-
-func TestReadPacketSingleByte(t *testing.T) {
- conn := new(mockConn)
- mc := &mysqlConn{
- buf: newBuffer(conn),
- }
-
- conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
- conn.maxReads = 1
- packet, err := mc.readPacket()
- if err != nil {
- t.Fatal(err)
- }
- if len(packet) != 1 {
- t.Fatalf("unexpected packet length: expected %d, got %d", 1, len(packet))
- }
- if packet[0] != 0xff {
- t.Fatalf("unexpected packet content: expected %x, got %x", 0xff, packet[0])
- }
-}
-
-func TestReadPacketWrongSequenceID(t *testing.T) {
- conn := new(mockConn)
- mc := &mysqlConn{
- buf: newBuffer(conn),
- }
-
- // too low sequence id
- conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
- conn.maxReads = 1
- mc.sequence = 1
- _, err := mc.readPacket()
- if err != ErrPktSync {
- t.Errorf("expected ErrPktSync, got %v", err)
- }
-
- // reset
- conn.reads = 0
- mc.sequence = 0
- mc.buf = newBuffer(conn)
-
- // too high sequence id
- conn.data = []byte{0x01, 0x00, 0x00, 0x42, 0xff}
- _, err = mc.readPacket()
- if err != ErrPktSyncMul {
- t.Errorf("expected ErrPktSyncMul, got %v", err)
- }
-}
-
-func TestReadPacketSplit(t *testing.T) {
- conn := new(mockConn)
- mc := &mysqlConn{
- buf: newBuffer(conn),
- }
-
- data := make([]byte, maxPacketSize*2+4*3)
- const pkt2ofs = maxPacketSize + 4
- const pkt3ofs = 2 * (maxPacketSize + 4)
-
- // case 1: payload has length maxPacketSize
- data = data[:pkt2ofs+4]
-
- // 1st packet has maxPacketSize length and sequence id 0
- // ff ff ff 00 ...
- data[0] = 0xff
- data[1] = 0xff
- data[2] = 0xff
-
- // mark the payload start and end of 1st packet so that we can check if the
- // content was correctly appended
- data[4] = 0x11
- data[maxPacketSize+3] = 0x22
-
- // 2nd packet has payload length 0 and squence id 1
- // 00 00 00 01
- data[pkt2ofs+3] = 0x01
-
- conn.data = data
- conn.maxReads = 3
- packet, err := mc.readPacket()
- if err != nil {
- t.Fatal(err)
- }
- if len(packet) != maxPacketSize {
- t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize, len(packet))
- }
- if packet[0] != 0x11 {
- t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
- }
- if packet[maxPacketSize-1] != 0x22 {
- t.Fatalf("unexpected payload end: expected %x, got %x", 0x22, packet[maxPacketSize-1])
- }
-
- // case 2: payload has length which is a multiple of maxPacketSize
- data = data[:cap(data)]
-
- // 2nd packet now has maxPacketSize length
- data[pkt2ofs] = 0xff
- data[pkt2ofs+1] = 0xff
- data[pkt2ofs+2] = 0xff
-
- // mark the payload start and end of the 2nd packet
- data[pkt2ofs+4] = 0x33
- data[pkt2ofs+maxPacketSize+3] = 0x44
-
- // 3rd packet has payload length 0 and squence id 2
- // 00 00 00 02
- data[pkt3ofs+3] = 0x02
-
- conn.data = data
- conn.reads = 0
- conn.maxReads = 5
- mc.sequence = 0
- packet, err = mc.readPacket()
- if err != nil {
- t.Fatal(err)
- }
- if len(packet) != 2*maxPacketSize {
- t.Fatalf("unexpected packet length: expected %d, got %d", 2*maxPacketSize, len(packet))
- }
- if packet[0] != 0x11 {
- t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
- }
- if packet[2*maxPacketSize-1] != 0x44 {
- t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[2*maxPacketSize-1])
- }
-
- // case 3: payload has a length larger maxPacketSize, which is not an exact
- // multiple of it
- data = data[:pkt2ofs+4+42]
- data[pkt2ofs] = 0x2a
- data[pkt2ofs+1] = 0x00
- data[pkt2ofs+2] = 0x00
- data[pkt2ofs+4+41] = 0x44
-
- conn.data = data
- conn.reads = 0
- conn.maxReads = 4
- mc.sequence = 0
- packet, err = mc.readPacket()
- if err != nil {
- t.Fatal(err)
- }
- if len(packet) != maxPacketSize+42 {
- t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize+42, len(packet))
- }
- if packet[0] != 0x11 {
- t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
- }
- if packet[maxPacketSize+41] != 0x44 {
- t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[maxPacketSize+41])
- }
-}
-
-func TestReadPacketFail(t *testing.T) {
- conn := new(mockConn)
- mc := &mysqlConn{
- buf: newBuffer(conn),
- closech: make(chan struct{}),
- }
-
- // illegal empty (stand-alone) packet
- conn.data = []byte{0x00, 0x00, 0x00, 0x00}
- conn.maxReads = 1
- _, err := mc.readPacket()
- if err != ErrInvalidConn {
- t.Errorf("expected ErrInvalidConn, got %v", err)
- }
-
- // reset
- conn.reads = 0
- mc.sequence = 0
- mc.buf = newBuffer(conn)
-
- // fail to read header
- conn.closed = true
- _, err = mc.readPacket()
- if err != ErrInvalidConn {
- t.Errorf("expected ErrInvalidConn, got %v", err)
- }
-
- // reset
- conn.closed = false
- conn.reads = 0
- mc.sequence = 0
- mc.buf = newBuffer(conn)
-
- // fail to read body
- conn.maxReads = 1
- _, err = mc.readPacket()
- if err != ErrInvalidConn {
- t.Errorf("expected ErrInvalidConn, got %v", err)
- }
-}
-
-// https://github.com/go-sql-driver/mysql/pull/801
-// not-NUL terminated plugin_name in init packet
-func TestRegression801(t *testing.T) {
- conn := new(mockConn)
- mc := &mysqlConn{
- buf: newBuffer(conn),
- cfg: new(Config),
- sequence: 42,
- closech: make(chan struct{}),
- }
-
- conn.data = []byte{72, 0, 0, 42, 10, 53, 46, 53, 46, 56, 0, 165, 0, 0, 0,
- 60, 70, 63, 58, 68, 104, 34, 97, 0, 223, 247, 33, 2, 0, 15, 128, 21, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 98, 120, 114, 47, 85, 75, 109, 99, 51, 77,
- 50, 64, 0, 109, 121, 115, 113, 108, 95, 110, 97, 116, 105, 118, 101, 95,
- 112, 97, 115, 115, 119, 111, 114, 100}
- conn.maxReads = 1
-
- authData, pluginName, err := mc.readHandshakePacket()
- if err != nil {
- t.Fatalf("got error: %v", err)
- }
-
- if pluginName != "mysql_native_password" {
- t.Errorf("expected plugin name 'mysql_native_password', got '%s'", pluginName)
- }
-
- expectedAuthData := []byte{60, 70, 63, 58, 68, 104, 34, 97, 98, 120, 114,
- 47, 85, 75, 109, 99, 51, 77, 50, 64}
- if !bytes.Equal(authData, expectedAuthData) {
- t.Errorf("expected authData '%v', got '%v'", expectedAuthData, authData)
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/result.go
deleted file mode 100644
index c6438d0..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/result.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-type mysqlResult struct {
- affectedRows int64
- insertId int64
-}
-
-func (res *mysqlResult) LastInsertId() (int64, error) {
- return res.insertId, nil
-}
-
-func (res *mysqlResult) RowsAffected() (int64, error) {
- return res.affectedRows, nil
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/rows.go
deleted file mode 100644
index 888bdb5..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/rows.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "database/sql/driver"
- "io"
- "math"
- "reflect"
-)
-
-type resultSet struct {
- columns []mysqlField
- columnNames []string
- done bool
-}
-
-type mysqlRows struct {
- mc *mysqlConn
- rs resultSet
- finish func()
-}
-
-type binaryRows struct {
- mysqlRows
-}
-
-type textRows struct {
- mysqlRows
-}
-
-func (rows *mysqlRows) Columns() []string {
- if rows.rs.columnNames != nil {
- return rows.rs.columnNames
- }
-
- columns := make([]string, len(rows.rs.columns))
- if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
- for i := range columns {
- if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
- columns[i] = tableName + "." + rows.rs.columns[i].name
- } else {
- columns[i] = rows.rs.columns[i].name
- }
- }
- } else {
- for i := range columns {
- columns[i] = rows.rs.columns[i].name
- }
- }
-
- rows.rs.columnNames = columns
- return columns
-}
-
-func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
- return rows.rs.columns[i].typeDatabaseName()
-}
-
-// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
-// return int64(rows.rs.columns[i].length), true
-// }
-
-func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
- return rows.rs.columns[i].flags&flagNotNULL == 0, true
-}
-
-func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
- column := rows.rs.columns[i]
- decimals := int64(column.decimals)
-
- switch column.fieldType {
- case fieldTypeDecimal, fieldTypeNewDecimal:
- if decimals > 0 {
- return int64(column.length) - 2, decimals, true
- }
- return int64(column.length) - 1, decimals, true
- case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
- return decimals, decimals, true
- case fieldTypeFloat, fieldTypeDouble:
- if decimals == 0x1f {
- return math.MaxInt64, math.MaxInt64, true
- }
- return math.MaxInt64, decimals, true
- }
-
- return 0, 0, false
-}
-
-func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
- return rows.rs.columns[i].scanType()
-}
-
-func (rows *mysqlRows) Close() (err error) {
- if f := rows.finish; f != nil {
- f()
- rows.finish = nil
- }
-
- mc := rows.mc
- if mc == nil {
- return nil
- }
- if err := mc.error(); err != nil {
- return err
- }
-
- // flip the buffer for this connection if we need to drain it.
- // note that for a successful query (i.e. one where rows.next()
- // has been called until it returns false), `rows.mc` will be nil
- // by the time the user calls `(*Rows).Close`, so we won't reach this
- // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47
- mc.buf.flip()
-
- // Remove unread packets from stream
- if !rows.rs.done {
- err = mc.readUntilEOF()
- }
- if err == nil {
- if err = mc.discardResults(); err != nil {
- return err
- }
- }
-
- rows.mc = nil
- return err
-}
-
-func (rows *mysqlRows) HasNextResultSet() (b bool) {
- if rows.mc == nil {
- return false
- }
- return rows.mc.status&statusMoreResultsExists != 0
-}
-
-func (rows *mysqlRows) nextResultSet() (int, error) {
- if rows.mc == nil {
- return 0, io.EOF
- }
- if err := rows.mc.error(); err != nil {
- return 0, err
- }
-
- // Remove unread packets from stream
- if !rows.rs.done {
- if err := rows.mc.readUntilEOF(); err != nil {
- return 0, err
- }
- rows.rs.done = true
- }
-
- if !rows.HasNextResultSet() {
- rows.mc = nil
- return 0, io.EOF
- }
- rows.rs = resultSet{}
- return rows.mc.readResultSetHeaderPacket()
-}
-
-func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
- for {
- resLen, err := rows.nextResultSet()
- if err != nil {
- return 0, err
- }
-
- if resLen > 0 {
- return resLen, nil
- }
-
- rows.rs.done = true
- }
-}
-
-func (rows *binaryRows) NextResultSet() error {
- resLen, err := rows.nextNotEmptyResultSet()
- if err != nil {
- return err
- }
-
- rows.rs.columns, err = rows.mc.readColumns(resLen)
- return err
-}
-
-func (rows *binaryRows) Next(dest []driver.Value) error {
- if mc := rows.mc; mc != nil {
- if err := mc.error(); err != nil {
- return err
- }
-
- // Fetch next row from stream
- return rows.readRow(dest)
- }
- return io.EOF
-}
-
-func (rows *textRows) NextResultSet() (err error) {
- resLen, err := rows.nextNotEmptyResultSet()
- if err != nil {
- return err
- }
-
- rows.rs.columns, err = rows.mc.readColumns(resLen)
- return err
-}
-
-func (rows *textRows) Next(dest []driver.Value) error {
- if mc := rows.mc; mc != nil {
- if err := mc.error(); err != nil {
- return err
- }
-
- // Fetch next row from stream
- return rows.readRow(dest)
- }
- return io.EOF
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/statement.go
deleted file mode 100644
index f7e3709..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/statement.go
+++ /dev/null
@@ -1,204 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "database/sql/driver"
- "fmt"
- "io"
- "reflect"
-)
-
-type mysqlStmt struct {
- mc *mysqlConn
- id uint32
- paramCount int
-}
-
-func (stmt *mysqlStmt) Close() error {
- if stmt.mc == nil || stmt.mc.closed.IsSet() {
- // driver.Stmt.Close can be called more than once, thus this function
- // has to be idempotent.
- // See also Issue #450 and golang/go#16019.
- //errLog.Print(ErrInvalidConn)
- return driver.ErrBadConn
- }
-
- err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
- stmt.mc = nil
- return err
-}
-
-func (stmt *mysqlStmt) NumInput() int {
- return stmt.paramCount
-}
-
-func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
- return converter{}
-}
-
-func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
- if stmt.mc.closed.IsSet() {
- errLog.Print(ErrInvalidConn)
- return nil, driver.ErrBadConn
- }
- // Send command
- err := stmt.writeExecutePacket(args)
- if err != nil {
- return nil, stmt.mc.markBadConn(err)
- }
-
- mc := stmt.mc
-
- mc.affectedRows = 0
- mc.insertId = 0
-
- // Read Result
- resLen, err := mc.readResultSetHeaderPacket()
- if err != nil {
- return nil, err
- }
-
- if resLen > 0 {
- // Columns
- if err = mc.readUntilEOF(); err != nil {
- return nil, err
- }
-
- // Rows
- if err := mc.readUntilEOF(); err != nil {
- return nil, err
- }
- }
-
- if err := mc.discardResults(); err != nil {
- return nil, err
- }
-
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, nil
-}
-
-func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
- return stmt.query(args)
-}
-
-func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
- if stmt.mc.closed.IsSet() {
- errLog.Print(ErrInvalidConn)
- return nil, driver.ErrBadConn
- }
- // Send command
- err := stmt.writeExecutePacket(args)
- if err != nil {
- return nil, stmt.mc.markBadConn(err)
- }
-
- mc := stmt.mc
-
- // Read Result
- resLen, err := mc.readResultSetHeaderPacket()
- if err != nil {
- return nil, err
- }
-
- rows := new(binaryRows)
-
- if resLen > 0 {
- rows.mc = mc
- rows.rs.columns, err = mc.readColumns(resLen)
- } else {
- rows.rs.done = true
-
- switch err := rows.NextResultSet(); err {
- case nil, io.EOF:
- return rows, nil
- default:
- return nil, err
- }
- }
-
- return rows, err
-}
-
-type converter struct{}
-
-// ConvertValue mirrors the reference/default converter in database/sql/driver
-// with _one_ exception. We support uint64 with their high bit and the default
-// implementation does not. This function should be kept in sync with
-// database/sql/driver defaultConverter.ConvertValue() except for that
-// deliberate difference.
-func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
- if driver.IsValue(v) {
- return v, nil
- }
-
- if vr, ok := v.(driver.Valuer); ok {
- sv, err := callValuerValue(vr)
- if err != nil {
- return nil, err
- }
- if !driver.IsValue(sv) {
- return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
- }
- return sv, nil
- }
-
- rv := reflect.ValueOf(v)
- switch rv.Kind() {
- case reflect.Ptr:
- // indirect pointers
- if rv.IsNil() {
- return nil, nil
- } else {
- return c.ConvertValue(rv.Elem().Interface())
- }
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return rv.Int(), nil
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return rv.Uint(), nil
- case reflect.Float32, reflect.Float64:
- return rv.Float(), nil
- case reflect.Bool:
- return rv.Bool(), nil
- case reflect.Slice:
- ek := rv.Type().Elem().Kind()
- if ek == reflect.Uint8 {
- return rv.Bytes(), nil
- }
- return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek)
- case reflect.String:
- return rv.String(), nil
- }
- return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
-}
-
-var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
-
-// callValuerValue returns vr.Value(), with one exception:
-// If vr.Value is an auto-generated method on a pointer type and the
-// pointer is nil, it would panic at runtime in the panicwrap
-// method. Treat it like nil instead.
-//
-// This is so people can implement driver.Value on value types and
-// still use nil pointers to those types to mean nil/NULL, just like
-// string/*string.
-//
-// This is an exact copy of the same-named unexported function from the
-// database/sql package.
-func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
- if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
- rv.IsNil() &&
- rv.Type().Elem().Implements(valuerReflectType) {
- return nil, nil
- }
- return vr.Value()
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/statement_test.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/statement_test.go
deleted file mode 100644
index 4b9914f..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/statement_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "testing"
-)
-
-func TestConvertDerivedString(t *testing.T) {
- type derived string
-
- output, err := converter{}.ConvertValue(derived("value"))
- if err != nil {
- t.Fatal("Derived string type not convertible", err)
- }
-
- if output != "value" {
- t.Fatalf("Derived string type not converted, got %#v %T", output, output)
- }
-}
-
-func TestConvertDerivedByteSlice(t *testing.T) {
- type derived []uint8
-
- output, err := converter{}.ConvertValue(derived("value"))
- if err != nil {
- t.Fatal("Byte slice not convertible", err)
- }
-
- if bytes.Compare(output.([]byte), []byte("value")) != 0 {
- t.Fatalf("Byte slice not converted, got %#v %T", output, output)
- }
-}
-
-func TestConvertDerivedUnsupportedSlice(t *testing.T) {
- type derived []int
-
- _, err := converter{}.ConvertValue(derived{1})
- if err == nil || err.Error() != "unsupported type mysql.derived, a slice of int" {
- t.Fatal("Unexpected error", err)
- }
-}
-
-func TestConvertDerivedBool(t *testing.T) {
- type derived bool
-
- output, err := converter{}.ConvertValue(derived(true))
- if err != nil {
- t.Fatal("Derived bool type not convertible", err)
- }
-
- if output != true {
- t.Fatalf("Derived bool type not converted, got %#v %T", output, output)
- }
-}
-
-func TestConvertPointer(t *testing.T) {
- str := "value"
-
- output, err := converter{}.ConvertValue(&str)
- if err != nil {
- t.Fatal("Pointer type not convertible", err)
- }
-
- if output != "value" {
- t.Fatalf("Pointer type not converted, got %#v %T", output, output)
- }
-}
-
-func TestConvertSignedIntegers(t *testing.T) {
- values := []interface{}{
- int8(-42),
- int16(-42),
- int32(-42),
- int64(-42),
- int(-42),
- }
-
- for _, value := range values {
- output, err := converter{}.ConvertValue(value)
- if err != nil {
- t.Fatalf("%T type not convertible %s", value, err)
- }
-
- if output != int64(-42) {
- t.Fatalf("%T type not converted, got %#v %T", value, output, output)
- }
- }
-}
-
-func TestConvertUnsignedIntegers(t *testing.T) {
- values := []interface{}{
- uint8(42),
- uint16(42),
- uint32(42),
- uint64(42),
- uint(42),
- }
-
- for _, value := range values {
- output, err := converter{}.ConvertValue(value)
- if err != nil {
- t.Fatalf("%T type not convertible %s", value, err)
- }
-
- if output != uint64(42) {
- t.Fatalf("%T type not converted, got %#v %T", value, output, output)
- }
- }
-
- output, err := converter{}.ConvertValue(^uint64(0))
- if err != nil {
- t.Fatal("uint64 high-bit not convertible", err)
- }
-
- if output != ^uint64(0) {
- t.Fatalf("uint64 high-bit converted, got %#v %T", output, output)
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/transaction.go
deleted file mode 100644
index 417d727..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/transaction.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-type mysqlTx struct {
- mc *mysqlConn
-}
-
-func (tx *mysqlTx) Commit() (err error) {
- if tx.mc == nil || tx.mc.closed.IsSet() {
- return ErrInvalidConn
- }
- err = tx.mc.exec("COMMIT")
- tx.mc = nil
- return
-}
-
-func (tx *mysqlTx) Rollback() (err error) {
- if tx.mc == nil || tx.mc.closed.IsSet() {
- return ErrInvalidConn
- }
- err = tx.mc.exec("ROLLBACK")
- tx.mc = nil
- return
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/utils.go
deleted file mode 100644
index cfa10e9..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/utils.go
+++ /dev/null
@@ -1,755 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "crypto/tls"
- "database/sql"
- "database/sql/driver"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// Registry for custom tls.Configs
-var (
- tlsConfigLock sync.RWMutex
- tlsConfigRegistry map[string]*tls.Config
-)
-
-// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
-// Use the key as a value in the DSN where tls=value.
-//
-// Note: The provided tls.Config is exclusively owned by the driver after
-// registering it.
-//
-// rootCertPool := x509.NewCertPool()
-// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
-// if err != nil {
-// log.Fatal(err)
-// }
-// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
-// log.Fatal("Failed to append PEM.")
-// }
-// clientCert := make([]tls.Certificate, 0, 1)
-// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
-// if err != nil {
-// log.Fatal(err)
-// }
-// clientCert = append(clientCert, certs)
-// mysql.RegisterTLSConfig("custom", &tls.Config{
-// RootCAs: rootCertPool,
-// Certificates: clientCert,
-// })
-// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
-//
-func RegisterTLSConfig(key string, config *tls.Config) error {
- if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" || strings.ToLower(key) == "preferred" {
- return fmt.Errorf("key '%s' is reserved", key)
- }
-
- tlsConfigLock.Lock()
- if tlsConfigRegistry == nil {
- tlsConfigRegistry = make(map[string]*tls.Config)
- }
-
- tlsConfigRegistry[key] = config
- tlsConfigLock.Unlock()
- return nil
-}
-
-// DeregisterTLSConfig removes the tls.Config associated with key.
-func DeregisterTLSConfig(key string) {
- tlsConfigLock.Lock()
- if tlsConfigRegistry != nil {
- delete(tlsConfigRegistry, key)
- }
- tlsConfigLock.Unlock()
-}
-
-func getTLSConfigClone(key string) (config *tls.Config) {
- tlsConfigLock.RLock()
- if v, ok := tlsConfigRegistry[key]; ok {
- config = v.Clone()
- }
- tlsConfigLock.RUnlock()
- return
-}
-
-// Returns the bool value of the input.
-// The 2nd return value indicates if the input was a valid bool value
-func readBool(input string) (value bool, valid bool) {
- switch input {
- case "1", "true", "TRUE", "True":
- return true, true
- case "0", "false", "FALSE", "False":
- return false, true
- }
-
- // Not a valid bool value
- return
-}
-
-/******************************************************************************
-* Time related utils *
-******************************************************************************/
-
-// NullTime represents a time.Time that may be NULL.
-// NullTime implements the Scanner interface so
-// it can be used as a scan destination:
-//
-// var nt NullTime
-// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
-// ...
-// if nt.Valid {
-// // use nt.Time
-// } else {
-// // NULL value
-// }
-//
-// This NullTime implementation is not driver-specific
-type NullTime struct {
- Time time.Time
- Valid bool // Valid is true if Time is not NULL
-}
-
-// Scan implements the Scanner interface.
-// The value type must be time.Time or string / []byte (formatted time-string),
-// otherwise Scan fails.
-func (nt *NullTime) Scan(value interface{}) (err error) {
- if value == nil {
- nt.Time, nt.Valid = time.Time{}, false
- return
- }
-
- switch v := value.(type) {
- case time.Time:
- nt.Time, nt.Valid = v, true
- return
- case []byte:
- nt.Time, err = parseDateTime(string(v), time.UTC)
- nt.Valid = (err == nil)
- return
- case string:
- nt.Time, err = parseDateTime(v, time.UTC)
- nt.Valid = (err == nil)
- return
- }
-
- nt.Valid = false
- return fmt.Errorf("Can't convert %T to time.Time", value)
-}
-
-// Value implements the driver Valuer interface.
-func (nt NullTime) Value() (driver.Value, error) {
- if !nt.Valid {
- return nil, nil
- }
- return nt.Time, nil
-}
-
-func parseDateTime(str string, loc *time.Location) (t time.Time, err error) {
- base := "0000-00-00 00:00:00.0000000"
- switch len(str) {
- case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
- if str == base[:len(str)] {
- return
- }
- t, err = time.Parse(timeFormat[:len(str)], str)
- default:
- err = fmt.Errorf("invalid time string: %s", str)
- return
- }
-
- // Adjust location
- if err == nil && loc != time.UTC {
- y, mo, d := t.Date()
- h, mi, s := t.Clock()
- t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil
- }
-
- return
-}
-
-func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
- switch num {
- case 0:
- return time.Time{}, nil
- case 4:
- return time.Date(
- int(binary.LittleEndian.Uint16(data[:2])), // year
- time.Month(data[2]), // month
- int(data[3]), // day
- 0, 0, 0, 0,
- loc,
- ), nil
- case 7:
- return time.Date(
- int(binary.LittleEndian.Uint16(data[:2])), // year
- time.Month(data[2]), // month
- int(data[3]), // day
- int(data[4]), // hour
- int(data[5]), // minutes
- int(data[6]), // seconds
- 0,
- loc,
- ), nil
- case 11:
- return time.Date(
- int(binary.LittleEndian.Uint16(data[:2])), // year
- time.Month(data[2]), // month
- int(data[3]), // day
- int(data[4]), // hour
- int(data[5]), // minutes
- int(data[6]), // seconds
- int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
- loc,
- ), nil
- }
- return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
-}
-
-// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
-// if the DATE or DATETIME has the zero value.
-// It must never be changed.
-// The current behavior depends on database/sql copying the result.
-var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
-
-const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
-const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
-
-func appendMicrosecs(dst, src []byte, decimals int) []byte {
- if decimals <= 0 {
- return dst
- }
- if len(src) == 0 {
- return append(dst, ".000000"[:decimals+1]...)
- }
-
- microsecs := binary.LittleEndian.Uint32(src[:4])
- p1 := byte(microsecs / 10000)
- microsecs -= 10000 * uint32(p1)
- p2 := byte(microsecs / 100)
- microsecs -= 100 * uint32(p2)
- p3 := byte(microsecs)
-
- switch decimals {
- default:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- digits10[p2], digits01[p2],
- digits10[p3], digits01[p3],
- )
- case 1:
- return append(dst, '.',
- digits10[p1],
- )
- case 2:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- )
- case 3:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- digits10[p2],
- )
- case 4:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- digits10[p2], digits01[p2],
- )
- case 5:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- digits10[p2], digits01[p2],
- digits10[p3],
- )
- }
-}
-
-func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) {
- // length expects the deterministic length of the zero value,
- // negative time and 100+ hours are automatically added if needed
- if len(src) == 0 {
- return zeroDateTime[:length], nil
- }
- var dst []byte // return value
- var p1, p2, p3 byte // current digit pair
-
- switch length {
- case 10, 19, 21, 22, 23, 24, 25, 26:
- default:
- t := "DATE"
- if length > 10 {
- t += "TIME"
- }
- return nil, fmt.Errorf("illegal %s length %d", t, length)
- }
- switch len(src) {
- case 4, 7, 11:
- default:
- t := "DATE"
- if length > 10 {
- t += "TIME"
- }
- return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
- }
- dst = make([]byte, 0, length)
- // start with the date
- year := binary.LittleEndian.Uint16(src[:2])
- pt := year / 100
- p1 = byte(year - 100*uint16(pt))
- p2, p3 = src[2], src[3]
- dst = append(dst,
- digits10[pt], digits01[pt],
- digits10[p1], digits01[p1], '-',
- digits10[p2], digits01[p2], '-',
- digits10[p3], digits01[p3],
- )
- if length == 10 {
- return dst, nil
- }
- if len(src) == 4 {
- return append(dst, zeroDateTime[10:length]...), nil
- }
- dst = append(dst, ' ')
- p1 = src[4] // hour
- src = src[5:]
-
- // p1 is 2-digit hour, src is after hour
- p2, p3 = src[0], src[1]
- dst = append(dst,
- digits10[p1], digits01[p1], ':',
- digits10[p2], digits01[p2], ':',
- digits10[p3], digits01[p3],
- )
- return appendMicrosecs(dst, src[2:], int(length)-20), nil
-}
-
-func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
- // length expects the deterministic length of the zero value,
- // negative time and 100+ hours are automatically added if needed
- if len(src) == 0 {
- return zeroDateTime[11 : 11+length], nil
- }
- var dst []byte // return value
-
- switch length {
- case
- 8, // time (can be up to 10 when negative and 100+ hours)
- 10, 11, 12, 13, 14, 15: // time with fractional seconds
- default:
- return nil, fmt.Errorf("illegal TIME length %d", length)
- }
- switch len(src) {
- case 8, 12:
- default:
- return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
- }
- // +2 to enable negative time and 100+ hours
- dst = make([]byte, 0, length+2)
- if src[0] == 1 {
- dst = append(dst, '-')
- }
- days := binary.LittleEndian.Uint32(src[1:5])
- hours := int64(days)*24 + int64(src[5])
-
- if hours >= 100 {
- dst = strconv.AppendInt(dst, hours, 10)
- } else {
- dst = append(dst, digits10[hours], digits01[hours])
- }
-
- min, sec := src[6], src[7]
- dst = append(dst, ':',
- digits10[min], digits01[min], ':',
- digits10[sec], digits01[sec],
- )
- return appendMicrosecs(dst, src[8:], int(length)-9), nil
-}
-
-/******************************************************************************
-* Convert from and to bytes *
-******************************************************************************/
-
-func uint64ToBytes(n uint64) []byte {
- return []byte{
- byte(n),
- byte(n >> 8),
- byte(n >> 16),
- byte(n >> 24),
- byte(n >> 32),
- byte(n >> 40),
- byte(n >> 48),
- byte(n >> 56),
- }
-}
-
-func uint64ToString(n uint64) []byte {
- var a [20]byte
- i := 20
-
- // U+0030 = 0
- // ...
- // U+0039 = 9
-
- var q uint64
- for n >= 10 {
- i--
- q = n / 10
- a[i] = uint8(n-q*10) + 0x30
- n = q
- }
-
- i--
- a[i] = uint8(n) + 0x30
-
- return a[i:]
-}
-
-// treats string value as unsigned integer representation
-func stringToInt(b []byte) int {
- val := 0
- for i := range b {
- val *= 10
- val += int(b[i] - 0x30)
- }
- return val
-}
-
-// returns the string read as a bytes slice, wheter the value is NULL,
-// the number of bytes read and an error, in case the string is longer than
-// the input slice
-func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
- // Get length
- num, isNull, n := readLengthEncodedInteger(b)
- if num < 1 {
- return b[n:n], isNull, n, nil
- }
-
- n += int(num)
-
- // Check data length
- if len(b) >= n {
- return b[n-int(num) : n : n], false, n, nil
- }
- return nil, false, n, io.EOF
-}
-
-// returns the number of bytes skipped and an error, in case the string is
-// longer than the input slice
-func skipLengthEncodedString(b []byte) (int, error) {
- // Get length
- num, _, n := readLengthEncodedInteger(b)
- if num < 1 {
- return n, nil
- }
-
- n += int(num)
-
- // Check data length
- if len(b) >= n {
- return n, nil
- }
- return n, io.EOF
-}
-
-// returns the number read, whether the value is NULL and the number of bytes read
-func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
- // See issue #349
- if len(b) == 0 {
- return 0, true, 1
- }
-
- switch b[0] {
- // 251: NULL
- case 0xfb:
- return 0, true, 1
-
- // 252: value of following 2
- case 0xfc:
- return uint64(b[1]) | uint64(b[2])<<8, false, 3
-
- // 253: value of following 3
- case 0xfd:
- return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
-
- // 254: value of following 8
- case 0xfe:
- return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
- uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
- uint64(b[7])<<48 | uint64(b[8])<<56,
- false, 9
- }
-
- // 0-250: value of first byte
- return uint64(b[0]), false, 1
-}
-
-// encodes a uint64 value and appends it to the given bytes slice
-func appendLengthEncodedInteger(b []byte, n uint64) []byte {
- switch {
- case n <= 250:
- return append(b, byte(n))
-
- case n <= 0xffff:
- return append(b, 0xfc, byte(n), byte(n>>8))
-
- case n <= 0xffffff:
- return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
- }
- return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
- byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
-}
-
-// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize.
-// If cap(buf) is not enough, reallocate new buffer.
-func reserveBuffer(buf []byte, appendSize int) []byte {
- newSize := len(buf) + appendSize
- if cap(buf) < newSize {
- // Grow buffer exponentially
- newBuf := make([]byte, len(buf)*2+appendSize)
- copy(newBuf, buf)
- buf = newBuf
- }
- return buf[:newSize]
-}
-
-// escapeBytesBackslash escapes []byte with backslashes (\)
-// This escapes the contents of a string (provided as []byte) by adding backslashes before special
-// characters, and turning others into specific escape sequences, such as
-// turning newlines into \n and null bytes into \0.
-// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
-func escapeBytesBackslash(buf, v []byte) []byte {
- pos := len(buf)
- buf = reserveBuffer(buf, len(v)*2)
-
- for _, c := range v {
- switch c {
- case '\x00':
- buf[pos] = '\\'
- buf[pos+1] = '0'
- pos += 2
- case '\n':
- buf[pos] = '\\'
- buf[pos+1] = 'n'
- pos += 2
- case '\r':
- buf[pos] = '\\'
- buf[pos+1] = 'r'
- pos += 2
- case '\x1a':
- buf[pos] = '\\'
- buf[pos+1] = 'Z'
- pos += 2
- case '\'':
- buf[pos] = '\\'
- buf[pos+1] = '\''
- pos += 2
- case '"':
- buf[pos] = '\\'
- buf[pos+1] = '"'
- pos += 2
- case '\\':
- buf[pos] = '\\'
- buf[pos+1] = '\\'
- pos += 2
- default:
- buf[pos] = c
- pos++
- }
- }
-
- return buf[:pos]
-}
-
-// escapeStringBackslash is similar to escapeBytesBackslash but for string.
-func escapeStringBackslash(buf []byte, v string) []byte {
- pos := len(buf)
- buf = reserveBuffer(buf, len(v)*2)
-
- for i := 0; i < len(v); i++ {
- c := v[i]
- switch c {
- case '\x00':
- buf[pos] = '\\'
- buf[pos+1] = '0'
- pos += 2
- case '\n':
- buf[pos] = '\\'
- buf[pos+1] = 'n'
- pos += 2
- case '\r':
- buf[pos] = '\\'
- buf[pos+1] = 'r'
- pos += 2
- case '\x1a':
- buf[pos] = '\\'
- buf[pos+1] = 'Z'
- pos += 2
- case '\'':
- buf[pos] = '\\'
- buf[pos+1] = '\''
- pos += 2
- case '"':
- buf[pos] = '\\'
- buf[pos+1] = '"'
- pos += 2
- case '\\':
- buf[pos] = '\\'
- buf[pos+1] = '\\'
- pos += 2
- default:
- buf[pos] = c
- pos++
- }
- }
-
- return buf[:pos]
-}
-
-// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
-// This escapes the contents of a string by doubling up any apostrophes that
-// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
-// effect on the server.
-// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
-func escapeBytesQuotes(buf, v []byte) []byte {
- pos := len(buf)
- buf = reserveBuffer(buf, len(v)*2)
-
- for _, c := range v {
- if c == '\'' {
- buf[pos] = '\''
- buf[pos+1] = '\''
- pos += 2
- } else {
- buf[pos] = c
- pos++
- }
- }
-
- return buf[:pos]
-}
-
-// escapeStringQuotes is similar to escapeBytesQuotes but for string.
-func escapeStringQuotes(buf []byte, v string) []byte {
- pos := len(buf)
- buf = reserveBuffer(buf, len(v)*2)
-
- for i := 0; i < len(v); i++ {
- c := v[i]
- if c == '\'' {
- buf[pos] = '\''
- buf[pos+1] = '\''
- pos += 2
- } else {
- buf[pos] = c
- pos++
- }
- }
-
- return buf[:pos]
-}
-
-/******************************************************************************
-* Sync utils *
-******************************************************************************/
-
-// noCopy may be embedded into structs which must not be copied
-// after the first use.
-//
-// See https://github.com/golang/go/issues/8005#issuecomment-190753527
-// for details.
-type noCopy struct{}
-
-// Lock is a no-op used by -copylocks checker from `go vet`.
-func (*noCopy) Lock() {}
-
-// atomicBool is a wrapper around uint32 for usage as a boolean value with
-// atomic access.
-type atomicBool struct {
- _noCopy noCopy
- value uint32
-}
-
-// IsSet returns whether the current boolean value is true
-func (ab *atomicBool) IsSet() bool {
- return atomic.LoadUint32(&ab.value) > 0
-}
-
-// Set sets the value of the bool regardless of the previous value
-func (ab *atomicBool) Set(value bool) {
- if value {
- atomic.StoreUint32(&ab.value, 1)
- } else {
- atomic.StoreUint32(&ab.value, 0)
- }
-}
-
-// TrySet sets the value of the bool and returns whether the value changed
-func (ab *atomicBool) TrySet(value bool) bool {
- if value {
- return atomic.SwapUint32(&ab.value, 1) == 0
- }
- return atomic.SwapUint32(&ab.value, 0) > 0
-}
-
-// atomicError is a wrapper for atomically accessed error values
-type atomicError struct {
- _noCopy noCopy
- value atomic.Value
-}
-
-// Set sets the error value regardless of the previous value.
-// The value must not be nil
-func (ae *atomicError) Set(value error) {
- ae.value.Store(value)
-}
-
-// Value returns the current error value
-func (ae *atomicError) Value() error {
- if v := ae.value.Load(); v != nil {
- // this will panic if the value doesn't implement the error interface
- return v.(error)
- }
- return nil
-}
-
-func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
- dargs := make([]driver.Value, len(named))
- for n, param := range named {
- if len(param.Name) > 0 {
- // TODO: support the use of Named Parameters #561
- return nil, errors.New("mysql: driver does not support the use of Named Parameters")
- }
- dargs[n] = param.Value
- }
- return dargs, nil
-}
-
-func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
- switch sql.IsolationLevel(level) {
- case sql.LevelRepeatableRead:
- return "REPEATABLE READ", nil
- case sql.LevelReadCommitted:
- return "READ COMMITTED", nil
- case sql.LevelReadUncommitted:
- return "READ UNCOMMITTED", nil
- case sql.LevelSerializable:
- return "SERIALIZABLE", nil
- default:
- return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/utils_test.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/utils_test.go
deleted file mode 100644
index 8951a7a..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/go-sql-driver/mysql/utils_test.go
+++ /dev/null
@@ -1,334 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "database/sql"
- "database/sql/driver"
- "encoding/binary"
- "testing"
- "time"
-)
-
-func TestScanNullTime(t *testing.T) {
- var scanTests = []struct {
- in interface{}
- error bool
- valid bool
- time time.Time
- }{
- {tDate, false, true, tDate},
- {sDate, false, true, tDate},
- {[]byte(sDate), false, true, tDate},
- {tDateTime, false, true, tDateTime},
- {sDateTime, false, true, tDateTime},
- {[]byte(sDateTime), false, true, tDateTime},
- {tDate0, false, true, tDate0},
- {sDate0, false, true, tDate0},
- {[]byte(sDate0), false, true, tDate0},
- {sDateTime0, false, true, tDate0},
- {[]byte(sDateTime0), false, true, tDate0},
- {"", true, false, tDate0},
- {"1234", true, false, tDate0},
- {0, true, false, tDate0},
- }
-
- var nt = NullTime{}
- var err error
-
- for _, tst := range scanTests {
- err = nt.Scan(tst.in)
- if (err != nil) != tst.error {
- t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil))
- }
- if nt.Valid != tst.valid {
- t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid)
- }
- if nt.Time != tst.time {
- t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time)
- }
- }
-}
-
-func TestLengthEncodedInteger(t *testing.T) {
- var integerTests = []struct {
- num uint64
- encoded []byte
- }{
- {0x0000000000000000, []byte{0x00}},
- {0x0000000000000012, []byte{0x12}},
- {0x00000000000000fa, []byte{0xfa}},
- {0x0000000000000100, []byte{0xfc, 0x00, 0x01}},
- {0x0000000000001234, []byte{0xfc, 0x34, 0x12}},
- {0x000000000000ffff, []byte{0xfc, 0xff, 0xff}},
- {0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}},
- {0x0000000000123456, []byte{0xfd, 0x56, 0x34, 0x12}},
- {0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}},
- {0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}},
- {0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}},
- {0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
- }
-
- for _, tst := range integerTests {
- num, isNull, numLen := readLengthEncodedInteger(tst.encoded)
- if isNull {
- t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num)
- }
- if num != tst.num {
- t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num)
- }
- if numLen != len(tst.encoded) {
- t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen)
- }
- encoded := appendLengthEncodedInteger(nil, num)
- if !bytes.Equal(encoded, tst.encoded) {
- t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded)
- }
- }
-}
-
-func TestFormatBinaryDateTime(t *testing.T) {
- rawDate := [11]byte{}
- binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years
- rawDate[2] = 12 // months
- rawDate[3] = 30 // days
- rawDate[4] = 15 // hours
- rawDate[5] = 46 // minutes
- rawDate[6] = 23 // seconds
- binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds
- expect := func(expected string, inlen, outlen uint8) {
- actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen)
- bytes, ok := actual.([]byte)
- if !ok {
- t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
- }
- if string(bytes) != expected {
- t.Errorf(
- "expected %q, got %q for length in %d, out %d",
- expected, actual, inlen, outlen,
- )
- }
- }
- expect("0000-00-00", 0, 10)
- expect("0000-00-00 00:00:00", 0, 19)
- expect("1978-12-30", 4, 10)
- expect("1978-12-30 15:46:23", 7, 19)
- expect("1978-12-30 15:46:23.987654", 11, 26)
-}
-
-func TestFormatBinaryTime(t *testing.T) {
- expect := func(expected string, src []byte, outlen uint8) {
- actual, _ := formatBinaryTime(src, outlen)
- bytes, ok := actual.([]byte)
- if !ok {
- t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
- }
- if string(bytes) != expected {
- t.Errorf(
- "expected %q, got %q for src=%q and outlen=%d",
- expected, actual, src, outlen)
- }
- }
-
- // binary format:
- // sign (0: positive, 1: negative), days(4), hours, minutes, seconds, micro(4)
-
- // Zeros
- expect("00:00:00", []byte{}, 8)
- expect("00:00:00.0", []byte{}, 10)
- expect("00:00:00.000000", []byte{}, 15)
-
- // Without micro(4)
- expect("12:34:56", []byte{0, 0, 0, 0, 0, 12, 34, 56}, 8)
- expect("-12:34:56", []byte{1, 0, 0, 0, 0, 12, 34, 56}, 8)
- expect("12:34:56.00", []byte{0, 0, 0, 0, 0, 12, 34, 56}, 11)
- expect("24:34:56", []byte{0, 1, 0, 0, 0, 0, 34, 56}, 8)
- expect("-99:34:56", []byte{1, 4, 0, 0, 0, 3, 34, 56}, 8)
- expect("103079215103:34:56", []byte{0, 255, 255, 255, 255, 23, 34, 56}, 8)
-
- // With micro(4)
- expect("12:34:56.00", []byte{0, 0, 0, 0, 0, 12, 34, 56, 99, 0, 0, 0}, 11)
- expect("12:34:56.000099", []byte{0, 0, 0, 0, 0, 12, 34, 56, 99, 0, 0, 0}, 15)
-}
-
-func TestEscapeBackslash(t *testing.T) {
- expect := func(expected, value string) {
- actual := string(escapeBytesBackslash([]byte{}, []byte(value)))
- if actual != expected {
- t.Errorf(
- "expected %s, got %s",
- expected, actual,
- )
- }
-
- actual = string(escapeStringBackslash([]byte{}, value))
- if actual != expected {
- t.Errorf(
- "expected %s, got %s",
- expected, actual,
- )
- }
- }
-
- expect("foo\\0bar", "foo\x00bar")
- expect("foo\\nbar", "foo\nbar")
- expect("foo\\rbar", "foo\rbar")
- expect("foo\\Zbar", "foo\x1abar")
- expect("foo\\\"bar", "foo\"bar")
- expect("foo\\\\bar", "foo\\bar")
- expect("foo\\'bar", "foo'bar")
-}
-
-func TestEscapeQuotes(t *testing.T) {
- expect := func(expected, value string) {
- actual := string(escapeBytesQuotes([]byte{}, []byte(value)))
- if actual != expected {
- t.Errorf(
- "expected %s, got %s",
- expected, actual,
- )
- }
-
- actual = string(escapeStringQuotes([]byte{}, value))
- if actual != expected {
- t.Errorf(
- "expected %s, got %s",
- expected, actual,
- )
- }
- }
-
- expect("foo\x00bar", "foo\x00bar") // not affected
- expect("foo\nbar", "foo\nbar") // not affected
- expect("foo\rbar", "foo\rbar") // not affected
- expect("foo\x1abar", "foo\x1abar") // not affected
- expect("foo''bar", "foo'bar") // affected
- expect("foo\"bar", "foo\"bar") // not affected
-}
-
-func TestAtomicBool(t *testing.T) {
- var ab atomicBool
- if ab.IsSet() {
- t.Fatal("Expected value to be false")
- }
-
- ab.Set(true)
- if ab.value != 1 {
- t.Fatal("Set(true) did not set value to 1")
- }
- if !ab.IsSet() {
- t.Fatal("Expected value to be true")
- }
-
- ab.Set(true)
- if !ab.IsSet() {
- t.Fatal("Expected value to be true")
- }
-
- ab.Set(false)
- if ab.value != 0 {
- t.Fatal("Set(false) did not set value to 0")
- }
- if ab.IsSet() {
- t.Fatal("Expected value to be false")
- }
-
- ab.Set(false)
- if ab.IsSet() {
- t.Fatal("Expected value to be false")
- }
- if ab.TrySet(false) {
- t.Fatal("Expected TrySet(false) to fail")
- }
- if !ab.TrySet(true) {
- t.Fatal("Expected TrySet(true) to succeed")
- }
- if !ab.IsSet() {
- t.Fatal("Expected value to be true")
- }
-
- ab.Set(true)
- if !ab.IsSet() {
- t.Fatal("Expected value to be true")
- }
- if ab.TrySet(true) {
- t.Fatal("Expected TrySet(true) to fail")
- }
- if !ab.TrySet(false) {
- t.Fatal("Expected TrySet(false) to succeed")
- }
- if ab.IsSet() {
- t.Fatal("Expected value to be false")
- }
-
- ab._noCopy.Lock() // we've "tested" it ¯\_(ツ)_/¯
-}
-
-func TestAtomicError(t *testing.T) {
- var ae atomicError
- if ae.Value() != nil {
- t.Fatal("Expected value to be nil")
- }
-
- ae.Set(ErrMalformPkt)
- if v := ae.Value(); v != ErrMalformPkt {
- if v == nil {
- t.Fatal("Value is still nil")
- }
- t.Fatal("Error did not match")
- }
- ae.Set(ErrPktSync)
- if ae.Value() == ErrMalformPkt {
- t.Fatal("Error still matches old error")
- }
- if v := ae.Value(); v != ErrPktSync {
- t.Fatal("Error did not match")
- }
-}
-
-func TestIsolationLevelMapping(t *testing.T) {
- data := []struct {
- level driver.IsolationLevel
- expected string
- }{
- {
- level: driver.IsolationLevel(sql.LevelReadCommitted),
- expected: "READ COMMITTED",
- },
- {
- level: driver.IsolationLevel(sql.LevelRepeatableRead),
- expected: "REPEATABLE READ",
- },
- {
- level: driver.IsolationLevel(sql.LevelReadUncommitted),
- expected: "READ UNCOMMITTED",
- },
- {
- level: driver.IsolationLevel(sql.LevelSerializable),
- expected: "SERIALIZABLE",
- },
- }
-
- for i, td := range data {
- if actual, err := mapIsolationLevel(td.level); actual != td.expected || err != nil {
- t.Fatal(i, td.expected, actual, err)
- }
- }
-
- // check unsupported mapping
- expectedErr := "mysql: unsupported isolation level: 7"
- actual, err := mapIsolationLevel(driver.IsolationLevel(sql.LevelLinearizable))
- if actual != "" || err == nil {
- t.Fatal("Expected error on unsupported isolation level")
- }
- if err.Error() != expectedErr {
- t.Fatalf("Expected error to be %q, got %q", expectedErr, err)
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/LICENSE b/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/LICENSE
deleted file mode 100644
index 0d31edf..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/LICENSE
+++ /dev/null
@@ -1,23 +0,0 @@
- Copyright (c) 2013, Jason Moiron
-
- Permission is hereby granted, free of charge, to any person
- obtaining a copy of this software and associated documentation
- files (the "Software"), to deal in the Software without
- restriction, including without limitation the rights to use,
- copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the
- Software is furnished to do so, subject to the following
- conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- OTHER DEALINGS IN THE SOFTWARE.
-
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/bind.go
deleted file mode 100644
index 0fdc443..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/bind.go
+++ /dev/null
@@ -1,208 +0,0 @@
-package sqlx
-
-import (
- "bytes"
- "errors"
- "reflect"
- "strconv"
- "strings"
-
- "github.com/jmoiron/sqlx/reflectx"
-)
-
-// Bindvar types supported by Rebind, BindMap and BindStruct.
-const (
- UNKNOWN = iota
- QUESTION
- DOLLAR
- NAMED
-)
-
-// BindType returns the bindtype for a given database given a drivername.
-func BindType(driverName string) int {
- switch driverName {
- case "postgres", "pgx", "pq-timeouts", "cloudsqlpostgres":
- return DOLLAR
- case "mysql":
- return QUESTION
- case "sqlite3":
- return QUESTION
- case "oci8", "ora", "goracle":
- return NAMED
- }
- return UNKNOWN
-}
-
-// FIXME: this should be able to be tolerant of escaped ?'s in queries without
-// losing much speed, and should be to avoid confusion.
-
-// Rebind a query from the default bindtype (QUESTION) to the target bindtype.
-func Rebind(bindType int, query string) string {
- switch bindType {
- case QUESTION, UNKNOWN:
- return query
- }
-
- // Add space enough for 10 params before we have to allocate
- rqb := make([]byte, 0, len(query)+10)
-
- var i, j int
-
- for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") {
- rqb = append(rqb, query[:i]...)
-
- switch bindType {
- case DOLLAR:
- rqb = append(rqb, '$')
- case NAMED:
- rqb = append(rqb, ':', 'a', 'r', 'g')
- }
-
- j++
- rqb = strconv.AppendInt(rqb, int64(j), 10)
-
- query = query[i+1:]
- }
-
- return string(append(rqb, query...))
-}
-
-// Experimental implementation of Rebind which uses a bytes.Buffer. The code is
-// much simpler and should be more resistant to odd unicode, but it is twice as
-// slow. Kept here for benchmarking purposes and to possibly replace Rebind if
-// problems arise with its somewhat naive handling of unicode.
-func rebindBuff(bindType int, query string) string {
- if bindType != DOLLAR {
- return query
- }
-
- b := make([]byte, 0, len(query))
- rqb := bytes.NewBuffer(b)
- j := 1
- for _, r := range query {
- if r == '?' {
- rqb.WriteRune('$')
- rqb.WriteString(strconv.Itoa(j))
- j++
- } else {
- rqb.WriteRune(r)
- }
- }
-
- return rqb.String()
-}
-
-// In expands slice values in args, returning the modified query string
-// and a new arg list that can be executed by a database. The `query` should
-// use the `?` bindVar. The return value uses the `?` bindVar.
-func In(query string, args ...interface{}) (string, []interface{}, error) {
- // argMeta stores reflect.Value and length for slices and
- // the value itself for non-slice arguments
- type argMeta struct {
- v reflect.Value
- i interface{}
- length int
- }
-
- var flatArgsCount int
- var anySlices bool
-
- meta := make([]argMeta, len(args))
-
- for i, arg := range args {
- v := reflect.ValueOf(arg)
- t := reflectx.Deref(v.Type())
-
- // []byte is a driver.Value type so it should not be expanded
- if t.Kind() == reflect.Slice && t != reflect.TypeOf([]byte{}) {
- meta[i].length = v.Len()
- meta[i].v = v
-
- anySlices = true
- flatArgsCount += meta[i].length
-
- if meta[i].length == 0 {
- return "", nil, errors.New("empty slice passed to 'in' query")
- }
- } else {
- meta[i].i = arg
- flatArgsCount++
- }
- }
-
- // don't do any parsing if there aren't any slices; note that this means
- // some errors that we might have caught below will not be returned.
- if !anySlices {
- return query, args, nil
- }
-
- newArgs := make([]interface{}, 0, flatArgsCount)
- buf := bytes.NewBuffer(make([]byte, 0, len(query)+len(", ?")*flatArgsCount))
-
- var arg, offset int
-
- for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') {
- if arg >= len(meta) {
- // if an argument wasn't passed, lets return an error; this is
- // not actually how database/sql Exec/Query works, but since we are
- // creating an argument list programmatically, we want to be able
- // to catch these programmer errors earlier.
- return "", nil, errors.New("number of bindVars exceeds arguments")
- }
-
- argMeta := meta[arg]
- arg++
-
- // not a slice, continue.
- // our questionmark will either be written before the next expansion
- // of a slice or after the loop when writing the rest of the query
- if argMeta.length == 0 {
- offset = offset + i + 1
- newArgs = append(newArgs, argMeta.i)
- continue
- }
-
- // write everything up to and including our ? character
- buf.WriteString(query[:offset+i+1])
-
- for si := 1; si < argMeta.length; si++ {
- buf.WriteString(", ?")
- }
-
- newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length)
-
- // slice the query and reset the offset. this avoids some bookkeeping for
- // the write after the loop
- query = query[offset+i+1:]
- offset = 0
- }
-
- buf.WriteString(query)
-
- if arg < len(meta) {
- return "", nil, errors.New("number of bindVars less than number arguments")
- }
-
- return buf.String(), newArgs, nil
-}
-
-func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} {
- switch val := v.Interface().(type) {
- case []interface{}:
- args = append(args, val...)
- case []int:
- for i := range val {
- args = append(args, val[i])
- }
- case []string:
- for i := range val {
- args = append(args, val[i])
- }
- default:
- for si := 0; si < vlen; si++ {
- args = append(args, v.Index(si).Interface())
- }
- }
-
- return args
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/doc.go
deleted file mode 100644
index e2b4e60..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/doc.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Package sqlx provides general purpose extensions to database/sql.
-//
-// It is intended to seamlessly wrap database/sql and provide convenience
-// methods which are useful in the development of database driven applications.
-// None of the underlying database/sql methods are changed. Instead all extended
-// behavior is implemented through new methods defined on wrapper types.
-//
-// Additions include scanning into structs, named query support, rebinding
-// queries for different drivers, convenient shorthands for common error handling
-// and more.
-//
-package sqlx
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/named.go
deleted file mode 100644
index 69eb954..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/named.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package sqlx
-
-// Named Query Support
-//
-// * BindMap - bind query bindvars to map/struct args
-// * NamedExec, NamedQuery - named query w/ struct or map
-// * NamedStmt - a pre-compiled named query which is a prepared statement
-//
-// Internal Interfaces:
-//
-// * compileNamedQuery - rebind a named query, returning a query and list of names
-// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist
-//
-import (
- "database/sql"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "unicode"
-
- "github.com/jmoiron/sqlx/reflectx"
-)
-
-// NamedStmt is a prepared statement that executes named queries. Prepare it
-// how you would execute a NamedQuery, but pass in a struct or map when executing.
-type NamedStmt struct {
- Params []string
- QueryString string
- Stmt *Stmt
-}
-
-// Close closes the named statement.
-func (n *NamedStmt) Close() error {
- return n.Stmt.Close()
-}
-
-// Exec executes a named statement using the struct passed.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return *new(sql.Result), err
- }
- return n.Stmt.Exec(args...)
-}
-
-// Query executes a named statement using the struct argument, returning rows.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return nil, err
- }
- return n.Stmt.Query(args...)
-}
-
-// QueryRow executes a named statement against the database. Because sqlx cannot
-// create a *sql.Row with an error condition pre-set for binding errors, sqlx
-// returns a *sqlx.Row instead.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) QueryRow(arg interface{}) *Row {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return &Row{err: err}
- }
- return n.Stmt.QueryRowx(args...)
-}
-
-// MustExec execs a NamedStmt, panicing on error
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) MustExec(arg interface{}) sql.Result {
- res, err := n.Exec(arg)
- if err != nil {
- panic(err)
- }
- return res
-}
-
-// Queryx using this NamedStmt
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {
- r, err := n.Query(arg)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
-}
-
-// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is
-// an alias for QueryRow.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) QueryRowx(arg interface{}) *Row {
- return n.QueryRow(arg)
-}
-
-// Select using this NamedStmt
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) Select(dest interface{}, arg interface{}) error {
- rows, err := n.Queryx(arg)
- if err != nil {
- return err
- }
- // if something happens here, we want to make sure the rows are Closed
- defer rows.Close()
- return scanAll(rows, dest, false)
-}
-
-// Get using this NamedStmt
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) Get(dest interface{}, arg interface{}) error {
- r := n.QueryRowx(arg)
- return r.scanAny(dest, false)
-}
-
-// Unsafe creates an unsafe version of the NamedStmt
-func (n *NamedStmt) Unsafe() *NamedStmt {
- r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString}
- r.Stmt.unsafe = true
- return r
-}
-
-// A union interface of preparer and binder, required to be able to prepare
-// named statements (as the bindtype must be determined).
-type namedPreparer interface {
- Preparer
- binder
-}
-
-func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {
- bindType := BindType(p.DriverName())
- q, args, err := compileNamedQuery([]byte(query), bindType)
- if err != nil {
- return nil, err
- }
- stmt, err := Preparex(p, q)
- if err != nil {
- return nil, err
- }
- return &NamedStmt{
- QueryString: q,
- Params: args,
- Stmt: stmt,
- }, nil
-}
-
-func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
- if maparg, ok := arg.(map[string]interface{}); ok {
- return bindMapArgs(names, maparg)
- }
- return bindArgs(names, arg, m)
-}
-
-// private interface to generate a list of interfaces from a given struct
-// type, given a list of names to pull out of the struct. Used by public
-// BindStruct interface.
-func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
- arglist := make([]interface{}, 0, len(names))
-
- // grab the indirected value of arg
- v := reflect.ValueOf(arg)
- for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {
- v = v.Elem()
- }
-
- err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error {
- if len(t) == 0 {
- return fmt.Errorf("could not find name %s in %#v", names[i], arg)
- }
-
- val := reflectx.FieldByIndexesReadOnly(v, t)
- arglist = append(arglist, val.Interface())
-
- return nil
- })
-
- return arglist, err
-}
-
-// like bindArgs, but for maps.
-func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {
- arglist := make([]interface{}, 0, len(names))
-
- for _, name := range names {
- val, ok := arg[name]
- if !ok {
- return arglist, fmt.Errorf("could not find name %s in %#v", name, arg)
- }
- arglist = append(arglist, val)
- }
- return arglist, nil
-}
-
-// bindStruct binds a named parameter query with fields from a struct argument.
-// The rules for binding field names to parameter names follow the same
-// conventions as for StructScan, including obeying the `db` struct tags.
-func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
- bound, names, err := compileNamedQuery([]byte(query), bindType)
- if err != nil {
- return "", []interface{}{}, err
- }
-
- arglist, err := bindArgs(names, arg, m)
- if err != nil {
- return "", []interface{}{}, err
- }
-
- return bound, arglist, nil
-}
-
-// bindMap binds a named parameter query with a map of arguments.
-func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
- bound, names, err := compileNamedQuery([]byte(query), bindType)
- if err != nil {
- return "", []interface{}{}, err
- }
-
- arglist, err := bindMapArgs(names, args)
- return bound, arglist, err
-}
-
-// -- Compilation of Named Queries
-
-// Allow digits and letters in bind params; additionally runes are
-// checked against underscores, meaning that bind params can have be
-// alphanumeric with underscores. Mind the difference between unicode
-// digits and numbers, where '5' is a digit but '五' is not.
-var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
-
-// FIXME: this function isn't safe for unicode named params, as a failing test
-// can testify. This is not a regression but a failure of the original code
-// as well. It should be modified to range over runes in a string rather than
-// bytes, even though this is less convenient and slower. Hopefully the
-// addition of the prepared NamedStmt (which will only do this once) will make
-// up for the slightly slower ad-hoc NamedExec/NamedQuery.
-
-// compile a NamedQuery into an unbound query (using the '?' bindvar) and
-// a list of names.
-func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {
- names = make([]string, 0, 10)
- rebound := make([]byte, 0, len(qs))
-
- inName := false
- last := len(qs) - 1
- currentVar := 1
- name := make([]byte, 0, 10)
-
- for i, b := range qs {
- // a ':' while we're in a name is an error
- if b == ':' {
- // if this is the second ':' in a '::' escape sequence, append a ':'
- if inName && i > 0 && qs[i-1] == ':' {
- rebound = append(rebound, ':')
- inName = false
- continue
- } else if inName {
- err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i))
- return query, names, err
- }
- inName = true
- name = []byte{}
- // if we're in a name, and this is an allowed character, continue
- } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last {
- // append the byte to the name if we are in a name and not on the last byte
- name = append(name, b)
- // if we're in a name and it's not an allowed character, the name is done
- } else if inName {
- inName = false
- // if this is the final byte of the string and it is part of the name, then
- // make sure to add it to the name
- if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {
- name = append(name, b)
- }
- // add the string representation to the names list
- names = append(names, string(name))
- // add a proper bindvar for the bindType
- switch bindType {
- // oracle only supports named type bind vars even for positional
- case NAMED:
- rebound = append(rebound, ':')
- rebound = append(rebound, name...)
- case QUESTION, UNKNOWN:
- rebound = append(rebound, '?')
- case DOLLAR:
- rebound = append(rebound, '$')
- for _, b := range strconv.Itoa(currentVar) {
- rebound = append(rebound, byte(b))
- }
- currentVar++
- }
- // add this byte to string unless it was not part of the name
- if i != last {
- rebound = append(rebound, b)
- } else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {
- rebound = append(rebound, b)
- }
- } else {
- // this is a normal byte and should just go onto the rebound query
- rebound = append(rebound, b)
- }
- }
-
- return string(rebound), names, err
-}
-
-// BindNamed binds a struct or a map to a query with named parameters.
-// DEPRECATED: use sqlx.Named` instead of this, it may be removed in future.
-func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {
- return bindNamedMapper(bindType, query, arg, mapper())
-}
-
-// Named takes a query using named parameters and an argument and
-// returns a new query with a list of args that can be executed by
-// a database. The return value uses the `?` bindvar.
-func Named(query string, arg interface{}) (string, []interface{}, error) {
- return bindNamedMapper(QUESTION, query, arg, mapper())
-}
-
-func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
- if maparg, ok := arg.(map[string]interface{}); ok {
- return bindMap(bindType, query, maparg)
- }
- return bindStruct(bindType, query, arg, m)
-}
-
-// NamedQuery binds a named query and then runs Query on the result using the
-// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
-// map[string]interface{} types.
-func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {
- q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
- if err != nil {
- return nil, err
- }
- return e.Queryx(q, args...)
-}
-
-// NamedExec uses BindStruct to get a query executable by the driver and
-// then runs Exec on the result. Returns an error from the binding
-// or the query excution itself.
-func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {
- q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
- if err != nil {
- return nil, err
- }
- return e.Exec(q, args...)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/named_context.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/named_context.go
deleted file mode 100644
index 9405007..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/named_context.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// +build go1.8
-
-package sqlx
-
-import (
- "context"
- "database/sql"
-)
-
-// A union interface of contextPreparer and binder, required to be able to
-// prepare named statements with context (as the bindtype must be determined).
-type namedPreparerContext interface {
- PreparerContext
- binder
-}
-
-func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) {
- bindType := BindType(p.DriverName())
- q, args, err := compileNamedQuery([]byte(query), bindType)
- if err != nil {
- return nil, err
- }
- stmt, err := PreparexContext(ctx, p, q)
- if err != nil {
- return nil, err
- }
- return &NamedStmt{
- QueryString: q,
- Params: args,
- Stmt: stmt,
- }, nil
-}
-
-// ExecContext executes a named statement using the struct passed.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return *new(sql.Result), err
- }
- return n.Stmt.ExecContext(ctx, args...)
-}
-
-// QueryContext executes a named statement using the struct argument, returning rows.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return nil, err
- }
- return n.Stmt.QueryContext(ctx, args...)
-}
-
-// QueryRowContext executes a named statement against the database. Because sqlx cannot
-// create a *sql.Row with an error condition pre-set for binding errors, sqlx
-// returns a *sqlx.Row instead.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return &Row{err: err}
- }
- return n.Stmt.QueryRowxContext(ctx, args...)
-}
-
-// MustExecContext execs a NamedStmt, panicing on error
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result {
- res, err := n.ExecContext(ctx, arg)
- if err != nil {
- panic(err)
- }
- return res
-}
-
-// QueryxContext using this NamedStmt
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) {
- r, err := n.QueryContext(ctx, arg)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
-}
-
-// QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is
-// an alias for QueryRow.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row {
- return n.QueryRowContext(ctx, arg)
-}
-
-// SelectContext using this NamedStmt
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error {
- rows, err := n.QueryxContext(ctx, arg)
- if err != nil {
- return err
- }
- // if something happens here, we want to make sure the rows are Closed
- defer rows.Close()
- return scanAll(rows, dest, false)
-}
-
-// GetContext using this NamedStmt
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error {
- r := n.QueryRowxContext(ctx, arg)
- return r.scanAny(dest, false)
-}
-
-// NamedQueryContext binds a named query and then runs Query on the result using the
-// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
-// map[string]interface{} types.
-func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) {
- q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
- if err != nil {
- return nil, err
- }
- return e.QueryxContext(ctx, q, args...)
-}
-
-// NamedExecContext uses BindStruct to get a query executable by the driver and
-// then runs Exec on the result. Returns an error from the binding
-// or the query excution itself.
-func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) {
- q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
- if err != nil {
- return nil, err
- }
- return e.ExecContext(ctx, q, args...)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
deleted file mode 100644
index 73c21eb..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
+++ /dev/null
@@ -1,441 +0,0 @@
-// Package reflectx implements extensions to the standard reflect lib suitable
-// for implementing marshalling and unmarshalling packages. The main Mapper type
-// allows for Go-compatible named attribute access, including accessing embedded
-// struct attributes and the ability to use functions and struct tags to
-// customize field names.
-//
-package reflectx
-
-import (
- "reflect"
- "runtime"
- "strings"
- "sync"
-)
-
-// A FieldInfo is metadata for a struct field.
-type FieldInfo struct {
- Index []int
- Path string
- Field reflect.StructField
- Zero reflect.Value
- Name string
- Options map[string]string
- Embedded bool
- Children []*FieldInfo
- Parent *FieldInfo
-}
-
-// A StructMap is an index of field metadata for a struct.
-type StructMap struct {
- Tree *FieldInfo
- Index []*FieldInfo
- Paths map[string]*FieldInfo
- Names map[string]*FieldInfo
-}
-
-// GetByPath returns a *FieldInfo for a given string path.
-func (f StructMap) GetByPath(path string) *FieldInfo {
- return f.Paths[path]
-}
-
-// GetByTraversal returns a *FieldInfo for a given integer path. It is
-// analogous to reflect.FieldByIndex, but using the cached traversal
-// rather than re-executing the reflect machinery each time.
-func (f StructMap) GetByTraversal(index []int) *FieldInfo {
- if len(index) == 0 {
- return nil
- }
-
- tree := f.Tree
- for _, i := range index {
- if i >= len(tree.Children) || tree.Children[i] == nil {
- return nil
- }
- tree = tree.Children[i]
- }
- return tree
-}
-
-// Mapper is a general purpose mapper of names to struct fields. A Mapper
-// behaves like most marshallers in the standard library, obeying a field tag
-// for name mapping but also providing a basic transform function.
-type Mapper struct {
- cache map[reflect.Type]*StructMap
- tagName string
- tagMapFunc func(string) string
- mapFunc func(string) string
- mutex sync.Mutex
-}
-
-// NewMapper returns a new mapper using the tagName as its struct field tag.
-// If tagName is the empty string, it is ignored.
-func NewMapper(tagName string) *Mapper {
- return &Mapper{
- cache: make(map[reflect.Type]*StructMap),
- tagName: tagName,
- }
-}
-
-// NewMapperTagFunc returns a new mapper which contains a mapper for field names
-// AND a mapper for tag values. This is useful for tags like json which can
-// have values like "name,omitempty".
-func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper {
- return &Mapper{
- cache: make(map[reflect.Type]*StructMap),
- tagName: tagName,
- mapFunc: mapFunc,
- tagMapFunc: tagMapFunc,
- }
-}
-
-// NewMapperFunc returns a new mapper which optionally obeys a field tag and
-// a struct field name mapper func given by f. Tags will take precedence, but
-// for any other field, the mapped name will be f(field.Name)
-func NewMapperFunc(tagName string, f func(string) string) *Mapper {
- return &Mapper{
- cache: make(map[reflect.Type]*StructMap),
- tagName: tagName,
- mapFunc: f,
- }
-}
-
-// TypeMap returns a mapping of field strings to int slices representing
-// the traversal down the struct to reach the field.
-func (m *Mapper) TypeMap(t reflect.Type) *StructMap {
- m.mutex.Lock()
- mapping, ok := m.cache[t]
- if !ok {
- mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc)
- m.cache[t] = mapping
- }
- m.mutex.Unlock()
- return mapping
-}
-
-// FieldMap returns the mapper's mapping of field names to reflect values. Panics
-// if v's Kind is not Struct, or v is not Indirectable to a struct kind.
-func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value {
- v = reflect.Indirect(v)
- mustBe(v, reflect.Struct)
-
- r := map[string]reflect.Value{}
- tm := m.TypeMap(v.Type())
- for tagName, fi := range tm.Names {
- r[tagName] = FieldByIndexes(v, fi.Index)
- }
- return r
-}
-
-// FieldByName returns a field by its mapped name as a reflect.Value.
-// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind.
-// Returns zero Value if the name is not found.
-func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value {
- v = reflect.Indirect(v)
- mustBe(v, reflect.Struct)
-
- tm := m.TypeMap(v.Type())
- fi, ok := tm.Names[name]
- if !ok {
- return v
- }
- return FieldByIndexes(v, fi.Index)
-}
-
-// FieldsByName returns a slice of values corresponding to the slice of names
-// for the value. Panics if v's Kind is not Struct or v is not Indirectable
-// to a struct Kind. Returns zero Value for each name not found.
-func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
- v = reflect.Indirect(v)
- mustBe(v, reflect.Struct)
-
- tm := m.TypeMap(v.Type())
- vals := make([]reflect.Value, 0, len(names))
- for _, name := range names {
- fi, ok := tm.Names[name]
- if !ok {
- vals = append(vals, *new(reflect.Value))
- } else {
- vals = append(vals, FieldByIndexes(v, fi.Index))
- }
- }
- return vals
-}
-
-// TraversalsByName returns a slice of int slices which represent the struct
-// traversals for each mapped name. Panics if t is not a struct or Indirectable
-// to a struct. Returns empty int slice for each name not found.
-func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
- r := make([][]int, 0, len(names))
- m.TraversalsByNameFunc(t, names, func(_ int, i []int) error {
- if i == nil {
- r = append(r, []int{})
- } else {
- r = append(r, i)
- }
-
- return nil
- })
- return r
-}
-
-// TraversalsByNameFunc traverses the mapped names and calls fn with the index of
-// each name and the struct traversal represented by that name. Panics if t is not
-// a struct or Indirectable to a struct. Returns the first error returned by fn or nil.
-func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error {
- t = Deref(t)
- mustBe(t, reflect.Struct)
- tm := m.TypeMap(t)
- for i, name := range names {
- fi, ok := tm.Names[name]
- if !ok {
- if err := fn(i, nil); err != nil {
- return err
- }
- } else {
- if err := fn(i, fi.Index); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// FieldByIndexes returns a value for the field given by the struct traversal
-// for the given value.
-func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
- for _, i := range indexes {
- v = reflect.Indirect(v).Field(i)
- // if this is a pointer and it's nil, allocate a new value and set it
- if v.Kind() == reflect.Ptr && v.IsNil() {
- alloc := reflect.New(Deref(v.Type()))
- v.Set(alloc)
- }
- if v.Kind() == reflect.Map && v.IsNil() {
- v.Set(reflect.MakeMap(v.Type()))
- }
- }
- return v
-}
-
-// FieldByIndexesReadOnly returns a value for a particular struct traversal,
-// but is not concerned with allocating nil pointers because the value is
-// going to be used for reading and not setting.
-func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
- for _, i := range indexes {
- v = reflect.Indirect(v).Field(i)
- }
- return v
-}
-
-// Deref is Indirect for reflect.Types
-func Deref(t reflect.Type) reflect.Type {
- if t.Kind() == reflect.Ptr {
- t = t.Elem()
- }
- return t
-}
-
-// -- helpers & utilities --
-
-type kinder interface {
- Kind() reflect.Kind
-}
-
-// mustBe checks a value against a kind, panicing with a reflect.ValueError
-// if the kind isn't that which is required.
-func mustBe(v kinder, expected reflect.Kind) {
- if k := v.Kind(); k != expected {
- panic(&reflect.ValueError{Method: methodName(), Kind: k})
- }
-}
-
-// methodName returns the caller of the function calling methodName
-func methodName() string {
- pc, _, _, _ := runtime.Caller(2)
- f := runtime.FuncForPC(pc)
- if f == nil {
- return "unknown method"
- }
- return f.Name()
-}
-
-type typeQueue struct {
- t reflect.Type
- fi *FieldInfo
- pp string // Parent path
-}
-
-// A copying append that creates a new slice each time.
-func apnd(is []int, i int) []int {
- x := make([]int, len(is)+1)
- for p, n := range is {
- x[p] = n
- }
- x[len(x)-1] = i
- return x
-}
-
-type mapf func(string) string
-
-// parseName parses the tag and the target name for the given field using
-// the tagName (eg 'json' for `json:"foo"` tags), mapFunc for mapping the
-// field's name to a target name, and tagMapFunc for mapping the tag to
-// a target name.
-func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) {
- // first, set the fieldName to the field's name
- fieldName = field.Name
- // if a mapFunc is set, use that to override the fieldName
- if mapFunc != nil {
- fieldName = mapFunc(fieldName)
- }
-
- // if there's no tag to look for, return the field name
- if tagName == "" {
- return "", fieldName
- }
-
- // if this tag is not set using the normal convention in the tag,
- // then return the fieldname.. this check is done because according
- // to the reflect documentation:
- // If the tag does not have the conventional format,
- // the value returned by Get is unspecified.
- // which doesn't sound great.
- if !strings.Contains(string(field.Tag), tagName+":") {
- return "", fieldName
- }
-
- // at this point we're fairly sure that we have a tag, so lets pull it out
- tag = field.Tag.Get(tagName)
-
- // if we have a mapper function, call it on the whole tag
- // XXX: this is a change from the old version, which pulled out the name
- // before the tagMapFunc could be run, but I think this is the right way
- if tagMapFunc != nil {
- tag = tagMapFunc(tag)
- }
-
- // finally, split the options from the name
- parts := strings.Split(tag, ",")
- fieldName = parts[0]
-
- return tag, fieldName
-}
-
-// parseOptions parses options out of a tag string, skipping the name
-func parseOptions(tag string) map[string]string {
- parts := strings.Split(tag, ",")
- options := make(map[string]string, len(parts))
- if len(parts) > 1 {
- for _, opt := range parts[1:] {
- // short circuit potentially expensive split op
- if strings.Contains(opt, "=") {
- kv := strings.Split(opt, "=")
- options[kv[0]] = kv[1]
- continue
- }
- options[opt] = ""
- }
- }
- return options
-}
-
-// getMapping returns a mapping for the t type, using the tagName, mapFunc and
-// tagMapFunc to determine the canonical names of fields.
-func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap {
- m := []*FieldInfo{}
-
- root := &FieldInfo{}
- queue := []typeQueue{}
- queue = append(queue, typeQueue{Deref(t), root, ""})
-
-QueueLoop:
- for len(queue) != 0 {
- // pop the first item off of the queue
- tq := queue[0]
- queue = queue[1:]
-
- // ignore recursive field
- for p := tq.fi.Parent; p != nil; p = p.Parent {
- if tq.fi.Field.Type == p.Field.Type {
- continue QueueLoop
- }
- }
-
- nChildren := 0
- if tq.t.Kind() == reflect.Struct {
- nChildren = tq.t.NumField()
- }
- tq.fi.Children = make([]*FieldInfo, nChildren)
-
- // iterate through all of its fields
- for fieldPos := 0; fieldPos < nChildren; fieldPos++ {
-
- f := tq.t.Field(fieldPos)
-
- // parse the tag and the target name using the mapping options for this field
- tag, name := parseName(f, tagName, mapFunc, tagMapFunc)
-
- // if the name is "-", disabled via a tag, skip it
- if name == "-" {
- continue
- }
-
- fi := FieldInfo{
- Field: f,
- Name: name,
- Zero: reflect.New(f.Type).Elem(),
- Options: parseOptions(tag),
- }
-
- // if the path is empty this path is just the name
- if tq.pp == "" {
- fi.Path = fi.Name
- } else {
- fi.Path = tq.pp + "." + fi.Name
- }
-
- // skip unexported fields
- if len(f.PkgPath) != 0 && !f.Anonymous {
- continue
- }
-
- // bfs search of anonymous embedded structs
- if f.Anonymous {
- pp := tq.pp
- if tag != "" {
- pp = fi.Path
- }
-
- fi.Embedded = true
- fi.Index = apnd(tq.fi.Index, fieldPos)
- nChildren := 0
- ft := Deref(f.Type)
- if ft.Kind() == reflect.Struct {
- nChildren = ft.NumField()
- }
- fi.Children = make([]*FieldInfo, nChildren)
- queue = append(queue, typeQueue{Deref(f.Type), &fi, pp})
- } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) {
- fi.Index = apnd(tq.fi.Index, fieldPos)
- fi.Children = make([]*FieldInfo, Deref(f.Type).NumField())
- queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path})
- }
-
- fi.Index = apnd(tq.fi.Index, fieldPos)
- fi.Parent = tq.fi
- tq.fi.Children[fieldPos] = &fi
- m = append(m, &fi)
- }
- }
-
- flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}}
- for _, fi := range flds.Index {
- flds.Paths[fi.Path] = fi
- if fi.Name != "" && !fi.Embedded {
- flds.Names[fi.Path] = fi
- }
- }
-
- return flds
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/sqlx.go
deleted file mode 100644
index 4385c3f..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/sqlx.go
+++ /dev/null
@@ -1,1047 +0,0 @@
-package sqlx
-
-import (
- "database/sql"
- "database/sql/driver"
- "errors"
- "fmt"
-
- "io/ioutil"
- "path/filepath"
- "reflect"
- "strings"
- "sync"
-
- "github.com/jmoiron/sqlx/reflectx"
-)
-
-// Although the NameMapper is convenient, in practice it should not
-// be relied on except for application code. If you are writing a library
-// that uses sqlx, you should be aware that the name mappings you expect
-// can be overridden by your user's application.
-
-// NameMapper is used to map column names to struct field names. By default,
-// it uses strings.ToLower to lowercase struct field names. It can be set
-// to whatever you want, but it is encouraged to be set before sqlx is used
-// as name-to-field mappings are cached after first use on a type.
-var NameMapper = strings.ToLower
-var origMapper = reflect.ValueOf(NameMapper)
-
-// Rather than creating on init, this is created when necessary so that
-// importers have time to customize the NameMapper.
-var mpr *reflectx.Mapper
-
-// mprMu protects mpr.
-var mprMu sync.Mutex
-
-// mapper returns a valid mapper using the configured NameMapper func.
-func mapper() *reflectx.Mapper {
- mprMu.Lock()
- defer mprMu.Unlock()
-
- if mpr == nil {
- mpr = reflectx.NewMapperFunc("db", NameMapper)
- } else if origMapper != reflect.ValueOf(NameMapper) {
- // if NameMapper has changed, create a new mapper
- mpr = reflectx.NewMapperFunc("db", NameMapper)
- origMapper = reflect.ValueOf(NameMapper)
- }
- return mpr
-}
-
-// isScannable takes the reflect.Type and the actual dest value and returns
-// whether or not it's Scannable. Something is scannable if:
-// * it is not a struct
-// * it implements sql.Scanner
-// * it has no exported fields
-func isScannable(t reflect.Type) bool {
- if reflect.PtrTo(t).Implements(_scannerInterface) {
- return true
- }
- if t.Kind() != reflect.Struct {
- return true
- }
-
- // it's not important that we use the right mapper for this particular object,
- // we're only concerned on how many exported fields this struct has
- m := mapper()
- if len(m.TypeMap(t).Index) == 0 {
- return true
- }
- return false
-}
-
-// ColScanner is an interface used by MapScan and SliceScan
-type ColScanner interface {
- Columns() ([]string, error)
- Scan(dest ...interface{}) error
- Err() error
-}
-
-// Queryer is an interface used by Get and Select
-type Queryer interface {
- Query(query string, args ...interface{}) (*sql.Rows, error)
- Queryx(query string, args ...interface{}) (*Rows, error)
- QueryRowx(query string, args ...interface{}) *Row
-}
-
-// Execer is an interface used by MustExec and LoadFile
-type Execer interface {
- Exec(query string, args ...interface{}) (sql.Result, error)
-}
-
-// Binder is an interface for something which can bind queries (Tx, DB)
-type binder interface {
- DriverName() string
- Rebind(string) string
- BindNamed(string, interface{}) (string, []interface{}, error)
-}
-
-// Ext is a union interface which can bind, query, and exec, used by
-// NamedQuery and NamedExec.
-type Ext interface {
- binder
- Queryer
- Execer
-}
-
-// Preparer is an interface used by Preparex.
-type Preparer interface {
- Prepare(query string) (*sql.Stmt, error)
-}
-
-// determine if any of our extensions are unsafe
-func isUnsafe(i interface{}) bool {
- switch v := i.(type) {
- case Row:
- return v.unsafe
- case *Row:
- return v.unsafe
- case Rows:
- return v.unsafe
- case *Rows:
- return v.unsafe
- case NamedStmt:
- return v.Stmt.unsafe
- case *NamedStmt:
- return v.Stmt.unsafe
- case Stmt:
- return v.unsafe
- case *Stmt:
- return v.unsafe
- case qStmt:
- return v.unsafe
- case *qStmt:
- return v.unsafe
- case DB:
- return v.unsafe
- case *DB:
- return v.unsafe
- case Tx:
- return v.unsafe
- case *Tx:
- return v.unsafe
- case sql.Rows, *sql.Rows:
- return false
- default:
- return false
- }
-}
-
-func mapperFor(i interface{}) *reflectx.Mapper {
- switch i.(type) {
- case DB:
- return i.(DB).Mapper
- case *DB:
- return i.(*DB).Mapper
- case Tx:
- return i.(Tx).Mapper
- case *Tx:
- return i.(*Tx).Mapper
- default:
- return mapper()
- }
-}
-
-var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
-var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
-
-// Row is a reimplementation of sql.Row in order to gain access to the underlying
-// sql.Rows.Columns() data, necessary for StructScan.
-type Row struct {
- err error
- unsafe bool
- rows *sql.Rows
- Mapper *reflectx.Mapper
-}
-
-// Scan is a fixed implementation of sql.Row.Scan, which does not discard the
-// underlying error from the internal rows object if it exists.
-func (r *Row) Scan(dest ...interface{}) error {
- if r.err != nil {
- return r.err
- }
-
- // TODO(bradfitz): for now we need to defensively clone all
- // []byte that the driver returned (not permitting
- // *RawBytes in Rows.Scan), since we're about to close
- // the Rows in our defer, when we return from this function.
- // the contract with the driver.Next(...) interface is that it
- // can return slices into read-only temporary memory that's
- // only valid until the next Scan/Close. But the TODO is that
- // for a lot of drivers, this copy will be unnecessary. We
- // should provide an optional interface for drivers to
- // implement to say, "don't worry, the []bytes that I return
- // from Next will not be modified again." (for instance, if
- // they were obtained from the network anyway) But for now we
- // don't care.
- defer r.rows.Close()
- for _, dp := range dest {
- if _, ok := dp.(*sql.RawBytes); ok {
- return errors.New("sql: RawBytes isn't allowed on Row.Scan")
- }
- }
-
- if !r.rows.Next() {
- if err := r.rows.Err(); err != nil {
- return err
- }
- return sql.ErrNoRows
- }
- err := r.rows.Scan(dest...)
- if err != nil {
- return err
- }
- // Make sure the query can be processed to completion with no errors.
- if err := r.rows.Close(); err != nil {
- return err
- }
- return nil
-}
-
-// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually
-// returned by Row.Scan()
-func (r *Row) Columns() ([]string, error) {
- if r.err != nil {
- return []string{}, r.err
- }
- return r.rows.Columns()
-}
-
-// ColumnTypes returns the underlying sql.Rows.ColumnTypes(), or the deferred error
-func (r *Row) ColumnTypes() ([]*sql.ColumnType, error) {
- if r.err != nil {
- return []*sql.ColumnType{}, r.err
- }
- return r.rows.ColumnTypes()
-}
-
-// Err returns the error encountered while scanning.
-func (r *Row) Err() error {
- return r.err
-}
-
-// DB is a wrapper around sql.DB which keeps track of the driverName upon Open,
-// used mostly to automatically bind named queries using the right bindvars.
-type DB struct {
- *sql.DB
- driverName string
- unsafe bool
- Mapper *reflectx.Mapper
-}
-
-// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The
-// driverName of the original database is required for named query support.
-func NewDb(db *sql.DB, driverName string) *DB {
- return &DB{DB: db, driverName: driverName, Mapper: mapper()}
-}
-
-// DriverName returns the driverName passed to the Open function for this DB.
-func (db *DB) DriverName() string {
- return db.driverName
-}
-
-// Open is the same as sql.Open, but returns an *sqlx.DB instead.
-func Open(driverName, dataSourceName string) (*DB, error) {
- db, err := sql.Open(driverName, dataSourceName)
- if err != nil {
- return nil, err
- }
- return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err
-}
-
-// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error.
-func MustOpen(driverName, dataSourceName string) *DB {
- db, err := Open(driverName, dataSourceName)
- if err != nil {
- panic(err)
- }
- return db
-}
-
-// MapperFunc sets a new mapper for this db using the default sqlx struct tag
-// and the provided mapper function.
-func (db *DB) MapperFunc(mf func(string) string) {
- db.Mapper = reflectx.NewMapperFunc("db", mf)
-}
-
-// Rebind transforms a query from QUESTION to the DB driver's bindvar type.
-func (db *DB) Rebind(query string) string {
- return Rebind(BindType(db.driverName), query)
-}
-
-// Unsafe returns a version of DB which will silently succeed to scan when
-// columns in the SQL result have no fields in the destination struct.
-// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its
-// safety behavior.
-func (db *DB) Unsafe() *DB {
- return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper}
-}
-
-// BindNamed binds a query using the DB driver's bindvar type.
-func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
- return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper)
-}
-
-// NamedQuery using this DB.
-// Any named placeholder parameters are replaced with fields from arg.
-func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) {
- return NamedQuery(db, query, arg)
-}
-
-// NamedExec using this DB.
-// Any named placeholder parameters are replaced with fields from arg.
-func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) {
- return NamedExec(db, query, arg)
-}
-
-// Select using this DB.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) Select(dest interface{}, query string, args ...interface{}) error {
- return Select(db, dest, query, args...)
-}
-
-// Get using this DB.
-// Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func (db *DB) Get(dest interface{}, query string, args ...interface{}) error {
- return Get(db, dest, query, args...)
-}
-
-// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead
-// of an *sql.Tx.
-func (db *DB) MustBegin() *Tx {
- tx, err := db.Beginx()
- if err != nil {
- panic(err)
- }
- return tx
-}
-
-// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx.
-func (db *DB) Beginx() (*Tx, error) {
- tx, err := db.DB.Begin()
- if err != nil {
- return nil, err
- }
- return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
-}
-
-// Queryx queries the database and returns an *sqlx.Rows.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) {
- r, err := db.DB.Query(query, args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
-}
-
-// QueryRowx queries the database and returns an *sqlx.Row.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) QueryRowx(query string, args ...interface{}) *Row {
- rows, err := db.DB.Query(query, args...)
- return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
-}
-
-// MustExec (panic) runs MustExec using this database.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) MustExec(query string, args ...interface{}) sql.Result {
- return MustExec(db, query, args...)
-}
-
-// Preparex returns an sqlx.Stmt instead of a sql.Stmt
-func (db *DB) Preparex(query string) (*Stmt, error) {
- return Preparex(db, query)
-}
-
-// PrepareNamed returns an sqlx.NamedStmt
-func (db *DB) PrepareNamed(query string) (*NamedStmt, error) {
- return prepareNamed(db, query)
-}
-
-// Tx is an sqlx wrapper around sql.Tx with extra functionality
-type Tx struct {
- *sql.Tx
- driverName string
- unsafe bool
- Mapper *reflectx.Mapper
-}
-
-// DriverName returns the driverName used by the DB which began this transaction.
-func (tx *Tx) DriverName() string {
- return tx.driverName
-}
-
-// Rebind a query within a transaction's bindvar type.
-func (tx *Tx) Rebind(query string) string {
- return Rebind(BindType(tx.driverName), query)
-}
-
-// Unsafe returns a version of Tx which will silently succeed to scan when
-// columns in the SQL result have no fields in the destination struct.
-func (tx *Tx) Unsafe() *Tx {
- return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper}
-}
-
-// BindNamed binds a query within a transaction's bindvar type.
-func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
- return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper)
-}
-
-// NamedQuery within a transaction.
-// Any named placeholder parameters are replaced with fields from arg.
-func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) {
- return NamedQuery(tx, query, arg)
-}
-
-// NamedExec a named query within a transaction.
-// Any named placeholder parameters are replaced with fields from arg.
-func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) {
- return NamedExec(tx, query, arg)
-}
-
-// Select within a transaction.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error {
- return Select(tx, dest, query, args...)
-}
-
-// Queryx within a transaction.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) {
- r, err := tx.Tx.Query(query, args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
-}
-
-// QueryRowx within a transaction.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row {
- rows, err := tx.Tx.Query(query, args...)
- return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
-}
-
-// Get within a transaction.
-// Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error {
- return Get(tx, dest, query, args...)
-}
-
-// MustExec runs MustExec within a transaction.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result {
- return MustExec(tx, query, args...)
-}
-
-// Preparex a statement within a transaction.
-func (tx *Tx) Preparex(query string) (*Stmt, error) {
- return Preparex(tx, query)
-}
-
-// Stmtx returns a version of the prepared statement which runs within a transaction. Provided
-// stmt can be either *sql.Stmt or *sqlx.Stmt.
-func (tx *Tx) Stmtx(stmt interface{}) *Stmt {
- var s *sql.Stmt
- switch v := stmt.(type) {
- case Stmt:
- s = v.Stmt
- case *Stmt:
- s = v.Stmt
- case sql.Stmt:
- s = &v
- case *sql.Stmt:
- s = v
- default:
- panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type()))
- }
- return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper}
-}
-
-// NamedStmt returns a version of the prepared statement which runs within a transaction.
-func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt {
- return &NamedStmt{
- QueryString: stmt.QueryString,
- Params: stmt.Params,
- Stmt: tx.Stmtx(stmt.Stmt),
- }
-}
-
-// PrepareNamed returns an sqlx.NamedStmt
-func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) {
- return prepareNamed(tx, query)
-}
-
-// Stmt is an sqlx wrapper around sql.Stmt with extra functionality
-type Stmt struct {
- *sql.Stmt
- unsafe bool
- Mapper *reflectx.Mapper
-}
-
-// Unsafe returns a version of Stmt which will silently succeed to scan when
-// columns in the SQL result have no fields in the destination struct.
-func (s *Stmt) Unsafe() *Stmt {
- return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper}
-}
-
-// Select using the prepared statement.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) Select(dest interface{}, args ...interface{}) error {
- return Select(&qStmt{s}, dest, "", args...)
-}
-
-// Get using the prepared statement.
-// Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func (s *Stmt) Get(dest interface{}, args ...interface{}) error {
- return Get(&qStmt{s}, dest, "", args...)
-}
-
-// MustExec (panic) using this statement. Note that the query portion of the error
-// output will be blank, as Stmt does not expose its query.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) MustExec(args ...interface{}) sql.Result {
- return MustExec(&qStmt{s}, "", args...)
-}
-
-// QueryRowx using this statement.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) QueryRowx(args ...interface{}) *Row {
- qs := &qStmt{s}
- return qs.QueryRowx("", args...)
-}
-
-// Queryx using this statement.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) {
- qs := &qStmt{s}
- return qs.Queryx("", args...)
-}
-
-// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by
-// implementing those interfaces and ignoring the `query` argument.
-type qStmt struct{ *Stmt }
-
-func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) {
- return q.Stmt.Query(args...)
-}
-
-func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) {
- r, err := q.Stmt.Query(args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
-}
-
-func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row {
- rows, err := q.Stmt.Query(args...)
- return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
-}
-
-func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) {
- return q.Stmt.Exec(args...)
-}
-
-// Rows is a wrapper around sql.Rows which caches costly reflect operations
-// during a looped StructScan
-type Rows struct {
- *sql.Rows
- unsafe bool
- Mapper *reflectx.Mapper
- // these fields cache memory use for a rows during iteration w/ structScan
- started bool
- fields [][]int
- values []interface{}
-}
-
-// SliceScan using this Rows.
-func (r *Rows) SliceScan() ([]interface{}, error) {
- return SliceScan(r)
-}
-
-// MapScan using this Rows.
-func (r *Rows) MapScan(dest map[string]interface{}) error {
- return MapScan(r, dest)
-}
-
-// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct.
-// Use this and iterate over Rows manually when the memory load of Select() might be
-// prohibitive. *Rows.StructScan caches the reflect work of matching up column
-// positions to fields to avoid that overhead per scan, which means it is not safe
-// to run StructScan on the same Rows instance with different struct types.
-func (r *Rows) StructScan(dest interface{}) error {
- v := reflect.ValueOf(dest)
-
- if v.Kind() != reflect.Ptr {
- return errors.New("must pass a pointer, not a value, to StructScan destination")
- }
-
- v = v.Elem()
-
- if !r.started {
- columns, err := r.Columns()
- if err != nil {
- return err
- }
- m := r.Mapper
-
- r.fields = m.TraversalsByName(v.Type(), columns)
- // if we are not unsafe and are missing fields, return an error
- if f, err := missingFields(r.fields); err != nil && !r.unsafe {
- return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
- }
- r.values = make([]interface{}, len(columns))
- r.started = true
- }
-
- err := fieldsByTraversal(v, r.fields, r.values, true)
- if err != nil {
- return err
- }
- // scan into the struct field pointers and append to our results
- err = r.Scan(r.values...)
- if err != nil {
- return err
- }
- return r.Err()
-}
-
-// Connect to a database and verify with a ping.
-func Connect(driverName, dataSourceName string) (*DB, error) {
- db, err := Open(driverName, dataSourceName)
- if err != nil {
- return nil, err
- }
- err = db.Ping()
- if err != nil {
- db.Close()
- return nil, err
- }
- return db, nil
-}
-
-// MustConnect connects to a database and panics on error.
-func MustConnect(driverName, dataSourceName string) *DB {
- db, err := Connect(driverName, dataSourceName)
- if err != nil {
- panic(err)
- }
- return db
-}
-
-// Preparex prepares a statement.
-func Preparex(p Preparer, query string) (*Stmt, error) {
- s, err := p.Prepare(query)
- if err != nil {
- return nil, err
- }
- return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
-}
-
-// Select executes a query using the provided Queryer, and StructScans each row
-// into dest, which must be a slice. If the slice elements are scannable, then
-// the result set must have only one column. Otherwise, StructScan is used.
-// The *sql.Rows are closed automatically.
-// Any placeholder parameters are replaced with supplied args.
-func Select(q Queryer, dest interface{}, query string, args ...interface{}) error {
- rows, err := q.Queryx(query, args...)
- if err != nil {
- return err
- }
- // if something happens here, we want to make sure the rows are Closed
- defer rows.Close()
- return scanAll(rows, dest, false)
-}
-
-// Get does a QueryRow using the provided Queryer, and scans the resulting row
-// to dest. If dest is scannable, the result must only have one column. Otherwise,
-// StructScan is used. Get will return sql.ErrNoRows like row.Scan would.
-// Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func Get(q Queryer, dest interface{}, query string, args ...interface{}) error {
- r := q.QueryRowx(query, args...)
- return r.scanAny(dest, false)
-}
-
-// LoadFile exec's every statement in a file (as a single call to Exec).
-// LoadFile may return a nil *sql.Result if errors are encountered locating or
-// reading the file at path. LoadFile reads the entire file into memory, so it
-// is not suitable for loading large data dumps, but can be useful for initializing
-// schemas or loading indexes.
-//
-// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
-// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
-// this by requiring something with DriverName() and then attempting to split the
-// queries will be difficult to get right, and its current driver-specific behavior
-// is deemed at least not complex in its incorrectness.
-func LoadFile(e Execer, path string) (*sql.Result, error) {
- realpath, err := filepath.Abs(path)
- if err != nil {
- return nil, err
- }
- contents, err := ioutil.ReadFile(realpath)
- if err != nil {
- return nil, err
- }
- res, err := e.Exec(string(contents))
- return &res, err
-}
-
-// MustExec execs the query using e and panics if there was an error.
-// Any placeholder parameters are replaced with supplied args.
-func MustExec(e Execer, query string, args ...interface{}) sql.Result {
- res, err := e.Exec(query, args...)
- if err != nil {
- panic(err)
- }
- return res
-}
-
-// SliceScan using this Rows.
-func (r *Row) SliceScan() ([]interface{}, error) {
- return SliceScan(r)
-}
-
-// MapScan using this Rows.
-func (r *Row) MapScan(dest map[string]interface{}) error {
- return MapScan(r, dest)
-}
-
-func (r *Row) scanAny(dest interface{}, structOnly bool) error {
- if r.err != nil {
- return r.err
- }
- if r.rows == nil {
- r.err = sql.ErrNoRows
- return r.err
- }
- defer r.rows.Close()
-
- v := reflect.ValueOf(dest)
- if v.Kind() != reflect.Ptr {
- return errors.New("must pass a pointer, not a value, to StructScan destination")
- }
- if v.IsNil() {
- return errors.New("nil pointer passed to StructScan destination")
- }
-
- base := reflectx.Deref(v.Type())
- scannable := isScannable(base)
-
- if structOnly && scannable {
- return structOnlyError(base)
- }
-
- columns, err := r.Columns()
- if err != nil {
- return err
- }
-
- if scannable && len(columns) > 1 {
- return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns))
- }
-
- if scannable {
- return r.Scan(dest)
- }
-
- m := r.Mapper
-
- fields := m.TraversalsByName(v.Type(), columns)
- // if we are not unsafe and are missing fields, return an error
- if f, err := missingFields(fields); err != nil && !r.unsafe {
- return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
- }
- values := make([]interface{}, len(columns))
-
- err = fieldsByTraversal(v, fields, values, true)
- if err != nil {
- return err
- }
- // scan into the struct field pointers and append to our results
- return r.Scan(values...)
-}
-
-// StructScan a single Row into dest.
-func (r *Row) StructScan(dest interface{}) error {
- return r.scanAny(dest, true)
-}
-
-// SliceScan a row, returning a []interface{} with values similar to MapScan.
-// This function is primarily intended for use where the number of columns
-// is not known. Because you can pass an []interface{} directly to Scan,
-// it's recommended that you do that as it will not have to allocate new
-// slices per row.
-func SliceScan(r ColScanner) ([]interface{}, error) {
- // ignore r.started, since we needn't use reflect for anything.
- columns, err := r.Columns()
- if err != nil {
- return []interface{}{}, err
- }
-
- values := make([]interface{}, len(columns))
- for i := range values {
- values[i] = new(interface{})
- }
-
- err = r.Scan(values...)
-
- if err != nil {
- return values, err
- }
-
- for i := range columns {
- values[i] = *(values[i].(*interface{}))
- }
-
- return values, r.Err()
-}
-
-// MapScan scans a single Row into the dest map[string]interface{}.
-// Use this to get results for SQL that might not be under your control
-// (for instance, if you're building an interface for an SQL server that
-// executes SQL from input). Please do not use this as a primary interface!
-// This will modify the map sent to it in place, so reuse the same map with
-// care. Columns which occur more than once in the result will overwrite
-// each other!
-func MapScan(r ColScanner, dest map[string]interface{}) error {
- // ignore r.started, since we needn't use reflect for anything.
- columns, err := r.Columns()
- if err != nil {
- return err
- }
-
- values := make([]interface{}, len(columns))
- for i := range values {
- values[i] = new(interface{})
- }
-
- err = r.Scan(values...)
- if err != nil {
- return err
- }
-
- for i, column := range columns {
- dest[column] = *(values[i].(*interface{}))
- }
-
- return r.Err()
-}
-
-type rowsi interface {
- Close() error
- Columns() ([]string, error)
- Err() error
- Next() bool
- Scan(...interface{}) error
-}
-
-// structOnlyError returns an error appropriate for type when a non-scannable
-// struct is expected but something else is given
-func structOnlyError(t reflect.Type) error {
- isStruct := t.Kind() == reflect.Struct
- isScanner := reflect.PtrTo(t).Implements(_scannerInterface)
- if !isStruct {
- return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind())
- }
- if isScanner {
- return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name())
- }
- return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name())
-}
-
-// scanAll scans all rows into a destination, which must be a slice of any
-// type. If the destination slice type is a Struct, then StructScan will be
-// used on each row. If the destination is some other kind of base type, then
-// each row must only have one column which can scan into that type. This
-// allows you to do something like:
-//
-// rows, _ := db.Query("select id from people;")
-// var ids []int
-// scanAll(rows, &ids, false)
-//
-// and ids will be a list of the id results. I realize that this is a desirable
-// interface to expose to users, but for now it will only be exposed via changes
-// to `Get` and `Select`. The reason that this has been implemented like this is
-// this is the only way to not duplicate reflect work in the new API while
-// maintaining backwards compatibility.
-func scanAll(rows rowsi, dest interface{}, structOnly bool) error {
- var v, vp reflect.Value
-
- value := reflect.ValueOf(dest)
-
- // json.Unmarshal returns errors for these
- if value.Kind() != reflect.Ptr {
- return errors.New("must pass a pointer, not a value, to StructScan destination")
- }
- if value.IsNil() {
- return errors.New("nil pointer passed to StructScan destination")
- }
- direct := reflect.Indirect(value)
-
- slice, err := baseType(value.Type(), reflect.Slice)
- if err != nil {
- return err
- }
-
- isPtr := slice.Elem().Kind() == reflect.Ptr
- base := reflectx.Deref(slice.Elem())
- scannable := isScannable(base)
-
- if structOnly && scannable {
- return structOnlyError(base)
- }
-
- columns, err := rows.Columns()
- if err != nil {
- return err
- }
-
- // if it's a base type make sure it only has 1 column; if not return an error
- if scannable && len(columns) > 1 {
- return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns))
- }
-
- if !scannable {
- var values []interface{}
- var m *reflectx.Mapper
-
- switch rows.(type) {
- case *Rows:
- m = rows.(*Rows).Mapper
- default:
- m = mapper()
- }
-
- fields := m.TraversalsByName(base, columns)
- // if we are not unsafe and are missing fields, return an error
- if f, err := missingFields(fields); err != nil && !isUnsafe(rows) {
- return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
- }
- values = make([]interface{}, len(columns))
-
- for rows.Next() {
- // create a new struct type (which returns PtrTo) and indirect it
- vp = reflect.New(base)
- v = reflect.Indirect(vp)
-
- err = fieldsByTraversal(v, fields, values, true)
- if err != nil {
- return err
- }
-
- // scan into the struct field pointers and append to our results
- err = rows.Scan(values...)
- if err != nil {
- return err
- }
-
- if isPtr {
- direct.Set(reflect.Append(direct, vp))
- } else {
- direct.Set(reflect.Append(direct, v))
- }
- }
- } else {
- for rows.Next() {
- vp = reflect.New(base)
- err = rows.Scan(vp.Interface())
- if err != nil {
- return err
- }
- // append
- if isPtr {
- direct.Set(reflect.Append(direct, vp))
- } else {
- direct.Set(reflect.Append(direct, reflect.Indirect(vp)))
- }
- }
- }
-
- return rows.Err()
-}
-
-// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately
-// it doesn't really feel like it's named properly. There is an incongruency
-// between this and the way that StructScan (which might better be ScanStruct
-// anyway) works on a rows object.
-
-// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice.
-// StructScan will scan in the entire rows result, so if you do not want to
-// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan.
-// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default.
-func StructScan(rows rowsi, dest interface{}) error {
- return scanAll(rows, dest, true)
-
-}
-
-// reflect helpers
-
-func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) {
- t = reflectx.Deref(t)
- if t.Kind() != expected {
- return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind())
- }
- return t, nil
-}
-
-// fieldsByName fills a values interface with fields from the passed value based
-// on the traversals in int. If ptrs is true, return addresses instead of values.
-// We write this instead of using FieldsByName to save allocations and map lookups
-// when iterating over many rows. Empty traversals will get an interface pointer.
-// Because of the necessity of requesting ptrs or values, it's considered a bit too
-// specialized for inclusion in reflectx itself.
-func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error {
- v = reflect.Indirect(v)
- if v.Kind() != reflect.Struct {
- return errors.New("argument not a struct")
- }
-
- for i, traversal := range traversals {
- if len(traversal) == 0 {
- values[i] = new(interface{})
- continue
- }
- f := reflectx.FieldByIndexes(v, traversal)
- if ptrs {
- values[i] = f.Addr().Interface()
- } else {
- values[i] = f.Interface()
- }
- }
- return nil
-}
-
-func missingFields(transversals [][]int) (field int, err error) {
- for i, t := range transversals {
- if len(t) == 0 {
- return i, errors.New("missing field")
- }
- }
- return 0, nil
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/sqlx_context.go
deleted file mode 100644
index d58ff33..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/jmoiron/sqlx/sqlx_context.go
+++ /dev/null
@@ -1,348 +0,0 @@
-// +build go1.8
-
-package sqlx
-
-import (
- "context"
- "database/sql"
- "fmt"
- "io/ioutil"
- "path/filepath"
- "reflect"
-)
-
-// ConnectContext to a database and verify with a ping.
-func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) {
- db, err := Open(driverName, dataSourceName)
- if err != nil {
- return db, err
- }
- err = db.PingContext(ctx)
- return db, err
-}
-
-// QueryerContext is an interface used by GetContext and SelectContext
-type QueryerContext interface {
- QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
- QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error)
- QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row
-}
-
-// PreparerContext is an interface used by PreparexContext.
-type PreparerContext interface {
- PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
-}
-
-// ExecerContext is an interface used by MustExecContext and LoadFileContext
-type ExecerContext interface {
- ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
-}
-
-// ExtContext is a union interface which can bind, query, and exec, with Context
-// used by NamedQueryContext and NamedExecContext.
-type ExtContext interface {
- binder
- QueryerContext
- ExecerContext
-}
-
-// SelectContext executes a query using the provided Queryer, and StructScans
-// each row into dest, which must be a slice. If the slice elements are
-// scannable, then the result set must have only one column. Otherwise,
-// StructScan is used. The *sql.Rows are closed automatically.
-// Any placeholder parameters are replaced with supplied args.
-func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error {
- rows, err := q.QueryxContext(ctx, query, args...)
- if err != nil {
- return err
- }
- // if something happens here, we want to make sure the rows are Closed
- defer rows.Close()
- return scanAll(rows, dest, false)
-}
-
-// PreparexContext prepares a statement.
-//
-// The provided context is used for the preparation of the statement, not for
-// the execution of the statement.
-func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) {
- s, err := p.PrepareContext(ctx, query)
- if err != nil {
- return nil, err
- }
- return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
-}
-
-// GetContext does a QueryRow using the provided Queryer, and scans the
-// resulting row to dest. If dest is scannable, the result must only have one
-// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like
-// row.Scan would. Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error {
- r := q.QueryRowxContext(ctx, query, args...)
- return r.scanAny(dest, false)
-}
-
-// LoadFileContext exec's every statement in a file (as a single call to Exec).
-// LoadFileContext may return a nil *sql.Result if errors are encountered
-// locating or reading the file at path. LoadFile reads the entire file into
-// memory, so it is not suitable for loading large data dumps, but can be useful
-// for initializing schemas or loading indexes.
-//
-// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
-// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
-// this by requiring something with DriverName() and then attempting to split the
-// queries will be difficult to get right, and its current driver-specific behavior
-// is deemed at least not complex in its incorrectness.
-func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) {
- realpath, err := filepath.Abs(path)
- if err != nil {
- return nil, err
- }
- contents, err := ioutil.ReadFile(realpath)
- if err != nil {
- return nil, err
- }
- res, err := e.ExecContext(ctx, string(contents))
- return &res, err
-}
-
-// MustExecContext execs the query using e and panics if there was an error.
-// Any placeholder parameters are replaced with supplied args.
-func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result {
- res, err := e.ExecContext(ctx, query, args...)
- if err != nil {
- panic(err)
- }
- return res
-}
-
-// PrepareNamedContext returns an sqlx.NamedStmt
-func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) {
- return prepareNamedContext(ctx, db, query)
-}
-
-// NamedQueryContext using this DB.
-// Any named placeholder parameters are replaced with fields from arg.
-func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) {
- return NamedQueryContext(ctx, db, query, arg)
-}
-
-// NamedExecContext using this DB.
-// Any named placeholder parameters are replaced with fields from arg.
-func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
- return NamedExecContext(ctx, db, query, arg)
-}
-
-// SelectContext using this DB.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
- return SelectContext(ctx, db, dest, query, args...)
-}
-
-// GetContext using this DB.
-// Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
- return GetContext(ctx, db, dest, query, args...)
-}
-
-// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt.
-//
-// The provided context is used for the preparation of the statement, not for
-// the execution of the statement.
-func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) {
- return PreparexContext(ctx, db, query)
-}
-
-// QueryxContext queries the database and returns an *sqlx.Rows.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
- r, err := db.DB.QueryContext(ctx, query, args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
-}
-
-// QueryRowxContext queries the database and returns an *sqlx.Row.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
- rows, err := db.DB.QueryContext(ctx, query, args...)
- return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
-}
-
-// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead
-// of an *sql.Tx.
-//
-// The provided context is used until the transaction is committed or rolled
-// back. If the context is canceled, the sql package will roll back the
-// transaction. Tx.Commit will return an error if the context provided to
-// MustBeginContext is canceled.
-func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx {
- tx, err := db.BeginTxx(ctx, opts)
- if err != nil {
- panic(err)
- }
- return tx
-}
-
-// MustExecContext (panic) runs MustExec using this database.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
- return MustExecContext(ctx, db, query, args...)
-}
-
-// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an
-// *sql.Tx.
-//
-// The provided context is used until the transaction is committed or rolled
-// back. If the context is canceled, the sql package will roll back the
-// transaction. Tx.Commit will return an error if the context provided to
-// BeginxContext is canceled.
-func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
- tx, err := db.DB.BeginTx(ctx, opts)
- if err != nil {
- return nil, err
- }
- return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
-}
-
-// StmtxContext returns a version of the prepared statement which runs within a
-// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt.
-func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt {
- var s *sql.Stmt
- switch v := stmt.(type) {
- case Stmt:
- s = v.Stmt
- case *Stmt:
- s = v.Stmt
- case sql.Stmt:
- s = &v
- case *sql.Stmt:
- s = v
- default:
- panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type()))
- }
- return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper}
-}
-
-// NamedStmtContext returns a version of the prepared statement which runs
-// within a transaction.
-func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt {
- return &NamedStmt{
- QueryString: stmt.QueryString,
- Params: stmt.Params,
- Stmt: tx.StmtxContext(ctx, stmt.Stmt),
- }
-}
-
-// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt.
-//
-// The provided context is used for the preparation of the statement, not for
-// the execution of the statement.
-func (tx *Tx) PreparexContext(ctx context.Context, query string) (*Stmt, error) {
- return PreparexContext(ctx, tx, query)
-}
-
-// PrepareNamedContext returns an sqlx.NamedStmt
-func (tx *Tx) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) {
- return prepareNamedContext(ctx, tx, query)
-}
-
-// MustExecContext runs MustExecContext within a transaction.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
- return MustExecContext(ctx, tx, query, args...)
-}
-
-// QueryxContext within a transaction and context.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
- r, err := tx.Tx.QueryContext(ctx, query, args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
-}
-
-// SelectContext within a transaction and context.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
- return SelectContext(ctx, tx, dest, query, args...)
-}
-
-// GetContext within a transaction and context.
-// Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
- return GetContext(ctx, tx, dest, query, args...)
-}
-
-// QueryRowxContext within a transaction and context.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
- rows, err := tx.Tx.QueryContext(ctx, query, args...)
- return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
-}
-
-// NamedExecContext using this Tx.
-// Any named placeholder parameters are replaced with fields from arg.
-func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
- return NamedExecContext(ctx, tx, query, arg)
-}
-
-// SelectContext using the prepared statement.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error {
- return SelectContext(ctx, &qStmt{s}, dest, "", args...)
-}
-
-// GetContext using the prepared statement.
-// Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error {
- return GetContext(ctx, &qStmt{s}, dest, "", args...)
-}
-
-// MustExecContext (panic) using this statement. Note that the query portion of
-// the error output will be blank, as Stmt does not expose its query.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result {
- return MustExecContext(ctx, &qStmt{s}, "", args...)
-}
-
-// QueryRowxContext using this statement.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row {
- qs := &qStmt{s}
- return qs.QueryRowxContext(ctx, "", args...)
-}
-
-// QueryxContext using this statement.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) {
- qs := &qStmt{s}
- return qs.QueryxContext(ctx, "", args...)
-}
-
-func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
- return q.Stmt.QueryContext(ctx, args...)
-}
-
-func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
- r, err := q.Stmt.QueryContext(ctx, args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
-}
-
-func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
- rows, err := q.Stmt.QueryContext(ctx, args...)
- return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
-}
-
-func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
- return q.Stmt.ExecContext(ctx, args...)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/LICENSE b/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/LICENSE
deleted file mode 100644
index ade9307..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-All files in this repository are licensed as follows. If you contribute
-to this repository, it is assumed that you license your contribution
-under the same license unless you state otherwise.
-
-All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
-
-This software is licensed under the LGPLv3, included below.
-
-As a special exception to the GNU Lesser General Public License version 3
-("LGPL3"), the copyright holders of this Library give you permission to
-convey to a third party a Combined Work that links statically or dynamically
-to this Library without providing any Minimal Corresponding Source or
-Minimal Application Code as set out in 4d or providing the installation
-information set out in section 4e, provided that you comply with the other
-provisions of LGPL3 and provided that you meet, for the Application the
-terms and conditions of the license(s) which apply to the Application.
-
-Except as stated in this special exception, the provisions of LGPL3 will
-continue to comply in full to this Library. If you modify this Library, you
-may apply this exception to your version of this Library, but you are not
-obliged to do so. If you do not wish to do so, delete this exception
-statement from your version. This exception does not (and cannot) modify any
-license terms which apply to the Application, with which you must still
-comply.
-
-
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/doc.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/doc.go
deleted file mode 100644
index 35b119a..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/doc.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2013, 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-/*
-[godoc-link-here]
-
-The juju/errors provides an easy way to annotate errors without losing the
-orginal error context.
-
-The exported `New` and `Errorf` functions are designed to replace the
-`errors.New` and `fmt.Errorf` functions respectively. The same underlying
-error is there, but the package also records the location at which the error
-was created.
-
-A primary use case for this library is to add extra context any time an
-error is returned from a function.
-
- if err := SomeFunc(); err != nil {
- return err
- }
-
-This instead becomes:
-
- if err := SomeFunc(); err != nil {
- return errors.Trace(err)
- }
-
-which just records the file and line number of the Trace call, or
-
- if err := SomeFunc(); err != nil {
- return errors.Annotate(err, "more context")
- }
-
-which also adds an annotation to the error.
-
-When you want to check to see if an error is of a particular type, a helper
-function is normally exported by the package that returned the error, like the
-`os` package does. The underlying cause of the error is available using the
-`Cause` function.
-
- os.IsNotExist(errors.Cause(err))
-
-The result of the `Error()` call on an annotated error is the annotations joined
-with colons, then the result of the `Error()` method for the underlying error
-that was the cause.
-
- err := errors.Errorf("original")
- err = errors.Annotatef(err, "context")
- err = errors.Annotatef(err, "more context")
- err.Error() -> "more context: context: original"
-
-Obviously recording the file, line and functions is not very useful if you
-cannot get them back out again.
-
- errors.ErrorStack(err)
-
-will return something like:
-
- first error
- github.com/juju/errors/annotation_test.go:193:
- github.com/juju/errors/annotation_test.go:194: annotation
- github.com/juju/errors/annotation_test.go:195:
- github.com/juju/errors/annotation_test.go:196: more context
- github.com/juju/errors/annotation_test.go:197:
-
-The first error was generated by an external system, so there was no location
-associated. The second, fourth, and last lines were generated with Trace calls,
-and the other two through Annotate.
-
-Sometimes when responding to an error you want to return a more specific error
-for the situation.
-
- if err := FindField(field); err != nil {
- return errors.Wrap(err, errors.NotFoundf(field))
- }
-
-This returns an error where the complete error stack is still available, and
-`errors.Cause()` will return the `NotFound` error.
-
-*/
-package errors
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/error.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/error.go
deleted file mode 100644
index b7df735..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/error.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors
-
-import (
- "fmt"
- "reflect"
- "runtime"
-)
-
-// Err holds a description of an error along with information about
-// where the error was created.
-//
-// It may be embedded in custom error types to add extra information that
-// this errors package can understand.
-type Err struct {
- // message holds an annotation of the error.
- message string
-
- // cause holds the cause of the error as returned
- // by the Cause method.
- cause error
-
- // previous holds the previous error in the error stack, if any.
- previous error
-
- // file and line hold the source code location where the error was
- // created.
- file string
- line int
-}
-
-// NewErr is used to return an Err for the purpose of embedding in other
-// structures. The location is not specified, and needs to be set with a call
-// to SetLocation.
-//
-// For example:
-// type FooError struct {
-// errors.Err
-// code int
-// }
-//
-// func NewFooError(code int) error {
-// err := &FooError{errors.NewErr("foo"), code}
-// err.SetLocation(1)
-// return err
-// }
-func NewErr(format string, args ...interface{}) Err {
- return Err{
- message: fmt.Sprintf(format, args...),
- }
-}
-
-// NewErrWithCause is used to return an Err with case by other error for the purpose of embedding in other
-// structures. The location is not specified, and needs to be set with a call
-// to SetLocation.
-//
-// For example:
-// type FooError struct {
-// errors.Err
-// code int
-// }
-//
-// func (e *FooError) Annotate(format string, args ...interface{}) error {
-// err := &FooError{errors.NewErrWithCause(e.Err, format, args...), e.code}
-// err.SetLocation(1)
-// return err
-// })
-func NewErrWithCause(other error, format string, args ...interface{}) Err {
- return Err{
- message: fmt.Sprintf(format, args...),
- cause: Cause(other),
- previous: other,
- }
-}
-
-// Location is the file and line of where the error was most recently
-// created or annotated.
-func (e *Err) Location() (filename string, line int) {
- return e.file, e.line
-}
-
-// Underlying returns the previous error in the error stack, if any. A client
-// should not ever really call this method. It is used to build the error
-// stack and should not be introspected by client calls. Or more
-// specifically, clients should not depend on anything but the `Cause` of an
-// error.
-func (e *Err) Underlying() error {
- return e.previous
-}
-
-// The Cause of an error is the most recent error in the error stack that
-// meets one of these criteria: the original error that was raised; the new
-// error that was passed into the Wrap function; the most recently masked
-// error; or nil if the error itself is considered the Cause. Normally this
-// method is not invoked directly, but instead through the Cause stand alone
-// function.
-func (e *Err) Cause() error {
- return e.cause
-}
-
-// Message returns the message stored with the most recent location. This is
-// the empty string if the most recent call was Trace, or the message stored
-// with Annotate or Mask.
-func (e *Err) Message() string {
- return e.message
-}
-
-// Error implements error.Error.
-func (e *Err) Error() string {
- // We want to walk up the stack of errors showing the annotations
- // as long as the cause is the same.
- err := e.previous
- if !sameError(Cause(err), e.cause) && e.cause != nil {
- err = e.cause
- }
- switch {
- case err == nil:
- return e.message
- case e.message == "":
- return err.Error()
- }
- return fmt.Sprintf("%s: %v", e.message, err)
-}
-
-// Format implements fmt.Formatter
-// When printing errors with %+v it also prints the stack trace.
-// %#v unsurprisingly will print the real underlying type.
-func (e *Err) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- switch {
- case s.Flag('+'):
- fmt.Fprintf(s, "%s", ErrorStack(e))
- return
- case s.Flag('#'):
- // avoid infinite recursion by wrapping e into a type
- // that doesn't implement Formatter.
- fmt.Fprintf(s, "%#v", (*unformatter)(e))
- return
- }
- fallthrough
- case 's':
- fmt.Fprintf(s, "%s", e.Error())
- }
-}
-
-// helper for Format
-type unformatter Err
-
-func (unformatter) Format() { /* break the fmt.Formatter interface */ }
-
-// SetLocation records the source location of the error at callDepth stack
-// frames above the call.
-func (e *Err) SetLocation(callDepth int) {
- _, file, line, _ := runtime.Caller(callDepth + 1)
- e.file = trimGoPath(file)
- e.line = line
-}
-
-// StackTrace returns one string for each location recorded in the stack of
-// errors. The first value is the originating error, with a line for each
-// other annotation or tracing of the error.
-func (e *Err) StackTrace() []string {
- return errorStack(e)
-}
-
-// Ideally we'd have a way to check identity, but deep equals will do.
-func sameError(e1, e2 error) bool {
- return reflect.DeepEqual(e1, e2)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/errortypes.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/errortypes.go
deleted file mode 100644
index 9b731c4..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/errortypes.go
+++ /dev/null
@@ -1,309 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors
-
-import (
- "fmt"
-)
-
-// wrap is a helper to construct an *wrapper.
-func wrap(err error, format, suffix string, args ...interface{}) Err {
- newErr := Err{
- message: fmt.Sprintf(format+suffix, args...),
- previous: err,
- }
- newErr.SetLocation(2)
- return newErr
-}
-
-// notFound represents an error when something has not been found.
-type notFound struct {
- Err
-}
-
-// NotFoundf returns an error which satisfies IsNotFound().
-func NotFoundf(format string, args ...interface{}) error {
- return ¬Found{wrap(nil, format, " not found", args...)}
-}
-
-// NewNotFound returns an error which wraps err that satisfies
-// IsNotFound().
-func NewNotFound(err error, msg string) error {
- return ¬Found{wrap(err, msg, "")}
-}
-
-// IsNotFound reports whether err was created with NotFoundf() or
-// NewNotFound().
-func IsNotFound(err error) bool {
- err = Cause(err)
- _, ok := err.(*notFound)
- return ok
-}
-
-// userNotFound represents an error when an inexistent user is looked up.
-type userNotFound struct {
- Err
-}
-
-// UserNotFoundf returns an error which satisfies IsUserNotFound().
-func UserNotFoundf(format string, args ...interface{}) error {
- return &userNotFound{wrap(nil, format, " user not found", args...)}
-}
-
-// NewUserNotFound returns an error which wraps err and satisfies
-// IsUserNotFound().
-func NewUserNotFound(err error, msg string) error {
- return &userNotFound{wrap(err, msg, "")}
-}
-
-// IsUserNotFound reports whether err was created with UserNotFoundf() or
-// NewUserNotFound().
-func IsUserNotFound(err error) bool {
- err = Cause(err)
- _, ok := err.(*userNotFound)
- return ok
-}
-
-// unauthorized represents an error when an operation is unauthorized.
-type unauthorized struct {
- Err
-}
-
-// Unauthorizedf returns an error which satisfies IsUnauthorized().
-func Unauthorizedf(format string, args ...interface{}) error {
- return &unauthorized{wrap(nil, format, "", args...)}
-}
-
-// NewUnauthorized returns an error which wraps err and satisfies
-// IsUnauthorized().
-func NewUnauthorized(err error, msg string) error {
- return &unauthorized{wrap(err, msg, "")}
-}
-
-// IsUnauthorized reports whether err was created with Unauthorizedf() or
-// NewUnauthorized().
-func IsUnauthorized(err error) bool {
- err = Cause(err)
- _, ok := err.(*unauthorized)
- return ok
-}
-
-// notImplemented represents an error when something is not
-// implemented.
-type notImplemented struct {
- Err
-}
-
-// NotImplementedf returns an error which satisfies IsNotImplemented().
-func NotImplementedf(format string, args ...interface{}) error {
- return ¬Implemented{wrap(nil, format, " not implemented", args...)}
-}
-
-// NewNotImplemented returns an error which wraps err and satisfies
-// IsNotImplemented().
-func NewNotImplemented(err error, msg string) error {
- return ¬Implemented{wrap(err, msg, "")}
-}
-
-// IsNotImplemented reports whether err was created with
-// NotImplementedf() or NewNotImplemented().
-func IsNotImplemented(err error) bool {
- err = Cause(err)
- _, ok := err.(*notImplemented)
- return ok
-}
-
-// alreadyExists represents and error when something already exists.
-type alreadyExists struct {
- Err
-}
-
-// AlreadyExistsf returns an error which satisfies IsAlreadyExists().
-func AlreadyExistsf(format string, args ...interface{}) error {
- return &alreadyExists{wrap(nil, format, " already exists", args...)}
-}
-
-// NewAlreadyExists returns an error which wraps err and satisfies
-// IsAlreadyExists().
-func NewAlreadyExists(err error, msg string) error {
- return &alreadyExists{wrap(err, msg, "")}
-}
-
-// IsAlreadyExists reports whether the error was created with
-// AlreadyExistsf() or NewAlreadyExists().
-func IsAlreadyExists(err error) bool {
- err = Cause(err)
- _, ok := err.(*alreadyExists)
- return ok
-}
-
-// notSupported represents an error when something is not supported.
-type notSupported struct {
- Err
-}
-
-// NotSupportedf returns an error which satisfies IsNotSupported().
-func NotSupportedf(format string, args ...interface{}) error {
- return ¬Supported{wrap(nil, format, " not supported", args...)}
-}
-
-// NewNotSupported returns an error which wraps err and satisfies
-// IsNotSupported().
-func NewNotSupported(err error, msg string) error {
- return ¬Supported{wrap(err, msg, "")}
-}
-
-// IsNotSupported reports whether the error was created with
-// NotSupportedf() or NewNotSupported().
-func IsNotSupported(err error) bool {
- err = Cause(err)
- _, ok := err.(*notSupported)
- return ok
-}
-
-// notValid represents an error when something is not valid.
-type notValid struct {
- Err
-}
-
-// NotValidf returns an error which satisfies IsNotValid().
-func NotValidf(format string, args ...interface{}) error {
- return ¬Valid{wrap(nil, format, " not valid", args...)}
-}
-
-// NewNotValid returns an error which wraps err and satisfies IsNotValid().
-func NewNotValid(err error, msg string) error {
- return ¬Valid{wrap(err, msg, "")}
-}
-
-// IsNotValid reports whether the error was created with NotValidf() or
-// NewNotValid().
-func IsNotValid(err error) bool {
- err = Cause(err)
- _, ok := err.(*notValid)
- return ok
-}
-
-// notProvisioned represents an error when something is not yet provisioned.
-type notProvisioned struct {
- Err
-}
-
-// NotProvisionedf returns an error which satisfies IsNotProvisioned().
-func NotProvisionedf(format string, args ...interface{}) error {
- return ¬Provisioned{wrap(nil, format, " not provisioned", args...)}
-}
-
-// NewNotProvisioned returns an error which wraps err that satisfies
-// IsNotProvisioned().
-func NewNotProvisioned(err error, msg string) error {
- return ¬Provisioned{wrap(err, msg, "")}
-}
-
-// IsNotProvisioned reports whether err was created with NotProvisionedf() or
-// NewNotProvisioned().
-func IsNotProvisioned(err error) bool {
- err = Cause(err)
- _, ok := err.(*notProvisioned)
- return ok
-}
-
-// notAssigned represents an error when something is not yet assigned to
-// something else.
-type notAssigned struct {
- Err
-}
-
-// NotAssignedf returns an error which satisfies IsNotAssigned().
-func NotAssignedf(format string, args ...interface{}) error {
- return ¬Assigned{wrap(nil, format, " not assigned", args...)}
-}
-
-// NewNotAssigned returns an error which wraps err that satisfies
-// IsNotAssigned().
-func NewNotAssigned(err error, msg string) error {
- return ¬Assigned{wrap(err, msg, "")}
-}
-
-// IsNotAssigned reports whether err was created with NotAssignedf() or
-// NewNotAssigned().
-func IsNotAssigned(err error) bool {
- err = Cause(err)
- _, ok := err.(*notAssigned)
- return ok
-}
-
-// badRequest represents an error when a request has bad parameters.
-type badRequest struct {
- Err
-}
-
-// BadRequestf returns an error which satisfies IsBadRequest().
-func BadRequestf(format string, args ...interface{}) error {
- return &badRequest{wrap(nil, format, "", args...)}
-}
-
-// NewBadRequest returns an error which wraps err that satisfies
-// IsBadRequest().
-func NewBadRequest(err error, msg string) error {
- return &badRequest{wrap(err, msg, "")}
-}
-
-// IsBadRequest reports whether err was created with BadRequestf() or
-// NewBadRequest().
-func IsBadRequest(err error) bool {
- err = Cause(err)
- _, ok := err.(*badRequest)
- return ok
-}
-
-// methodNotAllowed represents an error when an HTTP request
-// is made with an inappropriate method.
-type methodNotAllowed struct {
- Err
-}
-
-// MethodNotAllowedf returns an error which satisfies IsMethodNotAllowed().
-func MethodNotAllowedf(format string, args ...interface{}) error {
- return &methodNotAllowed{wrap(nil, format, "", args...)}
-}
-
-// NewMethodNotAllowed returns an error which wraps err that satisfies
-// IsMethodNotAllowed().
-func NewMethodNotAllowed(err error, msg string) error {
- return &methodNotAllowed{wrap(err, msg, "")}
-}
-
-// IsMethodNotAllowed reports whether err was created with MethodNotAllowedf() or
-// NewMethodNotAllowed().
-func IsMethodNotAllowed(err error) bool {
- err = Cause(err)
- _, ok := err.(*methodNotAllowed)
- return ok
-}
-
-// forbidden represents an error when a request cannot be completed because of
-// missing privileges
-type forbidden struct {
- Err
-}
-
-// Forbiddenf returns an error which satistifes IsForbidden()
-func Forbiddenf(format string, args ...interface{}) error {
- return &forbidden{wrap(nil, format, "", args...)}
-}
-
-// NewForbidden returns an error which wraps err that satisfies
-// IsForbidden().
-func NewForbidden(err error, msg string) error {
- return &forbidden{wrap(err, msg, "")}
-}
-
-// IsForbidden reports whether err was created with Forbiddenf() or
-// NewForbidden().
-func IsForbidden(err error) bool {
- err = Cause(err)
- _, ok := err.(*forbidden)
- return ok
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/functions.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/functions.go
deleted file mode 100644
index f86b09b..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/functions.go
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors
-
-import (
- "fmt"
- "strings"
-)
-
-// New is a drop in replacement for the standard library errors module that records
-// the location that the error is created.
-//
-// For example:
-// return errors.New("validation failed")
-//
-func New(message string) error {
- err := &Err{message: message}
- err.SetLocation(1)
- return err
-}
-
-// Errorf creates a new annotated error and records the location that the
-// error is created. This should be a drop in replacement for fmt.Errorf.
-//
-// For example:
-// return errors.Errorf("validation failed: %s", message)
-//
-func Errorf(format string, args ...interface{}) error {
- err := &Err{message: fmt.Sprintf(format, args...)}
- err.SetLocation(1)
- return err
-}
-
-// Trace adds the location of the Trace call to the stack. The Cause of the
-// resulting error is the same as the error parameter. If the other error is
-// nil, the result will be nil.
-//
-// For example:
-// if err := SomeFunc(); err != nil {
-// return errors.Trace(err)
-// }
-//
-func Trace(other error) error {
- if other == nil {
- return nil
- }
- err := &Err{previous: other, cause: Cause(other)}
- err.SetLocation(1)
- return err
-}
-
-// Annotate is used to add extra context to an existing error. The location of
-// the Annotate call is recorded with the annotations. The file, line and
-// function are also recorded.
-//
-// For example:
-// if err := SomeFunc(); err != nil {
-// return errors.Annotate(err, "failed to frombulate")
-// }
-//
-func Annotate(other error, message string) error {
- if other == nil {
- return nil
- }
- err := &Err{
- previous: other,
- cause: Cause(other),
- message: message,
- }
- err.SetLocation(1)
- return err
-}
-
-// Annotatef is used to add extra context to an existing error. The location of
-// the Annotate call is recorded with the annotations. The file, line and
-// function are also recorded.
-//
-// For example:
-// if err := SomeFunc(); err != nil {
-// return errors.Annotatef(err, "failed to frombulate the %s", arg)
-// }
-//
-func Annotatef(other error, format string, args ...interface{}) error {
- if other == nil {
- return nil
- }
- err := &Err{
- previous: other,
- cause: Cause(other),
- message: fmt.Sprintf(format, args...),
- }
- err.SetLocation(1)
- return err
-}
-
-// DeferredAnnotatef annotates the given error (when it is not nil) with the given
-// format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef
-// does nothing. This method is used in a defer statement in order to annotate any
-// resulting error with the same message.
-//
-// For example:
-//
-// defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg)
-//
-func DeferredAnnotatef(err *error, format string, args ...interface{}) {
- if *err == nil {
- return
- }
- newErr := &Err{
- message: fmt.Sprintf(format, args...),
- cause: Cause(*err),
- previous: *err,
- }
- newErr.SetLocation(1)
- *err = newErr
-}
-
-// Wrap changes the Cause of the error. The location of the Wrap call is also
-// stored in the error stack.
-//
-// For example:
-// if err := SomeFunc(); err != nil {
-// newErr := &packageError{"more context", private_value}
-// return errors.Wrap(err, newErr)
-// }
-//
-func Wrap(other, newDescriptive error) error {
- err := &Err{
- previous: other,
- cause: newDescriptive,
- }
- err.SetLocation(1)
- return err
-}
-
-// Wrapf changes the Cause of the error, and adds an annotation. The location
-// of the Wrap call is also stored in the error stack.
-//
-// For example:
-// if err := SomeFunc(); err != nil {
-// return errors.Wrapf(err, simpleErrorType, "invalid value %q", value)
-// }
-//
-func Wrapf(other, newDescriptive error, format string, args ...interface{}) error {
- err := &Err{
- message: fmt.Sprintf(format, args...),
- previous: other,
- cause: newDescriptive,
- }
- err.SetLocation(1)
- return err
-}
-
-// Mask masks the given error with the given format string and arguments (like
-// fmt.Sprintf), returning a new error that maintains the error stack, but
-// hides the underlying error type. The error string still contains the full
-// annotations. If you want to hide the annotations, call Wrap.
-func Maskf(other error, format string, args ...interface{}) error {
- if other == nil {
- return nil
- }
- err := &Err{
- message: fmt.Sprintf(format, args...),
- previous: other,
- }
- err.SetLocation(1)
- return err
-}
-
-// Mask hides the underlying error type, and records the location of the masking.
-func Mask(other error) error {
- if other == nil {
- return nil
- }
- err := &Err{
- previous: other,
- }
- err.SetLocation(1)
- return err
-}
-
-// Cause returns the cause of the given error. This will be either the
-// original error, or the result of a Wrap or Mask call.
-//
-// Cause is the usual way to diagnose errors that may have been wrapped by
-// the other errors functions.
-func Cause(err error) error {
- var diag error
- if err, ok := err.(causer); ok {
- diag = err.Cause()
- }
- if diag != nil {
- return diag
- }
- return err
-}
-
-type causer interface {
- Cause() error
-}
-
-type wrapper interface {
- // Message returns the top level error message,
- // not including the message from the Previous
- // error.
- Message() string
-
- // Underlying returns the Previous error, or nil
- // if there is none.
- Underlying() error
-}
-
-type locationer interface {
- Location() (string, int)
-}
-
-var (
- _ wrapper = (*Err)(nil)
- _ locationer = (*Err)(nil)
- _ causer = (*Err)(nil)
-)
-
-// Details returns information about the stack of errors wrapped by err, in
-// the format:
-//
-// [{filename:99: error one} {otherfile:55: cause of error one}]
-//
-// This is a terse alternative to ErrorStack as it returns a single line.
-func Details(err error) string {
- if err == nil {
- return "[]"
- }
- var s []byte
- s = append(s, '[')
- for {
- s = append(s, '{')
- if err, ok := err.(locationer); ok {
- file, line := err.Location()
- if file != "" {
- s = append(s, fmt.Sprintf("%s:%d", file, line)...)
- s = append(s, ": "...)
- }
- }
- if cerr, ok := err.(wrapper); ok {
- s = append(s, cerr.Message()...)
- err = cerr.Underlying()
- } else {
- s = append(s, err.Error()...)
- err = nil
- }
- s = append(s, '}')
- if err == nil {
- break
- }
- s = append(s, ' ')
- }
- s = append(s, ']')
- return string(s)
-}
-
-// ErrorStack returns a string representation of the annotated error. If the
-// error passed as the parameter is not an annotated error, the result is
-// simply the result of the Error() method on that error.
-//
-// If the error is an annotated error, a multi-line string is returned where
-// each line represents one entry in the annotation stack. The full filename
-// from the call stack is used in the output.
-//
-// first error
-// github.com/juju/errors/annotation_test.go:193:
-// github.com/juju/errors/annotation_test.go:194: annotation
-// github.com/juju/errors/annotation_test.go:195:
-// github.com/juju/errors/annotation_test.go:196: more context
-// github.com/juju/errors/annotation_test.go:197:
-func ErrorStack(err error) string {
- return strings.Join(errorStack(err), "\n")
-}
-
-func errorStack(err error) []string {
- if err == nil {
- return nil
- }
-
- // We want the first error first
- var lines []string
- for {
- var buff []byte
- if err, ok := err.(locationer); ok {
- file, line := err.Location()
- // Strip off the leading GOPATH/src path elements.
- file = trimGoPath(file)
- if file != "" {
- buff = append(buff, fmt.Sprintf("%s:%d", file, line)...)
- buff = append(buff, ": "...)
- }
- }
- if cerr, ok := err.(wrapper); ok {
- message := cerr.Message()
- buff = append(buff, message...)
- // If there is a cause for this error, and it is different to the cause
- // of the underlying error, then output the error string in the stack trace.
- var cause error
- if err1, ok := err.(causer); ok {
- cause = err1.Cause()
- }
- err = cerr.Underlying()
- if cause != nil && !sameError(Cause(err), cause) {
- if message != "" {
- buff = append(buff, ": "...)
- }
- buff = append(buff, cause.Error()...)
- }
- } else {
- buff = append(buff, err.Error()...)
- err = nil
- }
- lines = append(lines, string(buff))
- if err == nil {
- break
- }
- }
- // reverse the lines to get the original error, which was at the end of
- // the list, back to the start.
- var result []string
- for i := len(lines); i > 0; i-- {
- result = append(result, lines[i-1])
- }
- return result
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/path.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/path.go
deleted file mode 100644
index a7b726a..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/juju/errors/path.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2013, 2014 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package errors
-
-import (
- "runtime"
- "strings"
-)
-
-// prefixSize is used internally to trim the user specific path from the
-// front of the returned filenames from the runtime call stack.
-var prefixSize int
-
-// goPath is the deduced path based on the location of this file as compiled.
-var goPath string
-
-func init() {
- _, file, _, ok := runtime.Caller(0)
- if file == "?" {
- return
- }
- if ok {
- // We know that the end of the file should be:
- // github.com/juju/errors/path.go
- size := len(file)
- suffix := len("github.com/juju/errors/path.go")
- goPath = file[:size-suffix]
- prefixSize = len(goPath)
- }
-}
-
-func trimGoPath(filename string) string {
- if strings.HasPrefix(filename, goPath) {
- return filename[prefixSize:]
- }
- return filename
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/benchmark.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/benchmark.go
deleted file mode 100644
index 46ea9dc..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/benchmark.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright (c) 2012 The Go Authors. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package check
-
-import (
- "fmt"
- "runtime"
- "time"
-)
-
-var memStats runtime.MemStats
-
-// testingB is a type passed to Benchmark functions to manage benchmark
-// timing and to specify the number of iterations to run.
-type timer struct {
- start time.Time // Time test or benchmark started
- duration time.Duration
- N int
- bytes int64
- timerOn bool
- benchTime time.Duration
- // The initial states of memStats.Mallocs and memStats.TotalAlloc.
- startAllocs uint64
- startBytes uint64
- // The net total of this test after being run.
- netAllocs uint64
- netBytes uint64
-}
-
-// StartTimer starts timing a test. This function is called automatically
-// before a benchmark starts, but it can also used to resume timing after
-// a call to StopTimer.
-func (c *C) StartTimer() {
- if !c.timerOn {
- c.start = time.Now()
- c.timerOn = true
-
- runtime.ReadMemStats(&memStats)
- c.startAllocs = memStats.Mallocs
- c.startBytes = memStats.TotalAlloc
- }
-}
-
-// StopTimer stops timing a test. This can be used to pause the timer
-// while performing complex initialization that you don't
-// want to measure.
-func (c *C) StopTimer() {
- if c.timerOn {
- c.duration += time.Now().Sub(c.start)
- c.timerOn = false
- runtime.ReadMemStats(&memStats)
- c.netAllocs += memStats.Mallocs - c.startAllocs
- c.netBytes += memStats.TotalAlloc - c.startBytes
- }
-}
-
-// ResetTimer sets the elapsed benchmark time to zero.
-// It does not affect whether the timer is running.
-func (c *C) ResetTimer() {
- if c.timerOn {
- c.start = time.Now()
- runtime.ReadMemStats(&memStats)
- c.startAllocs = memStats.Mallocs
- c.startBytes = memStats.TotalAlloc
- }
- c.duration = 0
- c.netAllocs = 0
- c.netBytes = 0
-}
-
-// SetBytes informs the number of bytes that the benchmark processes
-// on each iteration. If this is called in a benchmark it will also
-// report MB/s.
-func (c *C) SetBytes(n int64) {
- c.bytes = n
-}
-
-func (c *C) nsPerOp() int64 {
- if c.N <= 0 {
- return 0
- }
- return c.duration.Nanoseconds() / int64(c.N)
-}
-
-func (c *C) mbPerSec() float64 {
- if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 {
- return 0
- }
- return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds()
-}
-
-func (c *C) timerString() string {
- if c.N <= 0 {
- return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9)
- }
- mbs := c.mbPerSec()
- mb := ""
- if mbs != 0 {
- mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
- }
- nsop := c.nsPerOp()
- ns := fmt.Sprintf("%10d ns/op", nsop)
- if c.N > 0 && nsop < 100 {
- // The format specifiers here make sure that
- // the ones digits line up for all three possible formats.
- if nsop < 10 {
- ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
- } else {
- ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
- }
- }
- memStats := ""
- if c.benchMem {
- allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N))
- allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N))
- memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs)
- }
- return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats)
-}
-
-func min(x, y int) int {
- if x > y {
- return y
- }
- return x
-}
-
-func max(x, y int) int {
- if x < y {
- return y
- }
- return x
-}
-
-// roundDown10 rounds a number down to the nearest power of 10.
-func roundDown10(n int) int {
- var tens = 0
- // tens = floor(log_10(n))
- for n > 10 {
- n = n / 10
- tens++
- }
- // result = 10^tens
- result := 1
- for i := 0; i < tens; i++ {
- result *= 10
- }
- return result
-}
-
-// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
-func roundUp(n int) int {
- base := roundDown10(n)
- if n < (2 * base) {
- return 2 * base
- }
- if n < (5 * base) {
- return 5 * base
- }
- return 10 * base
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/check.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/check.go
deleted file mode 100644
index fc535bc..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/check.go
+++ /dev/null
@@ -1,980 +0,0 @@
-// Package check is a rich testing extension for Go's testing package.
-//
-// For details about the project, see:
-//
-// http://labix.org/gocheck
-//
-package check
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "math/rand"
- "os"
- "path"
- "path/filepath"
- "reflect"
- "regexp"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// -----------------------------------------------------------------------
-// Internal type which deals with suite method calling.
-
-const (
- fixtureKd = iota
- testKd
-)
-
-type funcKind int
-
-const (
- succeededSt = iota
- failedSt
- skippedSt
- panickedSt
- fixturePanickedSt
- missedSt
-)
-
-type funcStatus uint32
-
-// A method value can't reach its own Method structure.
-type methodType struct {
- reflect.Value
- Info reflect.Method
-}
-
-func newMethod(receiver reflect.Value, i int) *methodType {
- return &methodType{receiver.Method(i), receiver.Type().Method(i)}
-}
-
-func (method *methodType) PC() uintptr {
- return method.Info.Func.Pointer()
-}
-
-func (method *methodType) suiteName() string {
- t := method.Info.Type.In(0)
- if t.Kind() == reflect.Ptr {
- t = t.Elem()
- }
- return t.Name()
-}
-
-func (method *methodType) String() string {
- return method.suiteName() + "." + method.Info.Name
-}
-
-func (method *methodType) matches(re *regexp.Regexp) bool {
- return (re.MatchString(method.Info.Name) ||
- re.MatchString(method.suiteName()) ||
- re.MatchString(method.String()))
-}
-
-type C struct {
- method *methodType
- kind funcKind
- testName string
- _status funcStatus
- logb *logger
- logw io.Writer
- done chan *C
- parallel chan *C
- reason string
- mustFail bool
- tempDir *tempDir
- benchMem bool
- startTime time.Time
- timer
-}
-
-func (c *C) status() funcStatus {
- return funcStatus(atomic.LoadUint32((*uint32)(&c._status)))
-}
-
-func (c *C) setStatus(s funcStatus) {
- atomic.StoreUint32((*uint32)(&c._status), uint32(s))
-}
-
-func (c *C) stopNow() {
- runtime.Goexit()
-}
-
-// logger is a concurrency safe byte.Buffer
-type logger struct {
- sync.Mutex
- writer bytes.Buffer
-}
-
-func (l *logger) Write(buf []byte) (int, error) {
- l.Lock()
- defer l.Unlock()
- return l.writer.Write(buf)
-}
-
-func (l *logger) WriteTo(w io.Writer) (int64, error) {
- l.Lock()
- defer l.Unlock()
- return l.writer.WriteTo(w)
-}
-
-func (l *logger) String() string {
- l.Lock()
- defer l.Unlock()
- return l.writer.String()
-}
-
-// -----------------------------------------------------------------------
-// Handling of temporary files and directories.
-
-type tempDir struct {
- sync.Mutex
- path string
- counter int
-}
-
-func (td *tempDir) newPath() string {
- td.Lock()
- defer td.Unlock()
- if td.path == "" {
- var err error
- for i := 0; i != 100; i++ {
- path := fmt.Sprintf("%s%ccheck-%d", os.TempDir(), os.PathSeparator, rand.Int())
- if err = os.Mkdir(path, 0700); err == nil {
- td.path = path
- break
- }
- }
- if td.path == "" {
- panic("Couldn't create temporary directory: " + err.Error())
- }
- }
- result := filepath.Join(td.path, strconv.Itoa(td.counter))
- td.counter += 1
- return result
-}
-
-func (td *tempDir) removeAll() {
- td.Lock()
- defer td.Unlock()
- if td.path != "" {
- err := os.RemoveAll(td.path)
- if err != nil {
- fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error())
- }
- }
-}
-
-// Create a new temporary directory which is automatically removed after
-// the suite finishes running.
-func (c *C) MkDir() string {
- path := c.tempDir.newPath()
- if err := os.Mkdir(path, 0700); err != nil {
- panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error()))
- }
- return path
-}
-
-// -----------------------------------------------------------------------
-// Low-level logging functions.
-
-func (c *C) log(args ...interface{}) {
- c.writeLog([]byte(fmt.Sprint(args...) + "\n"))
-}
-
-func (c *C) logf(format string, args ...interface{}) {
- c.writeLog([]byte(fmt.Sprintf(format+"\n", args...)))
-}
-
-func (c *C) logNewLine() {
- c.writeLog([]byte{'\n'})
-}
-
-func (c *C) writeLog(buf []byte) {
- c.logb.Write(buf)
- if c.logw != nil {
- c.logw.Write(buf)
- }
-}
-
-func hasStringOrError(x interface{}) (ok bool) {
- _, ok = x.(fmt.Stringer)
- if ok {
- return
- }
- _, ok = x.(error)
- return
-}
-
-func (c *C) logValue(label string, value interface{}) {
- if label == "" {
- if hasStringOrError(value) {
- c.logf("... %#v (%q)", value, value)
- } else {
- c.logf("... %#v", value)
- }
- } else if value == nil {
- c.logf("... %s = nil", label)
- } else {
- if hasStringOrError(value) {
- fv := fmt.Sprintf("%#v", value)
- qv := fmt.Sprintf("%q", value)
- if fv != qv {
- c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv)
- return
- }
- }
- if s, ok := value.(string); ok && isMultiLine(s) {
- c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value))
- c.logMultiLine(s)
- } else {
- c.logf("... %s %s = %#v", label, reflect.TypeOf(value), value)
- }
- }
-}
-
-func (c *C) logMultiLine(s string) {
- b := make([]byte, 0, len(s)*2)
- i := 0
- n := len(s)
- for i < n {
- j := i + 1
- for j < n && s[j-1] != '\n' {
- j++
- }
- b = append(b, "... "...)
- b = strconv.AppendQuote(b, s[i:j])
- if j < n {
- b = append(b, " +"...)
- }
- b = append(b, '\n')
- i = j
- }
- c.writeLog(b)
-}
-
-func isMultiLine(s string) bool {
- for i := 0; i+1 < len(s); i++ {
- if s[i] == '\n' {
- return true
- }
- }
- return false
-}
-
-func (c *C) logString(issue string) {
- c.log("... ", issue)
-}
-
-func (c *C) logCaller(skip int) {
- // This is a bit heavier than it ought to be.
- skip += 1 // Our own frame.
- pc, callerFile, callerLine, ok := runtime.Caller(skip)
- if !ok {
- return
- }
- var testFile string
- var testLine int
- testFunc := runtime.FuncForPC(c.method.PC())
- if runtime.FuncForPC(pc) != testFunc {
- for {
- skip += 1
- if pc, file, line, ok := runtime.Caller(skip); ok {
- // Note that the test line may be different on
- // distinct calls for the same test. Showing
- // the "internal" line is helpful when debugging.
- if runtime.FuncForPC(pc) == testFunc {
- testFile, testLine = file, line
- break
- }
- } else {
- break
- }
- }
- }
- if testFile != "" && (testFile != callerFile || testLine != callerLine) {
- c.logCode(testFile, testLine)
- }
- c.logCode(callerFile, callerLine)
-}
-
-func (c *C) logCode(path string, line int) {
- c.logf("%s:%d:", nicePath(path), line)
- code, err := printLine(path, line)
- if code == "" {
- code = "..." // XXX Open the file and take the raw line.
- if err != nil {
- code += err.Error()
- }
- }
- c.log(indent(code, " "))
-}
-
-var valueGo = filepath.Join("reflect", "value.go")
-var asmGo = filepath.Join("runtime", "asm_")
-
-func (c *C) logPanic(skip int, value interface{}) {
- skip++ // Our own frame.
- initialSkip := skip
- for ; ; skip++ {
- if pc, file, line, ok := runtime.Caller(skip); ok {
- if skip == initialSkip {
- c.logf("... Panic: %s (PC=0x%X)\n", value, pc)
- }
- name := niceFuncName(pc)
- path := nicePath(file)
- if strings.Contains(path, "/gopkg.in/check.v") {
- continue
- }
- if name == "Value.call" && strings.HasSuffix(path, valueGo) {
- continue
- }
- if (name == "call16" || name == "call32") && strings.Contains(path, asmGo) {
- continue
- }
- c.logf("%s:%d\n in %s", nicePath(file), line, name)
- } else {
- break
- }
- }
-}
-
-func (c *C) logSoftPanic(issue string) {
- c.log("... Panic: ", issue)
-}
-
-func (c *C) logArgPanic(method *methodType, expectedType string) {
- c.logf("... Panic: %s argument should be %s",
- niceFuncName(method.PC()), expectedType)
-}
-
-// -----------------------------------------------------------------------
-// Some simple formatting helpers.
-
-var initWD, initWDErr = os.Getwd()
-
-func init() {
- if initWDErr == nil {
- initWD = strings.Replace(initWD, "\\", "/", -1) + "/"
- }
-}
-
-func nicePath(path string) string {
- if initWDErr == nil {
- if strings.HasPrefix(path, initWD) {
- return path[len(initWD):]
- }
- }
- return path
-}
-
-func niceFuncPath(pc uintptr) string {
- function := runtime.FuncForPC(pc)
- if function != nil {
- filename, line := function.FileLine(pc)
- return fmt.Sprintf("%s:%d", nicePath(filename), line)
- }
- return ""
-}
-
-func niceFuncName(pc uintptr) string {
- function := runtime.FuncForPC(pc)
- if function != nil {
- name := path.Base(function.Name())
- if i := strings.Index(name, "."); i > 0 {
- name = name[i+1:]
- }
- if strings.HasPrefix(name, "(*") {
- if i := strings.Index(name, ")"); i > 0 {
- name = name[2:i] + name[i+1:]
- }
- }
- if i := strings.LastIndex(name, ".*"); i != -1 {
- name = name[:i] + "." + name[i+2:]
- }
- if i := strings.LastIndex(name, "·"); i != -1 {
- name = name[:i] + "." + name[i+2:]
- }
- return name
- }
- return ""
-}
-
-// -----------------------------------------------------------------------
-// Result tracker to aggregate call results.
-
-type Result struct {
- Succeeded int
- Failed int
- Skipped int
- Panicked int
- FixturePanicked int
- ExpectedFailures int
- Missed int // Not even tried to run, related to a panic in the fixture.
- RunError error // Houston, we've got a problem.
- WorkDir string // If KeepWorkDir is true
-}
-
-type resultTracker struct {
- result Result
- _lastWasProblem bool
- _waiting int
- _missed int
- _expectChan chan *C
- _doneChan chan *C
- _stopChan chan bool
-}
-
-func newResultTracker() *resultTracker {
- return &resultTracker{_expectChan: make(chan *C), // Synchronous
- _doneChan: make(chan *C, 32), // Asynchronous
- _stopChan: make(chan bool)} // Synchronous
-}
-
-func (tracker *resultTracker) start() {
- go tracker._loopRoutine()
-}
-
-func (tracker *resultTracker) waitAndStop() {
- <-tracker._stopChan
-}
-
-func (tracker *resultTracker) expectCall(c *C) {
- tracker._expectChan <- c
-}
-
-func (tracker *resultTracker) callDone(c *C) {
- tracker._doneChan <- c
-}
-
-func (tracker *resultTracker) _loopRoutine() {
- for {
- var c *C
- if tracker._waiting > 0 {
- // Calls still running. Can't stop.
- select {
- // XXX Reindent this (not now to make diff clear)
- case c = <-tracker._expectChan:
- tracker._waiting += 1
- case c = <-tracker._doneChan:
- tracker._waiting -= 1
- switch c.status() {
- case succeededSt:
- if c.kind == testKd {
- if c.mustFail {
- tracker.result.ExpectedFailures++
- } else {
- tracker.result.Succeeded++
- }
- }
- case failedSt:
- tracker.result.Failed++
- case panickedSt:
- if c.kind == fixtureKd {
- tracker.result.FixturePanicked++
- } else {
- tracker.result.Panicked++
- }
- case fixturePanickedSt:
- // Track it as missed, since the panic
- // was on the fixture, not on the test.
- tracker.result.Missed++
- case missedSt:
- tracker.result.Missed++
- case skippedSt:
- if c.kind == testKd {
- tracker.result.Skipped++
- }
- }
- }
- } else {
- // No calls. Can stop, but no done calls here.
- select {
- case tracker._stopChan <- true:
- return
- case c = <-tracker._expectChan:
- tracker._waiting += 1
- case c = <-tracker._doneChan:
- panic("Tracker got an unexpected done call.")
- }
- }
- }
-}
-
-// -----------------------------------------------------------------------
-// The underlying suite runner.
-
-type suiteRunner struct {
- suite interface{}
- setUpSuite, tearDownSuite *methodType
- setUpTest, tearDownTest *methodType
- tests []*methodType
- tracker *resultTracker
- tempDir *tempDir
- keepDir bool
- output *outputWriter
- reportedProblemLast bool
- benchTime time.Duration
- benchMem bool
-}
-
-type RunConf struct {
- Output io.Writer
- Stream bool
- Verbose bool
- Filter string
- Benchmark bool
- BenchmarkTime time.Duration // Defaults to 1 second
- BenchmarkMem bool
- KeepWorkDir bool
- Exclude string
-}
-
-// Create a new suiteRunner able to run all methods in the given suite.
-func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner {
- var conf RunConf
- if runConf != nil {
- conf = *runConf
- }
- if conf.Output == nil {
- conf.Output = os.Stdout
- }
- if conf.Benchmark {
- conf.Verbose = true
- }
-
- suiteType := reflect.TypeOf(suite)
- suiteNumMethods := suiteType.NumMethod()
- suiteValue := reflect.ValueOf(suite)
-
- runner := &suiteRunner{
- suite: suite,
- output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose),
- tracker: newResultTracker(),
- benchTime: conf.BenchmarkTime,
- benchMem: conf.BenchmarkMem,
- tempDir: &tempDir{},
- keepDir: conf.KeepWorkDir,
- tests: make([]*methodType, 0, suiteNumMethods),
- }
- if runner.benchTime == 0 {
- runner.benchTime = 1 * time.Second
- }
-
- var filterRegexp *regexp.Regexp
- if conf.Filter != "" {
- if regexp, err := regexp.Compile(conf.Filter); err != nil {
- msg := "Bad filter expression: " + err.Error()
- runner.tracker.result.RunError = errors.New(msg)
- return runner
- } else {
- filterRegexp = regexp
- }
- }
-
- var excludeRegexp *regexp.Regexp
- if conf.Exclude != "" {
- if regexp, err := regexp.Compile(conf.Exclude); err != nil {
- msg := "Bad exclude expression: " + err.Error()
- runner.tracker.result.RunError = errors.New(msg)
- return runner
- } else {
- excludeRegexp = regexp
- }
- }
-
- for i := 0; i != suiteNumMethods; i++ {
- method := newMethod(suiteValue, i)
- switch method.Info.Name {
- case "SetUpSuite":
- runner.setUpSuite = method
- case "TearDownSuite":
- runner.tearDownSuite = method
- case "SetUpTest":
- runner.setUpTest = method
- case "TearDownTest":
- runner.tearDownTest = method
- default:
- prefix := "Test"
- if conf.Benchmark {
- prefix = "Benchmark"
- }
- if !strings.HasPrefix(method.Info.Name, prefix) {
- continue
- }
- if filterRegexp == nil || method.matches(filterRegexp) {
- if excludeRegexp == nil || !method.matches(excludeRegexp) {
- runner.tests = append(runner.tests, method)
- }
- }
- }
- }
- return runner
-}
-
-// Run all methods in the given suite.
-func (runner *suiteRunner) run() *Result {
- if runner.tracker.result.RunError == nil && len(runner.tests) > 0 {
- runner.tracker.start()
- if runner.checkFixtureArgs() {
- c := runner.runFixture(runner.setUpSuite, "", nil)
- if c == nil || c.status() == succeededSt {
- var delayedC []*C
- for i := 0; i != len(runner.tests); i++ {
- c := runner.forkTest(runner.tests[i])
- select {
- case <-c.done:
- case <-c.parallel:
- delayedC = append(delayedC, c)
- }
- if c.status() == fixturePanickedSt {
- runner.skipTests(missedSt, runner.tests[i+1:])
- break
- }
- }
- // Wait those parallel tests finish.
- for _, delayed := range delayedC {
- <-delayed.done
- }
- } else if c != nil && c.status() == skippedSt {
- runner.skipTests(skippedSt, runner.tests)
- } else {
- runner.skipTests(missedSt, runner.tests)
- }
- runner.runFixture(runner.tearDownSuite, "", nil)
- } else {
- runner.skipTests(missedSt, runner.tests)
- }
- runner.tracker.waitAndStop()
- if runner.keepDir {
- runner.tracker.result.WorkDir = runner.tempDir.path
- } else {
- runner.tempDir.removeAll()
- }
- }
- return &runner.tracker.result
-}
-
-// Create a call object with the given suite method, and fork a
-// goroutine with the provided dispatcher for running it.
-func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
- var logw io.Writer
- if runner.output.Stream {
- logw = runner.output
- }
- if logb == nil {
- logb = new(logger)
- }
- c := &C{
- method: method,
- kind: kind,
- testName: testName,
- logb: logb,
- logw: logw,
- tempDir: runner.tempDir,
- done: make(chan *C, 1),
- parallel: make(chan *C, 1),
- timer: timer{benchTime: runner.benchTime},
- startTime: time.Now(),
- benchMem: runner.benchMem,
- }
- runner.tracker.expectCall(c)
- go (func() {
- runner.reportCallStarted(c)
- defer runner.callDone(c)
- dispatcher(c)
- })()
- return c
-}
-
-// Same as forkCall(), but wait for call to finish before returning.
-func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
- c := runner.forkCall(method, kind, testName, logb, dispatcher)
- <-c.done
- return c
-}
-
-// Handle a finished call. If there were any panics, update the call status
-// accordingly. Then, mark the call as done and report to the tracker.
-func (runner *suiteRunner) callDone(c *C) {
- value := recover()
- if value != nil {
- switch v := value.(type) {
- case *fixturePanic:
- if v.status == skippedSt {
- c.setStatus(skippedSt)
- } else {
- c.logSoftPanic("Fixture has panicked (see related PANIC)")
- c.setStatus(fixturePanickedSt)
- }
- default:
- c.logPanic(1, value)
- c.setStatus(panickedSt)
- }
- }
- if c.mustFail {
- switch c.status() {
- case failedSt:
- c.setStatus(succeededSt)
- case succeededSt:
- c.setStatus(failedSt)
- c.logString("Error: Test succeeded, but was expected to fail")
- c.logString("Reason: " + c.reason)
- }
- }
-
- runner.reportCallDone(c)
- c.done <- c
-}
-
-// Runs a fixture call synchronously. The fixture will still be run in a
-// goroutine like all suite methods, but this method will not return
-// while the fixture goroutine is not done, because the fixture must be
-// run in a desired order.
-func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C {
- if method != nil {
- c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) {
- c.ResetTimer()
- c.StartTimer()
- defer c.StopTimer()
- c.method.Call([]reflect.Value{reflect.ValueOf(c)})
- })
- return c
- }
- return nil
-}
-
-// Run the fixture method with runFixture(), but panic with a fixturePanic{}
-// in case the fixture method panics. This makes it easier to track the
-// fixture panic together with other call panics within forkTest().
-func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C {
- if skipped != nil && *skipped {
- return nil
- }
- c := runner.runFixture(method, testName, logb)
- if c != nil && c.status() != succeededSt {
- if skipped != nil {
- *skipped = c.status() == skippedSt
- }
- panic(&fixturePanic{c.status(), method})
- }
- return c
-}
-
-type fixturePanic struct {
- status funcStatus
- method *methodType
-}
-
-// Run the suite test method, together with the test-specific fixture,
-// asynchronously.
-func (runner *suiteRunner) forkTest(method *methodType) *C {
- testName := method.String()
- return runner.forkCall(method, testKd, testName, nil, func(c *C) {
- var skipped bool
- defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped)
- defer c.StopTimer()
- benchN := 1
- for {
- runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped)
- mt := c.method.Type()
- if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) {
- // Rather than a plain panic, provide a more helpful message when
- // the argument type is incorrect.
- c.setStatus(panickedSt)
- c.logArgPanic(c.method, "*check.C")
- return
- }
- if strings.HasPrefix(c.method.Info.Name, "Test") {
- c.ResetTimer()
- c.StartTimer()
- c.method.Call([]reflect.Value{reflect.ValueOf(c)})
- return
- }
- if !strings.HasPrefix(c.method.Info.Name, "Benchmark") {
- panic("unexpected method prefix: " + c.method.Info.Name)
- }
-
- runtime.GC()
- c.N = benchN
- c.ResetTimer()
- c.StartTimer()
- c.method.Call([]reflect.Value{reflect.ValueOf(c)})
- c.StopTimer()
- if c.status() != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 {
- return
- }
- perOpN := int(1e9)
- if c.nsPerOp() != 0 {
- perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp())
- }
-
- // Logic taken from the stock testing package:
- // - Run more iterations than we think we'll need for a second (1.5x).
- // - Don't grow too fast in case we had timing errors previously.
- // - Be sure to run at least one more than last time.
- benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1)
- benchN = roundUp(benchN)
-
- skipped = true // Don't run the deferred one if this panics.
- runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil)
- skipped = false
- }
- })
-}
-
-// Same as forkTest(), but wait for the test to finish before returning.
-func (runner *suiteRunner) runTest(method *methodType) *C {
- c := runner.forkTest(method)
- <-c.done
- return c
-}
-
-// Helper to mark tests as skipped or missed. A bit heavy for what
-// it does, but it enables homogeneous handling of tracking, including
-// nice verbose output.
-func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) {
- for _, method := range methods {
- runner.runFunc(method, testKd, "", nil, func(c *C) {
- c.setStatus(status)
- })
- }
-}
-
-// Verify if the fixture arguments are *check.C. In case of errors,
-// log the error as a panic in the fixture method call, and return false.
-func (runner *suiteRunner) checkFixtureArgs() bool {
- succeeded := true
- argType := reflect.TypeOf(&C{})
- for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} {
- if method != nil {
- mt := method.Type()
- if mt.NumIn() != 1 || mt.In(0) != argType {
- succeeded = false
- runner.runFunc(method, fixtureKd, "", nil, func(c *C) {
- c.logArgPanic(method, "*check.C")
- c.setStatus(panickedSt)
- })
- }
- }
- }
- return succeeded
-}
-
-func (runner *suiteRunner) reportCallStarted(c *C) {
- runner.output.WriteCallStarted("START", c)
-}
-
-func (runner *suiteRunner) reportCallDone(c *C) {
- runner.tracker.callDone(c)
- switch c.status() {
- case succeededSt:
- if c.mustFail {
- runner.output.WriteCallSuccess("FAIL EXPECTED", c)
- } else {
- runner.output.WriteCallSuccess("PASS", c)
- }
- case skippedSt:
- runner.output.WriteCallSuccess("SKIP", c)
- case failedSt:
- runner.output.WriteCallProblem("FAIL", c)
- case panickedSt:
- runner.output.WriteCallProblem("PANIC", c)
- case fixturePanickedSt:
- // That's a testKd call reporting that its fixture
- // has panicked. The fixture call which caused the
- // panic itself was tracked above. We'll report to
- // aid debugging.
- runner.output.WriteCallProblem("PANIC", c)
- case missedSt:
- runner.output.WriteCallSuccess("MISS", c)
- }
-}
-
-// -----------------------------------------------------------------------
-// Output writer manages atomic output writing according to settings.
-
-type outputWriter struct {
- m sync.Mutex
- writer io.Writer
- wroteCallProblemLast bool
- Stream bool
- Verbose bool
-}
-
-func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter {
- return &outputWriter{writer: writer, Stream: stream, Verbose: verbose}
-}
-
-func (ow *outputWriter) Write(content []byte) (n int, err error) {
- ow.m.Lock()
- n, err = ow.writer.Write(content)
- ow.m.Unlock()
- return
-}
-
-func (ow *outputWriter) WriteCallStarted(label string, c *C) {
- if ow.Stream {
- header := renderCallHeader(label, c, "", "\n")
- ow.m.Lock()
- ow.writer.Write([]byte(header))
- ow.m.Unlock()
- }
-}
-
-func (ow *outputWriter) WriteCallProblem(label string, c *C) {
- var prefix string
- if !ow.Stream {
- prefix = "\n-----------------------------------" +
- "-----------------------------------\n"
- }
- header := renderCallHeader(label, c, prefix, "\n\n")
- ow.m.Lock()
- ow.wroteCallProblemLast = true
- ow.writer.Write([]byte(header))
- if !ow.Stream {
- c.logb.WriteTo(ow.writer)
- }
- ow.m.Unlock()
-}
-
-func (ow *outputWriter) WriteCallSuccess(label string, c *C) {
- if ow.Stream || (ow.Verbose && c.kind == testKd) {
- // TODO Use a buffer here.
- var suffix string
- if c.reason != "" {
- suffix = " (" + c.reason + ")"
- }
- if c.status() == succeededSt {
- suffix += "\t" + c.timerString()
- }
- suffix += "\n"
- if ow.Stream {
- suffix += "\n"
- }
- header := renderCallHeader(label, c, "", suffix)
- ow.m.Lock()
- // Resist temptation of using line as prefix above due to race.
- if !ow.Stream && ow.wroteCallProblemLast {
- header = "\n-----------------------------------" +
- "-----------------------------------\n" +
- header
- }
- ow.wroteCallProblemLast = false
- ow.writer.Write([]byte(header))
- ow.m.Unlock()
- }
-}
-
-func renderCallHeader(label string, c *C, prefix, suffix string) string {
- pc := c.method.PC()
- return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc),
- niceFuncName(pc), suffix)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/checkers.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/checkers.go
deleted file mode 100644
index 3749545..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/checkers.go
+++ /dev/null
@@ -1,458 +0,0 @@
-package check
-
-import (
- "fmt"
- "reflect"
- "regexp"
-)
-
-// -----------------------------------------------------------------------
-// CommentInterface and Commentf helper, to attach extra information to checks.
-
-type comment struct {
- format string
- args []interface{}
-}
-
-// Commentf returns an infomational value to use with Assert or Check calls.
-// If the checker test fails, the provided arguments will be passed to
-// fmt.Sprintf, and will be presented next to the logged failure.
-//
-// For example:
-//
-// c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i))
-//
-// Note that if the comment is constant, a better option is to
-// simply use a normal comment right above or next to the line, as
-// it will also get printed with any errors:
-//
-// c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123)
-//
-func Commentf(format string, args ...interface{}) CommentInterface {
- return &comment{format, args}
-}
-
-// CommentInterface must be implemented by types that attach extra
-// information to failed checks. See the Commentf function for details.
-type CommentInterface interface {
- CheckCommentString() string
-}
-
-func (c *comment) CheckCommentString() string {
- return fmt.Sprintf(c.format, c.args...)
-}
-
-// -----------------------------------------------------------------------
-// The Checker interface.
-
-// The Checker interface must be provided by checkers used with
-// the Assert and Check verification methods.
-type Checker interface {
- Info() *CheckerInfo
- Check(params []interface{}, names []string) (result bool, error string)
-}
-
-// See the Checker interface.
-type CheckerInfo struct {
- Name string
- Params []string
-}
-
-func (info *CheckerInfo) Info() *CheckerInfo {
- return info
-}
-
-// -----------------------------------------------------------------------
-// Not checker logic inverter.
-
-// The Not checker inverts the logic of the provided checker. The
-// resulting checker will succeed where the original one failed, and
-// vice-versa.
-//
-// For example:
-//
-// c.Assert(a, Not(Equals), b)
-//
-func Not(checker Checker) Checker {
- return ¬Checker{checker}
-}
-
-type notChecker struct {
- sub Checker
-}
-
-func (checker *notChecker) Info() *CheckerInfo {
- info := *checker.sub.Info()
- info.Name = "Not(" + info.Name + ")"
- return &info
-}
-
-func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) {
- result, error = checker.sub.Check(params, names)
- result = !result
- return
-}
-
-// -----------------------------------------------------------------------
-// IsNil checker.
-
-type isNilChecker struct {
- *CheckerInfo
-}
-
-// The IsNil checker tests whether the obtained value is nil.
-//
-// For example:
-//
-// c.Assert(err, IsNil)
-//
-var IsNil Checker = &isNilChecker{
- &CheckerInfo{Name: "IsNil", Params: []string{"value"}},
-}
-
-func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
- return isNil(params[0]), ""
-}
-
-func isNil(obtained interface{}) (result bool) {
- if obtained == nil {
- result = true
- } else {
- switch v := reflect.ValueOf(obtained); v.Kind() {
- case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return v.IsNil()
- }
- }
- return
-}
-
-// -----------------------------------------------------------------------
-// NotNil checker. Alias for Not(IsNil), since it's so common.
-
-type notNilChecker struct {
- *CheckerInfo
-}
-
-// The NotNil checker verifies that the obtained value is not nil.
-//
-// For example:
-//
-// c.Assert(iface, NotNil)
-//
-// This is an alias for Not(IsNil), made available since it's a
-// fairly common check.
-//
-var NotNil Checker = ¬NilChecker{
- &CheckerInfo{Name: "NotNil", Params: []string{"value"}},
-}
-
-func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
- return !isNil(params[0]), ""
-}
-
-// -----------------------------------------------------------------------
-// Equals checker.
-
-type equalsChecker struct {
- *CheckerInfo
-}
-
-// The Equals checker verifies that the obtained value is equal to
-// the expected value, according to usual Go semantics for ==.
-//
-// For example:
-//
-// c.Assert(value, Equals, 42)
-//
-var Equals Checker = &equalsChecker{
- &CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}},
-}
-
-func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) {
- defer func() {
- if v := recover(); v != nil {
- result = false
- error = fmt.Sprint(v)
- }
- }()
- return params[0] == params[1], ""
-}
-
-// -----------------------------------------------------------------------
-// DeepEquals checker.
-
-type deepEqualsChecker struct {
- *CheckerInfo
-}
-
-// The DeepEquals checker verifies that the obtained value is deep-equal to
-// the expected value. The check will work correctly even when facing
-// slices, interfaces, and values of different types (which always fail
-// the test).
-//
-// For example:
-//
-// c.Assert(value, DeepEquals, 42)
-// c.Assert(array, DeepEquals, []string{"hi", "there"})
-//
-var DeepEquals Checker = &deepEqualsChecker{
- &CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}},
-}
-
-func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {
- return reflect.DeepEqual(params[0], params[1]), ""
-}
-
-// -----------------------------------------------------------------------
-// HasLen checker.
-
-type hasLenChecker struct {
- *CheckerInfo
-}
-
-// The HasLen checker verifies that the obtained value has the
-// provided length. In many cases this is superior to using Equals
-// in conjunction with the len function because in case the check
-// fails the value itself will be printed, instead of its length,
-// providing more details for figuring the problem.
-//
-// For example:
-//
-// c.Assert(list, HasLen, 5)
-//
-var HasLen Checker = &hasLenChecker{
- &CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}},
-}
-
-func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) {
- n, ok := params[1].(int)
- if !ok {
- return false, "n must be an int"
- }
- value := reflect.ValueOf(params[0])
- switch value.Kind() {
- case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String:
- default:
- return false, "obtained value type has no length"
- }
- return value.Len() == n, ""
-}
-
-// -----------------------------------------------------------------------
-// ErrorMatches checker.
-
-type errorMatchesChecker struct {
- *CheckerInfo
-}
-
-// The ErrorMatches checker verifies that the error value
-// is non nil and matches the regular expression provided.
-//
-// For example:
-//
-// c.Assert(err, ErrorMatches, "perm.*denied")
-//
-var ErrorMatches Checker = errorMatchesChecker{
- &CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}},
-}
-
-func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) {
- if params[0] == nil {
- return false, "Error value is nil"
- }
- err, ok := params[0].(error)
- if !ok {
- return false, "Value is not an error"
- }
- params[0] = err.Error()
- names[0] = "error"
- return matches(params[0], params[1])
-}
-
-// -----------------------------------------------------------------------
-// Matches checker.
-
-type matchesChecker struct {
- *CheckerInfo
-}
-
-// The Matches checker verifies that the string provided as the obtained
-// value (or the string resulting from obtained.String()) matches the
-// regular expression provided.
-//
-// For example:
-//
-// c.Assert(err, Matches, "perm.*denied")
-//
-var Matches Checker = &matchesChecker{
- &CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}},
-}
-
-func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) {
- return matches(params[0], params[1])
-}
-
-func matches(value, regex interface{}) (result bool, error string) {
- reStr, ok := regex.(string)
- if !ok {
- return false, "Regex must be a string"
- }
- valueStr, valueIsStr := value.(string)
- if !valueIsStr {
- if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr {
- valueStr, valueIsStr = valueWithStr.String(), true
- }
- }
- if valueIsStr {
- matches, err := regexp.MatchString("^"+reStr+"$", valueStr)
- if err != nil {
- return false, "Can't compile regex: " + err.Error()
- }
- return matches, ""
- }
- return false, "Obtained value is not a string and has no .String()"
-}
-
-// -----------------------------------------------------------------------
-// Panics checker.
-
-type panicsChecker struct {
- *CheckerInfo
-}
-
-// The Panics checker verifies that calling the provided zero-argument
-// function will cause a panic which is deep-equal to the provided value.
-//
-// For example:
-//
-// c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}).
-//
-//
-var Panics Checker = &panicsChecker{
- &CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}},
-}
-
-func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) {
- f := reflect.ValueOf(params[0])
- if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
- return false, "Function must take zero arguments"
- }
- defer func() {
- // If the function has not panicked, then don't do the check.
- if error != "" {
- return
- }
- params[0] = recover()
- names[0] = "panic"
- result = reflect.DeepEqual(params[0], params[1])
- }()
- f.Call(nil)
- return false, "Function has not panicked"
-}
-
-type panicMatchesChecker struct {
- *CheckerInfo
-}
-
-// The PanicMatches checker verifies that calling the provided zero-argument
-// function will cause a panic with an error value matching
-// the regular expression provided.
-//
-// For example:
-//
-// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`).
-//
-//
-var PanicMatches Checker = &panicMatchesChecker{
- &CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}},
-}
-
-func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) {
- f := reflect.ValueOf(params[0])
- if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
- return false, "Function must take zero arguments"
- }
- defer func() {
- // If the function has not panicked, then don't do the check.
- if errmsg != "" {
- return
- }
- obtained := recover()
- names[0] = "panic"
- if e, ok := obtained.(error); ok {
- params[0] = e.Error()
- } else if _, ok := obtained.(string); ok {
- params[0] = obtained
- } else {
- errmsg = "Panic value is not a string or an error"
- return
- }
- result, errmsg = matches(params[0], params[1])
- }()
- f.Call(nil)
- return false, "Function has not panicked"
-}
-
-// -----------------------------------------------------------------------
-// FitsTypeOf checker.
-
-type fitsTypeChecker struct {
- *CheckerInfo
-}
-
-// The FitsTypeOf checker verifies that the obtained value is
-// assignable to a variable with the same type as the provided
-// sample value.
-//
-// For example:
-//
-// c.Assert(value, FitsTypeOf, int64(0))
-// c.Assert(value, FitsTypeOf, os.Error(nil))
-//
-var FitsTypeOf Checker = &fitsTypeChecker{
- &CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}},
-}
-
-func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) {
- obtained := reflect.ValueOf(params[0])
- sample := reflect.ValueOf(params[1])
- if !obtained.IsValid() {
- return false, ""
- }
- if !sample.IsValid() {
- return false, "Invalid sample value"
- }
- return obtained.Type().AssignableTo(sample.Type()), ""
-}
-
-// -----------------------------------------------------------------------
-// Implements checker.
-
-type implementsChecker struct {
- *CheckerInfo
-}
-
-// The Implements checker verifies that the obtained value
-// implements the interface specified via a pointer to an interface
-// variable.
-//
-// For example:
-//
-// var e os.Error
-// c.Assert(err, Implements, &e)
-//
-var Implements Checker = &implementsChecker{
- &CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}},
-}
-
-func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) {
- obtained := reflect.ValueOf(params[0])
- ifaceptr := reflect.ValueOf(params[1])
- if !obtained.IsValid() {
- return false, ""
- }
- if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface {
- return false, "ifaceptr should be a pointer to an interface variable"
- }
- return obtained.Type().Implements(ifaceptr.Elem().Type()), ""
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/checkers2.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/checkers2.go
deleted file mode 100644
index c09bcdc..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/checkers2.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Extensions to the go-check unittest framework.
-//
-// NOTE: see https://github.com/go-check/check/pull/6 for reasons why these
-// checkers live here.
-package check
-
-import (
- "bytes"
- "reflect"
-)
-
-// -----------------------------------------------------------------------
-// IsTrue / IsFalse checker.
-
-type isBoolValueChecker struct {
- *CheckerInfo
- expected bool
-}
-
-func (checker *isBoolValueChecker) Check(
- params []interface{},
- names []string) (
- result bool,
- error string) {
-
- obtained, ok := params[0].(bool)
- if !ok {
- return false, "Argument to " + checker.Name + " must be bool"
- }
-
- return obtained == checker.expected, ""
-}
-
-// The IsTrue checker verifies that the obtained value is true.
-//
-// For example:
-//
-// c.Assert(value, IsTrue)
-//
-var IsTrue Checker = &isBoolValueChecker{
- &CheckerInfo{Name: "IsTrue", Params: []string{"obtained"}},
- true,
-}
-
-// The IsFalse checker verifies that the obtained value is false.
-//
-// For example:
-//
-// c.Assert(value, IsFalse)
-//
-var IsFalse Checker = &isBoolValueChecker{
- &CheckerInfo{Name: "IsFalse", Params: []string{"obtained"}},
- false,
-}
-
-// -----------------------------------------------------------------------
-// BytesEquals checker.
-
-type bytesEquals struct{}
-
-func (b *bytesEquals) Check(params []interface{}, names []string) (bool, string) {
- if len(params) != 2 {
- return false, "BytesEqual takes 2 bytestring arguments"
- }
- b1, ok1 := params[0].([]byte)
- b2, ok2 := params[1].([]byte)
-
- if !(ok1 && ok2) {
- return false, "Arguments to BytesEqual must both be bytestrings"
- }
-
- return bytes.Equal(b1, b2), ""
-}
-
-func (b *bytesEquals) Info() *CheckerInfo {
- return &CheckerInfo{
- Name: "BytesEquals",
- Params: []string{"bytes_one", "bytes_two"},
- }
-}
-
-// BytesEquals checker compares two bytes sequence using bytes.Equal.
-//
-// For example:
-//
-// c.Assert(b, BytesEquals, []byte("bar"))
-//
-// Main difference between DeepEquals and BytesEquals is that BytesEquals treats
-// `nil` as empty byte sequence while DeepEquals doesn't.
-//
-// c.Assert(nil, BytesEquals, []byte("")) // succeeds
-// c.Assert(nil, DeepEquals, []byte("")) // fails
-var BytesEquals = &bytesEquals{}
-
-// -----------------------------------------------------------------------
-// HasKey checker.
-
-type hasKey struct{}
-
-func (h *hasKey) Check(params []interface{}, names []string) (bool, string) {
- if len(params) != 2 {
- return false, "HasKey takes 2 arguments: a map and a key"
- }
-
- mapValue := reflect.ValueOf(params[0])
- if mapValue.Kind() != reflect.Map {
- return false, "First argument to HasKey must be a map"
- }
-
- keyValue := reflect.ValueOf(params[1])
- if !keyValue.Type().AssignableTo(mapValue.Type().Key()) {
- return false, "Second argument must be assignable to the map key type"
- }
-
- return mapValue.MapIndex(keyValue).IsValid(), ""
-}
-
-func (h *hasKey) Info() *CheckerInfo {
- return &CheckerInfo{
- Name: "HasKey",
- Params: []string{"obtained", "key"},
- }
-}
-
-// The HasKey checker verifies that the obtained map contains the given key.
-//
-// For example:
-//
-// c.Assert(myMap, HasKey, "foo")
-//
-var HasKey = &hasKey{}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/compare.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/compare.go
deleted file mode 100644
index 7005cba..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/compare.go
+++ /dev/null
@@ -1,161 +0,0 @@
-package check
-
-import (
- "bytes"
- "fmt"
- "reflect"
- "time"
-)
-
-type compareFunc func(v1 interface{}, v2 interface{}) (bool, error)
-
-type valueCompare struct {
- Name string
-
- Func compareFunc
-
- Operator string
-}
-
-// v1 and v2 must have the same type
-// return >0 if v1 > v2
-// return 0 if v1 = v2
-// return <0 if v1 < v2
-// now we only support int, uint, float64, string and []byte comparison
-func compare(v1 interface{}, v2 interface{}) (int, error) {
- value1 := reflect.ValueOf(v1)
- value2 := reflect.ValueOf(v2)
-
- switch v1.(type) {
- case int, int8, int16, int32, int64:
- a1 := value1.Int()
- a2 := value2.Int()
- if a1 > a2 {
- return 1, nil
- } else if a1 == a2 {
- return 0, nil
- }
- return -1, nil
- case uint, uint8, uint16, uint32, uint64:
- a1 := value1.Uint()
- a2 := value2.Uint()
- if a1 > a2 {
- return 1, nil
- } else if a1 == a2 {
- return 0, nil
- }
- return -1, nil
- case float32, float64:
- a1 := value1.Float()
- a2 := value2.Float()
- if a1 > a2 {
- return 1, nil
- } else if a1 == a2 {
- return 0, nil
- }
- return -1, nil
- case string:
- a1 := value1.String()
- a2 := value2.String()
- if a1 > a2 {
- return 1, nil
- } else if a1 == a2 {
- return 0, nil
- }
- return -1, nil
- case []byte:
- a1 := value1.Bytes()
- a2 := value2.Bytes()
- return bytes.Compare(a1, a2), nil
- case time.Time:
- a1 := v1.(time.Time)
- a2 := v2.(time.Time)
- if a1.After(a2) {
- return 1, nil
- } else if a1.Equal(a2) {
- return 0, nil
- }
- return -1, nil
- case time.Duration:
- a1 := v1.(time.Duration)
- a2 := v2.(time.Duration)
- if a1 > a2 {
- return 1, nil
- } else if a1 == a2 {
- return 0, nil
- }
- return -1, nil
- default:
- return 0, fmt.Errorf("type %T is not supported now", v1)
- }
-}
-
-func less(v1 interface{}, v2 interface{}) (bool, error) {
- n, err := compare(v1, v2)
- if err != nil {
- return false, err
- }
-
- return n < 0, nil
-}
-
-func lessEqual(v1 interface{}, v2 interface{}) (bool, error) {
- n, err := compare(v1, v2)
- if err != nil {
- return false, err
- }
-
- return n <= 0, nil
-}
-
-func greater(v1 interface{}, v2 interface{}) (bool, error) {
- n, err := compare(v1, v2)
- if err != nil {
- return false, err
- }
-
- return n > 0, nil
-}
-
-func greaterEqual(v1 interface{}, v2 interface{}) (bool, error) {
- n, err := compare(v1, v2)
- if err != nil {
- return false, err
- }
-
- return n >= 0, nil
-}
-
-func (v *valueCompare) Check(params []interface{}, names []string) (bool, string) {
- if len(params) != 2 {
- return false, fmt.Sprintf("%s needs 2 arguments", v.Name)
- }
-
- v1 := params[0]
- v2 := params[1]
- v1Type := reflect.TypeOf(v1)
- v2Type := reflect.TypeOf(v2)
-
- if v1Type.Kind() != v2Type.Kind() {
- return false, fmt.Sprintf("%s needs two same type, but %s != %s", v.Name, v1Type.Kind(), v2Type.Kind())
- }
-
- b, err := v.Func(v1, v2)
- if err != nil {
- return false, fmt.Sprintf("%s check err %v", v.Name, err)
- }
-
- return b, ""
-}
-
-func (v *valueCompare) Info() *CheckerInfo {
- return &CheckerInfo{
- Name: v.Name,
- Params: []string{"compare_one", "compare_two"},
- }
-}
-
-var Less = &valueCompare{Name: "Less", Func: less, Operator: "<"}
-var LessEqual = &valueCompare{Name: "LessEqual", Func: lessEqual, Operator: "<="}
-var Greater = &valueCompare{Name: "Greater", Func: greater, Operator: ">"}
-var GreaterEqual = &valueCompare{Name: "GreaterEqual", Func: greaterEqual, Operator: ">="}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/helpers.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/helpers.go
deleted file mode 100644
index 68e861d..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/helpers.go
+++ /dev/null
@@ -1,236 +0,0 @@
-package check
-
-import (
- "fmt"
- "strings"
- "time"
-)
-
-// TestName returns the current test name in the form "SuiteName.TestName"
-func (c *C) TestName() string {
- return c.testName
-}
-
-// -----------------------------------------------------------------------
-// Basic succeeding/failing logic.
-
-// Failed returns whether the currently running test has already failed.
-func (c *C) Failed() bool {
- return c.status() == failedSt
-}
-
-// Fail marks the currently running test as failed.
-//
-// Something ought to have been previously logged so the developer can tell
-// what went wrong. The higher level helper functions will fail the test
-// and do the logging properly.
-func (c *C) Fail() {
- c.setStatus(failedSt)
-}
-
-// FailNow marks the currently running test as failed and stops running it.
-// Something ought to have been previously logged so the developer can tell
-// what went wrong. The higher level helper functions will fail the test
-// and do the logging properly.
-func (c *C) FailNow() {
- c.Fail()
- c.stopNow()
-}
-
-// Succeed marks the currently running test as succeeded, undoing any
-// previous failures.
-func (c *C) Succeed() {
- c.setStatus(succeededSt)
-}
-
-// SucceedNow marks the currently running test as succeeded, undoing any
-// previous failures, and stops running the test.
-func (c *C) SucceedNow() {
- c.Succeed()
- c.stopNow()
-}
-
-// ExpectFailure informs that the running test is knowingly broken for
-// the provided reason. If the test does not fail, an error will be reported
-// to raise attention to this fact. This method is useful to temporarily
-// disable tests which cover well known problems until a better time to
-// fix the problem is found, without forgetting about the fact that a
-// failure still exists.
-func (c *C) ExpectFailure(reason string) {
- if reason == "" {
- panic("Missing reason why the test is expected to fail")
- }
- c.mustFail = true
- c.reason = reason
-}
-
-// Skip skips the running test for the provided reason. If run from within
-// SetUpTest, the individual test being set up will be skipped, and if run
-// from within SetUpSuite, the whole suite is skipped.
-func (c *C) Skip(reason string) {
- if reason == "" {
- panic("Missing reason why the test is being skipped")
- }
- c.reason = reason
- c.setStatus(skippedSt)
- c.stopNow()
-}
-
-// Parallel will mark the test run parallel within a test suite.
-func (c *C) Parallel() {
- c.parallel <- c
-}
-
-// -----------------------------------------------------------------------
-// Basic logging.
-
-// GetTestLog returns the current test error output.
-func (c *C) GetTestLog() string {
- return c.logb.String()
-}
-
-// Log logs some information into the test error output.
-// The provided arguments are assembled together into a string with fmt.Sprint.
-func (c *C) Log(args ...interface{}) {
- c.log(args...)
-}
-
-// Log logs some information into the test error output.
-// The provided arguments are assembled together into a string with fmt.Sprintf.
-func (c *C) Logf(format string, args ...interface{}) {
- c.logf(format, args...)
-}
-
-// Output enables *C to be used as a logger in functions that require only
-// the minimum interface of *log.Logger.
-func (c *C) Output(calldepth int, s string) error {
- d := time.Now().Sub(c.startTime)
- msec := d / time.Millisecond
- sec := d / time.Second
- min := d / time.Minute
-
- c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s)
- return nil
-}
-
-// Error logs an error into the test error output and marks the test as failed.
-// The provided arguments are assembled together into a string with fmt.Sprint.
-func (c *C) Error(args ...interface{}) {
- c.logCaller(1)
- c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
- c.logNewLine()
- c.Fail()
-}
-
-// Errorf logs an error into the test error output and marks the test as failed.
-// The provided arguments are assembled together into a string with fmt.Sprintf.
-func (c *C) Errorf(format string, args ...interface{}) {
- c.logCaller(1)
- c.logString(fmt.Sprintf("Error: "+format, args...))
- c.logNewLine()
- c.Fail()
-}
-
-// Fatal logs an error into the test error output, marks the test as failed, and
-// stops the test execution. The provided arguments are assembled together into
-// a string with fmt.Sprint.
-func (c *C) Fatal(args ...interface{}) {
- c.logCaller(1)
- c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
- c.logNewLine()
- c.FailNow()
-}
-
-// Fatlaf logs an error into the test error output, marks the test as failed, and
-// stops the test execution. The provided arguments are assembled together into
-// a string with fmt.Sprintf.
-func (c *C) Fatalf(format string, args ...interface{}) {
- c.logCaller(1)
- c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...)))
- c.logNewLine()
- c.FailNow()
-}
-
-// -----------------------------------------------------------------------
-// Generic checks and assertions based on checkers.
-
-// Check verifies if the first value matches the expected value according
-// to the provided checker. If they do not match, an error is logged, the
-// test is marked as failed, and the test execution continues.
-//
-// Some checkers may not need the expected argument (e.g. IsNil).
-//
-// Extra arguments provided to the function are logged next to the reported
-// problem when the matching fails.
-func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool {
- return c.internalCheck("Check", obtained, checker, args...)
-}
-
-// Assert ensures that the first value matches the expected value according
-// to the provided checker. If they do not match, an error is logged, the
-// test is marked as failed, and the test execution stops.
-//
-// Some checkers may not need the expected argument (e.g. IsNil).
-//
-// Extra arguments provided to the function are logged next to the reported
-// problem when the matching fails.
-func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) {
- if !c.internalCheck("Assert", obtained, checker, args...) {
- c.stopNow()
- }
-}
-
-func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool {
- if checker == nil {
- c.logCaller(2)
- c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName))
- c.logString("Oops.. you've provided a nil checker!")
- c.logNewLine()
- c.Fail()
- return false
- }
-
- // If the last argument is a bug info, extract it out.
- var comment CommentInterface
- if len(args) > 0 {
- if c, ok := args[len(args)-1].(CommentInterface); ok {
- comment = c
- args = args[:len(args)-1]
- }
- }
-
- params := append([]interface{}{obtained}, args...)
- info := checker.Info()
-
- if len(params) != len(info.Params) {
- names := append([]string{info.Params[0], info.Name}, info.Params[1:]...)
- c.logCaller(2)
- c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", ")))
- c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1))
- c.logNewLine()
- c.Fail()
- return false
- }
-
- // Copy since it may be mutated by Check.
- names := append([]string{}, info.Params...)
-
- // Do the actual check.
- result, error := checker.Check(params, names)
- if !result || error != "" {
- c.logCaller(2)
- for i := 0; i != len(params); i++ {
- c.logValue(names[i], params[i])
- }
- if comment != nil {
- c.logString(comment.CheckCommentString())
- }
- if error != "" {
- c.logString(error)
- }
- c.logNewLine()
- c.Fail()
- return false
- }
- return true
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/printer.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/printer.go
deleted file mode 100644
index e0f7557..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/printer.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package check
-
-import (
- "bytes"
- "go/ast"
- "go/parser"
- "go/printer"
- "go/token"
- "os"
-)
-
-func indent(s, with string) (r string) {
- eol := true
- for i := 0; i != len(s); i++ {
- c := s[i]
- switch {
- case eol && c == '\n' || c == '\r':
- case c == '\n' || c == '\r':
- eol = true
- case eol:
- eol = false
- s = s[:i] + with + s[i:]
- i += len(with)
- }
- }
- return s
-}
-
-func printLine(filename string, line int) (string, error) {
- fset := token.NewFileSet()
- file, err := os.Open(filename)
- if err != nil {
- return "", err
- }
- fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments)
- if err != nil {
- return "", err
- }
- config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4}
- lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config}
- ast.Walk(lp, fnode)
- result := lp.output.Bytes()
- // Comments leave \n at the end.
- n := len(result)
- for n > 0 && result[n-1] == '\n' {
- n--
- }
- return string(result[:n]), nil
-}
-
-type linePrinter struct {
- config *printer.Config
- fset *token.FileSet
- fnode *ast.File
- line int
- output bytes.Buffer
- stmt ast.Stmt
-}
-
-func (lp *linePrinter) emit() bool {
- if lp.stmt != nil {
- lp.trim(lp.stmt)
- lp.printWithComments(lp.stmt)
- lp.stmt = nil
- return true
- }
- return false
-}
-
-func (lp *linePrinter) printWithComments(n ast.Node) {
- nfirst := lp.fset.Position(n.Pos()).Line
- nlast := lp.fset.Position(n.End()).Line
- for _, g := range lp.fnode.Comments {
- cfirst := lp.fset.Position(g.Pos()).Line
- clast := lp.fset.Position(g.End()).Line
- if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column {
- for _, c := range g.List {
- lp.output.WriteString(c.Text)
- lp.output.WriteByte('\n')
- }
- }
- if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash {
- // The printer will not include the comment if it starts past
- // the node itself. Trick it into printing by overlapping the
- // slash with the end of the statement.
- g.List[0].Slash = n.End() - 1
- }
- }
- node := &printer.CommentedNode{n, lp.fnode.Comments}
- lp.config.Fprint(&lp.output, lp.fset, node)
-}
-
-func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) {
- if n == nil {
- if lp.output.Len() == 0 {
- lp.emit()
- }
- return nil
- }
- first := lp.fset.Position(n.Pos()).Line
- last := lp.fset.Position(n.End()).Line
- if first <= lp.line && last >= lp.line {
- // Print the innermost statement containing the line.
- if stmt, ok := n.(ast.Stmt); ok {
- if _, ok := n.(*ast.BlockStmt); !ok {
- lp.stmt = stmt
- }
- }
- if first == lp.line && lp.emit() {
- return nil
- }
- return lp
- }
- return nil
-}
-
-func (lp *linePrinter) trim(n ast.Node) bool {
- stmt, ok := n.(ast.Stmt)
- if !ok {
- return true
- }
- line := lp.fset.Position(n.Pos()).Line
- if line != lp.line {
- return false
- }
- switch stmt := stmt.(type) {
- case *ast.IfStmt:
- stmt.Body = lp.trimBlock(stmt.Body)
- case *ast.SwitchStmt:
- stmt.Body = lp.trimBlock(stmt.Body)
- case *ast.TypeSwitchStmt:
- stmt.Body = lp.trimBlock(stmt.Body)
- case *ast.CaseClause:
- stmt.Body = lp.trimList(stmt.Body)
- case *ast.CommClause:
- stmt.Body = lp.trimList(stmt.Body)
- case *ast.BlockStmt:
- stmt.List = lp.trimList(stmt.List)
- }
- return true
-}
-
-func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt {
- if !lp.trim(stmt) {
- return lp.emptyBlock(stmt)
- }
- stmt.Rbrace = stmt.Lbrace
- return stmt
-}
-
-func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt {
- for i := 0; i != len(stmts); i++ {
- if !lp.trim(stmts[i]) {
- stmts[i] = lp.emptyStmt(stmts[i])
- break
- }
- }
- return stmts
-}
-
-func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt {
- return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}}
-}
-
-func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt {
- p := n.Pos()
- return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p}
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/run.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/run.go
deleted file mode 100644
index afa631f..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/pingcap/check/run.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package check
-
-import (
- "bufio"
- "flag"
- "fmt"
- "os"
- "testing"
- "time"
-)
-
-// -----------------------------------------------------------------------
-// Test suite registry.
-
-var allSuites []interface{}
-
-// Suite registers the given value as a test suite to be run. Any methods
-// starting with the Test prefix in the given value will be considered as
-// a test method.
-func Suite(suite interface{}) interface{} {
- allSuites = append(allSuites, suite)
- return suite
-}
-
-// -----------------------------------------------------------------------
-// Public running interface.
-
-var (
- oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run")
- oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode")
- oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)")
- oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks")
- oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark")
- oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run")
- oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory")
-
- newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run")
- newVerboseFlag = flag.Bool("check.v", false, "Verbose mode")
- newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)")
- newBenchFlag = flag.Bool("check.b", false, "Run benchmarks")
- newBenchTime = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark")
- newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks")
- newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run")
- newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory")
- newExcludeFlag = flag.String("check.exclude", "", "Regular expression to exclude tests to run")
-)
-
-var CustomVerboseFlag bool
-
-// TestingT runs all test suites registered with the Suite function,
-// printing results to stdout, and reporting any failures back to
-// the "testing" package.
-func TestingT(testingT *testing.T) {
- benchTime := *newBenchTime
- if benchTime == 1*time.Second {
- benchTime = *oldBenchTime
- }
- conf := &RunConf{
- Filter: *oldFilterFlag + *newFilterFlag,
- Verbose: *oldVerboseFlag || *newVerboseFlag || CustomVerboseFlag,
- Stream: *oldStreamFlag || *newStreamFlag,
- Benchmark: *oldBenchFlag || *newBenchFlag,
- BenchmarkTime: benchTime,
- BenchmarkMem: *newBenchMem,
- KeepWorkDir: *oldWorkFlag || *newWorkFlag,
- Exclude: *newExcludeFlag,
- }
- if *oldListFlag || *newListFlag {
- w := bufio.NewWriter(os.Stdout)
- for _, name := range ListAll(conf) {
- fmt.Fprintln(w, name)
- }
- w.Flush()
- return
- }
- result := RunAll(conf)
- println(result.String())
- if !result.Passed() {
- testingT.Fail()
- }
-}
-
-// RunAll runs all test suites registered with the Suite function, using the
-// provided run configuration.
-func RunAll(runConf *RunConf) *Result {
- result := Result{}
- for _, suite := range allSuites {
- result.Add(Run(suite, runConf))
- }
- return &result
-}
-
-// Run runs the provided test suite using the provided run configuration.
-func Run(suite interface{}, runConf *RunConf) *Result {
- runner := newSuiteRunner(suite, runConf)
- return runner.run()
-}
-
-// ListAll returns the names of all the test functions registered with the
-// Suite function that will be run with the provided run configuration.
-func ListAll(runConf *RunConf) []string {
- var names []string
- for _, suite := range allSuites {
- names = append(names, List(suite, runConf)...)
- }
- return names
-}
-
-// List returns the names of the test functions in the given
-// suite that will be run with the provided run configuration.
-func List(suite interface{}, runConf *RunConf) []string {
- var names []string
- runner := newSuiteRunner(suite, runConf)
- for _, t := range runner.tests {
- names = append(names, t.String())
- }
- return names
-}
-
-// -----------------------------------------------------------------------
-// Result methods.
-
-func (r *Result) Add(other *Result) {
- r.Succeeded += other.Succeeded
- r.Skipped += other.Skipped
- r.Failed += other.Failed
- r.Panicked += other.Panicked
- r.FixturePanicked += other.FixturePanicked
- r.ExpectedFailures += other.ExpectedFailures
- r.Missed += other.Missed
- if r.WorkDir != "" && other.WorkDir != "" {
- r.WorkDir += ":" + other.WorkDir
- } else if other.WorkDir != "" {
- r.WorkDir = other.WorkDir
- }
-}
-
-func (r *Result) Passed() bool {
- return (r.Failed == 0 && r.Panicked == 0 &&
- r.FixturePanicked == 0 && r.Missed == 0 &&
- r.RunError == nil)
-}
-
-func (r *Result) String() string {
- if r.RunError != nil {
- return "ERROR: " + r.RunError.Error()
- }
-
- var value string
- if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 &&
- r.Missed == 0 {
- value = "OK: "
- } else {
- value = "OOPS: "
- }
- value += fmt.Sprintf("%d passed", r.Succeeded)
- if r.Skipped != 0 {
- value += fmt.Sprintf(", %d skipped", r.Skipped)
- }
- if r.ExpectedFailures != 0 {
- value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures)
- }
- if r.Failed != 0 {
- value += fmt.Sprintf(", %d FAILED", r.Failed)
- }
- if r.Panicked != 0 {
- value += fmt.Sprintf(", %d PANICKED", r.Panicked)
- }
- if r.FixturePanicked != 0 {
- value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked)
- }
- if r.Missed != 0 {
- value += fmt.Sprintf(", %d MISSED", r.Missed)
- }
- if r.WorkDir != "" {
- value += "\nWORK=" + r.WorkDir
- }
- return value
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/siddontang/go-mysql/vendor/github.com/satori/go.uuid/LICENSE
deleted file mode 100644
index 926d549..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/satori/go.uuid/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (C) 2013-2018 by Maxim Bublis
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/satori/go.uuid/uuid.go
deleted file mode 100644
index a2b8e2c..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/satori/go.uuid/uuid.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-// Package uuid provides implementation of Universally Unique Identifier (UUID).
-// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and
-// version 2 (as specified in DCE 1.1).
-package uuid
-
-import (
- "bytes"
- "encoding/hex"
-)
-
-// Size of a UUID in bytes.
-const Size = 16
-
-// UUID representation compliant with specification
-// described in RFC 4122.
-type UUID [Size]byte
-
-// UUID versions
-const (
- _ byte = iota
- V1
- V2
- V3
- V4
- V5
-)
-
-// UUID layout variants.
-const (
- VariantNCS byte = iota
- VariantRFC4122
- VariantMicrosoft
- VariantFuture
-)
-
-// UUID DCE domains.
-const (
- DomainPerson = iota
- DomainGroup
- DomainOrg
-)
-
-// String parse helpers.
-var (
- urnPrefix = []byte("urn:uuid:")
- byteGroups = []int{8, 4, 4, 4, 12}
-)
-
-// Nil is special form of UUID that is specified to have all
-// 128 bits set to zero.
-var Nil = UUID{}
-
-// Predefined namespace UUIDs.
-var (
- NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
- NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
- NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
- NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
-)
-
-// Equal returns true if u1 and u2 equals, otherwise returns false.
-func Equal(u1 UUID, u2 UUID) bool {
- return bytes.Equal(u1[:], u2[:])
-}
-
-// Version returns algorithm version used to generate UUID.
-func (u UUID) Version() byte {
- return u[6] >> 4
-}
-
-// Variant returns UUID layout variant.
-func (u UUID) Variant() byte {
- switch {
- case (u[8] >> 7) == 0x00:
- return VariantNCS
- case (u[8] >> 6) == 0x02:
- return VariantRFC4122
- case (u[8] >> 5) == 0x06:
- return VariantMicrosoft
- case (u[8] >> 5) == 0x07:
- fallthrough
- default:
- return VariantFuture
- }
-}
-
-// Bytes returns bytes slice representation of UUID.
-func (u UUID) Bytes() []byte {
- return u[:]
-}
-
-// Returns canonical string representation of UUID:
-// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
-func (u UUID) String() string {
- buf := make([]byte, 36)
-
- hex.Encode(buf[0:8], u[0:4])
- buf[8] = '-'
- hex.Encode(buf[9:13], u[4:6])
- buf[13] = '-'
- hex.Encode(buf[14:18], u[6:8])
- buf[18] = '-'
- hex.Encode(buf[19:23], u[8:10])
- buf[23] = '-'
- hex.Encode(buf[24:], u[10:])
-
- return string(buf)
-}
-
-// SetVersion sets version bits.
-func (u *UUID) SetVersion(v byte) {
- u[6] = (u[6] & 0x0f) | (v << 4)
-}
-
-// SetVariant sets variant bits.
-func (u *UUID) SetVariant(v byte) {
- switch v {
- case VariantNCS:
- u[8] = (u[8]&(0xff>>1) | (0x00 << 7))
- case VariantRFC4122:
- u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
- case VariantMicrosoft:
- u[8] = (u[8]&(0xff>>3) | (0x06 << 5))
- case VariantFuture:
- fallthrough
- default:
- u[8] = (u[8]&(0xff>>3) | (0x07 << 5))
- }
-}
-
-// Must is a helper that wraps a call to a function returning (UUID, error)
-// and panics if the error is non-nil. It is intended for use in variable
-// initializations such as
-// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"));
-func Must(u UUID, err error) UUID {
- if err != nil {
- panic(err)
- }
- return u
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/bson/LICENSE b/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/bson/LICENSE
deleted file mode 100644
index 8903260..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/bson/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-BSON library for Go
-
-Copyright (c) 2010-2012 - Gustavo Niemeyer
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/hack/hack.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/hack/hack.go
deleted file mode 100644
index 74ee83c..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/hack/hack.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package hack
-
-import (
- "reflect"
- "unsafe"
-)
-
-// no copy to change slice to string
-// use your own risk
-func String(b []byte) (s string) {
- pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
- pstring.Data = pbytes.Data
- pstring.Len = pbytes.Len
- return
-}
-
-// no copy to change string to slice
-// use your own risk
-func Slice(s string) (b []byte) {
- pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
- pbytes.Data = pstring.Data
- pbytes.Len = pstring.Len
- pbytes.Cap = pstring.Len
- return
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/sync2/atomic.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/sync2/atomic.go
deleted file mode 100644
index 382fc20..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/sync2/atomic.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2013, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync2
-
-import (
- "sync"
- "sync/atomic"
- "time"
-)
-
-type AtomicInt32 int32
-
-func (i *AtomicInt32) Add(n int32) int32 {
- return atomic.AddInt32((*int32)(i), n)
-}
-
-func (i *AtomicInt32) Set(n int32) {
- atomic.StoreInt32((*int32)(i), n)
-}
-
-func (i *AtomicInt32) Get() int32 {
- return atomic.LoadInt32((*int32)(i))
-}
-
-func (i *AtomicInt32) CompareAndSwap(oldval, newval int32) (swapped bool) {
- return atomic.CompareAndSwapInt32((*int32)(i), oldval, newval)
-}
-
-type AtomicUint32 uint32
-
-func (i *AtomicUint32) Add(n uint32) uint32 {
- return atomic.AddUint32((*uint32)(i), n)
-}
-
-func (i *AtomicUint32) Set(n uint32) {
- atomic.StoreUint32((*uint32)(i), n)
-}
-
-func (i *AtomicUint32) Get() uint32 {
- return atomic.LoadUint32((*uint32)(i))
-}
-
-func (i *AtomicUint32) CompareAndSwap(oldval, newval uint32) (swapped bool) {
- return atomic.CompareAndSwapUint32((*uint32)(i), oldval, newval)
-}
-
-type AtomicInt64 int64
-
-func (i *AtomicInt64) Add(n int64) int64 {
- return atomic.AddInt64((*int64)(i), n)
-}
-
-func (i *AtomicInt64) Set(n int64) {
- atomic.StoreInt64((*int64)(i), n)
-}
-
-func (i *AtomicInt64) Get() int64 {
- return atomic.LoadInt64((*int64)(i))
-}
-
-func (i *AtomicInt64) CompareAndSwap(oldval, newval int64) (swapped bool) {
- return atomic.CompareAndSwapInt64((*int64)(i), oldval, newval)
-}
-
-type AtomicUint64 uint64
-
-func (i *AtomicUint64) Add(n uint64) uint64 {
- return atomic.AddUint64((*uint64)(i), n)
-}
-
-func (i *AtomicUint64) Set(n uint64) {
- atomic.StoreUint64((*uint64)(i), n)
-}
-
-func (i *AtomicUint64) Get() uint64 {
- return atomic.LoadUint64((*uint64)(i))
-}
-
-func (i *AtomicUint64) CompareAndSwap(oldval, newval uint64) (swapped bool) {
- return atomic.CompareAndSwapUint64((*uint64)(i), oldval, newval)
-}
-
-type AtomicDuration int64
-
-func (d *AtomicDuration) Add(duration time.Duration) time.Duration {
- return time.Duration(atomic.AddInt64((*int64)(d), int64(duration)))
-}
-
-func (d *AtomicDuration) Set(duration time.Duration) {
- atomic.StoreInt64((*int64)(d), int64(duration))
-}
-
-func (d *AtomicDuration) Get() time.Duration {
- return time.Duration(atomic.LoadInt64((*int64)(d)))
-}
-
-func (d *AtomicDuration) CompareAndSwap(oldval, newval time.Duration) (swapped bool) {
- return atomic.CompareAndSwapInt64((*int64)(d), int64(oldval), int64(newval))
-}
-
-// AtomicString gives you atomic-style APIs for string, but
-// it's only a convenience wrapper that uses a mutex. So, it's
-// not as efficient as the rest of the atomic types.
-type AtomicString struct {
- mu sync.Mutex
- str string
-}
-
-func (s *AtomicString) Set(str string) {
- s.mu.Lock()
- s.str = str
- s.mu.Unlock()
-}
-
-func (s *AtomicString) Get() string {
- s.mu.Lock()
- str := s.str
- s.mu.Unlock()
- return str
-}
-
-func (s *AtomicString) CompareAndSwap(oldval, newval string) (swapped bool) {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.str == oldval {
- s.str = newval
- return true
- }
- return false
-}
-
-type AtomicBool int32
-
-func (b *AtomicBool) Set(v bool) {
- if v {
- atomic.StoreInt32((*int32)(b), 1)
- } else {
- atomic.StoreInt32((*int32)(b), 0)
- }
-}
-
-func (b *AtomicBool) Get() bool {
- return atomic.LoadInt32((*int32)(b)) == 1
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/sync2/semaphore.go b/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/sync2/semaphore.go
deleted file mode 100644
index d310da7..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/sync2/semaphore.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package sync2
-
-import (
- "sync"
- "sync/atomic"
- "time"
-)
-
-func NewSemaphore(initialCount int) *Semaphore {
- res := &Semaphore{
- counter: int64(initialCount),
- }
- res.cond.L = &res.lock
- return res
-}
-
-type Semaphore struct {
- lock sync.Mutex
- cond sync.Cond
- counter int64
-}
-
-func (s *Semaphore) Release() {
- s.lock.Lock()
- s.counter += 1
- if s.counter >= 0 {
- s.cond.Signal()
- }
- s.lock.Unlock()
-}
-
-func (s *Semaphore) Acquire() {
- s.lock.Lock()
- for s.counter < 1 {
- s.cond.Wait()
- }
- s.counter -= 1
- s.lock.Unlock()
-}
-
-func (s *Semaphore) AcquireTimeout(timeout time.Duration) bool {
- done := make(chan bool, 1)
- // Gate used to communicate between the threads and decide what the result
- // is. If the main thread decides, we have timed out, otherwise we succeed.
- decided := new(int32)
- go func() {
- s.Acquire()
- if atomic.SwapInt32(decided, 1) == 0 {
- done <- true
- } else {
- // If we already decided the result, and this thread did not win
- s.Release()
- }
- }()
- select {
- case <-done:
- return true
- case <-time.NewTimer(timeout).C:
- if atomic.SwapInt32(decided, 1) == 1 {
- // The other thread already decided the result
- return true
- }
- return false
- }
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/google.golang.org/appengine/LICENSE b/vendor/github.com/siddontang/go-mysql/vendor/google.golang.org/appengine/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/google.golang.org/appengine/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/google.golang.org/appengine/cloudsql/cloudsql.go b/vendor/github.com/siddontang/go-mysql/vendor/google.golang.org/appengine/cloudsql/cloudsql.go
deleted file mode 100644
index 7b27e6b..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/google.golang.org/appengine/cloudsql/cloudsql.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2013 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-/*
-Package cloudsql exposes access to Google Cloud SQL databases.
-
-This package does not work in App Engine "flexible environment".
-
-This package is intended for MySQL drivers to make App Engine-specific
-connections. Applications should use this package through database/sql:
-Select a pure Go MySQL driver that supports this package, and use sql.Open
-with protocol "cloudsql" and an address of the Cloud SQL instance.
-
-A Go MySQL driver that has been tested to work well with Cloud SQL
-is the go-sql-driver:
- import "database/sql"
- import _ "github.com/go-sql-driver/mysql"
-
- db, err := sql.Open("mysql", "user@cloudsql(project-id:instance-name)/dbname")
-
-
-Another driver that works well with Cloud SQL is the mymysql driver:
- import "database/sql"
- import _ "github.com/ziutek/mymysql/godrv"
-
- db, err := sql.Open("mymysql", "cloudsql:instance-name*dbname/user/password")
-
-
-Using either of these drivers, you can perform a standard SQL query.
-This example assumes there is a table named 'users' with
-columns 'first_name' and 'last_name':
-
- rows, err := db.Query("SELECT first_name, last_name FROM users")
- if err != nil {
- log.Errorf(ctx, "db.Query: %v", err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var firstName string
- var lastName string
- if err := rows.Scan(&firstName, &lastName); err != nil {
- log.Errorf(ctx, "rows.Scan: %v", err)
- continue
- }
- log.Infof(ctx, "First: %v - Last: %v", firstName, lastName)
- }
- if err := rows.Err(); err != nil {
- log.Errorf(ctx, "Row error: %v", err)
- }
-*/
-package cloudsql
-
-import (
- "net"
-)
-
-// Dial connects to the named Cloud SQL instance.
-func Dial(instance string) (net.Conn, error) {
- return connect(instance)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go b/vendor/github.com/siddontang/go-mysql/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go
deleted file mode 100644
index af62dba..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2013 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package cloudsql
-
-import (
- "net"
-
- "appengine/cloudsql"
-)
-
-func connect(instance string) (net.Conn, error) {
- return cloudsql.Dial(instance)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go b/vendor/github.com/siddontang/go-mysql/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go
deleted file mode 100644
index 90fa7b3..0000000
--- a/vendor/github.com/siddontang/go-mysql/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2013 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package cloudsql
-
-import (
- "errors"
- "net"
-)
-
-func connect(instance string) (net.Conn, error) {
- return nil, errors.New(`cloudsql: not supported in App Engine "flexible environment"`)
-}
diff --git a/vendor/github.com/siddontang/go-mysql/vitess_license b/vendor/github.com/siddontang/go-mysql/vitess_license
deleted file mode 100644
index 989d02e..0000000
--- a/vendor/github.com/siddontang/go-mysql/vitess_license
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright 2012, Google Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/LICENSE b/vendor/github.com/siddontang/go/LICENSE
similarity index 100%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/LICENSE
rename to vendor/github.com/siddontang/go/LICENSE
diff --git a/vendor/github.com/siddontang/go/hack/hack_test.go b/vendor/github.com/siddontang/go/hack/hack_test.go
deleted file mode 100644
index 7b11b0b..0000000
--- a/vendor/github.com/siddontang/go/hack/hack_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package hack
-
-import (
- "bytes"
- "testing"
-)
-
-func TestString(t *testing.T) {
- b := []byte("hello world")
- a := String(b)
-
- if a != "hello world" {
- t.Fatal(a)
- }
-
- b[0] = 'a'
-
- if a != "aello world" {
- t.Fatal(a)
- }
-
- b = append(b, "abc"...)
- if a != "aello world" {
- t.Fatal(a)
- }
-}
-
-func TestByte(t *testing.T) {
- a := "hello world"
-
- b := Slice(a)
-
- if !bytes.Equal(b, []byte("hello world")) {
- t.Fatal(string(b))
- }
-}
diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml
new file mode 100644
index 0000000..571116c
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.codecov.yml
@@ -0,0 +1,19 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 100 # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+
+# Also update COVER_IGNORE_PKGS in the Makefile.
+ignore:
+ - /internal/gen-atomicint/
+ - /internal/gen-valuewrapper/
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
new file mode 100644
index 0000000..c3fa253
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.gitignore
@@ -0,0 +1,12 @@
+/bin
+.DS_Store
+/vendor
+cover.html
+cover.out
+lint.log
+
+# Binaries
+*.test
+
+# Profiling output
+*.prof
diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml
new file mode 100644
index 0000000..13d0a4f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.travis.yml
@@ -0,0 +1,27 @@
+sudo: false
+language: go
+go_import_path: go.uber.org/atomic
+
+env:
+ global:
+ - GO111MODULE=on
+
+matrix:
+ include:
+ - go: oldstable
+ - go: stable
+ env: LINT=1
+
+cache:
+ directories:
+ - vendor
+
+before_install:
+ - go version
+
+script:
+ - test -z "$LINT" || make lint
+ - make cover
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md
new file mode 100644
index 0000000..24c0274
--- /dev/null
+++ b/vendor/go.uber.org/atomic/CHANGELOG.md
@@ -0,0 +1,76 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [1.7.0] - 2020-09-14
+### Added
+- Support JSON serialization and deserialization of primitive atomic types.
+- Support Text marshalling and unmarshalling for string atomics.
+
+### Changed
+- Disallow incorrect comparison of atomic values in a non-atomic way.
+
+### Removed
+- Remove dependency on `golang.org/x/{lint, tools}`.
+
+## [1.6.0] - 2020-02-24
+### Changed
+- Drop library dependency on `golang.org/x/{lint, tools}`.
+
+## [1.5.1] - 2019-11-19
+- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together
+ causing `CAS` to fail even though the old value matches.
+
+## [1.5.0] - 2019-10-29
+### Changed
+- With Go modules, only the `go.uber.org/atomic` import path is supported now.
+ If you need to use the old import path, please add a `replace` directive to
+ your `go.mod`.
+
+## [1.4.0] - 2019-05-01
+### Added
+ - Add `atomic.Error` type for atomic operations on `error` values.
+
+## [1.3.2] - 2018-05-02
+### Added
+- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
+
+## [1.3.1] - 2017-11-14
+### Fixed
+- Revert optimization for `atomic.String.Store("")` which caused data races.
+
+## [1.3.0] - 2017-11-13
+### Added
+- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
+
+### Changed
+- Optimize `atomic.String.Store("")` by avoiding an allocation.
+
+## [1.2.0] - 2017-04-12
+### Added
+- Shadow `atomic.Value` from `sync/atomic`.
+
+## [1.1.0] - 2017-03-10
+### Added
+- Add atomic `Float64` type.
+
+### Changed
+- Support new `go.uber.org/atomic` import path.
+
+## [1.0.0] - 2016-07-18
+
+- Initial release.
+
+[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
+[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
+[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
+[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
+[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
+[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
+[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
+[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
+[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
+[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
+[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt
new file mode 100644
index 0000000..8765c9f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
new file mode 100644
index 0000000..1b1376d
--- /dev/null
+++ b/vendor/go.uber.org/atomic/Makefile
@@ -0,0 +1,78 @@
+# Directory to place `go install`ed binaries into.
+export GOBIN ?= $(shell pwd)/bin
+
+GOLINT = $(GOBIN)/golint
+GEN_ATOMICINT = $(GOBIN)/gen-atomicint
+GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper
+STATICCHECK = $(GOBIN)/staticcheck
+
+GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print)
+
+# Also update ignore section in .codecov.yml.
+COVER_IGNORE_PKGS = \
+ go.uber.org/atomic/internal/gen-atomicint \
+ go.uber.org/atomic/internal/gen-atomicwrapper
+
+.PHONY: build
+build:
+ go build ./...
+
+.PHONY: test
+test:
+ go test -race ./...
+
+.PHONY: gofmt
+gofmt:
+ $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
+ gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
+ @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false)
+
+$(GOLINT):
+ cd tools && go install golang.org/x/lint/golint
+
+$(STATICCHECK):
+ cd tools && go install honnef.co/go/tools/cmd/staticcheck
+
+$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*)
+ go build -o $@ ./internal/gen-atomicwrapper
+
+$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*)
+ go build -o $@ ./internal/gen-atomicint
+
+.PHONY: golint
+golint: $(GOLINT)
+ $(GOLINT) ./...
+
+.PHONY: staticcheck
+staticcheck: $(STATICCHECK)
+ $(STATICCHECK) ./...
+
+.PHONY: lint
+lint: gofmt golint staticcheck generatenodirty
+
+# comma separated list of packages to consider for code coverage.
+COVER_PKG = $(shell \
+ go list -find ./... | \
+ grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \
+ paste -sd, -)
+
+.PHONY: cover
+cover:
+ go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./...
+ go tool cover -html=cover.out -o cover.html
+
+.PHONY: generate
+generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
+ go generate ./...
+
+.PHONY: generatenodirty
+generatenodirty:
+ @[ -z "$$(git status --porcelain)" ] || ( \
+ echo "Working tree is dirty. Commit your changes first."; \
+ exit 1 )
+ @make generate
+ @status=$$(git status --porcelain); \
+ [ -z "$$status" ] || ( \
+ echo "Working tree is dirty after `make generate`:"; \
+ echo "$$status"; \
+ echo "Please ensure that the generated code is up-to-date." )
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
new file mode 100644
index 0000000..ade0c20
--- /dev/null
+++ b/vendor/go.uber.org/atomic/README.md
@@ -0,0 +1,63 @@
+# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
+
+Simple wrappers for primitive types to enforce atomic access.
+
+## Installation
+
+```shell
+$ go get -u go.uber.org/atomic@v1
+```
+
+### Legacy Import Path
+
+As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way
+of using this package. If you are using Go modules, this package will fail to
+compile with the legacy import path path `github.com/uber-go/atomic`.
+
+We recommend migrating your code to the new import path but if you're unable
+to do so, or if your dependencies are still using the old import path, you
+will have to add a `replace` directive to your `go.mod` file downgrading the
+legacy import path to an older version.
+
+```
+replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0
+```
+
+You can do so automatically by running the following command.
+
+```shell
+$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0
+```
+
+## Usage
+
+The standard library's `sync/atomic` is powerful, but it's easy to forget which
+variables must be accessed atomically. `go.uber.org/atomic` preserves all the
+functionality of the standard library, but wraps the primitive types to
+provide a safer, more convenient API.
+
+```go
+var atom atomic.Uint32
+atom.Store(42)
+atom.Sub(2)
+atom.CAS(40, 11)
+```
+
+See the [documentation][doc] for a complete API specification.
+
+## Development Status
+
+Stable.
+
+---
+
+Released under the [MIT License](LICENSE.txt).
+
+[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
+[doc]: https://godoc.org/go.uber.org/atomic
+[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master
+[ci]: https://travis-ci.com/uber-go/atomic
+[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/atomic
+[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
+[reportcard]: https://goreportcard.com/report/go.uber.org/atomic
diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go
new file mode 100644
index 0000000..9cf1914
--- /dev/null
+++ b/vendor/go.uber.org/atomic/bool.go
@@ -0,0 +1,81 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+)
+
+// Bool is an atomic type-safe wrapper for bool values.
+type Bool struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Uint32
+}
+
+var _zeroBool bool
+
+// NewBool creates a new Bool.
+func NewBool(v bool) *Bool {
+ x := &Bool{}
+ if v != _zeroBool {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped bool.
+func (x *Bool) Load() bool {
+ return truthy(x.v.Load())
+}
+
+// Store atomically stores the passed bool.
+func (x *Bool) Store(v bool) {
+ x.v.Store(boolToInt(v))
+}
+
+// CAS is an atomic compare-and-swap for bool values.
+func (x *Bool) CAS(o, n bool) bool {
+ return x.v.CAS(boolToInt(o), boolToInt(n))
+}
+
+// Swap atomically stores the given bool and returns the old
+// value.
+func (x *Bool) Swap(o bool) bool {
+ return truthy(x.v.Swap(boolToInt(o)))
+}
+
+// MarshalJSON encodes the wrapped bool into JSON.
+func (x *Bool) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a bool from JSON.
+func (x *Bool) UnmarshalJSON(b []byte) error {
+ var v bool
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go
new file mode 100644
index 0000000..c7bf7a8
--- /dev/null
+++ b/vendor/go.uber.org/atomic/bool_ext.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "strconv"
+)
+
+//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go
+
+func truthy(n uint32) bool {
+ return n == 1
+}
+
+func boolToInt(b bool) uint32 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// Toggle atomically negates the Boolean and returns the previous value.
+func (b *Bool) Toggle() bool {
+ for {
+ old := b.Load()
+ if b.CAS(old, !old) {
+ return old
+ }
+ }
+}
+
+// String encodes the wrapped value as a string.
+func (b *Bool) String() string {
+ return strconv.FormatBool(b.Load())
+}
diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go
new file mode 100644
index 0000000..ae7390e
--- /dev/null
+++ b/vendor/go.uber.org/atomic/doc.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package atomic provides simple wrappers around numerics to enforce atomic
+// access.
+package atomic
diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go
new file mode 100644
index 0000000..027cfcb
--- /dev/null
+++ b/vendor/go.uber.org/atomic/duration.go
@@ -0,0 +1,82 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "time"
+)
+
+// Duration is an atomic type-safe wrapper for time.Duration values.
+type Duration struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Int64
+}
+
+var _zeroDuration time.Duration
+
+// NewDuration creates a new Duration.
+func NewDuration(v time.Duration) *Duration {
+ x := &Duration{}
+ if v != _zeroDuration {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped time.Duration.
+func (x *Duration) Load() time.Duration {
+ return time.Duration(x.v.Load())
+}
+
+// Store atomically stores the passed time.Duration.
+func (x *Duration) Store(v time.Duration) {
+ x.v.Store(int64(v))
+}
+
+// CAS is an atomic compare-and-swap for time.Duration values.
+func (x *Duration) CAS(o, n time.Duration) bool {
+ return x.v.CAS(int64(o), int64(n))
+}
+
+// Swap atomically stores the given time.Duration and returns the old
+// value.
+func (x *Duration) Swap(o time.Duration) time.Duration {
+ return time.Duration(x.v.Swap(int64(o)))
+}
+
+// MarshalJSON encodes the wrapped time.Duration into JSON.
+func (x *Duration) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a time.Duration from JSON.
+func (x *Duration) UnmarshalJSON(b []byte) error {
+ var v time.Duration
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go
new file mode 100644
index 0000000..6273b66
--- /dev/null
+++ b/vendor/go.uber.org/atomic/duration_ext.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "time"
+
+//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go
+
+// Add atomically adds to the wrapped time.Duration and returns the new value.
+func (d *Duration) Add(n time.Duration) time.Duration {
+ return time.Duration(d.v.Add(int64(n)))
+}
+
+// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
+func (d *Duration) Sub(n time.Duration) time.Duration {
+ return time.Duration(d.v.Sub(int64(n)))
+}
+
+// String encodes the wrapped value as a string.
+func (d *Duration) String() string {
+ return d.Load().String()
+}
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
new file mode 100644
index 0000000..a6166fb
--- /dev/null
+++ b/vendor/go.uber.org/atomic/error.go
@@ -0,0 +1,51 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// Error is an atomic type-safe wrapper for error values.
+type Error struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Value
+}
+
+var _zeroError error
+
+// NewError creates a new Error.
+func NewError(v error) *Error {
+ x := &Error{}
+ if v != _zeroError {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped error.
+func (x *Error) Load() error {
+ return unpackError(x.v.Load())
+}
+
+// Store atomically stores the passed error.
+func (x *Error) Store(v error) {
+ x.v.Store(packError(v))
+}
diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go
new file mode 100644
index 0000000..ffe0be2
--- /dev/null
+++ b/vendor/go.uber.org/atomic/error_ext.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// atomic.Value panics on nil inputs, or if the underlying type changes.
+// Stabilize by always storing a custom struct that we control.
+
+//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go
+
+type packedError struct{ Value error }
+
+func packError(v error) interface{} {
+ return packedError{v}
+}
+
+func unpackError(v interface{}) error {
+ if err, ok := v.(packedError); ok {
+ return err.Value
+ }
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go
new file mode 100644
index 0000000..0719060
--- /dev/null
+++ b/vendor/go.uber.org/atomic/float64.go
@@ -0,0 +1,76 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "math"
+)
+
+// Float64 is an atomic type-safe wrapper for float64 values.
+type Float64 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Uint64
+}
+
+var _zeroFloat64 float64
+
+// NewFloat64 creates a new Float64.
+func NewFloat64(v float64) *Float64 {
+ x := &Float64{}
+ if v != _zeroFloat64 {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped float64.
+func (x *Float64) Load() float64 {
+ return math.Float64frombits(x.v.Load())
+}
+
+// Store atomically stores the passed float64.
+func (x *Float64) Store(v float64) {
+ x.v.Store(math.Float64bits(v))
+}
+
+// CAS is an atomic compare-and-swap for float64 values.
+func (x *Float64) CAS(o, n float64) bool {
+ return x.v.CAS(math.Float64bits(o), math.Float64bits(n))
+}
+
+// MarshalJSON encodes the wrapped float64 into JSON.
+func (x *Float64) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a float64 from JSON.
+func (x *Float64) UnmarshalJSON(b []byte) error {
+ var v float64
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go
new file mode 100644
index 0000000..927b1ad
--- /dev/null
+++ b/vendor/go.uber.org/atomic/float64_ext.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "strconv"
+
+//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go
+
+// Add atomically adds to the wrapped float64 and returns the new value.
+func (f *Float64) Add(s float64) float64 {
+ for {
+ old := f.Load()
+ new := old + s
+ if f.CAS(old, new) {
+ return new
+ }
+ }
+}
+
+// Sub atomically subtracts from the wrapped float64 and returns the new value.
+func (f *Float64) Sub(s float64) float64 {
+ return f.Add(-s)
+}
+
+// String encodes the wrapped value as a string.
+func (f *Float64) String() string {
+ // 'g' is the behavior for floats with %v.
+ return strconv.FormatFloat(f.Load(), 'g', -1, 64)
+}
diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go
new file mode 100644
index 0000000..50d6b24
--- /dev/null
+++ b/vendor/go.uber.org/atomic/gen.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go
+//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go
+//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go
+//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go
diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go
new file mode 100644
index 0000000..18ae564
--- /dev/null
+++ b/vendor/go.uber.org/atomic/int32.go
@@ -0,0 +1,102 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Int32 is an atomic wrapper around int32.
+type Int32 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v int32
+}
+
+// NewInt32 creates a new Int32.
+func NewInt32(i int32) *Int32 {
+ return &Int32{v: i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int32) Load() int32 {
+ return atomic.LoadInt32(&i.v)
+}
+
+// Add atomically adds to the wrapped int32 and returns the new value.
+func (i *Int32) Add(n int32) int32 {
+ return atomic.AddInt32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int32 and returns the new value.
+func (i *Int32) Sub(n int32) int32 {
+ return atomic.AddInt32(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int32 and returns the new value.
+func (i *Int32) Inc() int32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int32 and returns the new value.
+func (i *Int32) Dec() int32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int32) CAS(old, new int32) bool {
+ return atomic.CompareAndSwapInt32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int32) Store(n int32) {
+ atomic.StoreInt32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int32 and returns the old value.
+func (i *Int32) Swap(n int32) int32 {
+ return atomic.SwapInt32(&i.v, n)
+}
+
+// MarshalJSON encodes the wrapped int32 into JSON.
+func (i *Int32) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped int32.
+func (i *Int32) UnmarshalJSON(b []byte) error {
+ var v int32
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Int32) String() string {
+ v := i.Load()
+ return strconv.FormatInt(int64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go
new file mode 100644
index 0000000..2bcbbfa
--- /dev/null
+++ b/vendor/go.uber.org/atomic/int64.go
@@ -0,0 +1,102 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Int64 is an atomic wrapper around int64.
+type Int64 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v int64
+}
+
+// NewInt64 creates a new Int64.
+func NewInt64(i int64) *Int64 {
+ return &Int64{v: i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int64) Load() int64 {
+ return atomic.LoadInt64(&i.v)
+}
+
+// Add atomically adds to the wrapped int64 and returns the new value.
+func (i *Int64) Add(n int64) int64 {
+ return atomic.AddInt64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int64 and returns the new value.
+func (i *Int64) Sub(n int64) int64 {
+ return atomic.AddInt64(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int64 and returns the new value.
+func (i *Int64) Inc() int64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int64 and returns the new value.
+func (i *Int64) Dec() int64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int64) CAS(old, new int64) bool {
+ return atomic.CompareAndSwapInt64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int64) Store(n int64) {
+ atomic.StoreInt64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int64 and returns the old value.
+func (i *Int64) Swap(n int64) int64 {
+ return atomic.SwapInt64(&i.v, n)
+}
+
+// MarshalJSON encodes the wrapped int64 into JSON.
+func (i *Int64) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped int64.
+func (i *Int64) UnmarshalJSON(b []byte) error {
+ var v int64
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Int64) String() string {
+ v := i.Load()
+ return strconv.FormatInt(int64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go
new file mode 100644
index 0000000..a8201cb
--- /dev/null
+++ b/vendor/go.uber.org/atomic/nocmp.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// nocmp is an uncomparable struct. Embed this inside another struct to make
+// it uncomparable.
+//
+// type Foo struct {
+// nocmp
+// // ...
+// }
+//
+// This DOES NOT:
+//
+// - Disallow shallow copies of structs
+// - Disallow comparison of pointers to uncomparable structs
+type nocmp [0]func()
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
new file mode 100644
index 0000000..225b7a2
--- /dev/null
+++ b/vendor/go.uber.org/atomic/string.go
@@ -0,0 +1,54 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// String is an atomic type-safe wrapper for string values.
+type String struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Value
+}
+
+var _zeroString string
+
+// NewString creates a new String.
+func NewString(v string) *String {
+ x := &String{}
+ if v != _zeroString {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped string.
+func (x *String) Load() string {
+ if v := x.v.Load(); v != nil {
+ return v.(string)
+ }
+ return _zeroString
+}
+
+// Store atomically stores the passed string.
+func (x *String) Store(v string) {
+ x.v.Store(v)
+}
diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go
new file mode 100644
index 0000000..3a95582
--- /dev/null
+++ b/vendor/go.uber.org/atomic/string_ext.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go
+
+// String returns the wrapped value.
+func (s *String) String() string {
+ return s.Load()
+}
+
+// MarshalText encodes the wrapped string into a textual form.
+//
+// This makes it encodable as JSON, YAML, XML, and more.
+func (s *String) MarshalText() ([]byte, error) {
+ return []byte(s.Load()), nil
+}
+
+// UnmarshalText decodes text and replaces the wrapped string with it.
+//
+// This makes it decodable from JSON, YAML, XML, and more.
+func (s *String) UnmarshalText(b []byte) error {
+ s.Store(string(b))
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go
new file mode 100644
index 0000000..a973aba
--- /dev/null
+++ b/vendor/go.uber.org/atomic/uint32.go
@@ -0,0 +1,102 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uint32 is an atomic wrapper around uint32.
+type Uint32 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uint32
+}
+
+// NewUint32 creates a new Uint32.
+func NewUint32(i uint32) *Uint32 {
+ return &Uint32{v: i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint32) Load() uint32 {
+ return atomic.LoadUint32(&i.v)
+}
+
+// Add atomically adds to the wrapped uint32 and returns the new value.
+func (i *Uint32) Add(n uint32) uint32 {
+ return atomic.AddUint32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint32 and returns the new value.
+func (i *Uint32) Sub(n uint32) uint32 {
+ return atomic.AddUint32(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint32 and returns the new value.
+func (i *Uint32) Inc() uint32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint32 and returns the new value.
+func (i *Uint32) Dec() uint32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint32) CAS(old, new uint32) bool {
+ return atomic.CompareAndSwapUint32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint32) Store(n uint32) {
+ atomic.StoreUint32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint32 and returns the old value.
+func (i *Uint32) Swap(n uint32) uint32 {
+ return atomic.SwapUint32(&i.v, n)
+}
+
+// MarshalJSON encodes the wrapped uint32 into JSON.
+func (i *Uint32) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uint32.
+func (i *Uint32) UnmarshalJSON(b []byte) error {
+ var v uint32
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uint32) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go
new file mode 100644
index 0000000..3b6c71f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/uint64.go
@@ -0,0 +1,102 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uint64 is an atomic wrapper around uint64.
+type Uint64 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uint64
+}
+
+// NewUint64 creates a new Uint64.
+func NewUint64(i uint64) *Uint64 {
+ return &Uint64{v: i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint64) Load() uint64 {
+ return atomic.LoadUint64(&i.v)
+}
+
+// Add atomically adds to the wrapped uint64 and returns the new value.
+func (i *Uint64) Add(n uint64) uint64 {
+ return atomic.AddUint64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint64 and returns the new value.
+func (i *Uint64) Sub(n uint64) uint64 {
+ return atomic.AddUint64(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint64 and returns the new value.
+func (i *Uint64) Inc() uint64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint64 and returns the new value.
+func (i *Uint64) Dec() uint64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint64) CAS(old, new uint64) bool {
+ return atomic.CompareAndSwapUint64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint64) Store(n uint64) {
+ atomic.StoreUint64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint64 and returns the old value.
+func (i *Uint64) Swap(n uint64) uint64 {
+ return atomic.SwapUint64(&i.v, n)
+}
+
+// MarshalJSON encodes the wrapped uint64 into JSON.
+func (i *Uint64) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uint64.
+func (i *Uint64) UnmarshalJSON(b []byte) error {
+ var v uint64
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uint64) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go
new file mode 100644
index 0000000..671f3a3
--- /dev/null
+++ b/vendor/go.uber.org/atomic/value.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "sync/atomic"
+
+// Value shadows the type of the same name from sync/atomic
+// https://godoc.org/sync/atomic#Value
+type Value struct {
+ atomic.Value
+
+ _ nocmp // disallow non-atomic comparison
+}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go
deleted file mode 100644
index ecfd7c5..0000000
--- a/vendor/golang.org/x/crypto/ssh/agent/client.go
+++ /dev/null
@@ -1,659 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package agent implements the ssh-agent protocol, and provides both
-// a client and a server. The client can talk to a standard ssh-agent
-// that uses UNIX sockets, and one could implement an alternative
-// ssh-agent process using the sample server.
-//
-// References:
-// [PROTOCOL.agent]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.agent?rev=HEAD
-package agent // import "golang.org/x/crypto/ssh/agent"
-
-import (
- "bytes"
- "crypto/dsa"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rsa"
- "encoding/base64"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "math/big"
- "sync"
-
- "golang.org/x/crypto/ed25519"
- "golang.org/x/crypto/ssh"
-)
-
-// Agent represents the capabilities of an ssh-agent.
-type Agent interface {
- // List returns the identities known to the agent.
- List() ([]*Key, error)
-
- // Sign has the agent sign the data using a protocol 2 key as defined
- // in [PROTOCOL.agent] section 2.6.2.
- Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error)
-
- // Add adds a private key to the agent.
- Add(key AddedKey) error
-
- // Remove removes all identities with the given public key.
- Remove(key ssh.PublicKey) error
-
- // RemoveAll removes all identities.
- RemoveAll() error
-
- // Lock locks the agent. Sign and Remove will fail, and List will empty an empty list.
- Lock(passphrase []byte) error
-
- // Unlock undoes the effect of Lock
- Unlock(passphrase []byte) error
-
- // Signers returns signers for all the known keys.
- Signers() ([]ssh.Signer, error)
-}
-
-// AddedKey describes an SSH key to be added to an Agent.
-type AddedKey struct {
- // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey or
- // *ecdsa.PrivateKey, which will be inserted into the agent.
- PrivateKey interface{}
- // Certificate, if not nil, is communicated to the agent and will be
- // stored with the key.
- Certificate *ssh.Certificate
- // Comment is an optional, free-form string.
- Comment string
- // LifetimeSecs, if not zero, is the number of seconds that the
- // agent will store the key for.
- LifetimeSecs uint32
- // ConfirmBeforeUse, if true, requests that the agent confirm with the
- // user before each use of this key.
- ConfirmBeforeUse bool
-}
-
-// See [PROTOCOL.agent], section 3.
-const (
- agentRequestV1Identities = 1
- agentRemoveAllV1Identities = 9
-
- // 3.2 Requests from client to agent for protocol 2 key operations
- agentAddIdentity = 17
- agentRemoveIdentity = 18
- agentRemoveAllIdentities = 19
- agentAddIdConstrained = 25
-
- // 3.3 Key-type independent requests from client to agent
- agentAddSmartcardKey = 20
- agentRemoveSmartcardKey = 21
- agentLock = 22
- agentUnlock = 23
- agentAddSmartcardKeyConstrained = 26
-
- // 3.7 Key constraint identifiers
- agentConstrainLifetime = 1
- agentConstrainConfirm = 2
-)
-
-// maxAgentResponseBytes is the maximum agent reply size that is accepted. This
-// is a sanity check, not a limit in the spec.
-const maxAgentResponseBytes = 16 << 20
-
-// Agent messages:
-// These structures mirror the wire format of the corresponding ssh agent
-// messages found in [PROTOCOL.agent].
-
-// 3.4 Generic replies from agent to client
-const agentFailure = 5
-
-type failureAgentMsg struct{}
-
-const agentSuccess = 6
-
-type successAgentMsg struct{}
-
-// See [PROTOCOL.agent], section 2.5.2.
-const agentRequestIdentities = 11
-
-type requestIdentitiesAgentMsg struct{}
-
-// See [PROTOCOL.agent], section 2.5.2.
-const agentIdentitiesAnswer = 12
-
-type identitiesAnswerAgentMsg struct {
- NumKeys uint32 `sshtype:"12"`
- Keys []byte `ssh:"rest"`
-}
-
-// See [PROTOCOL.agent], section 2.6.2.
-const agentSignRequest = 13
-
-type signRequestAgentMsg struct {
- KeyBlob []byte `sshtype:"13"`
- Data []byte
- Flags uint32
-}
-
-// See [PROTOCOL.agent], section 2.6.2.
-
-// 3.6 Replies from agent to client for protocol 2 key operations
-const agentSignResponse = 14
-
-type signResponseAgentMsg struct {
- SigBlob []byte `sshtype:"14"`
-}
-
-type publicKey struct {
- Format string
- Rest []byte `ssh:"rest"`
-}
-
-// Key represents a protocol 2 public key as defined in
-// [PROTOCOL.agent], section 2.5.2.
-type Key struct {
- Format string
- Blob []byte
- Comment string
-}
-
-func clientErr(err error) error {
- return fmt.Errorf("agent: client error: %v", err)
-}
-
-// String returns the storage form of an agent key with the format, base64
-// encoded serialized key, and the comment if it is not empty.
-func (k *Key) String() string {
- s := string(k.Format) + " " + base64.StdEncoding.EncodeToString(k.Blob)
-
- if k.Comment != "" {
- s += " " + k.Comment
- }
-
- return s
-}
-
-// Type returns the public key type.
-func (k *Key) Type() string {
- return k.Format
-}
-
-// Marshal returns key blob to satisfy the ssh.PublicKey interface.
-func (k *Key) Marshal() []byte {
- return k.Blob
-}
-
-// Verify satisfies the ssh.PublicKey interface.
-func (k *Key) Verify(data []byte, sig *ssh.Signature) error {
- pubKey, err := ssh.ParsePublicKey(k.Blob)
- if err != nil {
- return fmt.Errorf("agent: bad public key: %v", err)
- }
- return pubKey.Verify(data, sig)
-}
-
-type wireKey struct {
- Format string
- Rest []byte `ssh:"rest"`
-}
-
-func parseKey(in []byte) (out *Key, rest []byte, err error) {
- var record struct {
- Blob []byte
- Comment string
- Rest []byte `ssh:"rest"`
- }
-
- if err := ssh.Unmarshal(in, &record); err != nil {
- return nil, nil, err
- }
-
- var wk wireKey
- if err := ssh.Unmarshal(record.Blob, &wk); err != nil {
- return nil, nil, err
- }
-
- return &Key{
- Format: wk.Format,
- Blob: record.Blob,
- Comment: record.Comment,
- }, record.Rest, nil
-}
-
-// client is a client for an ssh-agent process.
-type client struct {
- // conn is typically a *net.UnixConn
- conn io.ReadWriter
- // mu is used to prevent concurrent access to the agent
- mu sync.Mutex
-}
-
-// NewClient returns an Agent that talks to an ssh-agent process over
-// the given connection.
-func NewClient(rw io.ReadWriter) Agent {
- return &client{conn: rw}
-}
-
-// call sends an RPC to the agent. On success, the reply is
-// unmarshaled into reply and replyType is set to the first byte of
-// the reply, which contains the type of the message.
-func (c *client) call(req []byte) (reply interface{}, err error) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- msg := make([]byte, 4+len(req))
- binary.BigEndian.PutUint32(msg, uint32(len(req)))
- copy(msg[4:], req)
- if _, err = c.conn.Write(msg); err != nil {
- return nil, clientErr(err)
- }
-
- var respSizeBuf [4]byte
- if _, err = io.ReadFull(c.conn, respSizeBuf[:]); err != nil {
- return nil, clientErr(err)
- }
- respSize := binary.BigEndian.Uint32(respSizeBuf[:])
- if respSize > maxAgentResponseBytes {
- return nil, clientErr(err)
- }
-
- buf := make([]byte, respSize)
- if _, err = io.ReadFull(c.conn, buf); err != nil {
- return nil, clientErr(err)
- }
- reply, err = unmarshal(buf)
- if err != nil {
- return nil, clientErr(err)
- }
- return reply, err
-}
-
-func (c *client) simpleCall(req []byte) error {
- resp, err := c.call(req)
- if err != nil {
- return err
- }
- if _, ok := resp.(*successAgentMsg); ok {
- return nil
- }
- return errors.New("agent: failure")
-}
-
-func (c *client) RemoveAll() error {
- return c.simpleCall([]byte{agentRemoveAllIdentities})
-}
-
-func (c *client) Remove(key ssh.PublicKey) error {
- req := ssh.Marshal(&agentRemoveIdentityMsg{
- KeyBlob: key.Marshal(),
- })
- return c.simpleCall(req)
-}
-
-func (c *client) Lock(passphrase []byte) error {
- req := ssh.Marshal(&agentLockMsg{
- Passphrase: passphrase,
- })
- return c.simpleCall(req)
-}
-
-func (c *client) Unlock(passphrase []byte) error {
- req := ssh.Marshal(&agentUnlockMsg{
- Passphrase: passphrase,
- })
- return c.simpleCall(req)
-}
-
-// List returns the identities known to the agent.
-func (c *client) List() ([]*Key, error) {
- // see [PROTOCOL.agent] section 2.5.2.
- req := []byte{agentRequestIdentities}
-
- msg, err := c.call(req)
- if err != nil {
- return nil, err
- }
-
- switch msg := msg.(type) {
- case *identitiesAnswerAgentMsg:
- if msg.NumKeys > maxAgentResponseBytes/8 {
- return nil, errors.New("agent: too many keys in agent reply")
- }
- keys := make([]*Key, msg.NumKeys)
- data := msg.Keys
- for i := uint32(0); i < msg.NumKeys; i++ {
- var key *Key
- var err error
- if key, data, err = parseKey(data); err != nil {
- return nil, err
- }
- keys[i] = key
- }
- return keys, nil
- case *failureAgentMsg:
- return nil, errors.New("agent: failed to list keys")
- }
- panic("unreachable")
-}
-
-// Sign has the agent sign the data using a protocol 2 key as defined
-// in [PROTOCOL.agent] section 2.6.2.
-func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
- req := ssh.Marshal(signRequestAgentMsg{
- KeyBlob: key.Marshal(),
- Data: data,
- })
-
- msg, err := c.call(req)
- if err != nil {
- return nil, err
- }
-
- switch msg := msg.(type) {
- case *signResponseAgentMsg:
- var sig ssh.Signature
- if err := ssh.Unmarshal(msg.SigBlob, &sig); err != nil {
- return nil, err
- }
-
- return &sig, nil
- case *failureAgentMsg:
- return nil, errors.New("agent: failed to sign challenge")
- }
- panic("unreachable")
-}
-
-// unmarshal parses an agent message in packet, returning the parsed
-// form and the message type of packet.
-func unmarshal(packet []byte) (interface{}, error) {
- if len(packet) < 1 {
- return nil, errors.New("agent: empty packet")
- }
- var msg interface{}
- switch packet[0] {
- case agentFailure:
- return new(failureAgentMsg), nil
- case agentSuccess:
- return new(successAgentMsg), nil
- case agentIdentitiesAnswer:
- msg = new(identitiesAnswerAgentMsg)
- case agentSignResponse:
- msg = new(signResponseAgentMsg)
- case agentV1IdentitiesAnswer:
- msg = new(agentV1IdentityMsg)
- default:
- return nil, fmt.Errorf("agent: unknown type tag %d", packet[0])
- }
- if err := ssh.Unmarshal(packet, msg); err != nil {
- return nil, err
- }
- return msg, nil
-}
-
-type rsaKeyMsg struct {
- Type string `sshtype:"17|25"`
- N *big.Int
- E *big.Int
- D *big.Int
- Iqmp *big.Int // IQMP = Inverse Q Mod P
- P *big.Int
- Q *big.Int
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-type dsaKeyMsg struct {
- Type string `sshtype:"17|25"`
- P *big.Int
- Q *big.Int
- G *big.Int
- Y *big.Int
- X *big.Int
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-type ecdsaKeyMsg struct {
- Type string `sshtype:"17|25"`
- Curve string
- KeyBytes []byte
- D *big.Int
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-type ed25519KeyMsg struct {
- Type string `sshtype:"17|25"`
- Pub []byte
- Priv []byte
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-// Insert adds a private key to the agent.
-func (c *client) insertKey(s interface{}, comment string, constraints []byte) error {
- var req []byte
- switch k := s.(type) {
- case *rsa.PrivateKey:
- if len(k.Primes) != 2 {
- return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes))
- }
- k.Precompute()
- req = ssh.Marshal(rsaKeyMsg{
- Type: ssh.KeyAlgoRSA,
- N: k.N,
- E: big.NewInt(int64(k.E)),
- D: k.D,
- Iqmp: k.Precomputed.Qinv,
- P: k.Primes[0],
- Q: k.Primes[1],
- Comments: comment,
- Constraints: constraints,
- })
- case *dsa.PrivateKey:
- req = ssh.Marshal(dsaKeyMsg{
- Type: ssh.KeyAlgoDSA,
- P: k.P,
- Q: k.Q,
- G: k.G,
- Y: k.Y,
- X: k.X,
- Comments: comment,
- Constraints: constraints,
- })
- case *ecdsa.PrivateKey:
- nistID := fmt.Sprintf("nistp%d", k.Params().BitSize)
- req = ssh.Marshal(ecdsaKeyMsg{
- Type: "ecdsa-sha2-" + nistID,
- Curve: nistID,
- KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y),
- D: k.D,
- Comments: comment,
- Constraints: constraints,
- })
- case *ed25519.PrivateKey:
- req = ssh.Marshal(ed25519KeyMsg{
- Type: ssh.KeyAlgoED25519,
- Pub: []byte(*k)[32:],
- Priv: []byte(*k),
- Comments: comment,
- Constraints: constraints,
- })
- default:
- return fmt.Errorf("agent: unsupported key type %T", s)
- }
-
- // if constraints are present then the message type needs to be changed.
- if len(constraints) != 0 {
- req[0] = agentAddIdConstrained
- }
-
- resp, err := c.call(req)
- if err != nil {
- return err
- }
- if _, ok := resp.(*successAgentMsg); ok {
- return nil
- }
- return errors.New("agent: failure")
-}
-
-type rsaCertMsg struct {
- Type string `sshtype:"17|25"`
- CertBytes []byte
- D *big.Int
- Iqmp *big.Int // IQMP = Inverse Q Mod P
- P *big.Int
- Q *big.Int
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-type dsaCertMsg struct {
- Type string `sshtype:"17|25"`
- CertBytes []byte
- X *big.Int
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-type ecdsaCertMsg struct {
- Type string `sshtype:"17|25"`
- CertBytes []byte
- D *big.Int
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-type ed25519CertMsg struct {
- Type string `sshtype:"17|25"`
- CertBytes []byte
- Pub []byte
- Priv []byte
- Comments string
- Constraints []byte `ssh:"rest"`
-}
-
-// Add adds a private key to the agent. If a certificate is given,
-// that certificate is added instead as public key.
-func (c *client) Add(key AddedKey) error {
- var constraints []byte
-
- if secs := key.LifetimeSecs; secs != 0 {
- constraints = append(constraints, agentConstrainLifetime)
-
- var secsBytes [4]byte
- binary.BigEndian.PutUint32(secsBytes[:], secs)
- constraints = append(constraints, secsBytes[:]...)
- }
-
- if key.ConfirmBeforeUse {
- constraints = append(constraints, agentConstrainConfirm)
- }
-
- if cert := key.Certificate; cert == nil {
- return c.insertKey(key.PrivateKey, key.Comment, constraints)
- } else {
- return c.insertCert(key.PrivateKey, cert, key.Comment, constraints)
- }
-}
-
-func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error {
- var req []byte
- switch k := s.(type) {
- case *rsa.PrivateKey:
- if len(k.Primes) != 2 {
- return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes))
- }
- k.Precompute()
- req = ssh.Marshal(rsaCertMsg{
- Type: cert.Type(),
- CertBytes: cert.Marshal(),
- D: k.D,
- Iqmp: k.Precomputed.Qinv,
- P: k.Primes[0],
- Q: k.Primes[1],
- Comments: comment,
- Constraints: constraints,
- })
- case *dsa.PrivateKey:
- req = ssh.Marshal(dsaCertMsg{
- Type: cert.Type(),
- CertBytes: cert.Marshal(),
- X: k.X,
- Comments: comment,
- Constraints: constraints,
- })
- case *ecdsa.PrivateKey:
- req = ssh.Marshal(ecdsaCertMsg{
- Type: cert.Type(),
- CertBytes: cert.Marshal(),
- D: k.D,
- Comments: comment,
- Constraints: constraints,
- })
- case *ed25519.PrivateKey:
- req = ssh.Marshal(ed25519CertMsg{
- Type: cert.Type(),
- CertBytes: cert.Marshal(),
- Pub: []byte(*k)[32:],
- Priv: []byte(*k),
- Comments: comment,
- Constraints: constraints,
- })
- default:
- return fmt.Errorf("agent: unsupported key type %T", s)
- }
-
- // if constraints are present then the message type needs to be changed.
- if len(constraints) != 0 {
- req[0] = agentAddIdConstrained
- }
-
- signer, err := ssh.NewSignerFromKey(s)
- if err != nil {
- return err
- }
- if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 {
- return errors.New("agent: signer and cert have different public key")
- }
-
- resp, err := c.call(req)
- if err != nil {
- return err
- }
- if _, ok := resp.(*successAgentMsg); ok {
- return nil
- }
- return errors.New("agent: failure")
-}
-
-// Signers provides a callback for client authentication.
-func (c *client) Signers() ([]ssh.Signer, error) {
- keys, err := c.List()
- if err != nil {
- return nil, err
- }
-
- var result []ssh.Signer
- for _, k := range keys {
- result = append(result, &agentKeyringSigner{c, k})
- }
- return result, nil
-}
-
-type agentKeyringSigner struct {
- agent *client
- pub ssh.PublicKey
-}
-
-func (s *agentKeyringSigner) PublicKey() ssh.PublicKey {
- return s.pub
-}
-
-func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) {
- // The agent has its own entropy source, so the rand argument is ignored.
- return s.agent.Sign(s.pub, data)
-}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/client_test.go b/vendor/golang.org/x/crypto/ssh/agent/client_test.go
deleted file mode 100644
index e33d471..0000000
--- a/vendor/golang.org/x/crypto/ssh/agent/client_test.go
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package agent
-
-import (
- "bytes"
- "crypto/rand"
- "errors"
- "net"
- "os"
- "os/exec"
- "path/filepath"
- "strconv"
- "testing"
- "time"
-
- "golang.org/x/crypto/ssh"
-)
-
-// startAgent executes ssh-agent, and returns a Agent interface to it.
-func startAgent(t *testing.T) (client Agent, socket string, cleanup func()) {
- if testing.Short() {
- // ssh-agent is not always available, and the key
- // types supported vary by platform.
- t.Skip("skipping test due to -short")
- }
-
- bin, err := exec.LookPath("ssh-agent")
- if err != nil {
- t.Skip("could not find ssh-agent")
- }
-
- cmd := exec.Command(bin, "-s")
- out, err := cmd.Output()
- if err != nil {
- t.Fatalf("cmd.Output: %v", err)
- }
-
- /* Output looks like:
-
- SSH_AUTH_SOCK=/tmp/ssh-P65gpcqArqvH/agent.15541; export SSH_AUTH_SOCK;
- SSH_AGENT_PID=15542; export SSH_AGENT_PID;
- echo Agent pid 15542;
- */
- fields := bytes.Split(out, []byte(";"))
- line := bytes.SplitN(fields[0], []byte("="), 2)
- line[0] = bytes.TrimLeft(line[0], "\n")
- if string(line[0]) != "SSH_AUTH_SOCK" {
- t.Fatalf("could not find key SSH_AUTH_SOCK in %q", fields[0])
- }
- socket = string(line[1])
-
- line = bytes.SplitN(fields[2], []byte("="), 2)
- line[0] = bytes.TrimLeft(line[0], "\n")
- if string(line[0]) != "SSH_AGENT_PID" {
- t.Fatalf("could not find key SSH_AGENT_PID in %q", fields[2])
- }
- pidStr := line[1]
- pid, err := strconv.Atoi(string(pidStr))
- if err != nil {
- t.Fatalf("Atoi(%q): %v", pidStr, err)
- }
-
- conn, err := net.Dial("unix", string(socket))
- if err != nil {
- t.Fatalf("net.Dial: %v", err)
- }
-
- ac := NewClient(conn)
- return ac, socket, func() {
- proc, _ := os.FindProcess(pid)
- if proc != nil {
- proc.Kill()
- }
- conn.Close()
- os.RemoveAll(filepath.Dir(socket))
- }
-}
-
-func testAgent(t *testing.T, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) {
- agent, _, cleanup := startAgent(t)
- defer cleanup()
-
- testAgentInterface(t, agent, key, cert, lifetimeSecs)
-}
-
-func testKeyring(t *testing.T, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) {
- a := NewKeyring()
- testAgentInterface(t, a, key, cert, lifetimeSecs)
-}
-
-func testAgentInterface(t *testing.T, agent Agent, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) {
- signer, err := ssh.NewSignerFromKey(key)
- if err != nil {
- t.Fatalf("NewSignerFromKey(%T): %v", key, err)
- }
- // The agent should start up empty.
- if keys, err := agent.List(); err != nil {
- t.Fatalf("RequestIdentities: %v", err)
- } else if len(keys) > 0 {
- t.Fatalf("got %d keys, want 0: %v", len(keys), keys)
- }
-
- // Attempt to insert the key, with certificate if specified.
- var pubKey ssh.PublicKey
- if cert != nil {
- err = agent.Add(AddedKey{
- PrivateKey: key,
- Certificate: cert,
- Comment: "comment",
- LifetimeSecs: lifetimeSecs,
- })
- pubKey = cert
- } else {
- err = agent.Add(AddedKey{PrivateKey: key, Comment: "comment", LifetimeSecs: lifetimeSecs})
- pubKey = signer.PublicKey()
- }
- if err != nil {
- t.Fatalf("insert(%T): %v", key, err)
- }
-
- // Did the key get inserted successfully?
- if keys, err := agent.List(); err != nil {
- t.Fatalf("List: %v", err)
- } else if len(keys) != 1 {
- t.Fatalf("got %v, want 1 key", keys)
- } else if keys[0].Comment != "comment" {
- t.Fatalf("key comment: got %v, want %v", keys[0].Comment, "comment")
- } else if !bytes.Equal(keys[0].Blob, pubKey.Marshal()) {
- t.Fatalf("key mismatch")
- }
-
- // Can the agent make a valid signature?
- data := []byte("hello")
- sig, err := agent.Sign(pubKey, data)
- if err != nil {
- t.Fatalf("Sign(%s): %v", pubKey.Type(), err)
- }
-
- if err := pubKey.Verify(data, sig); err != nil {
- t.Fatalf("Verify(%s): %v", pubKey.Type(), err)
- }
-
- // If the key has a lifetime, is it removed when it should be?
- if lifetimeSecs > 0 {
- time.Sleep(time.Second*time.Duration(lifetimeSecs) + 100*time.Millisecond)
- keys, err := agent.List()
- if err != nil {
- t.Fatalf("List: %v", err)
- }
- if len(keys) > 0 {
- t.Fatalf("key not expired")
- }
- }
-
-}
-
-func TestAgent(t *testing.T) {
- for _, keyType := range []string{"rsa", "dsa", "ecdsa", "ed25519"} {
- testAgent(t, testPrivateKeys[keyType], nil, 0)
- testKeyring(t, testPrivateKeys[keyType], nil, 1)
- }
-}
-
-func TestCert(t *testing.T) {
- cert := &ssh.Certificate{
- Key: testPublicKeys["rsa"],
- ValidBefore: ssh.CertTimeInfinity,
- CertType: ssh.UserCert,
- }
- cert.SignCert(rand.Reader, testSigners["ecdsa"])
-
- testAgent(t, testPrivateKeys["rsa"], cert, 0)
- testKeyring(t, testPrivateKeys["rsa"], cert, 1)
-}
-
-// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and
-// therefore is buffered (net.Pipe deadlocks if both sides start with
-// a write.)
-func netPipe() (net.Conn, net.Conn, error) {
- listener, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- return nil, nil, err
- }
- defer listener.Close()
- c1, err := net.Dial("tcp", listener.Addr().String())
- if err != nil {
- return nil, nil, err
- }
-
- c2, err := listener.Accept()
- if err != nil {
- c1.Close()
- return nil, nil, err
- }
-
- return c1, c2, nil
-}
-
-func TestAuth(t *testing.T) {
- a, b, err := netPipe()
- if err != nil {
- t.Fatalf("netPipe: %v", err)
- }
-
- defer a.Close()
- defer b.Close()
-
- agent, _, cleanup := startAgent(t)
- defer cleanup()
-
- if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["rsa"], Comment: "comment"}); err != nil {
- t.Errorf("Add: %v", err)
- }
-
- serverConf := ssh.ServerConfig{}
- serverConf.AddHostKey(testSigners["rsa"])
- serverConf.PublicKeyCallback = func(c ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
- if bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) {
- return nil, nil
- }
-
- return nil, errors.New("pubkey rejected")
- }
-
- go func() {
- conn, _, _, err := ssh.NewServerConn(a, &serverConf)
- if err != nil {
- t.Fatalf("Server: %v", err)
- }
- conn.Close()
- }()
-
- conf := ssh.ClientConfig{}
- conf.Auth = append(conf.Auth, ssh.PublicKeysCallback(agent.Signers))
- conn, _, _, err := ssh.NewClientConn(b, "", &conf)
- if err != nil {
- t.Fatalf("NewClientConn: %v", err)
- }
- conn.Close()
-}
-
-func TestLockClient(t *testing.T) {
- agent, _, cleanup := startAgent(t)
- defer cleanup()
- testLockAgent(agent, t)
-}
-
-func testLockAgent(agent Agent, t *testing.T) {
- if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["rsa"], Comment: "comment 1"}); err != nil {
- t.Errorf("Add: %v", err)
- }
- if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["dsa"], Comment: "comment dsa"}); err != nil {
- t.Errorf("Add: %v", err)
- }
- if keys, err := agent.List(); err != nil {
- t.Errorf("List: %v", err)
- } else if len(keys) != 2 {
- t.Errorf("Want 2 keys, got %v", keys)
- }
-
- passphrase := []byte("secret")
- if err := agent.Lock(passphrase); err != nil {
- t.Errorf("Lock: %v", err)
- }
-
- if keys, err := agent.List(); err != nil {
- t.Errorf("List: %v", err)
- } else if len(keys) != 0 {
- t.Errorf("Want 0 keys, got %v", keys)
- }
-
- signer, _ := ssh.NewSignerFromKey(testPrivateKeys["rsa"])
- if _, err := agent.Sign(signer.PublicKey(), []byte("hello")); err == nil {
- t.Fatalf("Sign did not fail")
- }
-
- if err := agent.Remove(signer.PublicKey()); err == nil {
- t.Fatalf("Remove did not fail")
- }
-
- if err := agent.RemoveAll(); err == nil {
- t.Fatalf("RemoveAll did not fail")
- }
-
- if err := agent.Unlock(nil); err == nil {
- t.Errorf("Unlock with wrong passphrase succeeded")
- }
- if err := agent.Unlock(passphrase); err != nil {
- t.Errorf("Unlock: %v", err)
- }
-
- if err := agent.Remove(signer.PublicKey()); err != nil {
- t.Fatalf("Remove: %v", err)
- }
-
- if keys, err := agent.List(); err != nil {
- t.Errorf("List: %v", err)
- } else if len(keys) != 1 {
- t.Errorf("Want 1 keys, got %v", keys)
- }
-}
-
-func TestAgentLifetime(t *testing.T) {
- agent, _, cleanup := startAgent(t)
- defer cleanup()
-
- for _, keyType := range []string{"rsa", "dsa", "ecdsa"} {
- // Add private keys to the agent.
- err := agent.Add(AddedKey{
- PrivateKey: testPrivateKeys[keyType],
- Comment: "comment",
- LifetimeSecs: 1,
- })
- if err != nil {
- t.Fatalf("add: %v", err)
- }
- // Add certs to the agent.
- cert := &ssh.Certificate{
- Key: testPublicKeys[keyType],
- ValidBefore: ssh.CertTimeInfinity,
- CertType: ssh.UserCert,
- }
- cert.SignCert(rand.Reader, testSigners[keyType])
- err = agent.Add(AddedKey{
- PrivateKey: testPrivateKeys[keyType],
- Certificate: cert,
- Comment: "comment",
- LifetimeSecs: 1,
- })
- if err != nil {
- t.Fatalf("add: %v", err)
- }
- }
- time.Sleep(1100 * time.Millisecond)
- if keys, err := agent.List(); err != nil {
- t.Errorf("List: %v", err)
- } else if len(keys) != 0 {
- t.Errorf("Want 0 keys, got %v", len(keys))
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/example_test.go b/vendor/golang.org/x/crypto/ssh/agent/example_test.go
deleted file mode 100644
index c1130f7..0000000
--- a/vendor/golang.org/x/crypto/ssh/agent/example_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package agent_test
-
-import (
- "log"
- "os"
- "net"
-
- "golang.org/x/crypto/ssh"
- "golang.org/x/crypto/ssh/agent"
-)
-
-func ExampleClientAgent() {
- // ssh-agent has a UNIX socket under $SSH_AUTH_SOCK
- socket := os.Getenv("SSH_AUTH_SOCK")
- conn, err := net.Dial("unix", socket)
- if err != nil {
- log.Fatalf("net.Dial: %v", err)
- }
- agentClient := agent.NewClient(conn)
- config := &ssh.ClientConfig{
- User: "username",
- Auth: []ssh.AuthMethod{
- // Use a callback rather than PublicKeys
- // so we only consult the agent once the remote server
- // wants it.
- ssh.PublicKeysCallback(agentClient.Signers),
- },
- }
-
- sshc, err := ssh.Dial("tcp", "localhost:22", config)
- if err != nil {
- log.Fatalf("Dial: %v", err)
- }
- // .. use sshc
- sshc.Close()
-}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/forward.go b/vendor/golang.org/x/crypto/ssh/agent/forward.go
deleted file mode 100644
index fd24ba9..0000000
--- a/vendor/golang.org/x/crypto/ssh/agent/forward.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package agent
-
-import (
- "errors"
- "io"
- "net"
- "sync"
-
- "golang.org/x/crypto/ssh"
-)
-
-// RequestAgentForwarding sets up agent forwarding for the session.
-// ForwardToAgent or ForwardToRemote should be called to route
-// the authentication requests.
-func RequestAgentForwarding(session *ssh.Session) error {
- ok, err := session.SendRequest("auth-agent-req@openssh.com", true, nil)
- if err != nil {
- return err
- }
- if !ok {
- return errors.New("forwarding request denied")
- }
- return nil
-}
-
-// ForwardToAgent routes authentication requests to the given keyring.
-func ForwardToAgent(client *ssh.Client, keyring Agent) error {
- channels := client.HandleChannelOpen(channelType)
- if channels == nil {
- return errors.New("agent: already have handler for " + channelType)
- }
-
- go func() {
- for ch := range channels {
- channel, reqs, err := ch.Accept()
- if err != nil {
- continue
- }
- go ssh.DiscardRequests(reqs)
- go func() {
- ServeAgent(keyring, channel)
- channel.Close()
- }()
- }
- }()
- return nil
-}
-
-const channelType = "auth-agent@openssh.com"
-
-// ForwardToRemote routes authentication requests to the ssh-agent
-// process serving on the given unix socket.
-func ForwardToRemote(client *ssh.Client, addr string) error {
- channels := client.HandleChannelOpen(channelType)
- if channels == nil {
- return errors.New("agent: already have handler for " + channelType)
- }
- conn, err := net.Dial("unix", addr)
- if err != nil {
- return err
- }
- conn.Close()
-
- go func() {
- for ch := range channels {
- channel, reqs, err := ch.Accept()
- if err != nil {
- continue
- }
- go ssh.DiscardRequests(reqs)
- go forwardUnixSocket(channel, addr)
- }
- }()
- return nil
-}
-
-func forwardUnixSocket(channel ssh.Channel, addr string) {
- conn, err := net.Dial("unix", addr)
- if err != nil {
- return
- }
-
- var wg sync.WaitGroup
- wg.Add(2)
- go func() {
- io.Copy(conn, channel)
- conn.(*net.UnixConn).CloseWrite()
- wg.Done()
- }()
- go func() {
- io.Copy(channel, conn)
- channel.CloseWrite()
- wg.Done()
- }()
-
- wg.Wait()
- conn.Close()
- channel.Close()
-}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go
deleted file mode 100644
index a6ba06a..0000000
--- a/vendor/golang.org/x/crypto/ssh/agent/keyring.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package agent
-
-import (
- "bytes"
- "crypto/rand"
- "crypto/subtle"
- "errors"
- "fmt"
- "sync"
- "time"
-
- "golang.org/x/crypto/ssh"
-)
-
-type privKey struct {
- signer ssh.Signer
- comment string
- expire *time.Time
-}
-
-type keyring struct {
- mu sync.Mutex
- keys []privKey
-
- locked bool
- passphrase []byte
-}
-
-var errLocked = errors.New("agent: locked")
-
-// NewKeyring returns an Agent that holds keys in memory. It is safe
-// for concurrent use by multiple goroutines.
-func NewKeyring() Agent {
- return &keyring{}
-}
-
-// RemoveAll removes all identities.
-func (r *keyring) RemoveAll() error {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.locked {
- return errLocked
- }
-
- r.keys = nil
- return nil
-}
-
-// removeLocked does the actual key removal. The caller must already be holding the
-// keyring mutex.
-func (r *keyring) removeLocked(want []byte) error {
- found := false
- for i := 0; i < len(r.keys); {
- if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) {
- found = true
- r.keys[i] = r.keys[len(r.keys)-1]
- r.keys = r.keys[:len(r.keys)-1]
- continue
- } else {
- i++
- }
- }
-
- if !found {
- return errors.New("agent: key not found")
- }
- return nil
-}
-
-// Remove removes all identities with the given public key.
-func (r *keyring) Remove(key ssh.PublicKey) error {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.locked {
- return errLocked
- }
-
- return r.removeLocked(key.Marshal())
-}
-
-// Lock locks the agent. Sign and Remove will fail, and List will return an empty list.
-func (r *keyring) Lock(passphrase []byte) error {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.locked {
- return errLocked
- }
-
- r.locked = true
- r.passphrase = passphrase
- return nil
-}
-
-// Unlock undoes the effect of Lock
-func (r *keyring) Unlock(passphrase []byte) error {
- r.mu.Lock()
- defer r.mu.Unlock()
- if !r.locked {
- return errors.New("agent: not locked")
- }
- if len(passphrase) != len(r.passphrase) || 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) {
- return fmt.Errorf("agent: incorrect passphrase")
- }
-
- r.locked = false
- r.passphrase = nil
- return nil
-}
-
-// expireKeysLocked removes expired keys from the keyring. If a key was added
-// with a lifetimesecs contraint and seconds >= lifetimesecs seconds have
-// ellapsed, it is removed. The caller *must* be holding the keyring mutex.
-func (r *keyring) expireKeysLocked() {
- for _, k := range r.keys {
- if k.expire != nil && time.Now().After(*k.expire) {
- r.removeLocked(k.signer.PublicKey().Marshal())
- }
- }
-}
-
-// List returns the identities known to the agent.
-func (r *keyring) List() ([]*Key, error) {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.locked {
- // section 2.7: locked agents return empty.
- return nil, nil
- }
-
- r.expireKeysLocked()
- var ids []*Key
- for _, k := range r.keys {
- pub := k.signer.PublicKey()
- ids = append(ids, &Key{
- Format: pub.Type(),
- Blob: pub.Marshal(),
- Comment: k.comment})
- }
- return ids, nil
-}
-
-// Insert adds a private key to the keyring. If a certificate
-// is given, that certificate is added as public key. Note that
-// any constraints given are ignored.
-func (r *keyring) Add(key AddedKey) error {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.locked {
- return errLocked
- }
- signer, err := ssh.NewSignerFromKey(key.PrivateKey)
-
- if err != nil {
- return err
- }
-
- if cert := key.Certificate; cert != nil {
- signer, err = ssh.NewCertSigner(cert, signer)
- if err != nil {
- return err
- }
- }
-
- p := privKey{
- signer: signer,
- comment: key.Comment,
- }
-
- if key.LifetimeSecs > 0 {
- t := time.Now().Add(time.Duration(key.LifetimeSecs) * time.Second)
- p.expire = &t
- }
-
- r.keys = append(r.keys, p)
-
- return nil
-}
-
-// Sign returns a signature for the data.
-func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.locked {
- return nil, errLocked
- }
-
- r.expireKeysLocked()
- wanted := key.Marshal()
- for _, k := range r.keys {
- if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) {
- return k.signer.Sign(rand.Reader, data)
- }
- }
- return nil, errors.New("not found")
-}
-
-// Signers returns signers for all the known keys.
-func (r *keyring) Signers() ([]ssh.Signer, error) {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.locked {
- return nil, errLocked
- }
-
- r.expireKeysLocked()
- s := make([]ssh.Signer, 0, len(r.keys))
- for _, k := range r.keys {
- s = append(s, k.signer)
- }
- return s, nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring_test.go b/vendor/golang.org/x/crypto/ssh/agent/keyring_test.go
deleted file mode 100644
index e5d50e7..0000000
--- a/vendor/golang.org/x/crypto/ssh/agent/keyring_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package agent
-
-import "testing"
-
-func addTestKey(t *testing.T, a Agent, keyName string) {
- err := a.Add(AddedKey{
- PrivateKey: testPrivateKeys[keyName],
- Comment: keyName,
- })
- if err != nil {
- t.Fatalf("failed to add key %q: %v", keyName, err)
- }
-}
-
-func removeTestKey(t *testing.T, a Agent, keyName string) {
- err := a.Remove(testPublicKeys[keyName])
- if err != nil {
- t.Fatalf("failed to remove key %q: %v", keyName, err)
- }
-}
-
-func validateListedKeys(t *testing.T, a Agent, expectedKeys []string) {
- listedKeys, err := a.List()
- if err != nil {
- t.Fatalf("failed to list keys: %v", err)
- return
- }
- actualKeys := make(map[string]bool)
- for _, key := range listedKeys {
- actualKeys[key.Comment] = true
- }
-
- matchedKeys := make(map[string]bool)
- for _, expectedKey := range expectedKeys {
- if !actualKeys[expectedKey] {
- t.Fatalf("expected key %q, but was not found", expectedKey)
- } else {
- matchedKeys[expectedKey] = true
- }
- }
-
- for actualKey := range actualKeys {
- if !matchedKeys[actualKey] {
- t.Fatalf("key %q was found, but was not expected", actualKey)
- }
- }
-}
-
-func TestKeyringAddingAndRemoving(t *testing.T) {
- keyNames := []string{"dsa", "ecdsa", "rsa", "user"}
-
- // add all test private keys
- k := NewKeyring()
- for _, keyName := range keyNames {
- addTestKey(t, k, keyName)
- }
- validateListedKeys(t, k, keyNames)
-
- // remove a key in the middle
- keyToRemove := keyNames[1]
- keyNames = append(keyNames[:1], keyNames[2:]...)
-
- removeTestKey(t, k, keyToRemove)
- validateListedKeys(t, k, keyNames)
-
- // remove all keys
- err := k.RemoveAll()
- if err != nil {
- t.Fatalf("failed to remove all keys: %v", err)
- }
- validateListedKeys(t, k, []string{})
-}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/server.go b/vendor/golang.org/x/crypto/ssh/agent/server.go
deleted file mode 100644
index 68a333f..0000000
--- a/vendor/golang.org/x/crypto/ssh/agent/server.go
+++ /dev/null
@@ -1,451 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package agent
-
-import (
- "crypto/dsa"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rsa"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "log"
- "math/big"
-
- "golang.org/x/crypto/ed25519"
- "golang.org/x/crypto/ssh"
-)
-
-// Server wraps an Agent and uses it to implement the agent side of
-// the SSH-agent, wire protocol.
-type server struct {
- agent Agent
-}
-
-func (s *server) processRequestBytes(reqData []byte) []byte {
- rep, err := s.processRequest(reqData)
- if err != nil {
- if err != errLocked {
- // TODO(hanwen): provide better logging interface?
- log.Printf("agent %d: %v", reqData[0], err)
- }
- return []byte{agentFailure}
- }
-
- if err == nil && rep == nil {
- return []byte{agentSuccess}
- }
-
- return ssh.Marshal(rep)
-}
-
-func marshalKey(k *Key) []byte {
- var record struct {
- Blob []byte
- Comment string
- }
- record.Blob = k.Marshal()
- record.Comment = k.Comment
-
- return ssh.Marshal(&record)
-}
-
-// See [PROTOCOL.agent], section 2.5.1.
-const agentV1IdentitiesAnswer = 2
-
-type agentV1IdentityMsg struct {
- Numkeys uint32 `sshtype:"2"`
-}
-
-type agentRemoveIdentityMsg struct {
- KeyBlob []byte `sshtype:"18"`
-}
-
-type agentLockMsg struct {
- Passphrase []byte `sshtype:"22"`
-}
-
-type agentUnlockMsg struct {
- Passphrase []byte `sshtype:"23"`
-}
-
-func (s *server) processRequest(data []byte) (interface{}, error) {
- switch data[0] {
- case agentRequestV1Identities:
- return &agentV1IdentityMsg{0}, nil
-
- case agentRemoveAllV1Identities:
- return nil, nil
-
- case agentRemoveIdentity:
- var req agentRemoveIdentityMsg
- if err := ssh.Unmarshal(data, &req); err != nil {
- return nil, err
- }
-
- var wk wireKey
- if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil {
- return nil, err
- }
-
- return nil, s.agent.Remove(&Key{Format: wk.Format, Blob: req.KeyBlob})
-
- case agentRemoveAllIdentities:
- return nil, s.agent.RemoveAll()
-
- case agentLock:
- var req agentLockMsg
- if err := ssh.Unmarshal(data, &req); err != nil {
- return nil, err
- }
-
- return nil, s.agent.Lock(req.Passphrase)
-
- case agentUnlock:
- var req agentLockMsg
- if err := ssh.Unmarshal(data, &req); err != nil {
- return nil, err
- }
- return nil, s.agent.Unlock(req.Passphrase)
-
- case agentSignRequest:
- var req signRequestAgentMsg
- if err := ssh.Unmarshal(data, &req); err != nil {
- return nil, err
- }
-
- var wk wireKey
- if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil {
- return nil, err
- }
-
- k := &Key{
- Format: wk.Format,
- Blob: req.KeyBlob,
- }
-
- sig, err := s.agent.Sign(k, req.Data) // TODO(hanwen): flags.
- if err != nil {
- return nil, err
- }
- return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil
-
- case agentRequestIdentities:
- keys, err := s.agent.List()
- if err != nil {
- return nil, err
- }
-
- rep := identitiesAnswerAgentMsg{
- NumKeys: uint32(len(keys)),
- }
- for _, k := range keys {
- rep.Keys = append(rep.Keys, marshalKey(k)...)
- }
- return rep, nil
-
- case agentAddIdConstrained, agentAddIdentity:
- return nil, s.insertIdentity(data)
- }
-
- return nil, fmt.Errorf("unknown opcode %d", data[0])
-}
-
-func parseRSAKey(req []byte) (*AddedKey, error) {
- var k rsaKeyMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
- if k.E.BitLen() > 30 {
- return nil, errors.New("agent: RSA public exponent too large")
- }
- priv := &rsa.PrivateKey{
- PublicKey: rsa.PublicKey{
- E: int(k.E.Int64()),
- N: k.N,
- },
- D: k.D,
- Primes: []*big.Int{k.P, k.Q},
- }
- priv.Precompute()
-
- return &AddedKey{PrivateKey: priv, Comment: k.Comments}, nil
-}
-
-func parseEd25519Key(req []byte) (*AddedKey, error) {
- var k ed25519KeyMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
- priv := ed25519.PrivateKey(k.Priv)
- return &AddedKey{PrivateKey: &priv, Comment: k.Comments}, nil
-}
-
-func parseDSAKey(req []byte) (*AddedKey, error) {
- var k dsaKeyMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
- priv := &dsa.PrivateKey{
- PublicKey: dsa.PublicKey{
- Parameters: dsa.Parameters{
- P: k.P,
- Q: k.Q,
- G: k.G,
- },
- Y: k.Y,
- },
- X: k.X,
- }
-
- return &AddedKey{PrivateKey: priv, Comment: k.Comments}, nil
-}
-
-func unmarshalECDSA(curveName string, keyBytes []byte, privScalar *big.Int) (priv *ecdsa.PrivateKey, err error) {
- priv = &ecdsa.PrivateKey{
- D: privScalar,
- }
-
- switch curveName {
- case "nistp256":
- priv.Curve = elliptic.P256()
- case "nistp384":
- priv.Curve = elliptic.P384()
- case "nistp521":
- priv.Curve = elliptic.P521()
- default:
- return nil, fmt.Errorf("agent: unknown curve %q", curveName)
- }
-
- priv.X, priv.Y = elliptic.Unmarshal(priv.Curve, keyBytes)
- if priv.X == nil || priv.Y == nil {
- return nil, errors.New("agent: point not on curve")
- }
-
- return priv, nil
-}
-
-func parseEd25519Cert(req []byte) (*AddedKey, error) {
- var k ed25519CertMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
- pubKey, err := ssh.ParsePublicKey(k.CertBytes)
- if err != nil {
- return nil, err
- }
- priv := ed25519.PrivateKey(k.Priv)
- cert, ok := pubKey.(*ssh.Certificate)
- if !ok {
- return nil, errors.New("agent: bad ED25519 certificate")
- }
- return &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments}, nil
-}
-
-func parseECDSAKey(req []byte) (*AddedKey, error) {
- var k ecdsaKeyMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
-
- priv, err := unmarshalECDSA(k.Curve, k.KeyBytes, k.D)
- if err != nil {
- return nil, err
- }
-
- return &AddedKey{PrivateKey: priv, Comment: k.Comments}, nil
-}
-
-func parseRSACert(req []byte) (*AddedKey, error) {
- var k rsaCertMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
-
- pubKey, err := ssh.ParsePublicKey(k.CertBytes)
- if err != nil {
- return nil, err
- }
-
- cert, ok := pubKey.(*ssh.Certificate)
- if !ok {
- return nil, errors.New("agent: bad RSA certificate")
- }
-
- // An RSA publickey as marshaled by rsaPublicKey.Marshal() in keys.go
- var rsaPub struct {
- Name string
- E *big.Int
- N *big.Int
- }
- if err := ssh.Unmarshal(cert.Key.Marshal(), &rsaPub); err != nil {
- return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err)
- }
-
- if rsaPub.E.BitLen() > 30 {
- return nil, errors.New("agent: RSA public exponent too large")
- }
-
- priv := rsa.PrivateKey{
- PublicKey: rsa.PublicKey{
- E: int(rsaPub.E.Int64()),
- N: rsaPub.N,
- },
- D: k.D,
- Primes: []*big.Int{k.Q, k.P},
- }
- priv.Precompute()
-
- return &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments}, nil
-}
-
-func parseDSACert(req []byte) (*AddedKey, error) {
- var k dsaCertMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
- pubKey, err := ssh.ParsePublicKey(k.CertBytes)
- if err != nil {
- return nil, err
- }
- cert, ok := pubKey.(*ssh.Certificate)
- if !ok {
- return nil, errors.New("agent: bad DSA certificate")
- }
-
- // A DSA publickey as marshaled by dsaPublicKey.Marshal() in keys.go
- var w struct {
- Name string
- P, Q, G, Y *big.Int
- }
- if err := ssh.Unmarshal(cert.Key.Marshal(), &w); err != nil {
- return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err)
- }
-
- priv := &dsa.PrivateKey{
- PublicKey: dsa.PublicKey{
- Parameters: dsa.Parameters{
- P: w.P,
- Q: w.Q,
- G: w.G,
- },
- Y: w.Y,
- },
- X: k.X,
- }
-
- return &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments}, nil
-}
-
-func parseECDSACert(req []byte) (*AddedKey, error) {
- var k ecdsaCertMsg
- if err := ssh.Unmarshal(req, &k); err != nil {
- return nil, err
- }
-
- pubKey, err := ssh.ParsePublicKey(k.CertBytes)
- if err != nil {
- return nil, err
- }
- cert, ok := pubKey.(*ssh.Certificate)
- if !ok {
- return nil, errors.New("agent: bad ECDSA certificate")
- }
-
- // An ECDSA publickey as marshaled by ecdsaPublicKey.Marshal() in keys.go
- var ecdsaPub struct {
- Name string
- ID string
- Key []byte
- }
- if err := ssh.Unmarshal(cert.Key.Marshal(), &ecdsaPub); err != nil {
- return nil, err
- }
-
- priv, err := unmarshalECDSA(ecdsaPub.ID, ecdsaPub.Key, k.D)
- if err != nil {
- return nil, err
- }
-
- return &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments}, nil
-}
-
-func (s *server) insertIdentity(req []byte) error {
- var record struct {
- Type string `sshtype:"17|25"`
- Rest []byte `ssh:"rest"`
- }
-
- if err := ssh.Unmarshal(req, &record); err != nil {
- return err
- }
-
- var addedKey *AddedKey
- var err error
-
- switch record.Type {
- case ssh.KeyAlgoRSA:
- addedKey, err = parseRSAKey(req)
- case ssh.KeyAlgoDSA:
- addedKey, err = parseDSAKey(req)
- case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521:
- addedKey, err = parseECDSAKey(req)
- case ssh.KeyAlgoED25519:
- addedKey, err = parseEd25519Key(req)
- case ssh.CertAlgoRSAv01:
- addedKey, err = parseRSACert(req)
- case ssh.CertAlgoDSAv01:
- addedKey, err = parseDSACert(req)
- case ssh.CertAlgoECDSA256v01, ssh.CertAlgoECDSA384v01, ssh.CertAlgoECDSA521v01:
- addedKey, err = parseECDSACert(req)
- case ssh.CertAlgoED25519v01:
- addedKey, err = parseEd25519Cert(req)
- default:
- return fmt.Errorf("agent: not implemented: %q", record.Type)
- }
-
- if err != nil {
- return err
- }
- return s.agent.Add(*addedKey)
-}
-
-// ServeAgent serves the agent protocol on the given connection. It
-// returns when an I/O error occurs.
-func ServeAgent(agent Agent, c io.ReadWriter) error {
- s := &server{agent}
-
- var length [4]byte
- for {
- if _, err := io.ReadFull(c, length[:]); err != nil {
- return err
- }
- l := binary.BigEndian.Uint32(length[:])
- if l > maxAgentResponseBytes {
- // We also cap requests.
- return fmt.Errorf("agent: request too large: %d", l)
- }
-
- req := make([]byte, l)
- if _, err := io.ReadFull(c, req); err != nil {
- return err
- }
-
- repData := s.processRequestBytes(req)
- if len(repData) > maxAgentResponseBytes {
- return fmt.Errorf("agent: reply too large: %d bytes", len(repData))
- }
-
- binary.BigEndian.PutUint32(length[:], uint32(len(repData)))
- if _, err := c.Write(length[:]); err != nil {
- return err
- }
- if _, err := c.Write(repData); err != nil {
- return err
- }
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/server_test.go b/vendor/golang.org/x/crypto/ssh/agent/server_test.go
deleted file mode 100644
index ec9cdee..0000000
--- a/vendor/golang.org/x/crypto/ssh/agent/server_test.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package agent
-
-import (
- "crypto"
- "crypto/rand"
- "fmt"
- "testing"
-
- "golang.org/x/crypto/ssh"
-)
-
-func TestServer(t *testing.T) {
- c1, c2, err := netPipe()
- if err != nil {
- t.Fatalf("netPipe: %v", err)
- }
- defer c1.Close()
- defer c2.Close()
- client := NewClient(c1)
-
- go ServeAgent(NewKeyring(), c2)
-
- testAgentInterface(t, client, testPrivateKeys["rsa"], nil, 0)
-}
-
-func TestLockServer(t *testing.T) {
- testLockAgent(NewKeyring(), t)
-}
-
-func TestSetupForwardAgent(t *testing.T) {
- a, b, err := netPipe()
- if err != nil {
- t.Fatalf("netPipe: %v", err)
- }
-
- defer a.Close()
- defer b.Close()
-
- _, socket, cleanup := startAgent(t)
- defer cleanup()
-
- serverConf := ssh.ServerConfig{
- NoClientAuth: true,
- }
- serverConf.AddHostKey(testSigners["rsa"])
- incoming := make(chan *ssh.ServerConn, 1)
- go func() {
- conn, _, _, err := ssh.NewServerConn(a, &serverConf)
- if err != nil {
- t.Fatalf("Server: %v", err)
- }
- incoming <- conn
- }()
-
- conf := ssh.ClientConfig{}
- conn, chans, reqs, err := ssh.NewClientConn(b, "", &conf)
- if err != nil {
- t.Fatalf("NewClientConn: %v", err)
- }
- client := ssh.NewClient(conn, chans, reqs)
-
- if err := ForwardToRemote(client, socket); err != nil {
- t.Fatalf("SetupForwardAgent: %v", err)
- }
-
- server := <-incoming
- ch, reqs, err := server.OpenChannel(channelType, nil)
- if err != nil {
- t.Fatalf("OpenChannel(%q): %v", channelType, err)
- }
- go ssh.DiscardRequests(reqs)
-
- agentClient := NewClient(ch)
- testAgentInterface(t, agentClient, testPrivateKeys["rsa"], nil, 0)
- conn.Close()
-}
-
-func TestV1ProtocolMessages(t *testing.T) {
- c1, c2, err := netPipe()
- if err != nil {
- t.Fatalf("netPipe: %v", err)
- }
- defer c1.Close()
- defer c2.Close()
- c := NewClient(c1)
-
- go ServeAgent(NewKeyring(), c2)
-
- testV1ProtocolMessages(t, c.(*client))
-}
-
-func testV1ProtocolMessages(t *testing.T, c *client) {
- reply, err := c.call([]byte{agentRequestV1Identities})
- if err != nil {
- t.Fatalf("v1 request all failed: %v", err)
- }
- if msg, ok := reply.(*agentV1IdentityMsg); !ok || msg.Numkeys != 0 {
- t.Fatalf("invalid request all response: %#v", reply)
- }
-
- reply, err = c.call([]byte{agentRemoveAllV1Identities})
- if err != nil {
- t.Fatalf("v1 remove all failed: %v", err)
- }
- if _, ok := reply.(*successAgentMsg); !ok {
- t.Fatalf("invalid remove all response: %#v", reply)
- }
-}
-
-func verifyKey(sshAgent Agent) error {
- keys, err := sshAgent.List()
- if err != nil {
- return fmt.Errorf("listing keys: %v", err)
- }
-
- if len(keys) != 1 {
- return fmt.Errorf("bad number of keys found. expected 1, got %d", len(keys))
- }
-
- buf := make([]byte, 128)
- if _, err := rand.Read(buf); err != nil {
- return fmt.Errorf("rand: %v", err)
- }
-
- sig, err := sshAgent.Sign(keys[0], buf)
- if err != nil {
- return fmt.Errorf("sign: %v", err)
- }
-
- if err := keys[0].Verify(buf, sig); err != nil {
- return fmt.Errorf("verify: %v", err)
- }
- return nil
-}
-
-func addKeyToAgent(key crypto.PrivateKey) error {
- sshAgent := NewKeyring()
- if err := sshAgent.Add(AddedKey{PrivateKey: key}); err != nil {
- return fmt.Errorf("add: %v", err)
- }
- return verifyKey(sshAgent)
-}
-
-func TestKeyTypes(t *testing.T) {
- for k, v := range testPrivateKeys {
- if err := addKeyToAgent(v); err != nil {
- t.Errorf("error adding key type %s, %v", k, err)
- }
- if err := addCertToAgentSock(v, nil); err != nil {
- t.Errorf("error adding key type %s, %v", k, err)
- }
- }
-}
-
-func addCertToAgentSock(key crypto.PrivateKey, cert *ssh.Certificate) error {
- a, b, err := netPipe()
- if err != nil {
- return err
- }
- agentServer := NewKeyring()
- go ServeAgent(agentServer, a)
-
- agentClient := NewClient(b)
- if err := agentClient.Add(AddedKey{PrivateKey: key, Certificate: cert}); err != nil {
- return fmt.Errorf("add: %v", err)
- }
- return verifyKey(agentClient)
-}
-
-func addCertToAgent(key crypto.PrivateKey, cert *ssh.Certificate) error {
- sshAgent := NewKeyring()
- if err := sshAgent.Add(AddedKey{PrivateKey: key, Certificate: cert}); err != nil {
- return fmt.Errorf("add: %v", err)
- }
- return verifyKey(sshAgent)
-}
-
-func TestCertTypes(t *testing.T) {
- for keyType, key := range testPublicKeys {
- cert := &ssh.Certificate{
- ValidPrincipals: []string{"gopher1"},
- ValidAfter: 0,
- ValidBefore: ssh.CertTimeInfinity,
- Key: key,
- Serial: 1,
- CertType: ssh.UserCert,
- SignatureKey: testPublicKeys["rsa"],
- Permissions: ssh.Permissions{
- CriticalOptions: map[string]string{},
- Extensions: map[string]string{},
- },
- }
- if err := cert.SignCert(rand.Reader, testSigners["rsa"]); err != nil {
- t.Fatalf("signcert: %v", err)
- }
- if err := addCertToAgent(testPrivateKeys[keyType], cert); err != nil {
- t.Fatalf("%v", err)
- }
- if err := addCertToAgentSock(testPrivateKeys[keyType], cert); err != nil {
- t.Fatalf("%v", err)
- }
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/testdata_test.go b/vendor/golang.org/x/crypto/ssh/agent/testdata_test.go
deleted file mode 100644
index cc42a87..0000000
--- a/vendor/golang.org/x/crypto/ssh/agent/testdata_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// IMPLEMENTATION NOTE: To avoid a package loop, this file is in three places:
-// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three
-// instances.
-
-package agent
-
-import (
- "crypto/rand"
- "fmt"
-
- "golang.org/x/crypto/ssh"
- "golang.org/x/crypto/ssh/testdata"
-)
-
-var (
- testPrivateKeys map[string]interface{}
- testSigners map[string]ssh.Signer
- testPublicKeys map[string]ssh.PublicKey
-)
-
-func init() {
- var err error
-
- n := len(testdata.PEMBytes)
- testPrivateKeys = make(map[string]interface{}, n)
- testSigners = make(map[string]ssh.Signer, n)
- testPublicKeys = make(map[string]ssh.PublicKey, n)
- for t, k := range testdata.PEMBytes {
- testPrivateKeys[t], err = ssh.ParseRawPrivateKey(k)
- if err != nil {
- panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err))
- }
- testSigners[t], err = ssh.NewSignerFromKey(testPrivateKeys[t])
- if err != nil {
- panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err))
- }
- testPublicKeys[t] = testSigners[t].PublicKey()
- }
-
- // Create a cert and sign it for use in tests.
- testCert := &ssh.Certificate{
- Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
- ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage
- ValidAfter: 0, // unix epoch
- ValidBefore: ssh.CertTimeInfinity, // The end of currently representable time.
- Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
- Key: testPublicKeys["ecdsa"],
- SignatureKey: testPublicKeys["rsa"],
- Permissions: ssh.Permissions{
- CriticalOptions: map[string]string{},
- Extensions: map[string]string{},
- },
- }
- testCert.SignCert(rand.Reader, testSigners["rsa"])
- testPrivateKeys["cert"] = testPrivateKeys["ecdsa"]
- testSigners["cert"], err = ssh.NewCertSigner(testCert, testSigners["ecdsa"])
- if err != nil {
- panic(fmt.Sprintf("Unable to create certificate signer: %v", err))
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/benchmark_test.go b/vendor/golang.org/x/crypto/ssh/benchmark_test.go
deleted file mode 100644
index d9f7eb9..0000000
--- a/vendor/golang.org/x/crypto/ssh/benchmark_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "errors"
- "io"
- "net"
- "testing"
-)
-
-type server struct {
- *ServerConn
- chans <-chan NewChannel
-}
-
-func newServer(c net.Conn, conf *ServerConfig) (*server, error) {
- sconn, chans, reqs, err := NewServerConn(c, conf)
- if err != nil {
- return nil, err
- }
- go DiscardRequests(reqs)
- return &server{sconn, chans}, nil
-}
-
-func (s *server) Accept() (NewChannel, error) {
- n, ok := <-s.chans
- if !ok {
- return nil, io.EOF
- }
- return n, nil
-}
-
-func sshPipe() (Conn, *server, error) {
- c1, c2, err := netPipe()
- if err != nil {
- return nil, nil, err
- }
-
- clientConf := ClientConfig{
- User: "user",
- }
- serverConf := ServerConfig{
- NoClientAuth: true,
- }
- serverConf.AddHostKey(testSigners["ecdsa"])
- done := make(chan *server, 1)
- go func() {
- server, err := newServer(c2, &serverConf)
- if err != nil {
- done <- nil
- }
- done <- server
- }()
-
- client, _, reqs, err := NewClientConn(c1, "", &clientConf)
- if err != nil {
- return nil, nil, err
- }
-
- server := <-done
- if server == nil {
- return nil, nil, errors.New("server handshake failed.")
- }
- go DiscardRequests(reqs)
-
- return client, server, nil
-}
-
-func BenchmarkEndToEnd(b *testing.B) {
- b.StopTimer()
-
- client, server, err := sshPipe()
- if err != nil {
- b.Fatalf("sshPipe: %v", err)
- }
-
- defer client.Close()
- defer server.Close()
-
- size := (1 << 20)
- input := make([]byte, size)
- output := make([]byte, size)
- b.SetBytes(int64(size))
- done := make(chan int, 1)
-
- go func() {
- newCh, err := server.Accept()
- if err != nil {
- b.Fatalf("Client: %v", err)
- }
- ch, incoming, err := newCh.Accept()
- go DiscardRequests(incoming)
- for i := 0; i < b.N; i++ {
- if _, err := io.ReadFull(ch, output); err != nil {
- b.Fatalf("ReadFull: %v", err)
- }
- }
- ch.Close()
- done <- 1
- }()
-
- ch, in, err := client.OpenChannel("speed", nil)
- if err != nil {
- b.Fatalf("OpenChannel: %v", err)
- }
- go DiscardRequests(in)
-
- b.ResetTimer()
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- if _, err := ch.Write(input); err != nil {
- b.Fatalf("WriteFull: %v", err)
- }
- }
- ch.Close()
- b.StopTimer()
-
- <-done
-}
diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go
deleted file mode 100644
index 6931b51..0000000
--- a/vendor/golang.org/x/crypto/ssh/buffer.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "io"
- "sync"
-)
-
-// buffer provides a linked list buffer for data exchange
-// between producer and consumer. Theoretically the buffer is
-// of unlimited capacity as it does no allocation of its own.
-type buffer struct {
- // protects concurrent access to head, tail and closed
- *sync.Cond
-
- head *element // the buffer that will be read first
- tail *element // the buffer that will be read last
-
- closed bool
-}
-
-// An element represents a single link in a linked list.
-type element struct {
- buf []byte
- next *element
-}
-
-// newBuffer returns an empty buffer that is not closed.
-func newBuffer() *buffer {
- e := new(element)
- b := &buffer{
- Cond: newCond(),
- head: e,
- tail: e,
- }
- return b
-}
-
-// write makes buf available for Read to receive.
-// buf must not be modified after the call to write.
-func (b *buffer) write(buf []byte) {
- b.Cond.L.Lock()
- e := &element{buf: buf}
- b.tail.next = e
- b.tail = e
- b.Cond.Signal()
- b.Cond.L.Unlock()
-}
-
-// eof closes the buffer. Reads from the buffer once all
-// the data has been consumed will receive os.EOF.
-func (b *buffer) eof() error {
- b.Cond.L.Lock()
- b.closed = true
- b.Cond.Signal()
- b.Cond.L.Unlock()
- return nil
-}
-
-// Read reads data from the internal buffer in buf. Reads will block
-// if no data is available, or until the buffer is closed.
-func (b *buffer) Read(buf []byte) (n int, err error) {
- b.Cond.L.Lock()
- defer b.Cond.L.Unlock()
-
- for len(buf) > 0 {
- // if there is data in b.head, copy it
- if len(b.head.buf) > 0 {
- r := copy(buf, b.head.buf)
- buf, b.head.buf = buf[r:], b.head.buf[r:]
- n += r
- continue
- }
- // if there is a next buffer, make it the head
- if len(b.head.buf) == 0 && b.head != b.tail {
- b.head = b.head.next
- continue
- }
-
- // if at least one byte has been copied, return
- if n > 0 {
- break
- }
-
- // if nothing was read, and there is nothing outstanding
- // check to see if the buffer is closed.
- if b.closed {
- err = io.EOF
- break
- }
- // out of buffers, wait for producer
- b.Cond.Wait()
- }
- return
-}
diff --git a/vendor/golang.org/x/crypto/ssh/buffer_test.go b/vendor/golang.org/x/crypto/ssh/buffer_test.go
deleted file mode 100644
index d5781cb..0000000
--- a/vendor/golang.org/x/crypto/ssh/buffer_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "io"
- "testing"
-)
-
-var alphabet = []byte("abcdefghijklmnopqrstuvwxyz")
-
-func TestBufferReadwrite(t *testing.T) {
- b := newBuffer()
- b.write(alphabet[:10])
- r, _ := b.Read(make([]byte, 10))
- if r != 10 {
- t.Fatalf("Expected written == read == 10, written: 10, read %d", r)
- }
-
- b = newBuffer()
- b.write(alphabet[:5])
- r, _ = b.Read(make([]byte, 10))
- if r != 5 {
- t.Fatalf("Expected written == read == 5, written: 5, read %d", r)
- }
-
- b = newBuffer()
- b.write(alphabet[:10])
- r, _ = b.Read(make([]byte, 5))
- if r != 5 {
- t.Fatalf("Expected written == 10, read == 5, written: 10, read %d", r)
- }
-
- b = newBuffer()
- b.write(alphabet[:5])
- b.write(alphabet[5:15])
- r, _ = b.Read(make([]byte, 10))
- r2, _ := b.Read(make([]byte, 10))
- if r != 10 || r2 != 5 || 15 != r+r2 {
- t.Fatal("Expected written == read == 15")
- }
-}
-
-func TestBufferClose(t *testing.T) {
- b := newBuffer()
- b.write(alphabet[:10])
- b.eof()
- _, err := b.Read(make([]byte, 5))
- if err != nil {
- t.Fatal("expected read of 5 to not return EOF")
- }
- b = newBuffer()
- b.write(alphabet[:10])
- b.eof()
- r, err := b.Read(make([]byte, 5))
- r2, err2 := b.Read(make([]byte, 10))
- if r != 5 || r2 != 5 || err != nil || err2 != nil {
- t.Fatal("expected reads of 5 and 5")
- }
-
- b = newBuffer()
- b.write(alphabet[:10])
- b.eof()
- r, err = b.Read(make([]byte, 5))
- r2, err2 = b.Read(make([]byte, 10))
- r3, err3 := b.Read(make([]byte, 10))
- if r != 5 || r2 != 5 || r3 != 0 || err != nil || err2 != nil || err3 != io.EOF {
- t.Fatal("expected reads of 5 and 5 and 0, with EOF")
- }
-
- b = newBuffer()
- b.write(make([]byte, 5))
- b.write(make([]byte, 10))
- b.eof()
- r, err = b.Read(make([]byte, 9))
- r2, err2 = b.Read(make([]byte, 3))
- r3, err3 = b.Read(make([]byte, 3))
- r4, err4 := b.Read(make([]byte, 10))
- if err != nil || err2 != nil || err3 != nil || err4 != io.EOF {
- t.Fatalf("Expected EOF on forth read only, err=%v, err2=%v, err3=%v, err4=%v", err, err2, err3, err4)
- }
- if r != 9 || r2 != 3 || r3 != 3 || r4 != 0 {
- t.Fatal("Expected written == read == 15", r, r2, r3, r4)
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go
deleted file mode 100644
index 6331c94..0000000
--- a/vendor/golang.org/x/crypto/ssh/certs.go
+++ /dev/null
@@ -1,503 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "net"
- "sort"
- "time"
-)
-
-// These constants from [PROTOCOL.certkeys] represent the algorithm names
-// for certificate types supported by this package.
-const (
- CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
- CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com"
- CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
- CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
- CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
- CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com"
-)
-
-// Certificate types distinguish between host and user
-// certificates. The values can be set in the CertType field of
-// Certificate.
-const (
- UserCert = 1
- HostCert = 2
-)
-
-// Signature represents a cryptographic signature.
-type Signature struct {
- Format string
- Blob []byte
-}
-
-// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that
-// a certificate does not expire.
-const CertTimeInfinity = 1<<64 - 1
-
-// An Certificate represents an OpenSSH certificate as defined in
-// [PROTOCOL.certkeys]?rev=1.8.
-type Certificate struct {
- Nonce []byte
- Key PublicKey
- Serial uint64
- CertType uint32
- KeyId string
- ValidPrincipals []string
- ValidAfter uint64
- ValidBefore uint64
- Permissions
- Reserved []byte
- SignatureKey PublicKey
- Signature *Signature
-}
-
-// genericCertData holds the key-independent part of the certificate data.
-// Overall, certificates contain an nonce, public key fields and
-// key-independent fields.
-type genericCertData struct {
- Serial uint64
- CertType uint32
- KeyId string
- ValidPrincipals []byte
- ValidAfter uint64
- ValidBefore uint64
- CriticalOptions []byte
- Extensions []byte
- Reserved []byte
- SignatureKey []byte
- Signature []byte
-}
-
-func marshalStringList(namelist []string) []byte {
- var to []byte
- for _, name := range namelist {
- s := struct{ N string }{name}
- to = append(to, Marshal(&s)...)
- }
- return to
-}
-
-type optionsTuple struct {
- Key string
- Value []byte
-}
-
-type optionsTupleValue struct {
- Value string
-}
-
-// serialize a map of critical options or extensions
-// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
-// we need two length prefixes for a non-empty string value
-func marshalTuples(tups map[string]string) []byte {
- keys := make([]string, 0, len(tups))
- for key := range tups {
- keys = append(keys, key)
- }
- sort.Strings(keys)
-
- var ret []byte
- for _, key := range keys {
- s := optionsTuple{Key: key}
- if value := tups[key]; len(value) > 0 {
- s.Value = Marshal(&optionsTupleValue{value})
- }
- ret = append(ret, Marshal(&s)...)
- }
- return ret
-}
-
-// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
-// we need two length prefixes for a non-empty option value
-func parseTuples(in []byte) (map[string]string, error) {
- tups := map[string]string{}
- var lastKey string
- var haveLastKey bool
-
- for len(in) > 0 {
- var key, val, extra []byte
- var ok bool
-
- if key, in, ok = parseString(in); !ok {
- return nil, errShortRead
- }
- keyStr := string(key)
- // according to [PROTOCOL.certkeys], the names must be in
- // lexical order.
- if haveLastKey && keyStr <= lastKey {
- return nil, fmt.Errorf("ssh: certificate options are not in lexical order")
- }
- lastKey, haveLastKey = keyStr, true
- // the next field is a data field, which if non-empty has a string embedded
- if val, in, ok = parseString(in); !ok {
- return nil, errShortRead
- }
- if len(val) > 0 {
- val, extra, ok = parseString(val)
- if !ok {
- return nil, errShortRead
- }
- if len(extra) > 0 {
- return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value")
- }
- tups[keyStr] = string(val)
- } else {
- tups[keyStr] = ""
- }
- }
- return tups, nil
-}
-
-func parseCert(in []byte, privAlgo string) (*Certificate, error) {
- nonce, rest, ok := parseString(in)
- if !ok {
- return nil, errShortRead
- }
-
- key, rest, err := parsePubKey(rest, privAlgo)
- if err != nil {
- return nil, err
- }
-
- var g genericCertData
- if err := Unmarshal(rest, &g); err != nil {
- return nil, err
- }
-
- c := &Certificate{
- Nonce: nonce,
- Key: key,
- Serial: g.Serial,
- CertType: g.CertType,
- KeyId: g.KeyId,
- ValidAfter: g.ValidAfter,
- ValidBefore: g.ValidBefore,
- }
-
- for principals := g.ValidPrincipals; len(principals) > 0; {
- principal, rest, ok := parseString(principals)
- if !ok {
- return nil, errShortRead
- }
- c.ValidPrincipals = append(c.ValidPrincipals, string(principal))
- principals = rest
- }
-
- c.CriticalOptions, err = parseTuples(g.CriticalOptions)
- if err != nil {
- return nil, err
- }
- c.Extensions, err = parseTuples(g.Extensions)
- if err != nil {
- return nil, err
- }
- c.Reserved = g.Reserved
- k, err := ParsePublicKey(g.SignatureKey)
- if err != nil {
- return nil, err
- }
-
- c.SignatureKey = k
- c.Signature, rest, ok = parseSignatureBody(g.Signature)
- if !ok || len(rest) > 0 {
- return nil, errors.New("ssh: signature parse error")
- }
-
- return c, nil
-}
-
-type openSSHCertSigner struct {
- pub *Certificate
- signer Signer
-}
-
-// NewCertSigner returns a Signer that signs with the given Certificate, whose
-// private key is held by signer. It returns an error if the public key in cert
-// doesn't match the key used by signer.
-func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) {
- if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 {
- return nil, errors.New("ssh: signer and cert have different public key")
- }
-
- return &openSSHCertSigner{cert, signer}, nil
-}
-
-func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
- return s.signer.Sign(rand, data)
-}
-
-func (s *openSSHCertSigner) PublicKey() PublicKey {
- return s.pub
-}
-
-const sourceAddressCriticalOption = "source-address"
-
-// CertChecker does the work of verifying a certificate. Its methods
-// can be plugged into ClientConfig.HostKeyCallback and
-// ServerConfig.PublicKeyCallback. For the CertChecker to work,
-// minimally, the IsAuthority callback should be set.
-type CertChecker struct {
- // SupportedCriticalOptions lists the CriticalOptions that the
- // server application layer understands. These are only used
- // for user certificates.
- SupportedCriticalOptions []string
-
- // IsAuthority should return true if the key is recognized as
- // an authority. This allows for certificates to be signed by other
- // certificates.
- IsAuthority func(auth PublicKey) bool
-
- // Clock is used for verifying time stamps. If nil, time.Now
- // is used.
- Clock func() time.Time
-
- // UserKeyFallback is called when CertChecker.Authenticate encounters a
- // public key that is not a certificate. It must implement validation
- // of user keys or else, if nil, all such keys are rejected.
- UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
-
- // HostKeyFallback is called when CertChecker.CheckHostKey encounters a
- // public key that is not a certificate. It must implement host key
- // validation or else, if nil, all such keys are rejected.
- HostKeyFallback func(addr string, remote net.Addr, key PublicKey) error
-
- // IsRevoked is called for each certificate so that revocation checking
- // can be implemented. It should return true if the given certificate
- // is revoked and false otherwise. If nil, no certificates are
- // considered to have been revoked.
- IsRevoked func(cert *Certificate) bool
-}
-
-// CheckHostKey checks a host key certificate. This method can be
-// plugged into ClientConfig.HostKeyCallback.
-func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error {
- cert, ok := key.(*Certificate)
- if !ok {
- if c.HostKeyFallback != nil {
- return c.HostKeyFallback(addr, remote, key)
- }
- return errors.New("ssh: non-certificate host key")
- }
- if cert.CertType != HostCert {
- return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType)
- }
-
- return c.CheckCert(addr, cert)
-}
-
-// Authenticate checks a user certificate. Authenticate can be used as
-// a value for ServerConfig.PublicKeyCallback.
-func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) {
- cert, ok := pubKey.(*Certificate)
- if !ok {
- if c.UserKeyFallback != nil {
- return c.UserKeyFallback(conn, pubKey)
- }
- return nil, errors.New("ssh: normal key pairs not accepted")
- }
-
- if cert.CertType != UserCert {
- return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType)
- }
-
- if err := c.CheckCert(conn.User(), cert); err != nil {
- return nil, err
- }
-
- return &cert.Permissions, nil
-}
-
-// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and
-// the signature of the certificate.
-func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
- if c.IsRevoked != nil && c.IsRevoked(cert) {
- return fmt.Errorf("ssh: certicate serial %d revoked", cert.Serial)
- }
-
- for opt, _ := range cert.CriticalOptions {
- // sourceAddressCriticalOption will be enforced by
- // serverAuthenticate
- if opt == sourceAddressCriticalOption {
- continue
- }
-
- found := false
- for _, supp := range c.SupportedCriticalOptions {
- if supp == opt {
- found = true
- break
- }
- }
- if !found {
- return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt)
- }
- }
-
- if len(cert.ValidPrincipals) > 0 {
- // By default, certs are valid for all users/hosts.
- found := false
- for _, p := range cert.ValidPrincipals {
- if p == principal {
- found = true
- break
- }
- }
- if !found {
- return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals)
- }
- }
-
- if !c.IsAuthority(cert.SignatureKey) {
- return fmt.Errorf("ssh: certificate signed by unrecognized authority")
- }
-
- clock := c.Clock
- if clock == nil {
- clock = time.Now
- }
-
- unixNow := clock().Unix()
- if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) {
- return fmt.Errorf("ssh: cert is not yet valid")
- }
- if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) {
- return fmt.Errorf("ssh: cert has expired")
- }
- if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil {
- return fmt.Errorf("ssh: certificate signature does not verify")
- }
-
- return nil
-}
-
-// SignCert sets c.SignatureKey to the authority's public key and stores a
-// Signature, by authority, in the certificate.
-func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
- c.Nonce = make([]byte, 32)
- if _, err := io.ReadFull(rand, c.Nonce); err != nil {
- return err
- }
- c.SignatureKey = authority.PublicKey()
-
- sig, err := authority.Sign(rand, c.bytesForSigning())
- if err != nil {
- return err
- }
- c.Signature = sig
- return nil
-}
-
-var certAlgoNames = map[string]string{
- KeyAlgoRSA: CertAlgoRSAv01,
- KeyAlgoDSA: CertAlgoDSAv01,
- KeyAlgoECDSA256: CertAlgoECDSA256v01,
- KeyAlgoECDSA384: CertAlgoECDSA384v01,
- KeyAlgoECDSA521: CertAlgoECDSA521v01,
- KeyAlgoED25519: CertAlgoED25519v01,
-}
-
-// certToPrivAlgo returns the underlying algorithm for a certificate algorithm.
-// Panics if a non-certificate algorithm is passed.
-func certToPrivAlgo(algo string) string {
- for privAlgo, pubAlgo := range certAlgoNames {
- if pubAlgo == algo {
- return privAlgo
- }
- }
- panic("unknown cert algorithm")
-}
-
-func (cert *Certificate) bytesForSigning() []byte {
- c2 := *cert
- c2.Signature = nil
- out := c2.Marshal()
- // Drop trailing signature length.
- return out[:len(out)-4]
-}
-
-// Marshal serializes c into OpenSSH's wire format. It is part of the
-// PublicKey interface.
-func (c *Certificate) Marshal() []byte {
- generic := genericCertData{
- Serial: c.Serial,
- CertType: c.CertType,
- KeyId: c.KeyId,
- ValidPrincipals: marshalStringList(c.ValidPrincipals),
- ValidAfter: uint64(c.ValidAfter),
- ValidBefore: uint64(c.ValidBefore),
- CriticalOptions: marshalTuples(c.CriticalOptions),
- Extensions: marshalTuples(c.Extensions),
- Reserved: c.Reserved,
- SignatureKey: c.SignatureKey.Marshal(),
- }
- if c.Signature != nil {
- generic.Signature = Marshal(c.Signature)
- }
- genericBytes := Marshal(&generic)
- keyBytes := c.Key.Marshal()
- _, keyBytes, _ = parseString(keyBytes)
- prefix := Marshal(&struct {
- Name string
- Nonce []byte
- Key []byte `ssh:"rest"`
- }{c.Type(), c.Nonce, keyBytes})
-
- result := make([]byte, 0, len(prefix)+len(genericBytes))
- result = append(result, prefix...)
- result = append(result, genericBytes...)
- return result
-}
-
-// Type returns the key name. It is part of the PublicKey interface.
-func (c *Certificate) Type() string {
- algo, ok := certAlgoNames[c.Key.Type()]
- if !ok {
- panic("unknown cert key type " + c.Key.Type())
- }
- return algo
-}
-
-// Verify verifies a signature against the certificate's public
-// key. It is part of the PublicKey interface.
-func (c *Certificate) Verify(data []byte, sig *Signature) error {
- return c.Key.Verify(data, sig)
-}
-
-func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) {
- format, in, ok := parseString(in)
- if !ok {
- return
- }
-
- out = &Signature{
- Format: string(format),
- }
-
- if out.Blob, in, ok = parseString(in); !ok {
- return
- }
-
- return out, in, ok
-}
-
-func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) {
- sigBytes, rest, ok := parseString(in)
- if !ok {
- return
- }
-
- out, trailing, ok := parseSignatureBody(sigBytes)
- if !ok || len(trailing) > 0 {
- return nil, nil, false
- }
- return
-}
diff --git a/vendor/golang.org/x/crypto/ssh/certs_test.go b/vendor/golang.org/x/crypto/ssh/certs_test.go
deleted file mode 100644
index c5f2e53..0000000
--- a/vendor/golang.org/x/crypto/ssh/certs_test.go
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "bytes"
- "crypto/rand"
- "reflect"
- "testing"
- "time"
-)
-
-// Cert generated by ssh-keygen 6.0p1 Debian-4.
-// % ssh-keygen -s ca-key -I test user-key
-const exampleSSHCert = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgb1srW/W3ZDjYAO45xLYAwzHBDLsJ4Ux6ICFIkTjb1LEAAAADAQABAAAAYQCkoR51poH0wE8w72cqSB8Sszx+vAhzcMdCO0wqHTj7UNENHWEXGrU0E0UQekD7U+yhkhtoyjbPOVIP7hNa6aRk/ezdh/iUnCIt4Jt1v3Z1h1P+hA4QuYFMHNB+rmjPwAcAAAAAAAAAAAAAAAEAAAAEdGVzdAAAAAAAAAAAAAAAAP//////////AAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAAHcAAAAHc3NoLXJzYQAAAAMBAAEAAABhANFS2kaktpSGc+CcmEKPyw9mJC4nZKxHKTgLVZeaGbFZOvJTNzBspQHdy7Q1uKSfktxpgjZnksiu/tFF9ngyY2KFoc+U88ya95IZUycBGCUbBQ8+bhDtw/icdDGQD5WnUwAAAG8AAAAHc3NoLXJzYQAAAGC8Y9Z2LQKhIhxf52773XaWrXdxP0t3GBVo4A10vUWiYoAGepr6rQIoGGXFxT4B9Gp+nEBJjOwKDXPrAevow0T9ca8gZN+0ykbhSrXLE5Ao48rqr3zP4O1/9P7e6gp0gw8=`
-
-func TestParseCert(t *testing.T) {
- authKeyBytes := []byte(exampleSSHCert)
-
- key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes)
- if err != nil {
- t.Fatalf("ParseAuthorizedKey: %v", err)
- }
- if len(rest) > 0 {
- t.Errorf("rest: got %q, want empty", rest)
- }
-
- if _, ok := key.(*Certificate); !ok {
- t.Fatalf("got %v (%T), want *Certificate", key, key)
- }
-
- marshaled := MarshalAuthorizedKey(key)
- // Before comparison, remove the trailing newline that
- // MarshalAuthorizedKey adds.
- marshaled = marshaled[:len(marshaled)-1]
- if !bytes.Equal(authKeyBytes, marshaled) {
- t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes)
- }
-}
-
-// Cert generated by ssh-keygen OpenSSH_6.8p1 OS X 10.10.3
-// % ssh-keygen -s ca -I testcert -O source-address=192.168.1.0/24 -O force-command=/bin/sleep user.pub
-// user.pub key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMN
-// Critical Options:
-// force-command /bin/sleep
-// source-address 192.168.1.0/24
-// Extensions:
-// permit-X11-forwarding
-// permit-agent-forwarding
-// permit-port-forwarding
-// permit-pty
-// permit-user-rc
-const exampleSSHCertWithOptions = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgDyysCJY0XrO1n03EeRRoITnTPdjENFmWDs9X58PP3VUAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMNAAAAAAAAAAAAAAABAAAACHRlc3RjZXJ0AAAAAAAAAAAAAAAA//////////8AAABLAAAADWZvcmNlLWNvbW1hbmQAAAAOAAAACi9iaW4vc2xlZXAAAAAOc291cmNlLWFkZHJlc3MAAAASAAAADjE5Mi4xNjguMS4wLzI0AAAAggAAABVwZXJtaXQtWDExLWZvcndhcmRpbmcAAAAAAAAAF3Blcm1pdC1hZ2VudC1mb3J3YXJkaW5nAAAAAAAAABZwZXJtaXQtcG9ydC1mb3J3YXJkaW5nAAAAAAAAAApwZXJtaXQtcHR5AAAAAAAAAA5wZXJtaXQtdXNlci1yYwAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEAwU+c5ui5A8+J/CFpjW8wCa52bEODA808WWQDCSuTG/eMXNf59v9Y8Pk0F1E9dGCosSNyVcB/hacUrc6He+i97+HJCyKavBsE6GDxrjRyxYqAlfcOXi/IVmaUGiO8OQ39d4GHrjToInKvExSUeleQyH4Y4/e27T/pILAqPFL3fyrvMLT5qU9QyIt6zIpa7GBP5+urouNavMprV3zsfIqNBbWypinOQAw823a5wN+zwXnhZrgQiHZ/USG09Y6k98y1dTVz8YHlQVR4D3lpTAsKDKJ5hCH9WU4fdf+lU8OyNGaJ/vz0XNqxcToe1l4numLTnaoSuH89pHryjqurB7lJKwAAAQ8AAAAHc3NoLXJzYQAAAQCaHvUIoPL1zWUHIXLvu96/HU1s/i4CAW2IIEuGgxCUCiFj6vyTyYtgxQxcmbfZf6eaITlS6XJZa7Qq4iaFZh75C1DXTX8labXhRSD4E2t//AIP9MC1rtQC5xo6FmbQ+BoKcDskr+mNACcbRSxs3IL3bwCfWDnIw2WbVox9ZdcthJKk4UoCW4ix4QwdHw7zlddlz++fGEEVhmTbll1SUkycGApPFBsAYRTMupUJcYPIeReBI/m8XfkoMk99bV8ZJQTAd7OekHY2/48Ff53jLmyDjP7kNw1F8OaPtkFs6dGJXta4krmaekPy87j+35In5hFj7yoOqvSbmYUkeX70/GGQ`
-
-func TestParseCertWithOptions(t *testing.T) {
- opts := map[string]string{
- "source-address": "192.168.1.0/24",
- "force-command": "/bin/sleep",
- }
- exts := map[string]string{
- "permit-X11-forwarding": "",
- "permit-agent-forwarding": "",
- "permit-port-forwarding": "",
- "permit-pty": "",
- "permit-user-rc": "",
- }
- authKeyBytes := []byte(exampleSSHCertWithOptions)
-
- key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes)
- if err != nil {
- t.Fatalf("ParseAuthorizedKey: %v", err)
- }
- if len(rest) > 0 {
- t.Errorf("rest: got %q, want empty", rest)
- }
- cert, ok := key.(*Certificate)
- if !ok {
- t.Fatalf("got %v (%T), want *Certificate", key, key)
- }
- if !reflect.DeepEqual(cert.CriticalOptions, opts) {
- t.Errorf("unexpected critical options - got %v, want %v", cert.CriticalOptions, opts)
- }
- if !reflect.DeepEqual(cert.Extensions, exts) {
- t.Errorf("unexpected Extensions - got %v, want %v", cert.Extensions, exts)
- }
- marshaled := MarshalAuthorizedKey(key)
- // Before comparison, remove the trailing newline that
- // MarshalAuthorizedKey adds.
- marshaled = marshaled[:len(marshaled)-1]
- if !bytes.Equal(authKeyBytes, marshaled) {
- t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes)
- }
-}
-
-func TestValidateCert(t *testing.T) {
- key, _, _, _, err := ParseAuthorizedKey([]byte(exampleSSHCert))
- if err != nil {
- t.Fatalf("ParseAuthorizedKey: %v", err)
- }
- validCert, ok := key.(*Certificate)
- if !ok {
- t.Fatalf("got %v (%T), want *Certificate", key, key)
- }
- checker := CertChecker{}
- checker.IsAuthority = func(k PublicKey) bool {
- return bytes.Equal(k.Marshal(), validCert.SignatureKey.Marshal())
- }
-
- if err := checker.CheckCert("user", validCert); err != nil {
- t.Errorf("Unable to validate certificate: %v", err)
- }
- invalidCert := &Certificate{
- Key: testPublicKeys["rsa"],
- SignatureKey: testPublicKeys["ecdsa"],
- ValidBefore: CertTimeInfinity,
- Signature: &Signature{},
- }
- if err := checker.CheckCert("user", invalidCert); err == nil {
- t.Error("Invalid cert signature passed validation")
- }
-}
-
-func TestValidateCertTime(t *testing.T) {
- cert := Certificate{
- ValidPrincipals: []string{"user"},
- Key: testPublicKeys["rsa"],
- ValidAfter: 50,
- ValidBefore: 100,
- }
-
- cert.SignCert(rand.Reader, testSigners["ecdsa"])
-
- for ts, ok := range map[int64]bool{
- 25: false,
- 50: true,
- 99: true,
- 100: false,
- 125: false,
- } {
- checker := CertChecker{
- Clock: func() time.Time { return time.Unix(ts, 0) },
- }
- checker.IsAuthority = func(k PublicKey) bool {
- return bytes.Equal(k.Marshal(),
- testPublicKeys["ecdsa"].Marshal())
- }
-
- if v := checker.CheckCert("user", &cert); (v == nil) != ok {
- t.Errorf("Authenticate(%d): %v", ts, v)
- }
- }
-}
-
-// TODO(hanwen): tests for
-//
-// host keys:
-// * fallbacks
-
-func TestHostKeyCert(t *testing.T) {
- cert := &Certificate{
- ValidPrincipals: []string{"hostname", "hostname.domain"},
- Key: testPublicKeys["rsa"],
- ValidBefore: CertTimeInfinity,
- CertType: HostCert,
- }
- cert.SignCert(rand.Reader, testSigners["ecdsa"])
-
- checker := &CertChecker{
- IsAuthority: func(p PublicKey) bool {
- return bytes.Equal(testPublicKeys["ecdsa"].Marshal(), p.Marshal())
- },
- }
-
- certSigner, err := NewCertSigner(cert, testSigners["rsa"])
- if err != nil {
- t.Errorf("NewCertSigner: %v", err)
- }
-
- for _, name := range []string{"hostname", "otherhost"} {
- c1, c2, err := netPipe()
- if err != nil {
- t.Fatalf("netPipe: %v", err)
- }
- defer c1.Close()
- defer c2.Close()
-
- errc := make(chan error)
-
- go func() {
- conf := ServerConfig{
- NoClientAuth: true,
- }
- conf.AddHostKey(certSigner)
- _, _, _, err := NewServerConn(c1, &conf)
- errc <- err
- }()
-
- config := &ClientConfig{
- User: "user",
- HostKeyCallback: checker.CheckHostKey,
- }
- _, _, _, err = NewClientConn(c2, name, config)
-
- succeed := name == "hostname"
- if (err == nil) != succeed {
- t.Fatalf("NewClientConn(%q): %v", name, err)
- }
-
- err = <-errc
- if (err == nil) != succeed {
- t.Fatalf("NewServerConn(%q): %v", name, err)
- }
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go
deleted file mode 100644
index 6d709b5..0000000
--- a/vendor/golang.org/x/crypto/ssh/channel.go
+++ /dev/null
@@ -1,633 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "log"
- "sync"
-)
-
-const (
- minPacketLength = 9
- // channelMaxPacket contains the maximum number of bytes that will be
- // sent in a single packet. As per RFC 4253, section 6.1, 32k is also
- // the minimum.
- channelMaxPacket = 1 << 15
- // We follow OpenSSH here.
- channelWindowSize = 64 * channelMaxPacket
-)
-
-// NewChannel represents an incoming request to a channel. It must either be
-// accepted for use by calling Accept, or rejected by calling Reject.
-type NewChannel interface {
- // Accept accepts the channel creation request. It returns the Channel
- // and a Go channel containing SSH requests. The Go channel must be
- // serviced otherwise the Channel will hang.
- Accept() (Channel, <-chan *Request, error)
-
- // Reject rejects the channel creation request. After calling
- // this, no other methods on the Channel may be called.
- Reject(reason RejectionReason, message string) error
-
- // ChannelType returns the type of the channel, as supplied by the
- // client.
- ChannelType() string
-
- // ExtraData returns the arbitrary payload for this channel, as supplied
- // by the client. This data is specific to the channel type.
- ExtraData() []byte
-}
-
-// A Channel is an ordered, reliable, flow-controlled, duplex stream
-// that is multiplexed over an SSH connection.
-type Channel interface {
- // Read reads up to len(data) bytes from the channel.
- Read(data []byte) (int, error)
-
- // Write writes len(data) bytes to the channel.
- Write(data []byte) (int, error)
-
- // Close signals end of channel use. No data may be sent after this
- // call.
- Close() error
-
- // CloseWrite signals the end of sending in-band
- // data. Requests may still be sent, and the other side may
- // still send data
- CloseWrite() error
-
- // SendRequest sends a channel request. If wantReply is true,
- // it will wait for a reply and return the result as a
- // boolean, otherwise the return value will be false. Channel
- // requests are out-of-band messages so they may be sent even
- // if the data stream is closed or blocked by flow control.
- // If the channel is closed before a reply is returned, io.EOF
- // is returned.
- SendRequest(name string, wantReply bool, payload []byte) (bool, error)
-
- // Stderr returns an io.ReadWriter that writes to this channel
- // with the extended data type set to stderr. Stderr may
- // safely be read and written from a different goroutine than
- // Read and Write respectively.
- Stderr() io.ReadWriter
-}
-
-// Request is a request sent outside of the normal stream of
-// data. Requests can either be specific to an SSH channel, or they
-// can be global.
-type Request struct {
- Type string
- WantReply bool
- Payload []byte
-
- ch *channel
- mux *mux
-}
-
-// Reply sends a response to a request. It must be called for all requests
-// where WantReply is true and is a no-op otherwise. The payload argument is
-// ignored for replies to channel-specific requests.
-func (r *Request) Reply(ok bool, payload []byte) error {
- if !r.WantReply {
- return nil
- }
-
- if r.ch == nil {
- return r.mux.ackRequest(ok, payload)
- }
-
- return r.ch.ackRequest(ok)
-}
-
-// RejectionReason is an enumeration used when rejecting channel creation
-// requests. See RFC 4254, section 5.1.
-type RejectionReason uint32
-
-const (
- Prohibited RejectionReason = iota + 1
- ConnectionFailed
- UnknownChannelType
- ResourceShortage
-)
-
-// String converts the rejection reason to human readable form.
-func (r RejectionReason) String() string {
- switch r {
- case Prohibited:
- return "administratively prohibited"
- case ConnectionFailed:
- return "connect failed"
- case UnknownChannelType:
- return "unknown channel type"
- case ResourceShortage:
- return "resource shortage"
- }
- return fmt.Sprintf("unknown reason %d", int(r))
-}
-
-func min(a uint32, b int) uint32 {
- if a < uint32(b) {
- return a
- }
- return uint32(b)
-}
-
-type channelDirection uint8
-
-const (
- channelInbound channelDirection = iota
- channelOutbound
-)
-
-// channel is an implementation of the Channel interface that works
-// with the mux class.
-type channel struct {
- // R/O after creation
- chanType string
- extraData []byte
- localId, remoteId uint32
-
- // maxIncomingPayload and maxRemotePayload are the maximum
- // payload sizes of normal and extended data packets for
- // receiving and sending, respectively. The wire packet will
- // be 9 or 13 bytes larger (excluding encryption overhead).
- maxIncomingPayload uint32
- maxRemotePayload uint32
-
- mux *mux
-
- // decided is set to true if an accept or reject message has been sent
- // (for outbound channels) or received (for inbound channels).
- decided bool
-
- // direction contains either channelOutbound, for channels created
- // locally, or channelInbound, for channels created by the peer.
- direction channelDirection
-
- // Pending internal channel messages.
- msg chan interface{}
-
- // Since requests have no ID, there can be only one request
- // with WantReply=true outstanding. This lock is held by a
- // goroutine that has such an outgoing request pending.
- sentRequestMu sync.Mutex
-
- incomingRequests chan *Request
-
- sentEOF bool
-
- // thread-safe data
- remoteWin window
- pending *buffer
- extPending *buffer
-
- // windowMu protects myWindow, the flow-control window.
- windowMu sync.Mutex
- myWindow uint32
-
- // writeMu serializes calls to mux.conn.writePacket() and
- // protects sentClose and packetPool. This mutex must be
- // different from windowMu, as writePacket can block if there
- // is a key exchange pending.
- writeMu sync.Mutex
- sentClose bool
-
- // packetPool has a buffer for each extended channel ID to
- // save allocations during writes.
- packetPool map[uint32][]byte
-}
-
-// writePacket sends a packet. If the packet is a channel close, it updates
-// sentClose. This method takes the lock c.writeMu.
-func (c *channel) writePacket(packet []byte) error {
- c.writeMu.Lock()
- if c.sentClose {
- c.writeMu.Unlock()
- return io.EOF
- }
- c.sentClose = (packet[0] == msgChannelClose)
- err := c.mux.conn.writePacket(packet)
- c.writeMu.Unlock()
- return err
-}
-
-func (c *channel) sendMessage(msg interface{}) error {
- if debugMux {
- log.Printf("send(%d): %#v", c.mux.chanList.offset, msg)
- }
-
- p := Marshal(msg)
- binary.BigEndian.PutUint32(p[1:], c.remoteId)
- return c.writePacket(p)
-}
-
-// WriteExtended writes data to a specific extended stream. These streams are
-// used, for example, for stderr.
-func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) {
- if c.sentEOF {
- return 0, io.EOF
- }
- // 1 byte message type, 4 bytes remoteId, 4 bytes data length
- opCode := byte(msgChannelData)
- headerLength := uint32(9)
- if extendedCode > 0 {
- headerLength += 4
- opCode = msgChannelExtendedData
- }
-
- c.writeMu.Lock()
- packet := c.packetPool[extendedCode]
- // We don't remove the buffer from packetPool, so
- // WriteExtended calls from different goroutines will be
- // flagged as errors by the race detector.
- c.writeMu.Unlock()
-
- for len(data) > 0 {
- space := min(c.maxRemotePayload, len(data))
- if space, err = c.remoteWin.reserve(space); err != nil {
- return n, err
- }
- if want := headerLength + space; uint32(cap(packet)) < want {
- packet = make([]byte, want)
- } else {
- packet = packet[:want]
- }
-
- todo := data[:space]
-
- packet[0] = opCode
- binary.BigEndian.PutUint32(packet[1:], c.remoteId)
- if extendedCode > 0 {
- binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode))
- }
- binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo)))
- copy(packet[headerLength:], todo)
- if err = c.writePacket(packet); err != nil {
- return n, err
- }
-
- n += len(todo)
- data = data[len(todo):]
- }
-
- c.writeMu.Lock()
- c.packetPool[extendedCode] = packet
- c.writeMu.Unlock()
-
- return n, err
-}
-
-func (c *channel) handleData(packet []byte) error {
- headerLen := 9
- isExtendedData := packet[0] == msgChannelExtendedData
- if isExtendedData {
- headerLen = 13
- }
- if len(packet) < headerLen {
- // malformed data packet
- return parseError(packet[0])
- }
-
- var extended uint32
- if isExtendedData {
- extended = binary.BigEndian.Uint32(packet[5:])
- }
-
- length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen])
- if length == 0 {
- return nil
- }
- if length > c.maxIncomingPayload {
- // TODO(hanwen): should send Disconnect?
- return errors.New("ssh: incoming packet exceeds maximum payload size")
- }
-
- data := packet[headerLen:]
- if length != uint32(len(data)) {
- return errors.New("ssh: wrong packet length")
- }
-
- c.windowMu.Lock()
- if c.myWindow < length {
- c.windowMu.Unlock()
- // TODO(hanwen): should send Disconnect with reason?
- return errors.New("ssh: remote side wrote too much")
- }
- c.myWindow -= length
- c.windowMu.Unlock()
-
- if extended == 1 {
- c.extPending.write(data)
- } else if extended > 0 {
- // discard other extended data.
- } else {
- c.pending.write(data)
- }
- return nil
-}
-
-func (c *channel) adjustWindow(n uint32) error {
- c.windowMu.Lock()
- // Since myWindow is managed on our side, and can never exceed
- // the initial window setting, we don't worry about overflow.
- c.myWindow += uint32(n)
- c.windowMu.Unlock()
- return c.sendMessage(windowAdjustMsg{
- AdditionalBytes: uint32(n),
- })
-}
-
-func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) {
- switch extended {
- case 1:
- n, err = c.extPending.Read(data)
- case 0:
- n, err = c.pending.Read(data)
- default:
- return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended)
- }
-
- if n > 0 {
- err = c.adjustWindow(uint32(n))
- // sendWindowAdjust can return io.EOF if the remote
- // peer has closed the connection, however we want to
- // defer forwarding io.EOF to the caller of Read until
- // the buffer has been drained.
- if n > 0 && err == io.EOF {
- err = nil
- }
- }
-
- return n, err
-}
-
-func (c *channel) close() {
- c.pending.eof()
- c.extPending.eof()
- close(c.msg)
- close(c.incomingRequests)
- c.writeMu.Lock()
- // This is not necessary for a normal channel teardown, but if
- // there was another error, it is.
- c.sentClose = true
- c.writeMu.Unlock()
- // Unblock writers.
- c.remoteWin.close()
-}
-
-// responseMessageReceived is called when a success or failure message is
-// received on a channel to check that such a message is reasonable for the
-// given channel.
-func (c *channel) responseMessageReceived() error {
- if c.direction == channelInbound {
- return errors.New("ssh: channel response message received on inbound channel")
- }
- if c.decided {
- return errors.New("ssh: duplicate response received for channel")
- }
- c.decided = true
- return nil
-}
-
-func (c *channel) handlePacket(packet []byte) error {
- switch packet[0] {
- case msgChannelData, msgChannelExtendedData:
- return c.handleData(packet)
- case msgChannelClose:
- c.sendMessage(channelCloseMsg{PeersId: c.remoteId})
- c.mux.chanList.remove(c.localId)
- c.close()
- return nil
- case msgChannelEOF:
- // RFC 4254 is mute on how EOF affects dataExt messages but
- // it is logical to signal EOF at the same time.
- c.extPending.eof()
- c.pending.eof()
- return nil
- }
-
- decoded, err := decode(packet)
- if err != nil {
- return err
- }
-
- switch msg := decoded.(type) {
- case *channelOpenFailureMsg:
- if err := c.responseMessageReceived(); err != nil {
- return err
- }
- c.mux.chanList.remove(msg.PeersId)
- c.msg <- msg
- case *channelOpenConfirmMsg:
- if err := c.responseMessageReceived(); err != nil {
- return err
- }
- if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
- return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize)
- }
- c.remoteId = msg.MyId
- c.maxRemotePayload = msg.MaxPacketSize
- c.remoteWin.add(msg.MyWindow)
- c.msg <- msg
- case *windowAdjustMsg:
- if !c.remoteWin.add(msg.AdditionalBytes) {
- return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes)
- }
- case *channelRequestMsg:
- req := Request{
- Type: msg.Request,
- WantReply: msg.WantReply,
- Payload: msg.RequestSpecificData,
- ch: c,
- }
-
- c.incomingRequests <- &req
- default:
- c.msg <- msg
- }
- return nil
-}
-
-func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel {
- ch := &channel{
- remoteWin: window{Cond: newCond()},
- myWindow: channelWindowSize,
- pending: newBuffer(),
- extPending: newBuffer(),
- direction: direction,
- incomingRequests: make(chan *Request, 16),
- msg: make(chan interface{}, 16),
- chanType: chanType,
- extraData: extraData,
- mux: m,
- packetPool: make(map[uint32][]byte),
- }
- ch.localId = m.chanList.add(ch)
- return ch
-}
-
-var errUndecided = errors.New("ssh: must Accept or Reject channel")
-var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once")
-
-type extChannel struct {
- code uint32
- ch *channel
-}
-
-func (e *extChannel) Write(data []byte) (n int, err error) {
- return e.ch.WriteExtended(data, e.code)
-}
-
-func (e *extChannel) Read(data []byte) (n int, err error) {
- return e.ch.ReadExtended(data, e.code)
-}
-
-func (c *channel) Accept() (Channel, <-chan *Request, error) {
- if c.decided {
- return nil, nil, errDecidedAlready
- }
- c.maxIncomingPayload = channelMaxPacket
- confirm := channelOpenConfirmMsg{
- PeersId: c.remoteId,
- MyId: c.localId,
- MyWindow: c.myWindow,
- MaxPacketSize: c.maxIncomingPayload,
- }
- c.decided = true
- if err := c.sendMessage(confirm); err != nil {
- return nil, nil, err
- }
-
- return c, c.incomingRequests, nil
-}
-
-func (ch *channel) Reject(reason RejectionReason, message string) error {
- if ch.decided {
- return errDecidedAlready
- }
- reject := channelOpenFailureMsg{
- PeersId: ch.remoteId,
- Reason: reason,
- Message: message,
- Language: "en",
- }
- ch.decided = true
- return ch.sendMessage(reject)
-}
-
-func (ch *channel) Read(data []byte) (int, error) {
- if !ch.decided {
- return 0, errUndecided
- }
- return ch.ReadExtended(data, 0)
-}
-
-func (ch *channel) Write(data []byte) (int, error) {
- if !ch.decided {
- return 0, errUndecided
- }
- return ch.WriteExtended(data, 0)
-}
-
-func (ch *channel) CloseWrite() error {
- if !ch.decided {
- return errUndecided
- }
- ch.sentEOF = true
- return ch.sendMessage(channelEOFMsg{
- PeersId: ch.remoteId})
-}
-
-func (ch *channel) Close() error {
- if !ch.decided {
- return errUndecided
- }
-
- return ch.sendMessage(channelCloseMsg{
- PeersId: ch.remoteId})
-}
-
-// Extended returns an io.ReadWriter that sends and receives data on the given,
-// SSH extended stream. Such streams are used, for example, for stderr.
-func (ch *channel) Extended(code uint32) io.ReadWriter {
- if !ch.decided {
- return nil
- }
- return &extChannel{code, ch}
-}
-
-func (ch *channel) Stderr() io.ReadWriter {
- return ch.Extended(1)
-}
-
-func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
- if !ch.decided {
- return false, errUndecided
- }
-
- if wantReply {
- ch.sentRequestMu.Lock()
- defer ch.sentRequestMu.Unlock()
- }
-
- msg := channelRequestMsg{
- PeersId: ch.remoteId,
- Request: name,
- WantReply: wantReply,
- RequestSpecificData: payload,
- }
-
- if err := ch.sendMessage(msg); err != nil {
- return false, err
- }
-
- if wantReply {
- m, ok := (<-ch.msg)
- if !ok {
- return false, io.EOF
- }
- switch m.(type) {
- case *channelRequestFailureMsg:
- return false, nil
- case *channelRequestSuccessMsg:
- return true, nil
- default:
- return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m)
- }
- }
-
- return false, nil
-}
-
-// ackRequest either sends an ack or nack to the channel request.
-func (ch *channel) ackRequest(ok bool) error {
- if !ch.decided {
- return errUndecided
- }
-
- var msg interface{}
- if !ok {
- msg = channelRequestFailureMsg{
- PeersId: ch.remoteId,
- }
- } else {
- msg = channelRequestSuccessMsg{
- PeersId: ch.remoteId,
- }
- }
- return ch.sendMessage(msg)
-}
-
-func (ch *channel) ChannelType() string {
- return ch.chanType
-}
-
-func (ch *channel) ExtraData() []byte {
- return ch.extraData
-}
diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go
deleted file mode 100644
index 34d3917..0000000
--- a/vendor/golang.org/x/crypto/ssh/cipher.go
+++ /dev/null
@@ -1,579 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "crypto/aes"
- "crypto/cipher"
- "crypto/des"
- "crypto/rc4"
- "crypto/subtle"
- "encoding/binary"
- "errors"
- "fmt"
- "hash"
- "io"
- "io/ioutil"
-)
-
-const (
- packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher.
-
- // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations
- // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC
- // indicates implementations SHOULD be able to handle larger packet sizes, but then
- // waffles on about reasonable limits.
- //
- // OpenSSH caps their maxPacket at 256kB so we choose to do
- // the same. maxPacket is also used to ensure that uint32
- // length fields do not overflow, so it should remain well
- // below 4G.
- maxPacket = 256 * 1024
-)
-
-// noneCipher implements cipher.Stream and provides no encryption. It is used
-// by the transport before the first key-exchange.
-type noneCipher struct{}
-
-func (c noneCipher) XORKeyStream(dst, src []byte) {
- copy(dst, src)
-}
-
-func newAESCTR(key, iv []byte) (cipher.Stream, error) {
- c, err := aes.NewCipher(key)
- if err != nil {
- return nil, err
- }
- return cipher.NewCTR(c, iv), nil
-}
-
-func newRC4(key, iv []byte) (cipher.Stream, error) {
- return rc4.NewCipher(key)
-}
-
-type streamCipherMode struct {
- keySize int
- ivSize int
- skip int
- createFunc func(key, iv []byte) (cipher.Stream, error)
-}
-
-func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) {
- if len(key) < c.keySize {
- panic("ssh: key length too small for cipher")
- }
- if len(iv) < c.ivSize {
- panic("ssh: iv too small for cipher")
- }
-
- stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize])
- if err != nil {
- return nil, err
- }
-
- var streamDump []byte
- if c.skip > 0 {
- streamDump = make([]byte, 512)
- }
-
- for remainingToDump := c.skip; remainingToDump > 0; {
- dumpThisTime := remainingToDump
- if dumpThisTime > len(streamDump) {
- dumpThisTime = len(streamDump)
- }
- stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime])
- remainingToDump -= dumpThisTime
- }
-
- return stream, nil
-}
-
-// cipherModes documents properties of supported ciphers. Ciphers not included
-// are not supported and will not be negotiated, even if explicitly requested in
-// ClientConfig.Crypto.Ciphers.
-var cipherModes = map[string]*streamCipherMode{
- // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms
- // are defined in the order specified in the RFC.
- "aes128-ctr": {16, aes.BlockSize, 0, newAESCTR},
- "aes192-ctr": {24, aes.BlockSize, 0, newAESCTR},
- "aes256-ctr": {32, aes.BlockSize, 0, newAESCTR},
-
- // Ciphers from RFC4345, which introduces security-improved arcfour ciphers.
- // They are defined in the order specified in the RFC.
- "arcfour128": {16, 0, 1536, newRC4},
- "arcfour256": {32, 0, 1536, newRC4},
-
- // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
- // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
- // RC4) has problems with weak keys, and should be used with caution."
- // RFC4345 introduces improved versions of Arcfour.
- "arcfour": {16, 0, 0, newRC4},
-
- // AES-GCM is not a stream cipher, so it is constructed with a
- // special case. If we add any more non-stream ciphers, we
- // should invest a cleaner way to do this.
- gcmCipherID: {16, 12, 0, nil},
-
- // CBC mode is insecure and so is not included in the default config.
- // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
- // needed, it's possible to specify a custom Config to enable it.
- // You should expect that an active attacker can recover plaintext if
- // you do.
- aes128cbcID: {16, aes.BlockSize, 0, nil},
-
- // 3des-cbc is insecure and is disabled by default.
- tripledescbcID: {24, des.BlockSize, 0, nil},
-}
-
-// prefixLen is the length of the packet prefix that contains the packet length
-// and number of padding bytes.
-const prefixLen = 5
-
-// streamPacketCipher is a packetCipher using a stream cipher.
-type streamPacketCipher struct {
- mac hash.Hash
- cipher cipher.Stream
-
- // The following members are to avoid per-packet allocations.
- prefix [prefixLen]byte
- seqNumBytes [4]byte
- padding [2 * packetSizeMultiple]byte
- packetData []byte
- macResult []byte
-}
-
-// readPacket reads and decrypt a single packet from the reader argument.
-func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
- if _, err := io.ReadFull(r, s.prefix[:]); err != nil {
- return nil, err
- }
-
- s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
- length := binary.BigEndian.Uint32(s.prefix[0:4])
- paddingLength := uint32(s.prefix[4])
-
- var macSize uint32
- if s.mac != nil {
- s.mac.Reset()
- binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
- s.mac.Write(s.seqNumBytes[:])
- s.mac.Write(s.prefix[:])
- macSize = uint32(s.mac.Size())
- }
-
- if length <= paddingLength+1 {
- return nil, errors.New("ssh: invalid packet length, packet too small")
- }
-
- if length > maxPacket {
- return nil, errors.New("ssh: invalid packet length, packet too large")
- }
-
- // the maxPacket check above ensures that length-1+macSize
- // does not overflow.
- if uint32(cap(s.packetData)) < length-1+macSize {
- s.packetData = make([]byte, length-1+macSize)
- } else {
- s.packetData = s.packetData[:length-1+macSize]
- }
-
- if _, err := io.ReadFull(r, s.packetData); err != nil {
- return nil, err
- }
- mac := s.packetData[length-1:]
- data := s.packetData[:length-1]
- s.cipher.XORKeyStream(data, data)
-
- if s.mac != nil {
- s.mac.Write(data)
- s.macResult = s.mac.Sum(s.macResult[:0])
- if subtle.ConstantTimeCompare(s.macResult, mac) != 1 {
- return nil, errors.New("ssh: MAC failure")
- }
- }
-
- return s.packetData[:length-paddingLength-1], nil
-}
-
-// writePacket encrypts and sends a packet of data to the writer argument
-func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
- if len(packet) > maxPacket {
- return errors.New("ssh: packet too large")
- }
-
- paddingLength := packetSizeMultiple - (prefixLen+len(packet))%packetSizeMultiple
- if paddingLength < 4 {
- paddingLength += packetSizeMultiple
- }
-
- length := len(packet) + 1 + paddingLength
- binary.BigEndian.PutUint32(s.prefix[:], uint32(length))
- s.prefix[4] = byte(paddingLength)
- padding := s.padding[:paddingLength]
- if _, err := io.ReadFull(rand, padding); err != nil {
- return err
- }
-
- if s.mac != nil {
- s.mac.Reset()
- binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
- s.mac.Write(s.seqNumBytes[:])
- s.mac.Write(s.prefix[:])
- s.mac.Write(packet)
- s.mac.Write(padding)
- }
-
- s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
- s.cipher.XORKeyStream(packet, packet)
- s.cipher.XORKeyStream(padding, padding)
-
- if _, err := w.Write(s.prefix[:]); err != nil {
- return err
- }
- if _, err := w.Write(packet); err != nil {
- return err
- }
- if _, err := w.Write(padding); err != nil {
- return err
- }
-
- if s.mac != nil {
- s.macResult = s.mac.Sum(s.macResult[:0])
- if _, err := w.Write(s.macResult); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-type gcmCipher struct {
- aead cipher.AEAD
- prefix [4]byte
- iv []byte
- buf []byte
-}
-
-func newGCMCipher(iv, key, macKey []byte) (packetCipher, error) {
- c, err := aes.NewCipher(key)
- if err != nil {
- return nil, err
- }
-
- aead, err := cipher.NewGCM(c)
- if err != nil {
- return nil, err
- }
-
- return &gcmCipher{
- aead: aead,
- iv: iv,
- }, nil
-}
-
-const gcmTagSize = 16
-
-func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
- // Pad out to multiple of 16 bytes. This is different from the
- // stream cipher because that encrypts the length too.
- padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple)
- if padding < 4 {
- padding += packetSizeMultiple
- }
-
- length := uint32(len(packet) + int(padding) + 1)
- binary.BigEndian.PutUint32(c.prefix[:], length)
- if _, err := w.Write(c.prefix[:]); err != nil {
- return err
- }
-
- if cap(c.buf) < int(length) {
- c.buf = make([]byte, length)
- } else {
- c.buf = c.buf[:length]
- }
-
- c.buf[0] = padding
- copy(c.buf[1:], packet)
- if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil {
- return err
- }
- c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:])
- if _, err := w.Write(c.buf); err != nil {
- return err
- }
- c.incIV()
-
- return nil
-}
-
-func (c *gcmCipher) incIV() {
- for i := 4 + 7; i >= 4; i-- {
- c.iv[i]++
- if c.iv[i] != 0 {
- break
- }
- }
-}
-
-func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
- if _, err := io.ReadFull(r, c.prefix[:]); err != nil {
- return nil, err
- }
- length := binary.BigEndian.Uint32(c.prefix[:])
- if length > maxPacket {
- return nil, errors.New("ssh: max packet length exceeded.")
- }
-
- if cap(c.buf) < int(length+gcmTagSize) {
- c.buf = make([]byte, length+gcmTagSize)
- } else {
- c.buf = c.buf[:length+gcmTagSize]
- }
-
- if _, err := io.ReadFull(r, c.buf); err != nil {
- return nil, err
- }
-
- plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:])
- if err != nil {
- return nil, err
- }
- c.incIV()
-
- padding := plain[0]
- if padding < 4 || padding >= 20 {
- return nil, fmt.Errorf("ssh: illegal padding %d", padding)
- }
-
- if int(padding+1) >= len(plain) {
- return nil, fmt.Errorf("ssh: padding %d too large", padding)
- }
- plain = plain[1 : length-uint32(padding)]
- return plain, nil
-}
-
-// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1
-type cbcCipher struct {
- mac hash.Hash
- macSize uint32
- decrypter cipher.BlockMode
- encrypter cipher.BlockMode
-
- // The following members are to avoid per-packet allocations.
- seqNumBytes [4]byte
- packetData []byte
- macResult []byte
-
- // Amount of data we should still read to hide which
- // verification error triggered.
- oracleCamouflage uint32
-}
-
-func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
- cbc := &cbcCipher{
- mac: macModes[algs.MAC].new(macKey),
- decrypter: cipher.NewCBCDecrypter(c, iv),
- encrypter: cipher.NewCBCEncrypter(c, iv),
- packetData: make([]byte, 1024),
- }
- if cbc.mac != nil {
- cbc.macSize = uint32(cbc.mac.Size())
- }
-
- return cbc, nil
-}
-
-func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
- c, err := aes.NewCipher(key)
- if err != nil {
- return nil, err
- }
-
- cbc, err := newCBCCipher(c, iv, key, macKey, algs)
- if err != nil {
- return nil, err
- }
-
- return cbc, nil
-}
-
-func newTripleDESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
- c, err := des.NewTripleDESCipher(key)
- if err != nil {
- return nil, err
- }
-
- cbc, err := newCBCCipher(c, iv, key, macKey, algs)
- if err != nil {
- return nil, err
- }
-
- return cbc, nil
-}
-
-func maxUInt32(a, b int) uint32 {
- if a > b {
- return uint32(a)
- }
- return uint32(b)
-}
-
-const (
- cbcMinPacketSizeMultiple = 8
- cbcMinPacketSize = 16
- cbcMinPaddingSize = 4
-)
-
-// cbcError represents a verification error that may leak information.
-type cbcError string
-
-func (e cbcError) Error() string { return string(e) }
-
-func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
- p, err := c.readPacketLeaky(seqNum, r)
- if err != nil {
- if _, ok := err.(cbcError); ok {
- // Verification error: read a fixed amount of
- // data, to make distinguishing between
- // failing MAC and failing length check more
- // difficult.
- io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage))
- }
- }
- return p, err
-}
-
-func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) {
- blockSize := c.decrypter.BlockSize()
-
- // Read the header, which will include some of the subsequent data in the
- // case of block ciphers - this is copied back to the payload later.
- // How many bytes of payload/padding will be read with this first read.
- firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize)
- firstBlock := c.packetData[:firstBlockLength]
- if _, err := io.ReadFull(r, firstBlock); err != nil {
- return nil, err
- }
-
- c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength
-
- c.decrypter.CryptBlocks(firstBlock, firstBlock)
- length := binary.BigEndian.Uint32(firstBlock[:4])
- if length > maxPacket {
- return nil, cbcError("ssh: packet too large")
- }
- if length+4 < maxUInt32(cbcMinPacketSize, blockSize) {
- // The minimum size of a packet is 16 (or the cipher block size, whichever
- // is larger) bytes.
- return nil, cbcError("ssh: packet too small")
- }
- // The length of the packet (including the length field but not the MAC) must
- // be a multiple of the block size or 8, whichever is larger.
- if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 {
- return nil, cbcError("ssh: invalid packet length multiple")
- }
-
- paddingLength := uint32(firstBlock[4])
- if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 {
- return nil, cbcError("ssh: invalid packet length")
- }
-
- // Positions within the c.packetData buffer:
- macStart := 4 + length
- paddingStart := macStart - paddingLength
-
- // Entire packet size, starting before length, ending at end of mac.
- entirePacketSize := macStart + c.macSize
-
- // Ensure c.packetData is large enough for the entire packet data.
- if uint32(cap(c.packetData)) < entirePacketSize {
- // Still need to upsize and copy, but this should be rare at runtime, only
- // on upsizing the packetData buffer.
- c.packetData = make([]byte, entirePacketSize)
- copy(c.packetData, firstBlock)
- } else {
- c.packetData = c.packetData[:entirePacketSize]
- }
-
- if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil {
- return nil, err
- } else {
- c.oracleCamouflage -= uint32(n)
- }
-
- remainingCrypted := c.packetData[firstBlockLength:macStart]
- c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted)
-
- mac := c.packetData[macStart:]
- if c.mac != nil {
- c.mac.Reset()
- binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
- c.mac.Write(c.seqNumBytes[:])
- c.mac.Write(c.packetData[:macStart])
- c.macResult = c.mac.Sum(c.macResult[:0])
- if subtle.ConstantTimeCompare(c.macResult, mac) != 1 {
- return nil, cbcError("ssh: MAC failure")
- }
- }
-
- return c.packetData[prefixLen:paddingStart], nil
-}
-
-func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
- effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize())
-
- // Length of encrypted portion of the packet (header, payload, padding).
- // Enforce minimum padding and packet size.
- encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize)
- // Enforce block size.
- encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize
-
- length := encLength - 4
- paddingLength := int(length) - (1 + len(packet))
-
- // Overall buffer contains: header, payload, padding, mac.
- // Space for the MAC is reserved in the capacity but not the slice length.
- bufferSize := encLength + c.macSize
- if uint32(cap(c.packetData)) < bufferSize {
- c.packetData = make([]byte, encLength, bufferSize)
- } else {
- c.packetData = c.packetData[:encLength]
- }
-
- p := c.packetData
-
- // Packet header.
- binary.BigEndian.PutUint32(p, length)
- p = p[4:]
- p[0] = byte(paddingLength)
-
- // Payload.
- p = p[1:]
- copy(p, packet)
-
- // Padding.
- p = p[len(packet):]
- if _, err := io.ReadFull(rand, p); err != nil {
- return err
- }
-
- if c.mac != nil {
- c.mac.Reset()
- binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
- c.mac.Write(c.seqNumBytes[:])
- c.mac.Write(c.packetData)
- // The MAC is now appended into the capacity reserved for it earlier.
- c.packetData = c.mac.Sum(c.packetData)
- }
-
- c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength])
-
- if _, err := w.Write(c.packetData); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/cipher_test.go b/vendor/golang.org/x/crypto/ssh/cipher_test.go
deleted file mode 100644
index eced8d8..0000000
--- a/vendor/golang.org/x/crypto/ssh/cipher_test.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "bytes"
- "crypto"
- "crypto/aes"
- "crypto/rand"
- "testing"
-)
-
-func TestDefaultCiphersExist(t *testing.T) {
- for _, cipherAlgo := range supportedCiphers {
- if _, ok := cipherModes[cipherAlgo]; !ok {
- t.Errorf("default cipher %q is unknown", cipherAlgo)
- }
- }
-}
-
-func TestPacketCiphers(t *testing.T) {
- // Still test aes128cbc cipher although it's commented out.
- cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil}
- defer delete(cipherModes, aes128cbcID)
-
- for cipher := range cipherModes {
- kr := &kexResult{Hash: crypto.SHA1}
- algs := directionAlgorithms{
- Cipher: cipher,
- MAC: "hmac-sha1",
- Compression: "none",
- }
- client, err := newPacketCipher(clientKeys, algs, kr)
- if err != nil {
- t.Errorf("newPacketCipher(client, %q): %v", cipher, err)
- continue
- }
- server, err := newPacketCipher(clientKeys, algs, kr)
- if err != nil {
- t.Errorf("newPacketCipher(client, %q): %v", cipher, err)
- continue
- }
-
- want := "bla bla"
- input := []byte(want)
- buf := &bytes.Buffer{}
- if err := client.writePacket(0, buf, rand.Reader, input); err != nil {
- t.Errorf("writePacket(%q): %v", cipher, err)
- continue
- }
-
- packet, err := server.readPacket(0, buf)
- if err != nil {
- t.Errorf("readPacket(%q): %v", cipher, err)
- continue
- }
-
- if string(packet) != want {
- t.Errorf("roundtrip(%q): got %q, want %q", cipher, packet, want)
- }
- }
-}
-
-func TestCBCOracleCounterMeasure(t *testing.T) {
- cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil}
- defer delete(cipherModes, aes128cbcID)
-
- kr := &kexResult{Hash: crypto.SHA1}
- algs := directionAlgorithms{
- Cipher: aes128cbcID,
- MAC: "hmac-sha1",
- Compression: "none",
- }
- client, err := newPacketCipher(clientKeys, algs, kr)
- if err != nil {
- t.Fatalf("newPacketCipher(client): %v", err)
- }
-
- want := "bla bla"
- input := []byte(want)
- buf := &bytes.Buffer{}
- if err := client.writePacket(0, buf, rand.Reader, input); err != nil {
- t.Errorf("writePacket: %v", err)
- }
-
- packetSize := buf.Len()
- buf.Write(make([]byte, 2*maxPacket))
-
- // We corrupt each byte, but this usually will only test the
- // 'packet too large' or 'MAC failure' cases.
- lastRead := -1
- for i := 0; i < packetSize; i++ {
- server, err := newPacketCipher(clientKeys, algs, kr)
- if err != nil {
- t.Fatalf("newPacketCipher(client): %v", err)
- }
-
- fresh := &bytes.Buffer{}
- fresh.Write(buf.Bytes())
- fresh.Bytes()[i] ^= 0x01
-
- before := fresh.Len()
- _, err = server.readPacket(0, fresh)
- if err == nil {
- t.Errorf("corrupt byte %d: readPacket succeeded ", i)
- continue
- }
- if _, ok := err.(cbcError); !ok {
- t.Errorf("corrupt byte %d: got %v (%T), want cbcError", i, err, err)
- continue
- }
-
- after := fresh.Len()
- bytesRead := before - after
- if bytesRead < maxPacket {
- t.Errorf("corrupt byte %d: read %d bytes, want more than %d", i, bytesRead, maxPacket)
- continue
- }
-
- if i > 0 && bytesRead != lastRead {
- t.Errorf("corrupt byte %d: read %d bytes, want %d bytes read", i, bytesRead, lastRead)
- }
- lastRead = bytesRead
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go
deleted file mode 100644
index 0212a20..0000000
--- a/vendor/golang.org/x/crypto/ssh/client.go
+++ /dev/null
@@ -1,213 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "errors"
- "fmt"
- "net"
- "sync"
- "time"
-)
-
-// Client implements a traditional SSH client that supports shells,
-// subprocesses, port forwarding and tunneled dialing.
-type Client struct {
- Conn
-
- forwards forwardList // forwarded tcpip connections from the remote side
- mu sync.Mutex
- channelHandlers map[string]chan NewChannel
-}
-
-// HandleChannelOpen returns a channel on which NewChannel requests
-// for the given type are sent. If the type already is being handled,
-// nil is returned. The channel is closed when the connection is closed.
-func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.channelHandlers == nil {
- // The SSH channel has been closed.
- c := make(chan NewChannel)
- close(c)
- return c
- }
-
- ch := c.channelHandlers[channelType]
- if ch != nil {
- return nil
- }
-
- ch = make(chan NewChannel, 16)
- c.channelHandlers[channelType] = ch
- return ch
-}
-
-// NewClient creates a Client on top of the given connection.
-func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client {
- conn := &Client{
- Conn: c,
- channelHandlers: make(map[string]chan NewChannel, 1),
- }
-
- go conn.handleGlobalRequests(reqs)
- go conn.handleChannelOpens(chans)
- go func() {
- conn.Wait()
- conn.forwards.closeAll()
- }()
- go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip"))
- return conn
-}
-
-// NewClientConn establishes an authenticated SSH connection using c
-// as the underlying transport. The Request and NewChannel channels
-// must be serviced or the connection will hang.
-func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) {
- fullConf := *config
- fullConf.SetDefaults()
- conn := &connection{
- sshConn: sshConn{conn: c},
- }
-
- if err := conn.clientHandshake(addr, &fullConf); err != nil {
- c.Close()
- return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err)
- }
- conn.mux = newMux(conn.transport)
- return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil
-}
-
-// clientHandshake performs the client side key exchange. See RFC 4253 Section
-// 7.
-func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error {
- if config.ClientVersion != "" {
- c.clientVersion = []byte(config.ClientVersion)
- } else {
- c.clientVersion = []byte(packageVersion)
- }
- var err error
- c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion)
- if err != nil {
- return err
- }
-
- c.transport = newClientTransport(
- newTransport(c.sshConn.conn, config.Rand, true /* is client */),
- c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr())
- if err := c.transport.requestInitialKeyChange(); err != nil {
- return err
- }
-
- // We just did the key change, so the session ID is established.
- c.sessionID = c.transport.getSessionID()
-
- return c.clientAuthenticate(config)
-}
-
-// verifyHostKeySignature verifies the host key obtained in the key
-// exchange.
-func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error {
- sig, rest, ok := parseSignatureBody(result.Signature)
- if len(rest) > 0 || !ok {
- return errors.New("ssh: signature parse error")
- }
-
- return hostKey.Verify(result.H, sig)
-}
-
-// NewSession opens a new Session for this client. (A session is a remote
-// execution of a program.)
-func (c *Client) NewSession() (*Session, error) {
- ch, in, err := c.OpenChannel("session", nil)
- if err != nil {
- return nil, err
- }
- return newSession(ch, in)
-}
-
-func (c *Client) handleGlobalRequests(incoming <-chan *Request) {
- for r := range incoming {
- // This handles keepalive messages and matches
- // the behaviour of OpenSSH.
- r.Reply(false, nil)
- }
-}
-
-// handleChannelOpens channel open messages from the remote side.
-func (c *Client) handleChannelOpens(in <-chan NewChannel) {
- for ch := range in {
- c.mu.Lock()
- handler := c.channelHandlers[ch.ChannelType()]
- c.mu.Unlock()
-
- if handler != nil {
- handler <- ch
- } else {
- ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType()))
- }
- }
-
- c.mu.Lock()
- for _, ch := range c.channelHandlers {
- close(ch)
- }
- c.channelHandlers = nil
- c.mu.Unlock()
-}
-
-// Dial starts a client connection to the given SSH server. It is a
-// convenience function that connects to the given network address,
-// initiates the SSH handshake, and then sets up a Client. For access
-// to incoming channels and requests, use net.Dial with NewClientConn
-// instead.
-func Dial(network, addr string, config *ClientConfig) (*Client, error) {
- conn, err := net.DialTimeout(network, addr, config.Timeout)
- if err != nil {
- return nil, err
- }
- c, chans, reqs, err := NewClientConn(conn, addr, config)
- if err != nil {
- return nil, err
- }
- return NewClient(c, chans, reqs), nil
-}
-
-// A ClientConfig structure is used to configure a Client. It must not be
-// modified after having been passed to an SSH function.
-type ClientConfig struct {
- // Config contains configuration that is shared between clients and
- // servers.
- Config
-
- // User contains the username to authenticate as.
- User string
-
- // Auth contains possible authentication methods to use with the
- // server. Only the first instance of a particular RFC 4252 method will
- // be used during authentication.
- Auth []AuthMethod
-
- // HostKeyCallback, if not nil, is called during the cryptographic
- // handshake to validate the server's host key. A nil HostKeyCallback
- // implies that all host keys are accepted.
- HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
-
- // ClientVersion contains the version identification string that will
- // be used for the connection. If empty, a reasonable default is used.
- ClientVersion string
-
- // HostKeyAlgorithms lists the key types that the client will
- // accept from the server as host key, in order of
- // preference. If empty, a reasonable default is used. Any
- // string returned from PublicKey.Type method may be used, or
- // any of the CertAlgoXxxx and KeyAlgoXxxx constants.
- HostKeyAlgorithms []string
-
- // Timeout is the maximum amount of time for the TCP connection to establish.
- //
- // A Timeout of zero means no timeout.
- Timeout time.Duration
-}
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go
deleted file mode 100644
index 294af0d..0000000
--- a/vendor/golang.org/x/crypto/ssh/client_auth.go
+++ /dev/null
@@ -1,473 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
-)
-
-// clientAuthenticate authenticates with the remote server. See RFC 4252.
-func (c *connection) clientAuthenticate(config *ClientConfig) error {
- // initiate user auth session
- if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil {
- return err
- }
- packet, err := c.transport.readPacket()
- if err != nil {
- return err
- }
- var serviceAccept serviceAcceptMsg
- if err := Unmarshal(packet, &serviceAccept); err != nil {
- return err
- }
-
- // during the authentication phase the client first attempts the "none" method
- // then any untried methods suggested by the server.
- tried := make(map[string]bool)
- var lastMethods []string
- for auth := AuthMethod(new(noneAuth)); auth != nil; {
- ok, methods, err := auth.auth(c.transport.getSessionID(), config.User, c.transport, config.Rand)
- if err != nil {
- return err
- }
- if ok {
- // success
- return nil
- }
- tried[auth.method()] = true
- if methods == nil {
- methods = lastMethods
- }
- lastMethods = methods
-
- auth = nil
-
- findNext:
- for _, a := range config.Auth {
- candidateMethod := a.method()
- if tried[candidateMethod] {
- continue
- }
- for _, meth := range methods {
- if meth == candidateMethod {
- auth = a
- break findNext
- }
- }
- }
- }
- return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried))
-}
-
-func keys(m map[string]bool) []string {
- s := make([]string, 0, len(m))
-
- for key := range m {
- s = append(s, key)
- }
- return s
-}
-
-// An AuthMethod represents an instance of an RFC 4252 authentication method.
-type AuthMethod interface {
- // auth authenticates user over transport t.
- // Returns true if authentication is successful.
- // If authentication is not successful, a []string of alternative
- // method names is returned. If the slice is nil, it will be ignored
- // and the previous set of possible methods will be reused.
- auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error)
-
- // method returns the RFC 4252 method name.
- method() string
-}
-
-// "none" authentication, RFC 4252 section 5.2.
-type noneAuth int
-
-func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
- if err := c.writePacket(Marshal(&userAuthRequestMsg{
- User: user,
- Service: serviceSSH,
- Method: "none",
- })); err != nil {
- return false, nil, err
- }
-
- return handleAuthResponse(c)
-}
-
-func (n *noneAuth) method() string {
- return "none"
-}
-
-// passwordCallback is an AuthMethod that fetches the password through
-// a function call, e.g. by prompting the user.
-type passwordCallback func() (password string, err error)
-
-func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
- type passwordAuthMsg struct {
- User string `sshtype:"50"`
- Service string
- Method string
- Reply bool
- Password string
- }
-
- pw, err := cb()
- // REVIEW NOTE: is there a need to support skipping a password attempt?
- // The program may only find out that the user doesn't have a password
- // when prompting.
- if err != nil {
- return false, nil, err
- }
-
- if err := c.writePacket(Marshal(&passwordAuthMsg{
- User: user,
- Service: serviceSSH,
- Method: cb.method(),
- Reply: false,
- Password: pw,
- })); err != nil {
- return false, nil, err
- }
-
- return handleAuthResponse(c)
-}
-
-func (cb passwordCallback) method() string {
- return "password"
-}
-
-// Password returns an AuthMethod using the given password.
-func Password(secret string) AuthMethod {
- return passwordCallback(func() (string, error) { return secret, nil })
-}
-
-// PasswordCallback returns an AuthMethod that uses a callback for
-// fetching a password.
-func PasswordCallback(prompt func() (secret string, err error)) AuthMethod {
- return passwordCallback(prompt)
-}
-
-type publickeyAuthMsg struct {
- User string `sshtype:"50"`
- Service string
- Method string
- // HasSig indicates to the receiver packet that the auth request is signed and
- // should be used for authentication of the request.
- HasSig bool
- Algoname string
- PubKey []byte
- // Sig is tagged with "rest" so Marshal will exclude it during
- // validateKey
- Sig []byte `ssh:"rest"`
-}
-
-// publicKeyCallback is an AuthMethod that uses a set of key
-// pairs for authentication.
-type publicKeyCallback func() ([]Signer, error)
-
-func (cb publicKeyCallback) method() string {
- return "publickey"
-}
-
-func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
- // Authentication is performed in two stages. The first stage sends an
- // enquiry to test if each key is acceptable to the remote. The second
- // stage attempts to authenticate with the valid keys obtained in the
- // first stage.
-
- signers, err := cb()
- if err != nil {
- return false, nil, err
- }
- var validKeys []Signer
- for _, signer := range signers {
- if ok, err := validateKey(signer.PublicKey(), user, c); ok {
- validKeys = append(validKeys, signer)
- } else {
- if err != nil {
- return false, nil, err
- }
- }
- }
-
- // methods that may continue if this auth is not successful.
- var methods []string
- for _, signer := range validKeys {
- pub := signer.PublicKey()
-
- pubKey := pub.Marshal()
- sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{
- User: user,
- Service: serviceSSH,
- Method: cb.method(),
- }, []byte(pub.Type()), pubKey))
- if err != nil {
- return false, nil, err
- }
-
- // manually wrap the serialized signature in a string
- s := Marshal(sign)
- sig := make([]byte, stringLength(len(s)))
- marshalString(sig, s)
- msg := publickeyAuthMsg{
- User: user,
- Service: serviceSSH,
- Method: cb.method(),
- HasSig: true,
- Algoname: pub.Type(),
- PubKey: pubKey,
- Sig: sig,
- }
- p := Marshal(&msg)
- if err := c.writePacket(p); err != nil {
- return false, nil, err
- }
- var success bool
- success, methods, err = handleAuthResponse(c)
- if err != nil {
- return false, nil, err
- }
- if success {
- return success, methods, err
- }
- }
- return false, methods, nil
-}
-
-// validateKey validates the key provided is acceptable to the server.
-func validateKey(key PublicKey, user string, c packetConn) (bool, error) {
- pubKey := key.Marshal()
- msg := publickeyAuthMsg{
- User: user,
- Service: serviceSSH,
- Method: "publickey",
- HasSig: false,
- Algoname: key.Type(),
- PubKey: pubKey,
- }
- if err := c.writePacket(Marshal(&msg)); err != nil {
- return false, err
- }
-
- return confirmKeyAck(key, c)
-}
-
-func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
- pubKey := key.Marshal()
- algoname := key.Type()
-
- for {
- packet, err := c.readPacket()
- if err != nil {
- return false, err
- }
- switch packet[0] {
- case msgUserAuthBanner:
- // TODO(gpaul): add callback to present the banner to the user
- case msgUserAuthPubKeyOk:
- var msg userAuthPubKeyOkMsg
- if err := Unmarshal(packet, &msg); err != nil {
- return false, err
- }
- if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) {
- return false, nil
- }
- return true, nil
- case msgUserAuthFailure:
- return false, nil
- default:
- return false, unexpectedMessageError(msgUserAuthSuccess, packet[0])
- }
- }
-}
-
-// PublicKeys returns an AuthMethod that uses the given key
-// pairs.
-func PublicKeys(signers ...Signer) AuthMethod {
- return publicKeyCallback(func() ([]Signer, error) { return signers, nil })
-}
-
-// PublicKeysCallback returns an AuthMethod that runs the given
-// function to obtain a list of key pairs.
-func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod {
- return publicKeyCallback(getSigners)
-}
-
-// handleAuthResponse returns whether the preceding authentication request succeeded
-// along with a list of remaining authentication methods to try next and
-// an error if an unexpected response was received.
-func handleAuthResponse(c packetConn) (bool, []string, error) {
- for {
- packet, err := c.readPacket()
- if err != nil {
- return false, nil, err
- }
-
- switch packet[0] {
- case msgUserAuthBanner:
- // TODO: add callback to present the banner to the user
- case msgUserAuthFailure:
- var msg userAuthFailureMsg
- if err := Unmarshal(packet, &msg); err != nil {
- return false, nil, err
- }
- return false, msg.Methods, nil
- case msgUserAuthSuccess:
- return true, nil, nil
- default:
- return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
- }
- }
-}
-
-// KeyboardInteractiveChallenge should print questions, optionally
-// disabling echoing (e.g. for passwords), and return all the answers.
-// Challenge may be called multiple times in a single session. After
-// successful authentication, the server may send a challenge with no
-// questions, for which the user and instruction messages should be
-// printed. RFC 4256 section 3.3 details how the UI should behave for
-// both CLI and GUI environments.
-type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
-
-// KeyboardInteractive returns a AuthMethod using a prompt/response
-// sequence controlled by the server.
-func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod {
- return challenge
-}
-
-func (cb KeyboardInteractiveChallenge) method() string {
- return "keyboard-interactive"
-}
-
-func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
- type initiateMsg struct {
- User string `sshtype:"50"`
- Service string
- Method string
- Language string
- Submethods string
- }
-
- if err := c.writePacket(Marshal(&initiateMsg{
- User: user,
- Service: serviceSSH,
- Method: "keyboard-interactive",
- })); err != nil {
- return false, nil, err
- }
-
- for {
- packet, err := c.readPacket()
- if err != nil {
- return false, nil, err
- }
-
- // like handleAuthResponse, but with less options.
- switch packet[0] {
- case msgUserAuthBanner:
- // TODO: Print banners during userauth.
- continue
- case msgUserAuthInfoRequest:
- // OK
- case msgUserAuthFailure:
- var msg userAuthFailureMsg
- if err := Unmarshal(packet, &msg); err != nil {
- return false, nil, err
- }
- return false, msg.Methods, nil
- case msgUserAuthSuccess:
- return true, nil, nil
- default:
- return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
- }
-
- var msg userAuthInfoRequestMsg
- if err := Unmarshal(packet, &msg); err != nil {
- return false, nil, err
- }
-
- // Manually unpack the prompt/echo pairs.
- rest := msg.Prompts
- var prompts []string
- var echos []bool
- for i := 0; i < int(msg.NumPrompts); i++ {
- prompt, r, ok := parseString(rest)
- if !ok || len(r) == 0 {
- return false, nil, errors.New("ssh: prompt format error")
- }
- prompts = append(prompts, string(prompt))
- echos = append(echos, r[0] != 0)
- rest = r[1:]
- }
-
- if len(rest) != 0 {
- return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
- }
-
- answers, err := cb(msg.User, msg.Instruction, prompts, echos)
- if err != nil {
- return false, nil, err
- }
-
- if len(answers) != len(prompts) {
- return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback")
- }
- responseLength := 1 + 4
- for _, a := range answers {
- responseLength += stringLength(len(a))
- }
- serialized := make([]byte, responseLength)
- p := serialized
- p[0] = msgUserAuthInfoResponse
- p = p[1:]
- p = marshalUint32(p, uint32(len(answers)))
- for _, a := range answers {
- p = marshalString(p, []byte(a))
- }
-
- if err := c.writePacket(serialized); err != nil {
- return false, nil, err
- }
- }
-}
-
-type retryableAuthMethod struct {
- authMethod AuthMethod
- maxTries int
-}
-
-func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok bool, methods []string, err error) {
- for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ {
- ok, methods, err = r.authMethod.auth(session, user, c, rand)
- if ok || err != nil { // either success or error terminate
- return ok, methods, err
- }
- }
- return ok, methods, err
-}
-
-func (r *retryableAuthMethod) method() string {
- return r.authMethod.method()
-}
-
-// RetryableAuthMethod is a decorator for other auth methods enabling them to
-// be retried up to maxTries before considering that AuthMethod itself failed.
-// If maxTries is <= 0, will retry indefinitely
-//
-// This is useful for interactive clients using challenge/response type
-// authentication (e.g. Keyboard-Interactive, Password, etc) where the user
-// could mistype their response resulting in the server issuing a
-// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4
-// [keyboard-interactive]); Without this decorator, the non-retryable
-// AuthMethod would be removed from future consideration, and never tried again
-// (and so the user would never be able to retry their entry).
-func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod {
- return &retryableAuthMethod{authMethod: auth, maxTries: maxTries}
-}
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth_test.go b/vendor/golang.org/x/crypto/ssh/client_auth_test.go
deleted file mode 100644
index 1409276..0000000
--- a/vendor/golang.org/x/crypto/ssh/client_auth_test.go
+++ /dev/null
@@ -1,472 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "bytes"
- "crypto/rand"
- "errors"
- "fmt"
- "os"
- "strings"
- "testing"
-)
-
-type keyboardInteractive map[string]string
-
-func (cr keyboardInteractive) Challenge(user string, instruction string, questions []string, echos []bool) ([]string, error) {
- var answers []string
- for _, q := range questions {
- answers = append(answers, cr[q])
- }
- return answers, nil
-}
-
-// reused internally by tests
-var clientPassword = "tiger"
-
-// tryAuth runs a handshake with a given config against an SSH server
-// with config serverConfig
-func tryAuth(t *testing.T, config *ClientConfig) error {
- c1, c2, err := netPipe()
- if err != nil {
- t.Fatalf("netPipe: %v", err)
- }
- defer c1.Close()
- defer c2.Close()
-
- certChecker := CertChecker{
- IsAuthority: func(k PublicKey) bool {
- return bytes.Equal(k.Marshal(), testPublicKeys["ecdsa"].Marshal())
- },
- UserKeyFallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) {
- if conn.User() == "testuser" && bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) {
- return nil, nil
- }
-
- return nil, fmt.Errorf("pubkey for %q not acceptable", conn.User())
- },
- IsRevoked: func(c *Certificate) bool {
- return c.Serial == 666
- },
- }
-
- serverConfig := &ServerConfig{
- PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) {
- if conn.User() == "testuser" && string(pass) == clientPassword {
- return nil, nil
- }
- return nil, errors.New("password auth failed")
- },
- PublicKeyCallback: certChecker.Authenticate,
- KeyboardInteractiveCallback: func(conn ConnMetadata, challenge KeyboardInteractiveChallenge) (*Permissions, error) {
- ans, err := challenge("user",
- "instruction",
- []string{"question1", "question2"},
- []bool{true, true})
- if err != nil {
- return nil, err
- }
- ok := conn.User() == "testuser" && ans[0] == "answer1" && ans[1] == "answer2"
- if ok {
- challenge("user", "motd", nil, nil)
- return nil, nil
- }
- return nil, errors.New("keyboard-interactive failed")
- },
- AuthLogCallback: func(conn ConnMetadata, method string, err error) {
- t.Logf("user %q, method %q: %v", conn.User(), method, err)
- },
- }
- serverConfig.AddHostKey(testSigners["rsa"])
-
- go newServer(c1, serverConfig)
- _, _, _, err = NewClientConn(c2, "", config)
- return err
-}
-
-func TestClientAuthPublicKey(t *testing.T) {
- config := &ClientConfig{
- User: "testuser",
- Auth: []AuthMethod{
- PublicKeys(testSigners["rsa"]),
- },
- }
- if err := tryAuth(t, config); err != nil {
- t.Fatalf("unable to dial remote side: %s", err)
- }
-}
-
-func TestAuthMethodPassword(t *testing.T) {
- config := &ClientConfig{
- User: "testuser",
- Auth: []AuthMethod{
- Password(clientPassword),
- },
- }
-
- if err := tryAuth(t, config); err != nil {
- t.Fatalf("unable to dial remote side: %s", err)
- }
-}
-
-func TestAuthMethodFallback(t *testing.T) {
- var passwordCalled bool
- config := &ClientConfig{
- User: "testuser",
- Auth: []AuthMethod{
- PublicKeys(testSigners["rsa"]),
- PasswordCallback(
- func() (string, error) {
- passwordCalled = true
- return "WRONG", nil
- }),
- },
- }
-
- if err := tryAuth(t, config); err != nil {
- t.Fatalf("unable to dial remote side: %s", err)
- }
-
- if passwordCalled {
- t.Errorf("password auth tried before public-key auth.")
- }
-}
-
-func TestAuthMethodWrongPassword(t *testing.T) {
- config := &ClientConfig{
- User: "testuser",
- Auth: []AuthMethod{
- Password("wrong"),
- PublicKeys(testSigners["rsa"]),
- },
- }
-
- if err := tryAuth(t, config); err != nil {
- t.Fatalf("unable to dial remote side: %s", err)
- }
-}
-
-func TestAuthMethodKeyboardInteractive(t *testing.T) {
- answers := keyboardInteractive(map[string]string{
- "question1": "answer1",
- "question2": "answer2",
- })
- config := &ClientConfig{
- User: "testuser",
- Auth: []AuthMethod{
- KeyboardInteractive(answers.Challenge),
- },
- }
-
- if err := tryAuth(t, config); err != nil {
- t.Fatalf("unable to dial remote side: %s", err)
- }
-}
-
-func TestAuthMethodWrongKeyboardInteractive(t *testing.T) {
- answers := keyboardInteractive(map[string]string{
- "question1": "answer1",
- "question2": "WRONG",
- })
- config := &ClientConfig{
- User: "testuser",
- Auth: []AuthMethod{
- KeyboardInteractive(answers.Challenge),
- },
- }
-
- if err := tryAuth(t, config); err == nil {
- t.Fatalf("wrong answers should not have authenticated with KeyboardInteractive")
- }
-}
-
-// the mock server will only authenticate ssh-rsa keys
-func TestAuthMethodInvalidPublicKey(t *testing.T) {
- config := &ClientConfig{
- User: "testuser",
- Auth: []AuthMethod{
- PublicKeys(testSigners["dsa"]),
- },
- }
-
- if err := tryAuth(t, config); err == nil {
- t.Fatalf("dsa private key should not have authenticated with rsa public key")
- }
-}
-
-// the client should authenticate with the second key
-func TestAuthMethodRSAandDSA(t *testing.T) {
- config := &ClientConfig{
- User: "testuser",
- Auth: []AuthMethod{
- PublicKeys(testSigners["dsa"], testSigners["rsa"]),
- },
- }
- if err := tryAuth(t, config); err != nil {
- t.Fatalf("client could not authenticate with rsa key: %v", err)
- }
-}
-
-func TestClientHMAC(t *testing.T) {
- for _, mac := range supportedMACs {
- config := &ClientConfig{
- User: "testuser",
- Auth: []AuthMethod{
- PublicKeys(testSigners["rsa"]),
- },
- Config: Config{
- MACs: []string{mac},
- },
- }
- if err := tryAuth(t, config); err != nil {
- t.Fatalf("client could not authenticate with mac algo %s: %v", mac, err)
- }
- }
-}
-
-// issue 4285.
-func TestClientUnsupportedCipher(t *testing.T) {
- config := &ClientConfig{
- User: "testuser",
- Auth: []AuthMethod{
- PublicKeys(),
- },
- Config: Config{
- Ciphers: []string{"aes128-cbc"}, // not currently supported
- },
- }
- if err := tryAuth(t, config); err == nil {
- t.Errorf("expected no ciphers in common")
- }
-}
-
-func TestClientUnsupportedKex(t *testing.T) {
- if os.Getenv("GO_BUILDER_NAME") != "" {
- t.Skip("skipping known-flaky test on the Go build dashboard; see golang.org/issue/15198")
- }
- config := &ClientConfig{
- User: "testuser",
- Auth: []AuthMethod{
- PublicKeys(),
- },
- Config: Config{
- KeyExchanges: []string{"diffie-hellman-group-exchange-sha256"}, // not currently supported
- },
- }
- if err := tryAuth(t, config); err == nil || !strings.Contains(err.Error(), "common algorithm") {
- t.Errorf("got %v, expected 'common algorithm'", err)
- }
-}
-
-func TestClientLoginCert(t *testing.T) {
- cert := &Certificate{
- Key: testPublicKeys["rsa"],
- ValidBefore: CertTimeInfinity,
- CertType: UserCert,
- }
- cert.SignCert(rand.Reader, testSigners["ecdsa"])
- certSigner, err := NewCertSigner(cert, testSigners["rsa"])
- if err != nil {
- t.Fatalf("NewCertSigner: %v", err)
- }
-
- clientConfig := &ClientConfig{
- User: "user",
- }
- clientConfig.Auth = append(clientConfig.Auth, PublicKeys(certSigner))
-
- t.Log("should succeed")
- if err := tryAuth(t, clientConfig); err != nil {
- t.Errorf("cert login failed: %v", err)
- }
-
- t.Log("corrupted signature")
- cert.Signature.Blob[0]++
- if err := tryAuth(t, clientConfig); err == nil {
- t.Errorf("cert login passed with corrupted sig")
- }
-
- t.Log("revoked")
- cert.Serial = 666
- cert.SignCert(rand.Reader, testSigners["ecdsa"])
- if err := tryAuth(t, clientConfig); err == nil {
- t.Errorf("revoked cert login succeeded")
- }
- cert.Serial = 1
-
- t.Log("sign with wrong key")
- cert.SignCert(rand.Reader, testSigners["dsa"])
- if err := tryAuth(t, clientConfig); err == nil {
- t.Errorf("cert login passed with non-authoritative key")
- }
-
- t.Log("host cert")
- cert.CertType = HostCert
- cert.SignCert(rand.Reader, testSigners["ecdsa"])
- if err := tryAuth(t, clientConfig); err == nil {
- t.Errorf("cert login passed with wrong type")
- }
- cert.CertType = UserCert
-
- t.Log("principal specified")
- cert.ValidPrincipals = []string{"user"}
- cert.SignCert(rand.Reader, testSigners["ecdsa"])
- if err := tryAuth(t, clientConfig); err != nil {
- t.Errorf("cert login failed: %v", err)
- }
-
- t.Log("wrong principal specified")
- cert.ValidPrincipals = []string{"fred"}
- cert.SignCert(rand.Reader, testSigners["ecdsa"])
- if err := tryAuth(t, clientConfig); err == nil {
- t.Errorf("cert login passed with wrong principal")
- }
- cert.ValidPrincipals = nil
-
- t.Log("added critical option")
- cert.CriticalOptions = map[string]string{"root-access": "yes"}
- cert.SignCert(rand.Reader, testSigners["ecdsa"])
- if err := tryAuth(t, clientConfig); err == nil {
- t.Errorf("cert login passed with unrecognized critical option")
- }
-
- t.Log("allowed source address")
- cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42/24"}
- cert.SignCert(rand.Reader, testSigners["ecdsa"])
- if err := tryAuth(t, clientConfig); err != nil {
- t.Errorf("cert login with source-address failed: %v", err)
- }
-
- t.Log("disallowed source address")
- cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42"}
- cert.SignCert(rand.Reader, testSigners["ecdsa"])
- if err := tryAuth(t, clientConfig); err == nil {
- t.Errorf("cert login with source-address succeeded")
- }
-}
-
-func testPermissionsPassing(withPermissions bool, t *testing.T) {
- serverConfig := &ServerConfig{
- PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) {
- if conn.User() == "nopermissions" {
- return nil, nil
- } else {
- return &Permissions{}, nil
- }
- },
- }
- serverConfig.AddHostKey(testSigners["rsa"])
-
- clientConfig := &ClientConfig{
- Auth: []AuthMethod{
- PublicKeys(testSigners["rsa"]),
- },
- }
- if withPermissions {
- clientConfig.User = "permissions"
- } else {
- clientConfig.User = "nopermissions"
- }
-
- c1, c2, err := netPipe()
- if err != nil {
- t.Fatalf("netPipe: %v", err)
- }
- defer c1.Close()
- defer c2.Close()
-
- go NewClientConn(c2, "", clientConfig)
- serverConn, err := newServer(c1, serverConfig)
- if err != nil {
- t.Fatal(err)
- }
- if p := serverConn.Permissions; (p != nil) != withPermissions {
- t.Fatalf("withPermissions is %t, but Permissions object is %#v", withPermissions, p)
- }
-}
-
-func TestPermissionsPassing(t *testing.T) {
- testPermissionsPassing(true, t)
-}
-
-func TestNoPermissionsPassing(t *testing.T) {
- testPermissionsPassing(false, t)
-}
-
-func TestRetryableAuth(t *testing.T) {
- n := 0
- passwords := []string{"WRONG1", "WRONG2"}
-
- config := &ClientConfig{
- User: "testuser",
- Auth: []AuthMethod{
- RetryableAuthMethod(PasswordCallback(func() (string, error) {
- p := passwords[n]
- n++
- return p, nil
- }), 2),
- PublicKeys(testSigners["rsa"]),
- },
- }
-
- if err := tryAuth(t, config); err != nil {
- t.Fatalf("unable to dial remote side: %s", err)
- }
- if n != 2 {
- t.Fatalf("Did not try all passwords")
- }
-}
-
-func ExampleRetryableAuthMethod(t *testing.T) {
- user := "testuser"
- NumberOfPrompts := 3
-
- // Normally this would be a callback that prompts the user to answer the
- // provided questions
- Cb := func(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
- return []string{"answer1", "answer2"}, nil
- }
-
- config := &ClientConfig{
- User: user,
- Auth: []AuthMethod{
- RetryableAuthMethod(KeyboardInteractiveChallenge(Cb), NumberOfPrompts),
- },
- }
-
- if err := tryAuth(t, config); err != nil {
- t.Fatalf("unable to dial remote side: %s", err)
- }
-}
-
-// Test if username is received on server side when NoClientAuth is used
-func TestClientAuthNone(t *testing.T) {
- user := "testuser"
- serverConfig := &ServerConfig{
- NoClientAuth: true,
- }
- serverConfig.AddHostKey(testSigners["rsa"])
-
- clientConfig := &ClientConfig{
- User: user,
- }
-
- c1, c2, err := netPipe()
- if err != nil {
- t.Fatalf("netPipe: %v", err)
- }
- defer c1.Close()
- defer c2.Close()
-
- go NewClientConn(c2, "", clientConfig)
- serverConn, err := newServer(c1, serverConfig)
- if err != nil {
- t.Fatalf("newServer: %v", err)
- }
- if serverConn.User() != user {
- t.Fatalf("server: got %q, want %q", serverConn.User(), user)
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/client_test.go b/vendor/golang.org/x/crypto/ssh/client_test.go
deleted file mode 100644
index 1fe790c..0000000
--- a/vendor/golang.org/x/crypto/ssh/client_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "net"
- "testing"
-)
-
-func testClientVersion(t *testing.T, config *ClientConfig, expected string) {
- clientConn, serverConn := net.Pipe()
- defer clientConn.Close()
- receivedVersion := make(chan string, 1)
- go func() {
- version, err := readVersion(serverConn)
- if err != nil {
- receivedVersion <- ""
- } else {
- receivedVersion <- string(version)
- }
- serverConn.Close()
- }()
- NewClientConn(clientConn, "", config)
- actual := <-receivedVersion
- if actual != expected {
- t.Fatalf("got %s; want %s", actual, expected)
- }
-}
-
-func TestCustomClientVersion(t *testing.T) {
- version := "Test-Client-Version-0.0"
- testClientVersion(t, &ClientConfig{ClientVersion: version}, version)
-}
-
-func TestDefaultClientVersion(t *testing.T) {
- testClientVersion(t, &ClientConfig{}, packageVersion)
-}
diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go
deleted file mode 100644
index 2c72ab5..0000000
--- a/vendor/golang.org/x/crypto/ssh/common.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "crypto"
- "crypto/rand"
- "fmt"
- "io"
- "sync"
-
- _ "crypto/sha1"
- _ "crypto/sha256"
- _ "crypto/sha512"
-)
-
-// These are string constants in the SSH protocol.
-const (
- compressionNone = "none"
- serviceUserAuth = "ssh-userauth"
- serviceSSH = "ssh-connection"
-)
-
-// supportedCiphers specifies the supported ciphers in preference order.
-var supportedCiphers = []string{
- "aes128-ctr", "aes192-ctr", "aes256-ctr",
- "aes128-gcm@openssh.com",
- "arcfour256", "arcfour128",
-}
-
-// supportedKexAlgos specifies the supported key-exchange algorithms in
-// preference order.
-var supportedKexAlgos = []string{
- kexAlgoCurve25519SHA256,
- // P384 and P521 are not constant-time yet, but since we don't
- // reuse ephemeral keys, using them for ECDH should be OK.
- kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
- kexAlgoDH14SHA1, kexAlgoDH1SHA1,
-}
-
-// supportedKexAlgos specifies the supported host-key algorithms (i.e. methods
-// of authenticating servers) in preference order.
-var supportedHostKeyAlgos = []string{
- CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
- CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01,
-
- KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
- KeyAlgoRSA, KeyAlgoDSA,
-
- KeyAlgoED25519,
-}
-
-// supportedMACs specifies a default set of MAC algorithms in preference order.
-// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed
-// because they have reached the end of their useful life.
-var supportedMACs = []string{
- "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
-}
-
-var supportedCompressions = []string{compressionNone}
-
-// hashFuncs keeps the mapping of supported algorithms to their respective
-// hashes needed for signature verification.
-var hashFuncs = map[string]crypto.Hash{
- KeyAlgoRSA: crypto.SHA1,
- KeyAlgoDSA: crypto.SHA1,
- KeyAlgoECDSA256: crypto.SHA256,
- KeyAlgoECDSA384: crypto.SHA384,
- KeyAlgoECDSA521: crypto.SHA512,
- CertAlgoRSAv01: crypto.SHA1,
- CertAlgoDSAv01: crypto.SHA1,
- CertAlgoECDSA256v01: crypto.SHA256,
- CertAlgoECDSA384v01: crypto.SHA384,
- CertAlgoECDSA521v01: crypto.SHA512,
-}
-
-// unexpectedMessageError results when the SSH message that we received didn't
-// match what we wanted.
-func unexpectedMessageError(expected, got uint8) error {
- return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected)
-}
-
-// parseError results from a malformed SSH message.
-func parseError(tag uint8) error {
- return fmt.Errorf("ssh: parse error in message type %d", tag)
-}
-
-func findCommon(what string, client []string, server []string) (common string, err error) {
- for _, c := range client {
- for _, s := range server {
- if c == s {
- return c, nil
- }
- }
- }
- return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
-}
-
-type directionAlgorithms struct {
- Cipher string
- MAC string
- Compression string
-}
-
-type algorithms struct {
- kex string
- hostKey string
- w directionAlgorithms
- r directionAlgorithms
-}
-
-func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
- result := &algorithms{}
-
- result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
- if err != nil {
- return
- }
-
- result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
- if err != nil {
- return
- }
-
- result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
- if err != nil {
- return
- }
-
- result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
- if err != nil {
- return
- }
-
- result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
- if err != nil {
- return
- }
-
- result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
- if err != nil {
- return
- }
-
- result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
- if err != nil {
- return
- }
-
- result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
- if err != nil {
- return
- }
-
- return result, nil
-}
-
-// If rekeythreshold is too small, we can't make any progress sending
-// stuff.
-const minRekeyThreshold uint64 = 256
-
-// Config contains configuration data common to both ServerConfig and
-// ClientConfig.
-type Config struct {
- // Rand provides the source of entropy for cryptographic
- // primitives. If Rand is nil, the cryptographic random reader
- // in package crypto/rand will be used.
- Rand io.Reader
-
- // The maximum number of bytes sent or received after which a
- // new key is negotiated. It must be at least 256. If
- // unspecified, 1 gigabyte is used.
- RekeyThreshold uint64
-
- // The allowed key exchanges algorithms. If unspecified then a
- // default set of algorithms is used.
- KeyExchanges []string
-
- // The allowed cipher algorithms. If unspecified then a sensible
- // default is used.
- Ciphers []string
-
- // The allowed MAC algorithms. If unspecified then a sensible default
- // is used.
- MACs []string
-}
-
-// SetDefaults sets sensible values for unset fields in config. This is
-// exported for testing: Configs passed to SSH functions are copied and have
-// default values set automatically.
-func (c *Config) SetDefaults() {
- if c.Rand == nil {
- c.Rand = rand.Reader
- }
- if c.Ciphers == nil {
- c.Ciphers = supportedCiphers
- }
- var ciphers []string
- for _, c := range c.Ciphers {
- if cipherModes[c] != nil {
- // reject the cipher if we have no cipherModes definition
- ciphers = append(ciphers, c)
- }
- }
- c.Ciphers = ciphers
-
- if c.KeyExchanges == nil {
- c.KeyExchanges = supportedKexAlgos
- }
-
- if c.MACs == nil {
- c.MACs = supportedMACs
- }
-
- if c.RekeyThreshold == 0 {
- // RFC 4253, section 9 suggests rekeying after 1G.
- c.RekeyThreshold = 1 << 30
- }
- if c.RekeyThreshold < minRekeyThreshold {
- c.RekeyThreshold = minRekeyThreshold
- }
-}
-
-// buildDataSignedForAuth returns the data that is signed in order to prove
-// possession of a private key. See RFC 4252, section 7.
-func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte {
- data := struct {
- Session []byte
- Type byte
- User string
- Service string
- Method string
- Sign bool
- Algo []byte
- PubKey []byte
- }{
- sessionId,
- msgUserAuthRequest,
- req.User,
- req.Service,
- req.Method,
- true,
- algo,
- pubKey,
- }
- return Marshal(data)
-}
-
-func appendU16(buf []byte, n uint16) []byte {
- return append(buf, byte(n>>8), byte(n))
-}
-
-func appendU32(buf []byte, n uint32) []byte {
- return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
-}
-
-func appendU64(buf []byte, n uint64) []byte {
- return append(buf,
- byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32),
- byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
-}
-
-func appendInt(buf []byte, n int) []byte {
- return appendU32(buf, uint32(n))
-}
-
-func appendString(buf []byte, s string) []byte {
- buf = appendU32(buf, uint32(len(s)))
- buf = append(buf, s...)
- return buf
-}
-
-func appendBool(buf []byte, b bool) []byte {
- if b {
- return append(buf, 1)
- }
- return append(buf, 0)
-}
-
-// newCond is a helper to hide the fact that there is no usable zero
-// value for sync.Cond.
-func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) }
-
-// window represents the buffer available to clients
-// wishing to write to a channel.
-type window struct {
- *sync.Cond
- win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1
- writeWaiters int
- closed bool
-}
-
-// add adds win to the amount of window available
-// for consumers.
-func (w *window) add(win uint32) bool {
- // a zero sized window adjust is a noop.
- if win == 0 {
- return true
- }
- w.L.Lock()
- if w.win+win < win {
- w.L.Unlock()
- return false
- }
- w.win += win
- // It is unusual that multiple goroutines would be attempting to reserve
- // window space, but not guaranteed. Use broadcast to notify all waiters
- // that additional window is available.
- w.Broadcast()
- w.L.Unlock()
- return true
-}
-
-// close sets the window to closed, so all reservations fail
-// immediately.
-func (w *window) close() {
- w.L.Lock()
- w.closed = true
- w.Broadcast()
- w.L.Unlock()
-}
-
-// reserve reserves win from the available window capacity.
-// If no capacity remains, reserve will block. reserve may
-// return less than requested.
-func (w *window) reserve(win uint32) (uint32, error) {
- var err error
- w.L.Lock()
- w.writeWaiters++
- w.Broadcast()
- for w.win == 0 && !w.closed {
- w.Wait()
- }
- w.writeWaiters--
- if w.win < win {
- win = w.win
- }
- w.win -= win
- if w.closed {
- err = io.EOF
- }
- w.L.Unlock()
- return win, err
-}
-
-// waitWriterBlocked waits until some goroutine is blocked for further
-// writes. It is used in tests only.
-func (w *window) waitWriterBlocked() {
- w.Cond.L.Lock()
- for w.writeWaiters == 0 {
- w.Cond.Wait()
- }
- w.Cond.L.Unlock()
-}
diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go
deleted file mode 100644
index e786f2f..0000000
--- a/vendor/golang.org/x/crypto/ssh/connection.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "fmt"
- "net"
-)
-
-// OpenChannelError is returned if the other side rejects an
-// OpenChannel request.
-type OpenChannelError struct {
- Reason RejectionReason
- Message string
-}
-
-func (e *OpenChannelError) Error() string {
- return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message)
-}
-
-// ConnMetadata holds metadata for the connection.
-type ConnMetadata interface {
- // User returns the user ID for this connection.
- User() string
-
- // SessionID returns the sesson hash, also denoted by H.
- SessionID() []byte
-
- // ClientVersion returns the client's version string as hashed
- // into the session ID.
- ClientVersion() []byte
-
- // ServerVersion returns the server's version string as hashed
- // into the session ID.
- ServerVersion() []byte
-
- // RemoteAddr returns the remote address for this connection.
- RemoteAddr() net.Addr
-
- // LocalAddr returns the local address for this connection.
- LocalAddr() net.Addr
-}
-
-// Conn represents an SSH connection for both server and client roles.
-// Conn is the basis for implementing an application layer, such
-// as ClientConn, which implements the traditional shell access for
-// clients.
-type Conn interface {
- ConnMetadata
-
- // SendRequest sends a global request, and returns the
- // reply. If wantReply is true, it returns the response status
- // and payload. See also RFC4254, section 4.
- SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error)
-
- // OpenChannel tries to open an channel. If the request is
- // rejected, it returns *OpenChannelError. On success it returns
- // the SSH Channel and a Go channel for incoming, out-of-band
- // requests. The Go channel must be serviced, or the
- // connection will hang.
- OpenChannel(name string, data []byte) (Channel, <-chan *Request, error)
-
- // Close closes the underlying network connection
- Close() error
-
- // Wait blocks until the connection has shut down, and returns the
- // error causing the shutdown.
- Wait() error
-
- // TODO(hanwen): consider exposing:
- // RequestKeyChange
- // Disconnect
-}
-
-// DiscardRequests consumes and rejects all requests from the
-// passed-in channel.
-func DiscardRequests(in <-chan *Request) {
- for req := range in {
- if req.WantReply {
- req.Reply(false, nil)
- }
- }
-}
-
-// A connection represents an incoming connection.
-type connection struct {
- transport *handshakeTransport
- sshConn
-
- // The connection protocol.
- *mux
-}
-
-func (c *connection) Close() error {
- return c.sshConn.conn.Close()
-}
-
-// sshconn provides net.Conn metadata, but disallows direct reads and
-// writes.
-type sshConn struct {
- conn net.Conn
-
- user string
- sessionID []byte
- clientVersion []byte
- serverVersion []byte
-}
-
-func dup(src []byte) []byte {
- dst := make([]byte, len(src))
- copy(dst, src)
- return dst
-}
-
-func (c *sshConn) User() string {
- return c.user
-}
-
-func (c *sshConn) RemoteAddr() net.Addr {
- return c.conn.RemoteAddr()
-}
-
-func (c *sshConn) Close() error {
- return c.conn.Close()
-}
-
-func (c *sshConn) LocalAddr() net.Addr {
- return c.conn.LocalAddr()
-}
-
-func (c *sshConn) SessionID() []byte {
- return dup(c.sessionID)
-}
-
-func (c *sshConn) ClientVersion() []byte {
- return dup(c.clientVersion)
-}
-
-func (c *sshConn) ServerVersion() []byte {
- return dup(c.serverVersion)
-}
diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go
deleted file mode 100644
index d6be894..0000000
--- a/vendor/golang.org/x/crypto/ssh/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package ssh implements an SSH client and server.
-
-SSH is a transport security protocol, an authentication protocol and a
-family of application protocols. The most typical application level
-protocol is a remote shell and this is specifically implemented. However,
-the multiplexed nature of SSH is exposed to users that wish to support
-others.
-
-References:
- [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
- [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
-*/
-package ssh // import "golang.org/x/crypto/ssh"
diff --git a/vendor/golang.org/x/crypto/ssh/example_test.go b/vendor/golang.org/x/crypto/ssh/example_test.go
deleted file mode 100644
index 4d2eabd..0000000
--- a/vendor/golang.org/x/crypto/ssh/example_test.go
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh_test
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "log"
- "net"
- "net/http"
-
- "golang.org/x/crypto/ssh"
- "golang.org/x/crypto/ssh/terminal"
-)
-
-func ExampleNewServerConn() {
- // Public key authentication is done by comparing
- // the public key of a received connection
- // with the entries in the authorized_keys file.
- authorizedKeysBytes, err := ioutil.ReadFile("authorized_keys")
- if err != nil {
- log.Fatalf("Failed to load authorized_keys, err: %v", err)
- }
-
- authorizedKeysMap := map[string]bool{}
- for len(authorizedKeysBytes) > 0 {
- pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(authorizedKeysBytes)
- if err != nil {
- log.Fatal(err)
- }
-
- authorizedKeysMap[string(pubKey.Marshal())] = true
- authorizedKeysBytes = rest
- }
-
- // An SSH server is represented by a ServerConfig, which holds
- // certificate details and handles authentication of ServerConns.
- config := &ssh.ServerConfig{
- // Remove to disable password auth.
- PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
- // Should use constant-time compare (or better, salt+hash) in
- // a production setting.
- if c.User() == "testuser" && string(pass) == "tiger" {
- return nil, nil
- }
- return nil, fmt.Errorf("password rejected for %q", c.User())
- },
-
- // Remove to disable public key auth.
- PublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) {
- if authorizedKeysMap[string(pubKey.Marshal())] {
- return nil, nil
- }
- return nil, fmt.Errorf("unknown public key for %q", c.User())
- },
- }
-
- privateBytes, err := ioutil.ReadFile("id_rsa")
- if err != nil {
- log.Fatal("Failed to load private key: ", err)
- }
-
- private, err := ssh.ParsePrivateKey(privateBytes)
- if err != nil {
- log.Fatal("Failed to parse private key: ", err)
- }
-
- config.AddHostKey(private)
-
- // Once a ServerConfig has been configured, connections can be
- // accepted.
- listener, err := net.Listen("tcp", "0.0.0.0:2022")
- if err != nil {
- log.Fatal("failed to listen for connection: ", err)
- }
- nConn, err := listener.Accept()
- if err != nil {
- log.Fatal("failed to accept incoming connection: ", err)
- }
-
- // Before use, a handshake must be performed on the incoming
- // net.Conn.
- _, chans, reqs, err := ssh.NewServerConn(nConn, config)
- if err != nil {
- log.Fatal("failed to handshake: ", err)
- }
- // The incoming Request channel must be serviced.
- go ssh.DiscardRequests(reqs)
-
- // Service the incoming Channel channel.
-
- // Service the incoming Channel channel.
- for newChannel := range chans {
- // Channels have a type, depending on the application level
- // protocol intended. In the case of a shell, the type is
- // "session" and ServerShell may be used to present a simple
- // terminal interface.
- if newChannel.ChannelType() != "session" {
- newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
- continue
- }
- channel, requests, err := newChannel.Accept()
- if err != nil {
- log.Fatalf("Could not accept channel: %v", err)
- }
-
- // Sessions have out-of-band requests such as "shell",
- // "pty-req" and "env". Here we handle only the
- // "shell" request.
- go func(in <-chan *ssh.Request) {
- for req := range in {
- req.Reply(req.Type == "shell", nil)
- }
- }(requests)
-
- term := terminal.NewTerminal(channel, "> ")
-
- go func() {
- defer channel.Close()
- for {
- line, err := term.ReadLine()
- if err != nil {
- break
- }
- fmt.Println(line)
- }
- }()
- }
-}
-
-func ExampleDial() {
- // An SSH client is represented with a ClientConn.
- //
- // To authenticate with the remote server you must pass at least one
- // implementation of AuthMethod via the Auth field in ClientConfig.
- config := &ssh.ClientConfig{
- User: "username",
- Auth: []ssh.AuthMethod{
- ssh.Password("yourpassword"),
- },
- }
- client, err := ssh.Dial("tcp", "yourserver.com:22", config)
- if err != nil {
- log.Fatal("Failed to dial: ", err)
- }
-
- // Each ClientConn can support multiple interactive sessions,
- // represented by a Session.
- session, err := client.NewSession()
- if err != nil {
- log.Fatal("Failed to create session: ", err)
- }
- defer session.Close()
-
- // Once a Session is created, you can execute a single command on
- // the remote side using the Run method.
- var b bytes.Buffer
- session.Stdout = &b
- if err := session.Run("/usr/bin/whoami"); err != nil {
- log.Fatal("Failed to run: " + err.Error())
- }
- fmt.Println(b.String())
-}
-
-func ExamplePublicKeys() {
- // A public key may be used to authenticate against the remote
- // server by using an unencrypted PEM-encoded private key file.
- //
- // If you have an encrypted private key, the crypto/x509 package
- // can be used to decrypt it.
- key, err := ioutil.ReadFile("/home/user/.ssh/id_rsa")
- if err != nil {
- log.Fatalf("unable to read private key: %v", err)
- }
-
- // Create the Signer for this private key.
- signer, err := ssh.ParsePrivateKey(key)
- if err != nil {
- log.Fatalf("unable to parse private key: %v", err)
- }
-
- config := &ssh.ClientConfig{
- User: "user",
- Auth: []ssh.AuthMethod{
- // Use the PublicKeys method for remote authentication.
- ssh.PublicKeys(signer),
- },
- }
-
- // Connect to the remote server and perform the SSH handshake.
- client, err := ssh.Dial("tcp", "host.com:22", config)
- if err != nil {
- log.Fatalf("unable to connect: %v", err)
- }
- defer client.Close()
-}
-
-func ExampleClient_Listen() {
- config := &ssh.ClientConfig{
- User: "username",
- Auth: []ssh.AuthMethod{
- ssh.Password("password"),
- },
- }
- // Dial your ssh server.
- conn, err := ssh.Dial("tcp", "localhost:22", config)
- if err != nil {
- log.Fatal("unable to connect: ", err)
- }
- defer conn.Close()
-
- // Request the remote side to open port 8080 on all interfaces.
- l, err := conn.Listen("tcp", "0.0.0.0:8080")
- if err != nil {
- log.Fatal("unable to register tcp forward: ", err)
- }
- defer l.Close()
-
- // Serve HTTP with your SSH server acting as a reverse proxy.
- http.Serve(l, http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
- fmt.Fprintf(resp, "Hello world!\n")
- }))
-}
-
-func ExampleSession_RequestPty() {
- // Create client config
- config := &ssh.ClientConfig{
- User: "username",
- Auth: []ssh.AuthMethod{
- ssh.Password("password"),
- },
- }
- // Connect to ssh server
- conn, err := ssh.Dial("tcp", "localhost:22", config)
- if err != nil {
- log.Fatal("unable to connect: ", err)
- }
- defer conn.Close()
- // Create a session
- session, err := conn.NewSession()
- if err != nil {
- log.Fatal("unable to create session: ", err)
- }
- defer session.Close()
- // Set up terminal modes
- modes := ssh.TerminalModes{
- ssh.ECHO: 0, // disable echoing
- ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud
- ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud
- }
- // Request pseudo terminal
- if err := session.RequestPty("xterm", 40, 80, modes); err != nil {
- log.Fatal("request for pseudo terminal failed: ", err)
- }
- // Start remote shell
- if err := session.Shell(); err != nil {
- log.Fatal("failed to start shell: ", err)
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
deleted file mode 100644
index 37d42e4..0000000
--- a/vendor/golang.org/x/crypto/ssh/handshake.go
+++ /dev/null
@@ -1,460 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "crypto/rand"
- "errors"
- "fmt"
- "io"
- "log"
- "net"
- "sync"
-)
-
-// debugHandshake, if set, prints messages sent and received. Key
-// exchange messages are printed as if DH were used, so the debug
-// messages are wrong when using ECDH.
-const debugHandshake = false
-
-// keyingTransport is a packet based transport that supports key
-// changes. It need not be thread-safe. It should pass through
-// msgNewKeys in both directions.
-type keyingTransport interface {
- packetConn
-
- // prepareKeyChange sets up a key change. The key change for a
- // direction will be effected if a msgNewKeys message is sent
- // or received.
- prepareKeyChange(*algorithms, *kexResult) error
-}
-
-// handshakeTransport implements rekeying on top of a keyingTransport
-// and offers a thread-safe writePacket() interface.
-type handshakeTransport struct {
- conn keyingTransport
- config *Config
-
- serverVersion []byte
- clientVersion []byte
-
- // hostKeys is non-empty if we are the server. In that case,
- // it contains all host keys that can be used to sign the
- // connection.
- hostKeys []Signer
-
- // hostKeyAlgorithms is non-empty if we are the client. In that case,
- // we accept these key types from the server as host key.
- hostKeyAlgorithms []string
-
- // On read error, incoming is closed, and readError is set.
- incoming chan []byte
- readError error
-
- // data for host key checking
- hostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
- dialAddress string
- remoteAddr net.Addr
-
- readSinceKex uint64
-
- // Protects the writing side of the connection
- mu sync.Mutex
- cond *sync.Cond
- sentInitPacket []byte
- sentInitMsg *kexInitMsg
- writtenSinceKex uint64
- writeError error
-
- // The session ID or nil if first kex did not complete yet.
- sessionID []byte
-}
-
-func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport {
- t := &handshakeTransport{
- conn: conn,
- serverVersion: serverVersion,
- clientVersion: clientVersion,
- incoming: make(chan []byte, 16),
- config: config,
- }
- t.cond = sync.NewCond(&t.mu)
- return t
-}
-
-func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport {
- t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
- t.dialAddress = dialAddr
- t.remoteAddr = addr
- t.hostKeyCallback = config.HostKeyCallback
- if config.HostKeyAlgorithms != nil {
- t.hostKeyAlgorithms = config.HostKeyAlgorithms
- } else {
- t.hostKeyAlgorithms = supportedHostKeyAlgos
- }
- go t.readLoop()
- return t
-}
-
-func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport {
- t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
- t.hostKeys = config.hostKeys
- go t.readLoop()
- return t
-}
-
-func (t *handshakeTransport) getSessionID() []byte {
- return t.sessionID
-}
-
-func (t *handshakeTransport) id() string {
- if len(t.hostKeys) > 0 {
- return "server"
- }
- return "client"
-}
-
-func (t *handshakeTransport) readPacket() ([]byte, error) {
- p, ok := <-t.incoming
- if !ok {
- return nil, t.readError
- }
- return p, nil
-}
-
-func (t *handshakeTransport) readLoop() {
- for {
- p, err := t.readOnePacket()
- if err != nil {
- t.readError = err
- close(t.incoming)
- break
- }
- if p[0] == msgIgnore || p[0] == msgDebug {
- continue
- }
- t.incoming <- p
- }
-
- // If we can't read, declare the writing part dead too.
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.writeError == nil {
- t.writeError = t.readError
- }
- t.cond.Broadcast()
-}
-
-func (t *handshakeTransport) readOnePacket() ([]byte, error) {
- if t.readSinceKex > t.config.RekeyThreshold {
- if err := t.requestKeyChange(); err != nil {
- return nil, err
- }
- }
-
- p, err := t.conn.readPacket()
- if err != nil {
- return nil, err
- }
-
- t.readSinceKex += uint64(len(p))
- if debugHandshake {
- if p[0] == msgChannelData || p[0] == msgChannelExtendedData {
- log.Printf("%s got data (packet %d bytes)", t.id(), len(p))
- } else {
- msg, err := decode(p)
- log.Printf("%s got %T %v (%v)", t.id(), msg, msg, err)
- }
- }
- if p[0] != msgKexInit {
- return p, nil
- }
-
- t.mu.Lock()
-
- firstKex := t.sessionID == nil
-
- err = t.enterKeyExchangeLocked(p)
- if err != nil {
- // drop connection
- t.conn.Close()
- t.writeError = err
- }
-
- if debugHandshake {
- log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err)
- }
-
- // Unblock writers.
- t.sentInitMsg = nil
- t.sentInitPacket = nil
- t.cond.Broadcast()
- t.writtenSinceKex = 0
- t.mu.Unlock()
-
- if err != nil {
- return nil, err
- }
-
- t.readSinceKex = 0
-
- // By default, a key exchange is hidden from higher layers by
- // translating it into msgIgnore.
- successPacket := []byte{msgIgnore}
- if firstKex {
- // sendKexInit() for the first kex waits for
- // msgNewKeys so the authentication process is
- // guaranteed to happen over an encrypted transport.
- successPacket = []byte{msgNewKeys}
- }
-
- return successPacket, nil
-}
-
-// keyChangeCategory describes whether a key exchange is the first on a
-// connection, or a subsequent one.
-type keyChangeCategory bool
-
-const (
- firstKeyExchange keyChangeCategory = true
- subsequentKeyExchange keyChangeCategory = false
-)
-
-// sendKexInit sends a key change message, and returns the message
-// that was sent. After initiating the key change, all writes will be
-// blocked until the change is done, and a failed key change will
-// close the underlying transport. This function is safe for
-// concurrent use by multiple goroutines.
-func (t *handshakeTransport) sendKexInit(isFirst keyChangeCategory) error {
- var err error
-
- t.mu.Lock()
- // If this is the initial key change, but we already have a sessionID,
- // then do nothing because the key exchange has already completed
- // asynchronously.
- if !isFirst || t.sessionID == nil {
- _, _, err = t.sendKexInitLocked(isFirst)
- }
- t.mu.Unlock()
- if err != nil {
- return err
- }
- if isFirst {
- if packet, err := t.readPacket(); err != nil {
- return err
- } else if packet[0] != msgNewKeys {
- return unexpectedMessageError(msgNewKeys, packet[0])
- }
- }
- return nil
-}
-
-func (t *handshakeTransport) requestInitialKeyChange() error {
- return t.sendKexInit(firstKeyExchange)
-}
-
-func (t *handshakeTransport) requestKeyChange() error {
- return t.sendKexInit(subsequentKeyExchange)
-}
-
-// sendKexInitLocked sends a key change message. t.mu must be locked
-// while this happens.
-func (t *handshakeTransport) sendKexInitLocked(isFirst keyChangeCategory) (*kexInitMsg, []byte, error) {
- // kexInits may be sent either in response to the other side,
- // or because our side wants to initiate a key change, so we
- // may have already sent a kexInit. In that case, don't send a
- // second kexInit.
- if t.sentInitMsg != nil {
- return t.sentInitMsg, t.sentInitPacket, nil
- }
-
- msg := &kexInitMsg{
- KexAlgos: t.config.KeyExchanges,
- CiphersClientServer: t.config.Ciphers,
- CiphersServerClient: t.config.Ciphers,
- MACsClientServer: t.config.MACs,
- MACsServerClient: t.config.MACs,
- CompressionClientServer: supportedCompressions,
- CompressionServerClient: supportedCompressions,
- }
- io.ReadFull(rand.Reader, msg.Cookie[:])
-
- if len(t.hostKeys) > 0 {
- for _, k := range t.hostKeys {
- msg.ServerHostKeyAlgos = append(
- msg.ServerHostKeyAlgos, k.PublicKey().Type())
- }
- } else {
- msg.ServerHostKeyAlgos = t.hostKeyAlgorithms
- }
- packet := Marshal(msg)
-
- // writePacket destroys the contents, so save a copy.
- packetCopy := make([]byte, len(packet))
- copy(packetCopy, packet)
-
- if err := t.conn.writePacket(packetCopy); err != nil {
- return nil, nil, err
- }
-
- t.sentInitMsg = msg
- t.sentInitPacket = packet
- return msg, packet, nil
-}
-
-func (t *handshakeTransport) writePacket(p []byte) error {
- t.mu.Lock()
- defer t.mu.Unlock()
-
- if t.writtenSinceKex > t.config.RekeyThreshold {
- t.sendKexInitLocked(subsequentKeyExchange)
- }
- for t.sentInitMsg != nil && t.writeError == nil {
- t.cond.Wait()
- }
- if t.writeError != nil {
- return t.writeError
- }
- t.writtenSinceKex += uint64(len(p))
-
- switch p[0] {
- case msgKexInit:
- return errors.New("ssh: only handshakeTransport can send kexInit")
- case msgNewKeys:
- return errors.New("ssh: only handshakeTransport can send newKeys")
- default:
- return t.conn.writePacket(p)
- }
-}
-
-func (t *handshakeTransport) Close() error {
- return t.conn.Close()
-}
-
-// enterKeyExchange runs the key exchange. t.mu must be held while running this.
-func (t *handshakeTransport) enterKeyExchangeLocked(otherInitPacket []byte) error {
- if debugHandshake {
- log.Printf("%s entered key exchange", t.id())
- }
- myInit, myInitPacket, err := t.sendKexInitLocked(subsequentKeyExchange)
- if err != nil {
- return err
- }
-
- otherInit := &kexInitMsg{}
- if err := Unmarshal(otherInitPacket, otherInit); err != nil {
- return err
- }
-
- magics := handshakeMagics{
- clientVersion: t.clientVersion,
- serverVersion: t.serverVersion,
- clientKexInit: otherInitPacket,
- serverKexInit: myInitPacket,
- }
-
- clientInit := otherInit
- serverInit := myInit
- if len(t.hostKeys) == 0 {
- clientInit = myInit
- serverInit = otherInit
-
- magics.clientKexInit = myInitPacket
- magics.serverKexInit = otherInitPacket
- }
-
- algs, err := findAgreedAlgorithms(clientInit, serverInit)
- if err != nil {
- return err
- }
-
- // We don't send FirstKexFollows, but we handle receiving it.
- //
- // RFC 4253 section 7 defines the kex and the agreement method for
- // first_kex_packet_follows. It states that the guessed packet
- // should be ignored if the "kex algorithm and/or the host
- // key algorithm is guessed wrong (server and client have
- // different preferred algorithm), or if any of the other
- // algorithms cannot be agreed upon". The other algorithms have
- // already been checked above so the kex algorithm and host key
- // algorithm are checked here.
- if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) {
- // other side sent a kex message for the wrong algorithm,
- // which we have to ignore.
- if _, err := t.conn.readPacket(); err != nil {
- return err
- }
- }
-
- kex, ok := kexAlgoMap[algs.kex]
- if !ok {
- return fmt.Errorf("ssh: unexpected key exchange algorithm %v", algs.kex)
- }
-
- var result *kexResult
- if len(t.hostKeys) > 0 {
- result, err = t.server(kex, algs, &magics)
- } else {
- result, err = t.client(kex, algs, &magics)
- }
-
- if err != nil {
- return err
- }
-
- if t.sessionID == nil {
- t.sessionID = result.H
- }
- result.SessionID = t.sessionID
-
- t.conn.prepareKeyChange(algs, result)
- if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
- return err
- }
- if packet, err := t.conn.readPacket(); err != nil {
- return err
- } else if packet[0] != msgNewKeys {
- return unexpectedMessageError(msgNewKeys, packet[0])
- }
-
- return nil
-}
-
-func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
- var hostKey Signer
- for _, k := range t.hostKeys {
- if algs.hostKey == k.PublicKey().Type() {
- hostKey = k
- }
- }
-
- r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey)
- return r, err
-}
-
-func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
- result, err := kex.Client(t.conn, t.config.Rand, magics)
- if err != nil {
- return nil, err
- }
-
- hostKey, err := ParsePublicKey(result.HostKey)
- if err != nil {
- return nil, err
- }
-
- if err := verifyHostKeySignature(hostKey, result); err != nil {
- return nil, err
- }
-
- if t.hostKeyCallback != nil {
- err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey)
- if err != nil {
- return nil, err
- }
- }
-
- return result, nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/handshake_test.go b/vendor/golang.org/x/crypto/ssh/handshake_test.go
deleted file mode 100644
index da53d3a..0000000
--- a/vendor/golang.org/x/crypto/ssh/handshake_test.go
+++ /dev/null
@@ -1,486 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "bytes"
- "crypto/rand"
- "errors"
- "fmt"
- "net"
- "reflect"
- "runtime"
- "strings"
- "sync"
- "testing"
-)
-
-type testChecker struct {
- calls []string
-}
-
-func (t *testChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error {
- if dialAddr == "bad" {
- return fmt.Errorf("dialAddr is bad")
- }
-
- if tcpAddr, ok := addr.(*net.TCPAddr); !ok || tcpAddr == nil {
- return fmt.Errorf("testChecker: got %T want *net.TCPAddr", addr)
- }
-
- t.calls = append(t.calls, fmt.Sprintf("%s %v %s %x", dialAddr, addr, key.Type(), key.Marshal()))
-
- return nil
-}
-
-// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and
-// therefore is buffered (net.Pipe deadlocks if both sides start with
-// a write.)
-func netPipe() (net.Conn, net.Conn, error) {
- listener, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- return nil, nil, err
- }
- defer listener.Close()
- c1, err := net.Dial("tcp", listener.Addr().String())
- if err != nil {
- return nil, nil, err
- }
-
- c2, err := listener.Accept()
- if err != nil {
- c1.Close()
- return nil, nil, err
- }
-
- return c1, c2, nil
-}
-
-func handshakePair(clientConf *ClientConfig, addr string) (client *handshakeTransport, server *handshakeTransport, err error) {
- a, b, err := netPipe()
- if err != nil {
- return nil, nil, err
- }
-
- trC := newTransport(a, rand.Reader, true)
- trS := newTransport(b, rand.Reader, false)
- clientConf.SetDefaults()
-
- v := []byte("version")
- client = newClientTransport(trC, v, v, clientConf, addr, a.RemoteAddr())
-
- serverConf := &ServerConfig{}
- serverConf.AddHostKey(testSigners["ecdsa"])
- serverConf.AddHostKey(testSigners["rsa"])
- serverConf.SetDefaults()
- server = newServerTransport(trS, v, v, serverConf)
-
- return client, server, nil
-}
-
-func TestHandshakeBasic(t *testing.T) {
- if runtime.GOOS == "plan9" {
- t.Skip("see golang.org/issue/7237")
- }
- checker := &testChecker{}
- trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr")
- if err != nil {
- t.Fatalf("handshakePair: %v", err)
- }
-
- defer trC.Close()
- defer trS.Close()
-
- go func() {
- // Client writes a bunch of stuff, and does a key
- // change in the middle. This should not confuse the
- // handshake in progress
- for i := 0; i < 10; i++ {
- p := []byte{msgRequestSuccess, byte(i)}
- if err := trC.writePacket(p); err != nil {
- t.Fatalf("sendPacket: %v", err)
- }
- if i == 5 {
- // halfway through, we request a key change.
- err := trC.sendKexInit(subsequentKeyExchange)
- if err != nil {
- t.Fatalf("sendKexInit: %v", err)
- }
- }
- }
- trC.Close()
- }()
-
- // Server checks that client messages come in cleanly
- i := 0
- for {
- p, err := trS.readPacket()
- if err != nil {
- break
- }
- if p[0] == msgNewKeys {
- continue
- }
- want := []byte{msgRequestSuccess, byte(i)}
- if bytes.Compare(p, want) != 0 {
- t.Errorf("message %d: got %q, want %q", i, p, want)
- }
- i++
- }
- if i != 10 {
- t.Errorf("received %d messages, want 10.", i)
- }
-
- // If all went well, we registered exactly 1 key change.
- if len(checker.calls) != 1 {
- t.Fatalf("got %d host key checks, want 1", len(checker.calls))
- }
-
- pub := testSigners["ecdsa"].PublicKey()
- want := fmt.Sprintf("%s %v %s %x", "addr", trC.remoteAddr, pub.Type(), pub.Marshal())
- if want != checker.calls[0] {
- t.Errorf("got %q want %q for host key check", checker.calls[0], want)
- }
-}
-
-func TestHandshakeError(t *testing.T) {
- checker := &testChecker{}
- trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "bad")
- if err != nil {
- t.Fatalf("handshakePair: %v", err)
- }
- defer trC.Close()
- defer trS.Close()
-
- // send a packet
- packet := []byte{msgRequestSuccess, 42}
- if err := trC.writePacket(packet); err != nil {
- t.Errorf("writePacket: %v", err)
- }
-
- // Now request a key change.
- err = trC.sendKexInit(subsequentKeyExchange)
- if err != nil {
- t.Errorf("sendKexInit: %v", err)
- }
-
- // the key change will fail, and afterwards we can't write.
- if err := trC.writePacket([]byte{msgRequestSuccess, 43}); err == nil {
- t.Errorf("writePacket after botched rekey succeeded.")
- }
-
- readback, err := trS.readPacket()
- if err != nil {
- t.Fatalf("server closed too soon: %v", err)
- }
- if bytes.Compare(readback, packet) != 0 {
- t.Errorf("got %q want %q", readback, packet)
- }
- readback, err = trS.readPacket()
- if err == nil {
- t.Errorf("got a message %q after failed key change", readback)
- }
-}
-
-func TestForceFirstKex(t *testing.T) {
- checker := &testChecker{}
- trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr")
- if err != nil {
- t.Fatalf("handshakePair: %v", err)
- }
-
- defer trC.Close()
- defer trS.Close()
-
- trC.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth}))
-
- // We setup the initial key exchange, but the remote side
- // tries to send serviceRequestMsg in cleartext, which is
- // disallowed.
-
- err = trS.sendKexInit(firstKeyExchange)
- if err == nil {
- t.Errorf("server first kex init should reject unexpected packet")
- }
-}
-
-func TestHandshakeTwice(t *testing.T) {
- checker := &testChecker{}
- trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr")
- if err != nil {
- t.Fatalf("handshakePair: %v", err)
- }
-
- defer trC.Close()
- defer trS.Close()
-
- // Both sides should ask for the first key exchange first.
- err = trS.sendKexInit(firstKeyExchange)
- if err != nil {
- t.Errorf("server sendKexInit: %v", err)
- }
-
- err = trC.sendKexInit(firstKeyExchange)
- if err != nil {
- t.Errorf("client sendKexInit: %v", err)
- }
-
- sent := 0
- // send a packet
- packet := make([]byte, 5)
- packet[0] = msgRequestSuccess
- if err := trC.writePacket(packet); err != nil {
- t.Errorf("writePacket: %v", err)
- }
- sent++
-
- // Send another packet. Use a fresh one, since writePacket destroys.
- packet = make([]byte, 5)
- packet[0] = msgRequestSuccess
- if err := trC.writePacket(packet); err != nil {
- t.Errorf("writePacket: %v", err)
- }
- sent++
-
- // 2nd key change.
- err = trC.sendKexInit(subsequentKeyExchange)
- if err != nil {
- t.Errorf("sendKexInit: %v", err)
- }
-
- packet = make([]byte, 5)
- packet[0] = msgRequestSuccess
- if err := trC.writePacket(packet); err != nil {
- t.Errorf("writePacket: %v", err)
- }
- sent++
-
- packet = make([]byte, 5)
- packet[0] = msgRequestSuccess
- for i := 0; i < sent; i++ {
- msg, err := trS.readPacket()
- if err != nil {
- t.Fatalf("server closed too soon: %v", err)
- }
-
- if bytes.Compare(msg, packet) != 0 {
- t.Errorf("packet %d: got %q want %q", i, msg, packet)
- }
- }
- if len(checker.calls) != 2 {
- t.Errorf("got %d key changes, want 2", len(checker.calls))
- }
-}
-
-func TestHandshakeAutoRekeyWrite(t *testing.T) {
- checker := &testChecker{}
- clientConf := &ClientConfig{HostKeyCallback: checker.Check}
- clientConf.RekeyThreshold = 500
- trC, trS, err := handshakePair(clientConf, "addr")
- if err != nil {
- t.Fatalf("handshakePair: %v", err)
- }
- defer trC.Close()
- defer trS.Close()
-
- for i := 0; i < 5; i++ {
- packet := make([]byte, 251)
- packet[0] = msgRequestSuccess
- if err := trC.writePacket(packet); err != nil {
- t.Errorf("writePacket: %v", err)
- }
- }
-
- j := 0
- for ; j < 5; j++ {
- _, err := trS.readPacket()
- if err != nil {
- break
- }
- }
-
- if j != 5 {
- t.Errorf("got %d, want 5 messages", j)
- }
-
- if len(checker.calls) != 2 {
- t.Errorf("got %d key changes, wanted 2", len(checker.calls))
- }
-}
-
-type syncChecker struct {
- called chan int
-}
-
-func (t *syncChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error {
- t.called <- 1
- return nil
-}
-
-func TestHandshakeAutoRekeyRead(t *testing.T) {
- sync := &syncChecker{make(chan int, 2)}
- clientConf := &ClientConfig{
- HostKeyCallback: sync.Check,
- }
- clientConf.RekeyThreshold = 500
-
- trC, trS, err := handshakePair(clientConf, "addr")
- if err != nil {
- t.Fatalf("handshakePair: %v", err)
- }
- defer trC.Close()
- defer trS.Close()
-
- packet := make([]byte, 501)
- packet[0] = msgRequestSuccess
- if err := trS.writePacket(packet); err != nil {
- t.Fatalf("writePacket: %v", err)
- }
- // While we read out the packet, a key change will be
- // initiated.
- if _, err := trC.readPacket(); err != nil {
- t.Fatalf("readPacket(client): %v", err)
- }
-
- <-sync.called
-}
-
-// errorKeyingTransport generates errors after a given number of
-// read/write operations.
-type errorKeyingTransport struct {
- packetConn
- readLeft, writeLeft int
-}
-
-func (n *errorKeyingTransport) prepareKeyChange(*algorithms, *kexResult) error {
- return nil
-}
-func (n *errorKeyingTransport) getSessionID() []byte {
- return nil
-}
-
-func (n *errorKeyingTransport) writePacket(packet []byte) error {
- if n.writeLeft == 0 {
- n.Close()
- return errors.New("barf")
- }
-
- n.writeLeft--
- return n.packetConn.writePacket(packet)
-}
-
-func (n *errorKeyingTransport) readPacket() ([]byte, error) {
- if n.readLeft == 0 {
- n.Close()
- return nil, errors.New("barf")
- }
-
- n.readLeft--
- return n.packetConn.readPacket()
-}
-
-func TestHandshakeErrorHandlingRead(t *testing.T) {
- for i := 0; i < 20; i++ {
- testHandshakeErrorHandlingN(t, i, -1)
- }
-}
-
-func TestHandshakeErrorHandlingWrite(t *testing.T) {
- for i := 0; i < 20; i++ {
- testHandshakeErrorHandlingN(t, -1, i)
- }
-}
-
-// testHandshakeErrorHandlingN runs handshakes, injecting errors. If
-// handshakeTransport deadlocks, the go runtime will detect it and
-// panic.
-func testHandshakeErrorHandlingN(t *testing.T, readLimit, writeLimit int) {
- msg := Marshal(&serviceRequestMsg{strings.Repeat("x", int(minRekeyThreshold)/4)})
-
- a, b := memPipe()
- defer a.Close()
- defer b.Close()
-
- key := testSigners["ecdsa"]
- serverConf := Config{RekeyThreshold: minRekeyThreshold}
- serverConf.SetDefaults()
- serverConn := newHandshakeTransport(&errorKeyingTransport{a, readLimit, writeLimit}, &serverConf, []byte{'a'}, []byte{'b'})
- serverConn.hostKeys = []Signer{key}
- go serverConn.readLoop()
-
- clientConf := Config{RekeyThreshold: 10 * minRekeyThreshold}
- clientConf.SetDefaults()
- clientConn := newHandshakeTransport(&errorKeyingTransport{b, -1, -1}, &clientConf, []byte{'a'}, []byte{'b'})
- clientConn.hostKeyAlgorithms = []string{key.PublicKey().Type()}
- go clientConn.readLoop()
-
- var wg sync.WaitGroup
- wg.Add(4)
-
- for _, hs := range []packetConn{serverConn, clientConn} {
- go func(c packetConn) {
- for {
- err := c.writePacket(msg)
- if err != nil {
- break
- }
- }
- wg.Done()
- }(hs)
- go func(c packetConn) {
- for {
- _, err := c.readPacket()
- if err != nil {
- break
- }
- }
- wg.Done()
- }(hs)
- }
-
- wg.Wait()
-}
-
-func TestDisconnect(t *testing.T) {
- if runtime.GOOS == "plan9" {
- t.Skip("see golang.org/issue/7237")
- }
- checker := &testChecker{}
- trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr")
- if err != nil {
- t.Fatalf("handshakePair: %v", err)
- }
-
- defer trC.Close()
- defer trS.Close()
-
- trC.writePacket([]byte{msgRequestSuccess, 0, 0})
- errMsg := &disconnectMsg{
- Reason: 42,
- Message: "such is life",
- }
- trC.writePacket(Marshal(errMsg))
- trC.writePacket([]byte{msgRequestSuccess, 0, 0})
-
- packet, err := trS.readPacket()
- if err != nil {
- t.Fatalf("readPacket 1: %v", err)
- }
- if packet[0] != msgRequestSuccess {
- t.Errorf("got packet %v, want packet type %d", packet, msgRequestSuccess)
- }
-
- _, err = trS.readPacket()
- if err == nil {
- t.Errorf("readPacket 2 succeeded")
- } else if !reflect.DeepEqual(err, errMsg) {
- t.Errorf("got error %#v, want %#v", err, errMsg)
- }
-
- _, err = trS.readPacket()
- if err == nil {
- t.Errorf("readPacket 3 succeeded")
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go
deleted file mode 100644
index c87fbeb..0000000
--- a/vendor/golang.org/x/crypto/ssh/kex.go
+++ /dev/null
@@ -1,540 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "crypto"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/subtle"
- "errors"
- "io"
- "math/big"
-
- "golang.org/x/crypto/curve25519"
-)
-
-const (
- kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1"
- kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1"
- kexAlgoECDH256 = "ecdh-sha2-nistp256"
- kexAlgoECDH384 = "ecdh-sha2-nistp384"
- kexAlgoECDH521 = "ecdh-sha2-nistp521"
- kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org"
-)
-
-// kexResult captures the outcome of a key exchange.
-type kexResult struct {
- // Session hash. See also RFC 4253, section 8.
- H []byte
-
- // Shared secret. See also RFC 4253, section 8.
- K []byte
-
- // Host key as hashed into H.
- HostKey []byte
-
- // Signature of H.
- Signature []byte
-
- // A cryptographic hash function that matches the security
- // level of the key exchange algorithm. It is used for
- // calculating H, and for deriving keys from H and K.
- Hash crypto.Hash
-
- // The session ID, which is the first H computed. This is used
- // to derive key material inside the transport.
- SessionID []byte
-}
-
-// handshakeMagics contains data that is always included in the
-// session hash.
-type handshakeMagics struct {
- clientVersion, serverVersion []byte
- clientKexInit, serverKexInit []byte
-}
-
-func (m *handshakeMagics) write(w io.Writer) {
- writeString(w, m.clientVersion)
- writeString(w, m.serverVersion)
- writeString(w, m.clientKexInit)
- writeString(w, m.serverKexInit)
-}
-
-// kexAlgorithm abstracts different key exchange algorithms.
-type kexAlgorithm interface {
- // Server runs server-side key agreement, signing the result
- // with a hostkey.
- Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error)
-
- // Client runs the client-side key agreement. Caller is
- // responsible for verifying the host key signature.
- Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error)
-}
-
-// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement.
-type dhGroup struct {
- g, p, pMinus1 *big.Int
-}
-
-func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
- if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 {
- return nil, errors.New("ssh: DH parameter out of bounds")
- }
- return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil
-}
-
-func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
- hashFunc := crypto.SHA1
-
- var x *big.Int
- for {
- var err error
- if x, err = rand.Int(randSource, group.pMinus1); err != nil {
- return nil, err
- }
- if x.Sign() > 0 {
- break
- }
- }
-
- X := new(big.Int).Exp(group.g, x, group.p)
- kexDHInit := kexDHInitMsg{
- X: X,
- }
- if err := c.writePacket(Marshal(&kexDHInit)); err != nil {
- return nil, err
- }
-
- packet, err := c.readPacket()
- if err != nil {
- return nil, err
- }
-
- var kexDHReply kexDHReplyMsg
- if err = Unmarshal(packet, &kexDHReply); err != nil {
- return nil, err
- }
-
- kInt, err := group.diffieHellman(kexDHReply.Y, x)
- if err != nil {
- return nil, err
- }
-
- h := hashFunc.New()
- magics.write(h)
- writeString(h, kexDHReply.HostKey)
- writeInt(h, X)
- writeInt(h, kexDHReply.Y)
- K := make([]byte, intLength(kInt))
- marshalInt(K, kInt)
- h.Write(K)
-
- return &kexResult{
- H: h.Sum(nil),
- K: K,
- HostKey: kexDHReply.HostKey,
- Signature: kexDHReply.Signature,
- Hash: crypto.SHA1,
- }, nil
-}
-
-func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
- hashFunc := crypto.SHA1
- packet, err := c.readPacket()
- if err != nil {
- return
- }
- var kexDHInit kexDHInitMsg
- if err = Unmarshal(packet, &kexDHInit); err != nil {
- return
- }
-
- var y *big.Int
- for {
- if y, err = rand.Int(randSource, group.pMinus1); err != nil {
- return
- }
- if y.Sign() > 0 {
- break
- }
- }
-
- Y := new(big.Int).Exp(group.g, y, group.p)
- kInt, err := group.diffieHellman(kexDHInit.X, y)
- if err != nil {
- return nil, err
- }
-
- hostKeyBytes := priv.PublicKey().Marshal()
-
- h := hashFunc.New()
- magics.write(h)
- writeString(h, hostKeyBytes)
- writeInt(h, kexDHInit.X)
- writeInt(h, Y)
-
- K := make([]byte, intLength(kInt))
- marshalInt(K, kInt)
- h.Write(K)
-
- H := h.Sum(nil)
-
- // H is already a hash, but the hostkey signing will apply its
- // own key-specific hash algorithm.
- sig, err := signAndMarshal(priv, randSource, H)
- if err != nil {
- return nil, err
- }
-
- kexDHReply := kexDHReplyMsg{
- HostKey: hostKeyBytes,
- Y: Y,
- Signature: sig,
- }
- packet = Marshal(&kexDHReply)
-
- err = c.writePacket(packet)
- return &kexResult{
- H: H,
- K: K,
- HostKey: hostKeyBytes,
- Signature: sig,
- Hash: crypto.SHA1,
- }, nil
-}
-
-// ecdh performs Elliptic Curve Diffie-Hellman key exchange as
-// described in RFC 5656, section 4.
-type ecdh struct {
- curve elliptic.Curve
-}
-
-func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
- ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
- if err != nil {
- return nil, err
- }
-
- kexInit := kexECDHInitMsg{
- ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y),
- }
-
- serialized := Marshal(&kexInit)
- if err := c.writePacket(serialized); err != nil {
- return nil, err
- }
-
- packet, err := c.readPacket()
- if err != nil {
- return nil, err
- }
-
- var reply kexECDHReplyMsg
- if err = Unmarshal(packet, &reply); err != nil {
- return nil, err
- }
-
- x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey)
- if err != nil {
- return nil, err
- }
-
- // generate shared secret
- secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes())
-
- h := ecHash(kex.curve).New()
- magics.write(h)
- writeString(h, reply.HostKey)
- writeString(h, kexInit.ClientPubKey)
- writeString(h, reply.EphemeralPubKey)
- K := make([]byte, intLength(secret))
- marshalInt(K, secret)
- h.Write(K)
-
- return &kexResult{
- H: h.Sum(nil),
- K: K,
- HostKey: reply.HostKey,
- Signature: reply.Signature,
- Hash: ecHash(kex.curve),
- }, nil
-}
-
-// unmarshalECKey parses and checks an EC key.
-func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) {
- x, y = elliptic.Unmarshal(curve, pubkey)
- if x == nil {
- return nil, nil, errors.New("ssh: elliptic.Unmarshal failure")
- }
- if !validateECPublicKey(curve, x, y) {
- return nil, nil, errors.New("ssh: public key not on curve")
- }
- return x, y, nil
-}
-
-// validateECPublicKey checks that the point is a valid public key for
-// the given curve. See [SEC1], 3.2.2
-func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool {
- if x.Sign() == 0 && y.Sign() == 0 {
- return false
- }
-
- if x.Cmp(curve.Params().P) >= 0 {
- return false
- }
-
- if y.Cmp(curve.Params().P) >= 0 {
- return false
- }
-
- if !curve.IsOnCurve(x, y) {
- return false
- }
-
- // We don't check if N * PubKey == 0, since
- //
- // - the NIST curves have cofactor = 1, so this is implicit.
- // (We don't foresee an implementation that supports non NIST
- // curves)
- //
- // - for ephemeral keys, we don't need to worry about small
- // subgroup attacks.
- return true
-}
-
-func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
- packet, err := c.readPacket()
- if err != nil {
- return nil, err
- }
-
- var kexECDHInit kexECDHInitMsg
- if err = Unmarshal(packet, &kexECDHInit); err != nil {
- return nil, err
- }
-
- clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey)
- if err != nil {
- return nil, err
- }
-
- // We could cache this key across multiple users/multiple
- // connection attempts, but the benefit is small. OpenSSH
- // generates a new key for each incoming connection.
- ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
- if err != nil {
- return nil, err
- }
-
- hostKeyBytes := priv.PublicKey().Marshal()
-
- serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y)
-
- // generate shared secret
- secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes())
-
- h := ecHash(kex.curve).New()
- magics.write(h)
- writeString(h, hostKeyBytes)
- writeString(h, kexECDHInit.ClientPubKey)
- writeString(h, serializedEphKey)
-
- K := make([]byte, intLength(secret))
- marshalInt(K, secret)
- h.Write(K)
-
- H := h.Sum(nil)
-
- // H is already a hash, but the hostkey signing will apply its
- // own key-specific hash algorithm.
- sig, err := signAndMarshal(priv, rand, H)
- if err != nil {
- return nil, err
- }
-
- reply := kexECDHReplyMsg{
- EphemeralPubKey: serializedEphKey,
- HostKey: hostKeyBytes,
- Signature: sig,
- }
-
- serialized := Marshal(&reply)
- if err := c.writePacket(serialized); err != nil {
- return nil, err
- }
-
- return &kexResult{
- H: H,
- K: K,
- HostKey: reply.HostKey,
- Signature: sig,
- Hash: ecHash(kex.curve),
- }, nil
-}
-
-var kexAlgoMap = map[string]kexAlgorithm{}
-
-func init() {
- // This is the group called diffie-hellman-group1-sha1 in RFC
- // 4253 and Oakley Group 2 in RFC 2409.
- p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16)
- kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{
- g: new(big.Int).SetInt64(2),
- p: p,
- pMinus1: new(big.Int).Sub(p, bigOne),
- }
-
- // This is the group called diffie-hellman-group14-sha1 in RFC
- // 4253 and Oakley Group 14 in RFC 3526.
- p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
-
- kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{
- g: new(big.Int).SetInt64(2),
- p: p,
- pMinus1: new(big.Int).Sub(p, bigOne),
- }
-
- kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()}
- kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()}
- kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()}
- kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{}
-}
-
-// curve25519sha256 implements the curve25519-sha256@libssh.org key
-// agreement protocol, as described in
-// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt
-type curve25519sha256 struct{}
-
-type curve25519KeyPair struct {
- priv [32]byte
- pub [32]byte
-}
-
-func (kp *curve25519KeyPair) generate(rand io.Reader) error {
- if _, err := io.ReadFull(rand, kp.priv[:]); err != nil {
- return err
- }
- curve25519.ScalarBaseMult(&kp.pub, &kp.priv)
- return nil
-}
-
-// curve25519Zeros is just an array of 32 zero bytes so that we have something
-// convenient to compare against in order to reject curve25519 points with the
-// wrong order.
-var curve25519Zeros [32]byte
-
-func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
- var kp curve25519KeyPair
- if err := kp.generate(rand); err != nil {
- return nil, err
- }
- if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil {
- return nil, err
- }
-
- packet, err := c.readPacket()
- if err != nil {
- return nil, err
- }
-
- var reply kexECDHReplyMsg
- if err = Unmarshal(packet, &reply); err != nil {
- return nil, err
- }
- if len(reply.EphemeralPubKey) != 32 {
- return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
- }
-
- var servPub, secret [32]byte
- copy(servPub[:], reply.EphemeralPubKey)
- curve25519.ScalarMult(&secret, &kp.priv, &servPub)
- if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
- return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
- }
-
- h := crypto.SHA256.New()
- magics.write(h)
- writeString(h, reply.HostKey)
- writeString(h, kp.pub[:])
- writeString(h, reply.EphemeralPubKey)
-
- kInt := new(big.Int).SetBytes(secret[:])
- K := make([]byte, intLength(kInt))
- marshalInt(K, kInt)
- h.Write(K)
-
- return &kexResult{
- H: h.Sum(nil),
- K: K,
- HostKey: reply.HostKey,
- Signature: reply.Signature,
- Hash: crypto.SHA256,
- }, nil
-}
-
-func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
- packet, err := c.readPacket()
- if err != nil {
- return
- }
- var kexInit kexECDHInitMsg
- if err = Unmarshal(packet, &kexInit); err != nil {
- return
- }
-
- if len(kexInit.ClientPubKey) != 32 {
- return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
- }
-
- var kp curve25519KeyPair
- if err := kp.generate(rand); err != nil {
- return nil, err
- }
-
- var clientPub, secret [32]byte
- copy(clientPub[:], kexInit.ClientPubKey)
- curve25519.ScalarMult(&secret, &kp.priv, &clientPub)
- if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
- return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
- }
-
- hostKeyBytes := priv.PublicKey().Marshal()
-
- h := crypto.SHA256.New()
- magics.write(h)
- writeString(h, hostKeyBytes)
- writeString(h, kexInit.ClientPubKey)
- writeString(h, kp.pub[:])
-
- kInt := new(big.Int).SetBytes(secret[:])
- K := make([]byte, intLength(kInt))
- marshalInt(K, kInt)
- h.Write(K)
-
- H := h.Sum(nil)
-
- sig, err := signAndMarshal(priv, rand, H)
- if err != nil {
- return nil, err
- }
-
- reply := kexECDHReplyMsg{
- EphemeralPubKey: kp.pub[:],
- HostKey: hostKeyBytes,
- Signature: sig,
- }
- if err := c.writePacket(Marshal(&reply)); err != nil {
- return nil, err
- }
- return &kexResult{
- H: H,
- K: K,
- HostKey: hostKeyBytes,
- Signature: sig,
- Hash: crypto.SHA256,
- }, nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/kex_test.go b/vendor/golang.org/x/crypto/ssh/kex_test.go
deleted file mode 100644
index 12ca0ac..0000000
--- a/vendor/golang.org/x/crypto/ssh/kex_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-// Key exchange tests.
-
-import (
- "crypto/rand"
- "reflect"
- "testing"
-)
-
-func TestKexes(t *testing.T) {
- type kexResultErr struct {
- result *kexResult
- err error
- }
-
- for name, kex := range kexAlgoMap {
- a, b := memPipe()
-
- s := make(chan kexResultErr, 1)
- c := make(chan kexResultErr, 1)
- var magics handshakeMagics
- go func() {
- r, e := kex.Client(a, rand.Reader, &magics)
- a.Close()
- c <- kexResultErr{r, e}
- }()
- go func() {
- r, e := kex.Server(b, rand.Reader, &magics, testSigners["ecdsa"])
- b.Close()
- s <- kexResultErr{r, e}
- }()
-
- clientRes := <-c
- serverRes := <-s
- if clientRes.err != nil {
- t.Errorf("client: %v", clientRes.err)
- }
- if serverRes.err != nil {
- t.Errorf("server: %v", serverRes.err)
- }
- if !reflect.DeepEqual(clientRes.result, serverRes.result) {
- t.Errorf("kex %q: mismatch %#v, %#v", name, clientRes.result, serverRes.result)
- }
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
deleted file mode 100644
index 21f7d0d..0000000
--- a/vendor/golang.org/x/crypto/ssh/keys.go
+++ /dev/null
@@ -1,905 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "bytes"
- "crypto"
- "crypto/dsa"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/md5"
- "crypto/rsa"
- "crypto/sha256"
- "crypto/x509"
- "encoding/asn1"
- "encoding/base64"
- "encoding/hex"
- "encoding/pem"
- "errors"
- "fmt"
- "io"
- "math/big"
- "strings"
-
- "golang.org/x/crypto/ed25519"
-)
-
-// These constants represent the algorithm names for key types supported by this
-// package.
-const (
- KeyAlgoRSA = "ssh-rsa"
- KeyAlgoDSA = "ssh-dss"
- KeyAlgoECDSA256 = "ecdsa-sha2-nistp256"
- KeyAlgoECDSA384 = "ecdsa-sha2-nistp384"
- KeyAlgoECDSA521 = "ecdsa-sha2-nistp521"
- KeyAlgoED25519 = "ssh-ed25519"
-)
-
-// parsePubKey parses a public key of the given algorithm.
-// Use ParsePublicKey for keys with prepended algorithm.
-func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) {
- switch algo {
- case KeyAlgoRSA:
- return parseRSA(in)
- case KeyAlgoDSA:
- return parseDSA(in)
- case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
- return parseECDSA(in)
- case KeyAlgoED25519:
- return parseED25519(in)
- case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01:
- cert, err := parseCert(in, certToPrivAlgo(algo))
- if err != nil {
- return nil, nil, err
- }
- return cert, nil, nil
- }
- return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo)
-}
-
-// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format
-// (see sshd(8) manual page) once the options and key type fields have been
-// removed.
-func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) {
- in = bytes.TrimSpace(in)
-
- i := bytes.IndexAny(in, " \t")
- if i == -1 {
- i = len(in)
- }
- base64Key := in[:i]
-
- key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key)))
- n, err := base64.StdEncoding.Decode(key, base64Key)
- if err != nil {
- return nil, "", err
- }
- key = key[:n]
- out, err = ParsePublicKey(key)
- if err != nil {
- return nil, "", err
- }
- comment = string(bytes.TrimSpace(in[i:]))
- return out, comment, nil
-}
-
-// ParseKnownHosts parses an entry in the format of the known_hosts file.
-//
-// The known_hosts format is documented in the sshd(8) manual page. This
-// function will parse a single entry from in. On successful return, marker
-// will contain the optional marker value (i.e. "cert-authority" or "revoked")
-// or else be empty, hosts will contain the hosts that this entry matches,
-// pubKey will contain the public key and comment will contain any trailing
-// comment at the end of the line. See the sshd(8) manual page for the various
-// forms that a host string can take.
-//
-// The unparsed remainder of the input will be returned in rest. This function
-// can be called repeatedly to parse multiple entries.
-//
-// If no entries were found in the input then err will be io.EOF. Otherwise a
-// non-nil err value indicates a parse error.
-func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) {
- for len(in) > 0 {
- end := bytes.IndexByte(in, '\n')
- if end != -1 {
- rest = in[end+1:]
- in = in[:end]
- } else {
- rest = nil
- }
-
- end = bytes.IndexByte(in, '\r')
- if end != -1 {
- in = in[:end]
- }
-
- in = bytes.TrimSpace(in)
- if len(in) == 0 || in[0] == '#' {
- in = rest
- continue
- }
-
- i := bytes.IndexAny(in, " \t")
- if i == -1 {
- in = rest
- continue
- }
-
- // Strip out the beginning of the known_host key.
- // This is either an optional marker or a (set of) hostname(s).
- keyFields := bytes.Fields(in)
- if len(keyFields) < 3 || len(keyFields) > 5 {
- return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data")
- }
-
- // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated
- // list of hosts
- marker := ""
- if keyFields[0][0] == '@' {
- marker = string(keyFields[0][1:])
- keyFields = keyFields[1:]
- }
-
- hosts := string(keyFields[0])
- // keyFields[1] contains the key type (e.g. “ssh-rsa”).
- // However, that information is duplicated inside the
- // base64-encoded key and so is ignored here.
-
- key := bytes.Join(keyFields[2:], []byte(" "))
- if pubKey, comment, err = parseAuthorizedKey(key); err != nil {
- return "", nil, nil, "", nil, err
- }
-
- return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil
- }
-
- return "", nil, nil, "", nil, io.EOF
-}
-
-// ParseAuthorizedKeys parses a public key from an authorized_keys
-// file used in OpenSSH according to the sshd(8) manual page.
-func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) {
- for len(in) > 0 {
- end := bytes.IndexByte(in, '\n')
- if end != -1 {
- rest = in[end+1:]
- in = in[:end]
- } else {
- rest = nil
- }
-
- end = bytes.IndexByte(in, '\r')
- if end != -1 {
- in = in[:end]
- }
-
- in = bytes.TrimSpace(in)
- if len(in) == 0 || in[0] == '#' {
- in = rest
- continue
- }
-
- i := bytes.IndexAny(in, " \t")
- if i == -1 {
- in = rest
- continue
- }
-
- if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
- return out, comment, options, rest, nil
- }
-
- // No key type recognised. Maybe there's an options field at
- // the beginning.
- var b byte
- inQuote := false
- var candidateOptions []string
- optionStart := 0
- for i, b = range in {
- isEnd := !inQuote && (b == ' ' || b == '\t')
- if (b == ',' && !inQuote) || isEnd {
- if i-optionStart > 0 {
- candidateOptions = append(candidateOptions, string(in[optionStart:i]))
- }
- optionStart = i + 1
- }
- if isEnd {
- break
- }
- if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) {
- inQuote = !inQuote
- }
- }
- for i < len(in) && (in[i] == ' ' || in[i] == '\t') {
- i++
- }
- if i == len(in) {
- // Invalid line: unmatched quote
- in = rest
- continue
- }
-
- in = in[i:]
- i = bytes.IndexAny(in, " \t")
- if i == -1 {
- in = rest
- continue
- }
-
- if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
- options = candidateOptions
- return out, comment, options, rest, nil
- }
-
- in = rest
- continue
- }
-
- return nil, "", nil, nil, errors.New("ssh: no key found")
-}
-
-// ParsePublicKey parses an SSH public key formatted for use in
-// the SSH wire protocol according to RFC 4253, section 6.6.
-func ParsePublicKey(in []byte) (out PublicKey, err error) {
- algo, in, ok := parseString(in)
- if !ok {
- return nil, errShortRead
- }
- var rest []byte
- out, rest, err = parsePubKey(in, string(algo))
- if len(rest) > 0 {
- return nil, errors.New("ssh: trailing junk in public key")
- }
-
- return out, err
-}
-
-// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH
-// authorized_keys file. The return value ends with newline.
-func MarshalAuthorizedKey(key PublicKey) []byte {
- b := &bytes.Buffer{}
- b.WriteString(key.Type())
- b.WriteByte(' ')
- e := base64.NewEncoder(base64.StdEncoding, b)
- e.Write(key.Marshal())
- e.Close()
- b.WriteByte('\n')
- return b.Bytes()
-}
-
-// PublicKey is an abstraction of different types of public keys.
-type PublicKey interface {
- // Type returns the key's type, e.g. "ssh-rsa".
- Type() string
-
- // Marshal returns the serialized key data in SSH wire format,
- // with the name prefix.
- Marshal() []byte
-
- // Verify that sig is a signature on the given data using this
- // key. This function will hash the data appropriately first.
- Verify(data []byte, sig *Signature) error
-}
-
-// CryptoPublicKey, if implemented by a PublicKey,
-// returns the underlying crypto.PublicKey form of the key.
-type CryptoPublicKey interface {
- CryptoPublicKey() crypto.PublicKey
-}
-
-// A Signer can create signatures that verify against a public key.
-type Signer interface {
- // PublicKey returns an associated PublicKey instance.
- PublicKey() PublicKey
-
- // Sign returns raw signature for the given data. This method
- // will apply the hash specified for the keytype to the data.
- Sign(rand io.Reader, data []byte) (*Signature, error)
-}
-
-type rsaPublicKey rsa.PublicKey
-
-func (r *rsaPublicKey) Type() string {
- return "ssh-rsa"
-}
-
-// parseRSA parses an RSA key according to RFC 4253, section 6.6.
-func parseRSA(in []byte) (out PublicKey, rest []byte, err error) {
- var w struct {
- E *big.Int
- N *big.Int
- Rest []byte `ssh:"rest"`
- }
- if err := Unmarshal(in, &w); err != nil {
- return nil, nil, err
- }
-
- if w.E.BitLen() > 24 {
- return nil, nil, errors.New("ssh: exponent too large")
- }
- e := w.E.Int64()
- if e < 3 || e&1 == 0 {
- return nil, nil, errors.New("ssh: incorrect exponent")
- }
-
- var key rsa.PublicKey
- key.E = int(e)
- key.N = w.N
- return (*rsaPublicKey)(&key), w.Rest, nil
-}
-
-func (r *rsaPublicKey) Marshal() []byte {
- e := new(big.Int).SetInt64(int64(r.E))
- // RSA publickey struct layout should match the struct used by
- // parseRSACert in the x/crypto/ssh/agent package.
- wirekey := struct {
- Name string
- E *big.Int
- N *big.Int
- }{
- KeyAlgoRSA,
- e,
- r.N,
- }
- return Marshal(&wirekey)
-}
-
-func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
- if sig.Format != r.Type() {
- return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
- }
- h := crypto.SHA1.New()
- h.Write(data)
- digest := h.Sum(nil)
- return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob)
-}
-
-func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
- return (*rsa.PublicKey)(r)
-}
-
-type dsaPublicKey dsa.PublicKey
-
-func (r *dsaPublicKey) Type() string {
- return "ssh-dss"
-}
-
-// parseDSA parses an DSA key according to RFC 4253, section 6.6.
-func parseDSA(in []byte) (out PublicKey, rest []byte, err error) {
- var w struct {
- P, Q, G, Y *big.Int
- Rest []byte `ssh:"rest"`
- }
- if err := Unmarshal(in, &w); err != nil {
- return nil, nil, err
- }
-
- key := &dsaPublicKey{
- Parameters: dsa.Parameters{
- P: w.P,
- Q: w.Q,
- G: w.G,
- },
- Y: w.Y,
- }
- return key, w.Rest, nil
-}
-
-func (k *dsaPublicKey) Marshal() []byte {
- // DSA publickey struct layout should match the struct used by
- // parseDSACert in the x/crypto/ssh/agent package.
- w := struct {
- Name string
- P, Q, G, Y *big.Int
- }{
- k.Type(),
- k.P,
- k.Q,
- k.G,
- k.Y,
- }
-
- return Marshal(&w)
-}
-
-func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
- if sig.Format != k.Type() {
- return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
- }
- h := crypto.SHA1.New()
- h.Write(data)
- digest := h.Sum(nil)
-
- // Per RFC 4253, section 6.6,
- // The value for 'dss_signature_blob' is encoded as a string containing
- // r, followed by s (which are 160-bit integers, without lengths or
- // padding, unsigned, and in network byte order).
- // For DSS purposes, sig.Blob should be exactly 40 bytes in length.
- if len(sig.Blob) != 40 {
- return errors.New("ssh: DSA signature parse error")
- }
- r := new(big.Int).SetBytes(sig.Blob[:20])
- s := new(big.Int).SetBytes(sig.Blob[20:])
- if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) {
- return nil
- }
- return errors.New("ssh: signature did not verify")
-}
-
-func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey {
- return (*dsa.PublicKey)(k)
-}
-
-type dsaPrivateKey struct {
- *dsa.PrivateKey
-}
-
-func (k *dsaPrivateKey) PublicKey() PublicKey {
- return (*dsaPublicKey)(&k.PrivateKey.PublicKey)
-}
-
-func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
- h := crypto.SHA1.New()
- h.Write(data)
- digest := h.Sum(nil)
- r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
- if err != nil {
- return nil, err
- }
-
- sig := make([]byte, 40)
- rb := r.Bytes()
- sb := s.Bytes()
-
- copy(sig[20-len(rb):20], rb)
- copy(sig[40-len(sb):], sb)
-
- return &Signature{
- Format: k.PublicKey().Type(),
- Blob: sig,
- }, nil
-}
-
-type ecdsaPublicKey ecdsa.PublicKey
-
-func (key *ecdsaPublicKey) Type() string {
- return "ecdsa-sha2-" + key.nistID()
-}
-
-func (key *ecdsaPublicKey) nistID() string {
- switch key.Params().BitSize {
- case 256:
- return "nistp256"
- case 384:
- return "nistp384"
- case 521:
- return "nistp521"
- }
- panic("ssh: unsupported ecdsa key size")
-}
-
-type ed25519PublicKey ed25519.PublicKey
-
-func (key ed25519PublicKey) Type() string {
- return KeyAlgoED25519
-}
-
-func parseED25519(in []byte) (out PublicKey, rest []byte, err error) {
- var w struct {
- KeyBytes []byte
- Rest []byte `ssh:"rest"`
- }
-
- if err := Unmarshal(in, &w); err != nil {
- return nil, nil, err
- }
-
- key := ed25519.PublicKey(w.KeyBytes)
-
- return (ed25519PublicKey)(key), w.Rest, nil
-}
-
-func (key ed25519PublicKey) Marshal() []byte {
- w := struct {
- Name string
- KeyBytes []byte
- }{
- KeyAlgoED25519,
- []byte(key),
- }
- return Marshal(&w)
-}
-
-func (key ed25519PublicKey) Verify(b []byte, sig *Signature) error {
- if sig.Format != key.Type() {
- return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
- }
-
- edKey := (ed25519.PublicKey)(key)
- if ok := ed25519.Verify(edKey, b, sig.Blob); !ok {
- return errors.New("ssh: signature did not verify")
- }
-
- return nil
-}
-
-func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey {
- return ed25519.PublicKey(k)
-}
-
-func supportedEllipticCurve(curve elliptic.Curve) bool {
- return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521()
-}
-
-// ecHash returns the hash to match the given elliptic curve, see RFC
-// 5656, section 6.2.1
-func ecHash(curve elliptic.Curve) crypto.Hash {
- bitSize := curve.Params().BitSize
- switch {
- case bitSize <= 256:
- return crypto.SHA256
- case bitSize <= 384:
- return crypto.SHA384
- }
- return crypto.SHA512
-}
-
-// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1.
-func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) {
- var w struct {
- Curve string
- KeyBytes []byte
- Rest []byte `ssh:"rest"`
- }
-
- if err := Unmarshal(in, &w); err != nil {
- return nil, nil, err
- }
-
- key := new(ecdsa.PublicKey)
-
- switch w.Curve {
- case "nistp256":
- key.Curve = elliptic.P256()
- case "nistp384":
- key.Curve = elliptic.P384()
- case "nistp521":
- key.Curve = elliptic.P521()
- default:
- return nil, nil, errors.New("ssh: unsupported curve")
- }
-
- key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes)
- if key.X == nil || key.Y == nil {
- return nil, nil, errors.New("ssh: invalid curve point")
- }
- return (*ecdsaPublicKey)(key), w.Rest, nil
-}
-
-func (key *ecdsaPublicKey) Marshal() []byte {
- // See RFC 5656, section 3.1.
- keyBytes := elliptic.Marshal(key.Curve, key.X, key.Y)
- // ECDSA publickey struct layout should match the struct used by
- // parseECDSACert in the x/crypto/ssh/agent package.
- w := struct {
- Name string
- ID string
- Key []byte
- }{
- key.Type(),
- key.nistID(),
- keyBytes,
- }
-
- return Marshal(&w)
-}
-
-func (key *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
- if sig.Format != key.Type() {
- return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
- }
-
- h := ecHash(key.Curve).New()
- h.Write(data)
- digest := h.Sum(nil)
-
- // Per RFC 5656, section 3.1.2,
- // The ecdsa_signature_blob value has the following specific encoding:
- // mpint r
- // mpint s
- var ecSig struct {
- R *big.Int
- S *big.Int
- }
-
- if err := Unmarshal(sig.Blob, &ecSig); err != nil {
- return err
- }
-
- if ecdsa.Verify((*ecdsa.PublicKey)(key), digest, ecSig.R, ecSig.S) {
- return nil
- }
- return errors.New("ssh: signature did not verify")
-}
-
-func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey {
- return (*ecdsa.PublicKey)(k)
-}
-
-// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey,
-// *ecdsa.PrivateKey or any other crypto.Signer and returns a corresponding
-// Signer instance. ECDSA keys must use P-256, P-384 or P-521.
-func NewSignerFromKey(key interface{}) (Signer, error) {
- switch key := key.(type) {
- case crypto.Signer:
- return NewSignerFromSigner(key)
- case *dsa.PrivateKey:
- return &dsaPrivateKey{key}, nil
- default:
- return nil, fmt.Errorf("ssh: unsupported key type %T", key)
- }
-}
-
-type wrappedSigner struct {
- signer crypto.Signer
- pubKey PublicKey
-}
-
-// NewSignerFromSigner takes any crypto.Signer implementation and
-// returns a corresponding Signer interface. This can be used, for
-// example, with keys kept in hardware modules.
-func NewSignerFromSigner(signer crypto.Signer) (Signer, error) {
- pubKey, err := NewPublicKey(signer.Public())
- if err != nil {
- return nil, err
- }
-
- return &wrappedSigner{signer, pubKey}, nil
-}
-
-func (s *wrappedSigner) PublicKey() PublicKey {
- return s.pubKey
-}
-
-func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
- var hashFunc crypto.Hash
-
- switch key := s.pubKey.(type) {
- case *rsaPublicKey, *dsaPublicKey:
- hashFunc = crypto.SHA1
- case *ecdsaPublicKey:
- hashFunc = ecHash(key.Curve)
- case ed25519PublicKey:
- default:
- return nil, fmt.Errorf("ssh: unsupported key type %T", key)
- }
-
- var digest []byte
- if hashFunc != 0 {
- h := hashFunc.New()
- h.Write(data)
- digest = h.Sum(nil)
- } else {
- digest = data
- }
-
- signature, err := s.signer.Sign(rand, digest, hashFunc)
- if err != nil {
- return nil, err
- }
-
- // crypto.Signer.Sign is expected to return an ASN.1-encoded signature
- // for ECDSA and DSA, but that's not the encoding expected by SSH, so
- // re-encode.
- switch s.pubKey.(type) {
- case *ecdsaPublicKey, *dsaPublicKey:
- type asn1Signature struct {
- R, S *big.Int
- }
- asn1Sig := new(asn1Signature)
- _, err := asn1.Unmarshal(signature, asn1Sig)
- if err != nil {
- return nil, err
- }
-
- switch s.pubKey.(type) {
- case *ecdsaPublicKey:
- signature = Marshal(asn1Sig)
-
- case *dsaPublicKey:
- signature = make([]byte, 40)
- r := asn1Sig.R.Bytes()
- s := asn1Sig.S.Bytes()
- copy(signature[20-len(r):20], r)
- copy(signature[40-len(s):40], s)
- }
- }
-
- return &Signature{
- Format: s.pubKey.Type(),
- Blob: signature,
- }, nil
-}
-
-// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey,
-// or ed25519.PublicKey returns a corresponding PublicKey instance.
-// ECDSA keys must use P-256, P-384 or P-521.
-func NewPublicKey(key interface{}) (PublicKey, error) {
- switch key := key.(type) {
- case *rsa.PublicKey:
- return (*rsaPublicKey)(key), nil
- case *ecdsa.PublicKey:
- if !supportedEllipticCurve(key.Curve) {
- return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported.")
- }
- return (*ecdsaPublicKey)(key), nil
- case *dsa.PublicKey:
- return (*dsaPublicKey)(key), nil
- case ed25519.PublicKey:
- return (ed25519PublicKey)(key), nil
- default:
- return nil, fmt.Errorf("ssh: unsupported key type %T", key)
- }
-}
-
-// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports
-// the same keys as ParseRawPrivateKey.
-func ParsePrivateKey(pemBytes []byte) (Signer, error) {
- key, err := ParseRawPrivateKey(pemBytes)
- if err != nil {
- return nil, err
- }
-
- return NewSignerFromKey(key)
-}
-
-// encryptedBlock tells whether a private key is
-// encrypted by examining its Proc-Type header
-// for a mention of ENCRYPTED
-// according to RFC 1421 Section 4.6.1.1.
-func encryptedBlock(block *pem.Block) bool {
- return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED")
-}
-
-// ParseRawPrivateKey returns a private key from a PEM encoded private key. It
-// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys.
-func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
- block, _ := pem.Decode(pemBytes)
- if block == nil {
- return nil, errors.New("ssh: no key found")
- }
-
- if encryptedBlock(block) {
- return nil, errors.New("ssh: cannot decode encrypted private keys")
- }
-
- switch block.Type {
- case "RSA PRIVATE KEY":
- return x509.ParsePKCS1PrivateKey(block.Bytes)
- case "EC PRIVATE KEY":
- return x509.ParseECPrivateKey(block.Bytes)
- case "DSA PRIVATE KEY":
- return ParseDSAPrivateKey(block.Bytes)
- case "OPENSSH PRIVATE KEY":
- return parseOpenSSHPrivateKey(block.Bytes)
- default:
- return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
- }
-}
-
-// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as
-// specified by the OpenSSL DSA man page.
-func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
- var k struct {
- Version int
- P *big.Int
- Q *big.Int
- G *big.Int
- Priv *big.Int
- Pub *big.Int
- }
- rest, err := asn1.Unmarshal(der, &k)
- if err != nil {
- return nil, errors.New("ssh: failed to parse DSA key: " + err.Error())
- }
- if len(rest) > 0 {
- return nil, errors.New("ssh: garbage after DSA key")
- }
-
- return &dsa.PrivateKey{
- PublicKey: dsa.PublicKey{
- Parameters: dsa.Parameters{
- P: k.P,
- Q: k.Q,
- G: k.G,
- },
- Y: k.Priv,
- },
- X: k.Pub,
- }, nil
-}
-
-// Implemented based on the documentation at
-// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
-func parseOpenSSHPrivateKey(key []byte) (*ed25519.PrivateKey, error) {
- magic := append([]byte("openssh-key-v1"), 0)
- if !bytes.Equal(magic, key[0:len(magic)]) {
- return nil, errors.New("ssh: invalid openssh private key format")
- }
- remaining := key[len(magic):]
-
- var w struct {
- CipherName string
- KdfName string
- KdfOpts string
- NumKeys uint32
- PubKey []byte
- PrivKeyBlock []byte
- }
-
- if err := Unmarshal(remaining, &w); err != nil {
- return nil, err
- }
-
- pk1 := struct {
- Check1 uint32
- Check2 uint32
- Keytype string
- Pub []byte
- Priv []byte
- Comment string
- Pad []byte `ssh:"rest"`
- }{}
-
- if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil {
- return nil, err
- }
-
- if pk1.Check1 != pk1.Check2 {
- return nil, errors.New("ssh: checkint mismatch")
- }
-
- // we only handle ed25519 keys currently
- if pk1.Keytype != KeyAlgoED25519 {
- return nil, errors.New("ssh: unhandled key type")
- }
-
- for i, b := range pk1.Pad {
- if int(b) != i+1 {
- return nil, errors.New("ssh: padding not as expected")
- }
- }
-
- if len(pk1.Priv) != ed25519.PrivateKeySize {
- return nil, errors.New("ssh: private key unexpected length")
- }
-
- pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize))
- copy(pk, pk1.Priv)
- return &pk, nil
-}
-
-// FingerprintLegacyMD5 returns the user presentation of the key's
-// fingerprint as described by RFC 4716 section 4.
-func FingerprintLegacyMD5(pubKey PublicKey) string {
- md5sum := md5.Sum(pubKey.Marshal())
- hexarray := make([]string, len(md5sum))
- for i, c := range md5sum {
- hexarray[i] = hex.EncodeToString([]byte{c})
- }
- return strings.Join(hexarray, ":")
-}
-
-// FingerprintSHA256 returns the user presentation of the key's
-// fingerprint as unpadded base64 encoded sha256 hash.
-// This format was introduced from OpenSSH 6.8.
-// https://www.openssh.com/txt/release-6.8
-// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding)
-func FingerprintSHA256(pubKey PublicKey) string {
- sha256sum := sha256.Sum256(pubKey.Marshal())
- hash := base64.RawStdEncoding.EncodeToString(sha256sum[:])
- return "SHA256:" + hash
-}
diff --git a/vendor/golang.org/x/crypto/ssh/keys_test.go b/vendor/golang.org/x/crypto/ssh/keys_test.go
deleted file mode 100644
index a65e87e..0000000
--- a/vendor/golang.org/x/crypto/ssh/keys_test.go
+++ /dev/null
@@ -1,474 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "bytes"
- "crypto/dsa"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/rsa"
- "encoding/base64"
- "fmt"
- "reflect"
- "strings"
- "testing"
-
- "golang.org/x/crypto/ed25519"
- "golang.org/x/crypto/ssh/testdata"
-)
-
-func rawKey(pub PublicKey) interface{} {
- switch k := pub.(type) {
- case *rsaPublicKey:
- return (*rsa.PublicKey)(k)
- case *dsaPublicKey:
- return (*dsa.PublicKey)(k)
- case *ecdsaPublicKey:
- return (*ecdsa.PublicKey)(k)
- case ed25519PublicKey:
- return (ed25519.PublicKey)(k)
- case *Certificate:
- return k
- }
- panic("unknown key type")
-}
-
-func TestKeyMarshalParse(t *testing.T) {
- for _, priv := range testSigners {
- pub := priv.PublicKey()
- roundtrip, err := ParsePublicKey(pub.Marshal())
- if err != nil {
- t.Errorf("ParsePublicKey(%T): %v", pub, err)
- }
-
- k1 := rawKey(pub)
- k2 := rawKey(roundtrip)
-
- if !reflect.DeepEqual(k1, k2) {
- t.Errorf("got %#v in roundtrip, want %#v", k2, k1)
- }
- }
-}
-
-func TestUnsupportedCurves(t *testing.T) {
- raw, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
- if err != nil {
- t.Fatalf("GenerateKey: %v", err)
- }
-
- if _, err = NewSignerFromKey(raw); err == nil || !strings.Contains(err.Error(), "only P-256") {
- t.Fatalf("NewPrivateKey should not succeed with P-224, got: %v", err)
- }
-
- if _, err = NewPublicKey(&raw.PublicKey); err == nil || !strings.Contains(err.Error(), "only P-256") {
- t.Fatalf("NewPublicKey should not succeed with P-224, got: %v", err)
- }
-}
-
-func TestNewPublicKey(t *testing.T) {
- for _, k := range testSigners {
- raw := rawKey(k.PublicKey())
- // Skip certificates, as NewPublicKey does not support them.
- if _, ok := raw.(*Certificate); ok {
- continue
- }
- pub, err := NewPublicKey(raw)
- if err != nil {
- t.Errorf("NewPublicKey(%#v): %v", raw, err)
- }
- if !reflect.DeepEqual(k.PublicKey(), pub) {
- t.Errorf("NewPublicKey(%#v) = %#v, want %#v", raw, pub, k.PublicKey())
- }
- }
-}
-
-func TestKeySignVerify(t *testing.T) {
- for _, priv := range testSigners {
- pub := priv.PublicKey()
-
- data := []byte("sign me")
- sig, err := priv.Sign(rand.Reader, data)
- if err != nil {
- t.Fatalf("Sign(%T): %v", priv, err)
- }
-
- if err := pub.Verify(data, sig); err != nil {
- t.Errorf("publicKey.Verify(%T): %v", priv, err)
- }
- sig.Blob[5]++
- if err := pub.Verify(data, sig); err == nil {
- t.Errorf("publicKey.Verify on broken sig did not fail")
- }
- }
-}
-
-func TestParseRSAPrivateKey(t *testing.T) {
- key := testPrivateKeys["rsa"]
-
- rsa, ok := key.(*rsa.PrivateKey)
- if !ok {
- t.Fatalf("got %T, want *rsa.PrivateKey", rsa)
- }
-
- if err := rsa.Validate(); err != nil {
- t.Errorf("Validate: %v", err)
- }
-}
-
-func TestParseECPrivateKey(t *testing.T) {
- key := testPrivateKeys["ecdsa"]
-
- ecKey, ok := key.(*ecdsa.PrivateKey)
- if !ok {
- t.Fatalf("got %T, want *ecdsa.PrivateKey", ecKey)
- }
-
- if !validateECPublicKey(ecKey.Curve, ecKey.X, ecKey.Y) {
- t.Fatalf("public key does not validate.")
- }
-}
-
-// See Issue https://github.com/golang/go/issues/6650.
-func TestParseEncryptedPrivateKeysFails(t *testing.T) {
- const wantSubstring = "encrypted"
- for i, tt := range testdata.PEMEncryptedKeys {
- _, err := ParsePrivateKey(tt.PEMBytes)
- if err == nil {
- t.Errorf("#%d key %s: ParsePrivateKey successfully parsed, expected an error", i, tt.Name)
- continue
- }
-
- if !strings.Contains(err.Error(), wantSubstring) {
- t.Errorf("#%d key %s: got error %q, want substring %q", i, tt.Name, err, wantSubstring)
- }
- }
-}
-
-func TestParseDSA(t *testing.T) {
- // We actually exercise the ParsePrivateKey codepath here, as opposed to
- // using the ParseRawPrivateKey+NewSignerFromKey path that testdata_test.go
- // uses.
- s, err := ParsePrivateKey(testdata.PEMBytes["dsa"])
- if err != nil {
- t.Fatalf("ParsePrivateKey returned error: %s", err)
- }
-
- data := []byte("sign me")
- sig, err := s.Sign(rand.Reader, data)
- if err != nil {
- t.Fatalf("dsa.Sign: %v", err)
- }
-
- if err := s.PublicKey().Verify(data, sig); err != nil {
- t.Errorf("Verify failed: %v", err)
- }
-}
-
-// Tests for authorized_keys parsing.
-
-// getTestKey returns a public key, and its base64 encoding.
-func getTestKey() (PublicKey, string) {
- k := testPublicKeys["rsa"]
-
- b := &bytes.Buffer{}
- e := base64.NewEncoder(base64.StdEncoding, b)
- e.Write(k.Marshal())
- e.Close()
-
- return k, b.String()
-}
-
-func TestMarshalParsePublicKey(t *testing.T) {
- pub, pubSerialized := getTestKey()
- line := fmt.Sprintf("%s %s user@host", pub.Type(), pubSerialized)
-
- authKeys := MarshalAuthorizedKey(pub)
- actualFields := strings.Fields(string(authKeys))
- if len(actualFields) == 0 {
- t.Fatalf("failed authKeys: %v", authKeys)
- }
-
- // drop the comment
- expectedFields := strings.Fields(line)[0:2]
-
- if !reflect.DeepEqual(actualFields, expectedFields) {
- t.Errorf("got %v, expected %v", actualFields, expectedFields)
- }
-
- actPub, _, _, _, err := ParseAuthorizedKey([]byte(line))
- if err != nil {
- t.Fatalf("cannot parse %v: %v", line, err)
- }
- if !reflect.DeepEqual(actPub, pub) {
- t.Errorf("got %v, expected %v", actPub, pub)
- }
-}
-
-type authResult struct {
- pubKey PublicKey
- options []string
- comments string
- rest string
- ok bool
-}
-
-func testAuthorizedKeys(t *testing.T, authKeys []byte, expected []authResult) {
- rest := authKeys
- var values []authResult
- for len(rest) > 0 {
- var r authResult
- var err error
- r.pubKey, r.comments, r.options, rest, err = ParseAuthorizedKey(rest)
- r.ok = (err == nil)
- t.Log(err)
- r.rest = string(rest)
- values = append(values, r)
- }
-
- if !reflect.DeepEqual(values, expected) {
- t.Errorf("got %#v, expected %#v", values, expected)
- }
-}
-
-func TestAuthorizedKeyBasic(t *testing.T) {
- pub, pubSerialized := getTestKey()
- line := "ssh-rsa " + pubSerialized + " user@host"
- testAuthorizedKeys(t, []byte(line),
- []authResult{
- {pub, nil, "user@host", "", true},
- })
-}
-
-func TestAuth(t *testing.T) {
- pub, pubSerialized := getTestKey()
- authWithOptions := []string{
- `# comments to ignore before any keys...`,
- ``,
- `env="HOME=/home/root",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`,
- `# comments to ignore, along with a blank line`,
- ``,
- `env="HOME=/home/root2" ssh-rsa ` + pubSerialized + ` user2@host2`,
- ``,
- `# more comments, plus a invalid entry`,
- `ssh-rsa data-that-will-not-parse user@host3`,
- }
- for _, eol := range []string{"\n", "\r\n"} {
- authOptions := strings.Join(authWithOptions, eol)
- rest2 := strings.Join(authWithOptions[3:], eol)
- rest3 := strings.Join(authWithOptions[6:], eol)
- testAuthorizedKeys(t, []byte(authOptions), []authResult{
- {pub, []string{`env="HOME=/home/root"`, "no-port-forwarding"}, "user@host", rest2, true},
- {pub, []string{`env="HOME=/home/root2"`}, "user2@host2", rest3, true},
- {nil, nil, "", "", false},
- })
- }
-}
-
-func TestAuthWithQuotedSpaceInEnv(t *testing.T) {
- pub, pubSerialized := getTestKey()
- authWithQuotedSpaceInEnv := []byte(`env="HOME=/home/root dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`)
- testAuthorizedKeys(t, []byte(authWithQuotedSpaceInEnv), []authResult{
- {pub, []string{`env="HOME=/home/root dir"`, "no-port-forwarding"}, "user@host", "", true},
- })
-}
-
-func TestAuthWithQuotedCommaInEnv(t *testing.T) {
- pub, pubSerialized := getTestKey()
- authWithQuotedCommaInEnv := []byte(`env="HOME=/home/root,dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`)
- testAuthorizedKeys(t, []byte(authWithQuotedCommaInEnv), []authResult{
- {pub, []string{`env="HOME=/home/root,dir"`, "no-port-forwarding"}, "user@host", "", true},
- })
-}
-
-func TestAuthWithQuotedQuoteInEnv(t *testing.T) {
- pub, pubSerialized := getTestKey()
- authWithQuotedQuoteInEnv := []byte(`env="HOME=/home/\"root dir",no-port-forwarding` + "\t" + `ssh-rsa` + "\t" + pubSerialized + ` user@host`)
- authWithDoubleQuotedQuote := []byte(`no-port-forwarding,env="HOME=/home/ \"root dir\"" ssh-rsa ` + pubSerialized + "\t" + `user@host`)
- testAuthorizedKeys(t, []byte(authWithQuotedQuoteInEnv), []authResult{
- {pub, []string{`env="HOME=/home/\"root dir"`, "no-port-forwarding"}, "user@host", "", true},
- })
-
- testAuthorizedKeys(t, []byte(authWithDoubleQuotedQuote), []authResult{
- {pub, []string{"no-port-forwarding", `env="HOME=/home/ \"root dir\""`}, "user@host", "", true},
- })
-}
-
-func TestAuthWithInvalidSpace(t *testing.T) {
- _, pubSerialized := getTestKey()
- authWithInvalidSpace := []byte(`env="HOME=/home/root dir", no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host
-#more to follow but still no valid keys`)
- testAuthorizedKeys(t, []byte(authWithInvalidSpace), []authResult{
- {nil, nil, "", "", false},
- })
-}
-
-func TestAuthWithMissingQuote(t *testing.T) {
- pub, pubSerialized := getTestKey()
- authWithMissingQuote := []byte(`env="HOME=/home/root,no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host
-env="HOME=/home/root",shared-control ssh-rsa ` + pubSerialized + ` user@host`)
-
- testAuthorizedKeys(t, []byte(authWithMissingQuote), []authResult{
- {pub, []string{`env="HOME=/home/root"`, `shared-control`}, "user@host", "", true},
- })
-}
-
-func TestInvalidEntry(t *testing.T) {
- authInvalid := []byte(`ssh-rsa`)
- _, _, _, _, err := ParseAuthorizedKey(authInvalid)
- if err == nil {
- t.Errorf("got valid entry for %q", authInvalid)
- }
-}
-
-var knownHostsParseTests = []struct {
- input string
- err string
-
- marker string
- comment string
- hosts []string
- rest string
-}{
- {
- "",
- "EOF",
-
- "", "", nil, "",
- },
- {
- "# Just a comment",
- "EOF",
-
- "", "", nil, "",
- },
- {
- " \t ",
- "EOF",
-
- "", "", nil, "",
- },
- {
- "localhost ssh-rsa {RSAPUB}",
- "",
-
- "", "", []string{"localhost"}, "",
- },
- {
- "localhost\tssh-rsa {RSAPUB}",
- "",
-
- "", "", []string{"localhost"}, "",
- },
- {
- "localhost\tssh-rsa {RSAPUB}\tcomment comment",
- "",
-
- "", "comment comment", []string{"localhost"}, "",
- },
- {
- "localhost\tssh-rsa {RSAPUB}\tcomment comment\n",
- "",
-
- "", "comment comment", []string{"localhost"}, "",
- },
- {
- "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\n",
- "",
-
- "", "comment comment", []string{"localhost"}, "",
- },
- {
- "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\nnext line",
- "",
-
- "", "comment comment", []string{"localhost"}, "next line",
- },
- {
- "localhost,[host2:123]\tssh-rsa {RSAPUB}\tcomment comment",
- "",
-
- "", "comment comment", []string{"localhost", "[host2:123]"}, "",
- },
- {
- "@marker \tlocalhost,[host2:123]\tssh-rsa {RSAPUB}",
- "",
-
- "marker", "", []string{"localhost", "[host2:123]"}, "",
- },
- {
- "@marker \tlocalhost,[host2:123]\tssh-rsa aabbccdd",
- "short read",
-
- "", "", nil, "",
- },
-}
-
-func TestKnownHostsParsing(t *testing.T) {
- rsaPub, rsaPubSerialized := getTestKey()
-
- for i, test := range knownHostsParseTests {
- var expectedKey PublicKey
- const rsaKeyToken = "{RSAPUB}"
-
- input := test.input
- if strings.Contains(input, rsaKeyToken) {
- expectedKey = rsaPub
- input = strings.Replace(test.input, rsaKeyToken, rsaPubSerialized, -1)
- }
-
- marker, hosts, pubKey, comment, rest, err := ParseKnownHosts([]byte(input))
- if err != nil {
- if len(test.err) == 0 {
- t.Errorf("#%d: unexpectedly failed with %q", i, err)
- } else if !strings.Contains(err.Error(), test.err) {
- t.Errorf("#%d: expected error containing %q, but got %q", i, test.err, err)
- }
- continue
- } else if len(test.err) != 0 {
- t.Errorf("#%d: succeeded but expected error including %q", i, test.err)
- continue
- }
-
- if !reflect.DeepEqual(expectedKey, pubKey) {
- t.Errorf("#%d: expected key %#v, but got %#v", i, expectedKey, pubKey)
- }
-
- if marker != test.marker {
- t.Errorf("#%d: expected marker %q, but got %q", i, test.marker, marker)
- }
-
- if comment != test.comment {
- t.Errorf("#%d: expected comment %q, but got %q", i, test.comment, comment)
- }
-
- if !reflect.DeepEqual(test.hosts, hosts) {
- t.Errorf("#%d: expected hosts %#v, but got %#v", i, test.hosts, hosts)
- }
-
- if rest := string(rest); rest != test.rest {
- t.Errorf("#%d: expected remaining input to be %q, but got %q", i, test.rest, rest)
- }
- }
-}
-
-func TestFingerprintLegacyMD5(t *testing.T) {
- pub, _ := getTestKey()
- fingerprint := FingerprintLegacyMD5(pub)
- want := "fb:61:6d:1a:e3:f0:95:45:3c:a0:79:be:4a:93:63:66" // ssh-keygen -lf -E md5 rsa
- if fingerprint != want {
- t.Errorf("got fingerprint %q want %q", fingerprint, want)
- }
-}
-
-func TestFingerprintSHA256(t *testing.T) {
- pub, _ := getTestKey()
- fingerprint := FingerprintSHA256(pub)
- want := "SHA256:Anr3LjZK8YVpjrxu79myrW9Hrb/wpcMNpVvTq/RcBm8" // ssh-keygen -lf rsa
- if fingerprint != want {
- t.Errorf("got fingerprint %q want %q", fingerprint, want)
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go
deleted file mode 100644
index 07744ad..0000000
--- a/vendor/golang.org/x/crypto/ssh/mac.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-// Message authentication support
-
-import (
- "crypto/hmac"
- "crypto/sha1"
- "crypto/sha256"
- "hash"
-)
-
-type macMode struct {
- keySize int
- new func(key []byte) hash.Hash
-}
-
-// truncatingMAC wraps around a hash.Hash and truncates the output digest to
-// a given size.
-type truncatingMAC struct {
- length int
- hmac hash.Hash
-}
-
-func (t truncatingMAC) Write(data []byte) (int, error) {
- return t.hmac.Write(data)
-}
-
-func (t truncatingMAC) Sum(in []byte) []byte {
- out := t.hmac.Sum(in)
- return out[:len(in)+t.length]
-}
-
-func (t truncatingMAC) Reset() {
- t.hmac.Reset()
-}
-
-func (t truncatingMAC) Size() int {
- return t.length
-}
-
-func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
-
-var macModes = map[string]*macMode{
- "hmac-sha2-256": {32, func(key []byte) hash.Hash {
- return hmac.New(sha256.New, key)
- }},
- "hmac-sha1": {20, func(key []byte) hash.Hash {
- return hmac.New(sha1.New, key)
- }},
- "hmac-sha1-96": {20, func(key []byte) hash.Hash {
- return truncatingMAC{12, hmac.New(sha1.New, key)}
- }},
-}
diff --git a/vendor/golang.org/x/crypto/ssh/mempipe_test.go b/vendor/golang.org/x/crypto/ssh/mempipe_test.go
deleted file mode 100644
index 8697cd6..0000000
--- a/vendor/golang.org/x/crypto/ssh/mempipe_test.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "io"
- "sync"
- "testing"
-)
-
-// An in-memory packetConn. It is safe to call Close and writePacket
-// from different goroutines.
-type memTransport struct {
- eof bool
- pending [][]byte
- write *memTransport
- sync.Mutex
- *sync.Cond
-}
-
-func (t *memTransport) readPacket() ([]byte, error) {
- t.Lock()
- defer t.Unlock()
- for {
- if len(t.pending) > 0 {
- r := t.pending[0]
- t.pending = t.pending[1:]
- return r, nil
- }
- if t.eof {
- return nil, io.EOF
- }
- t.Cond.Wait()
- }
-}
-
-func (t *memTransport) closeSelf() error {
- t.Lock()
- defer t.Unlock()
- if t.eof {
- return io.EOF
- }
- t.eof = true
- t.Cond.Broadcast()
- return nil
-}
-
-func (t *memTransport) Close() error {
- err := t.write.closeSelf()
- t.closeSelf()
- return err
-}
-
-func (t *memTransport) writePacket(p []byte) error {
- t.write.Lock()
- defer t.write.Unlock()
- if t.write.eof {
- return io.EOF
- }
- c := make([]byte, len(p))
- copy(c, p)
- t.write.pending = append(t.write.pending, c)
- t.write.Cond.Signal()
- return nil
-}
-
-func memPipe() (a, b packetConn) {
- t1 := memTransport{}
- t2 := memTransport{}
- t1.write = &t2
- t2.write = &t1
- t1.Cond = sync.NewCond(&t1.Mutex)
- t2.Cond = sync.NewCond(&t2.Mutex)
- return &t1, &t2
-}
-
-func TestMemPipe(t *testing.T) {
- a, b := memPipe()
- if err := a.writePacket([]byte{42}); err != nil {
- t.Fatalf("writePacket: %v", err)
- }
- if err := a.Close(); err != nil {
- t.Fatal("Close: ", err)
- }
- p, err := b.readPacket()
- if err != nil {
- t.Fatal("readPacket: ", err)
- }
- if len(p) != 1 || p[0] != 42 {
- t.Fatalf("got %v, want {42}", p)
- }
- p, err = b.readPacket()
- if err != io.EOF {
- t.Fatalf("got %v, %v, want EOF", p, err)
- }
-}
-
-func TestDoubleClose(t *testing.T) {
- a, _ := memPipe()
- err := a.Close()
- if err != nil {
- t.Errorf("Close: %v", err)
- }
- err = a.Close()
- if err != io.EOF {
- t.Errorf("expect EOF on double close.")
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go
deleted file mode 100644
index e6ecd3a..0000000
--- a/vendor/golang.org/x/crypto/ssh/messages.go
+++ /dev/null
@@ -1,758 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "math/big"
- "reflect"
- "strconv"
- "strings"
-)
-
-// These are SSH message type numbers. They are scattered around several
-// documents but many were taken from [SSH-PARAMETERS].
-const (
- msgIgnore = 2
- msgUnimplemented = 3
- msgDebug = 4
- msgNewKeys = 21
-
- // Standard authentication messages
- msgUserAuthSuccess = 52
- msgUserAuthBanner = 53
-)
-
-// SSH messages:
-//
-// These structures mirror the wire format of the corresponding SSH messages.
-// They are marshaled using reflection with the marshal and unmarshal functions
-// in this file. The only wrinkle is that a final member of type []byte with a
-// ssh tag of "rest" receives the remainder of a packet when unmarshaling.
-
-// See RFC 4253, section 11.1.
-const msgDisconnect = 1
-
-// disconnectMsg is the message that signals a disconnect. It is also
-// the error type returned from mux.Wait()
-type disconnectMsg struct {
- Reason uint32 `sshtype:"1"`
- Message string
- Language string
-}
-
-func (d *disconnectMsg) Error() string {
- return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message)
-}
-
-// See RFC 4253, section 7.1.
-const msgKexInit = 20
-
-type kexInitMsg struct {
- Cookie [16]byte `sshtype:"20"`
- KexAlgos []string
- ServerHostKeyAlgos []string
- CiphersClientServer []string
- CiphersServerClient []string
- MACsClientServer []string
- MACsServerClient []string
- CompressionClientServer []string
- CompressionServerClient []string
- LanguagesClientServer []string
- LanguagesServerClient []string
- FirstKexFollows bool
- Reserved uint32
-}
-
-// See RFC 4253, section 8.
-
-// Diffie-Helman
-const msgKexDHInit = 30
-
-type kexDHInitMsg struct {
- X *big.Int `sshtype:"30"`
-}
-
-const msgKexECDHInit = 30
-
-type kexECDHInitMsg struct {
- ClientPubKey []byte `sshtype:"30"`
-}
-
-const msgKexECDHReply = 31
-
-type kexECDHReplyMsg struct {
- HostKey []byte `sshtype:"31"`
- EphemeralPubKey []byte
- Signature []byte
-}
-
-const msgKexDHReply = 31
-
-type kexDHReplyMsg struct {
- HostKey []byte `sshtype:"31"`
- Y *big.Int
- Signature []byte
-}
-
-// See RFC 4253, section 10.
-const msgServiceRequest = 5
-
-type serviceRequestMsg struct {
- Service string `sshtype:"5"`
-}
-
-// See RFC 4253, section 10.
-const msgServiceAccept = 6
-
-type serviceAcceptMsg struct {
- Service string `sshtype:"6"`
-}
-
-// See RFC 4252, section 5.
-const msgUserAuthRequest = 50
-
-type userAuthRequestMsg struct {
- User string `sshtype:"50"`
- Service string
- Method string
- Payload []byte `ssh:"rest"`
-}
-
-// Used for debug printouts of packets.
-type userAuthSuccessMsg struct {
-}
-
-// See RFC 4252, section 5.1
-const msgUserAuthFailure = 51
-
-type userAuthFailureMsg struct {
- Methods []string `sshtype:"51"`
- PartialSuccess bool
-}
-
-// See RFC 4256, section 3.2
-const msgUserAuthInfoRequest = 60
-const msgUserAuthInfoResponse = 61
-
-type userAuthInfoRequestMsg struct {
- User string `sshtype:"60"`
- Instruction string
- DeprecatedLanguage string
- NumPrompts uint32
- Prompts []byte `ssh:"rest"`
-}
-
-// See RFC 4254, section 5.1.
-const msgChannelOpen = 90
-
-type channelOpenMsg struct {
- ChanType string `sshtype:"90"`
- PeersId uint32
- PeersWindow uint32
- MaxPacketSize uint32
- TypeSpecificData []byte `ssh:"rest"`
-}
-
-const msgChannelExtendedData = 95
-const msgChannelData = 94
-
-// Used for debug print outs of packets.
-type channelDataMsg struct {
- PeersId uint32 `sshtype:"94"`
- Length uint32
- Rest []byte `ssh:"rest"`
-}
-
-// See RFC 4254, section 5.1.
-const msgChannelOpenConfirm = 91
-
-type channelOpenConfirmMsg struct {
- PeersId uint32 `sshtype:"91"`
- MyId uint32
- MyWindow uint32
- MaxPacketSize uint32
- TypeSpecificData []byte `ssh:"rest"`
-}
-
-// See RFC 4254, section 5.1.
-const msgChannelOpenFailure = 92
-
-type channelOpenFailureMsg struct {
- PeersId uint32 `sshtype:"92"`
- Reason RejectionReason
- Message string
- Language string
-}
-
-const msgChannelRequest = 98
-
-type channelRequestMsg struct {
- PeersId uint32 `sshtype:"98"`
- Request string
- WantReply bool
- RequestSpecificData []byte `ssh:"rest"`
-}
-
-// See RFC 4254, section 5.4.
-const msgChannelSuccess = 99
-
-type channelRequestSuccessMsg struct {
- PeersId uint32 `sshtype:"99"`
-}
-
-// See RFC 4254, section 5.4.
-const msgChannelFailure = 100
-
-type channelRequestFailureMsg struct {
- PeersId uint32 `sshtype:"100"`
-}
-
-// See RFC 4254, section 5.3
-const msgChannelClose = 97
-
-type channelCloseMsg struct {
- PeersId uint32 `sshtype:"97"`
-}
-
-// See RFC 4254, section 5.3
-const msgChannelEOF = 96
-
-type channelEOFMsg struct {
- PeersId uint32 `sshtype:"96"`
-}
-
-// See RFC 4254, section 4
-const msgGlobalRequest = 80
-
-type globalRequestMsg struct {
- Type string `sshtype:"80"`
- WantReply bool
- Data []byte `ssh:"rest"`
-}
-
-// See RFC 4254, section 4
-const msgRequestSuccess = 81
-
-type globalRequestSuccessMsg struct {
- Data []byte `ssh:"rest" sshtype:"81"`
-}
-
-// See RFC 4254, section 4
-const msgRequestFailure = 82
-
-type globalRequestFailureMsg struct {
- Data []byte `ssh:"rest" sshtype:"82"`
-}
-
-// See RFC 4254, section 5.2
-const msgChannelWindowAdjust = 93
-
-type windowAdjustMsg struct {
- PeersId uint32 `sshtype:"93"`
- AdditionalBytes uint32
-}
-
-// See RFC 4252, section 7
-const msgUserAuthPubKeyOk = 60
-
-type userAuthPubKeyOkMsg struct {
- Algo string `sshtype:"60"`
- PubKey []byte
-}
-
-// typeTags returns the possible type bytes for the given reflect.Type, which
-// should be a struct. The possible values are separated by a '|' character.
-func typeTags(structType reflect.Type) (tags []byte) {
- tagStr := structType.Field(0).Tag.Get("sshtype")
-
- for _, tag := range strings.Split(tagStr, "|") {
- i, err := strconv.Atoi(tag)
- if err == nil {
- tags = append(tags, byte(i))
- }
- }
-
- return tags
-}
-
-func fieldError(t reflect.Type, field int, problem string) error {
- if problem != "" {
- problem = ": " + problem
- }
- return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem)
-}
-
-var errShortRead = errors.New("ssh: short read")
-
-// Unmarshal parses data in SSH wire format into a structure. The out
-// argument should be a pointer to struct. If the first member of the
-// struct has the "sshtype" tag set to a '|'-separated set of numbers
-// in decimal, the packet must start with one of those numbers. In
-// case of error, Unmarshal returns a ParseError or
-// UnexpectedMessageError.
-func Unmarshal(data []byte, out interface{}) error {
- v := reflect.ValueOf(out).Elem()
- structType := v.Type()
- expectedTypes := typeTags(structType)
-
- var expectedType byte
- if len(expectedTypes) > 0 {
- expectedType = expectedTypes[0]
- }
-
- if len(data) == 0 {
- return parseError(expectedType)
- }
-
- if len(expectedTypes) > 0 {
- goodType := false
- for _, e := range expectedTypes {
- if e > 0 && data[0] == e {
- goodType = true
- break
- }
- }
- if !goodType {
- return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes)
- }
- data = data[1:]
- }
-
- var ok bool
- for i := 0; i < v.NumField(); i++ {
- field := v.Field(i)
- t := field.Type()
- switch t.Kind() {
- case reflect.Bool:
- if len(data) < 1 {
- return errShortRead
- }
- field.SetBool(data[0] != 0)
- data = data[1:]
- case reflect.Array:
- if t.Elem().Kind() != reflect.Uint8 {
- return fieldError(structType, i, "array of unsupported type")
- }
- if len(data) < t.Len() {
- return errShortRead
- }
- for j, n := 0, t.Len(); j < n; j++ {
- field.Index(j).Set(reflect.ValueOf(data[j]))
- }
- data = data[t.Len():]
- case reflect.Uint64:
- var u64 uint64
- if u64, data, ok = parseUint64(data); !ok {
- return errShortRead
- }
- field.SetUint(u64)
- case reflect.Uint32:
- var u32 uint32
- if u32, data, ok = parseUint32(data); !ok {
- return errShortRead
- }
- field.SetUint(uint64(u32))
- case reflect.Uint8:
- if len(data) < 1 {
- return errShortRead
- }
- field.SetUint(uint64(data[0]))
- data = data[1:]
- case reflect.String:
- var s []byte
- if s, data, ok = parseString(data); !ok {
- return fieldError(structType, i, "")
- }
- field.SetString(string(s))
- case reflect.Slice:
- switch t.Elem().Kind() {
- case reflect.Uint8:
- if structType.Field(i).Tag.Get("ssh") == "rest" {
- field.Set(reflect.ValueOf(data))
- data = nil
- } else {
- var s []byte
- if s, data, ok = parseString(data); !ok {
- return errShortRead
- }
- field.Set(reflect.ValueOf(s))
- }
- case reflect.String:
- var nl []string
- if nl, data, ok = parseNameList(data); !ok {
- return errShortRead
- }
- field.Set(reflect.ValueOf(nl))
- default:
- return fieldError(structType, i, "slice of unsupported type")
- }
- case reflect.Ptr:
- if t == bigIntType {
- var n *big.Int
- if n, data, ok = parseInt(data); !ok {
- return errShortRead
- }
- field.Set(reflect.ValueOf(n))
- } else {
- return fieldError(structType, i, "pointer to unsupported type")
- }
- default:
- return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t))
- }
- }
-
- if len(data) != 0 {
- return parseError(expectedType)
- }
-
- return nil
-}
-
-// Marshal serializes the message in msg to SSH wire format. The msg
-// argument should be a struct or pointer to struct. If the first
-// member has the "sshtype" tag set to a number in decimal, that
-// number is prepended to the result. If the last of member has the
-// "ssh" tag set to "rest", its contents are appended to the output.
-func Marshal(msg interface{}) []byte {
- out := make([]byte, 0, 64)
- return marshalStruct(out, msg)
-}
-
-func marshalStruct(out []byte, msg interface{}) []byte {
- v := reflect.Indirect(reflect.ValueOf(msg))
- msgTypes := typeTags(v.Type())
- if len(msgTypes) > 0 {
- out = append(out, msgTypes[0])
- }
-
- for i, n := 0, v.NumField(); i < n; i++ {
- field := v.Field(i)
- switch t := field.Type(); t.Kind() {
- case reflect.Bool:
- var v uint8
- if field.Bool() {
- v = 1
- }
- out = append(out, v)
- case reflect.Array:
- if t.Elem().Kind() != reflect.Uint8 {
- panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface()))
- }
- for j, l := 0, t.Len(); j < l; j++ {
- out = append(out, uint8(field.Index(j).Uint()))
- }
- case reflect.Uint32:
- out = appendU32(out, uint32(field.Uint()))
- case reflect.Uint64:
- out = appendU64(out, uint64(field.Uint()))
- case reflect.Uint8:
- out = append(out, uint8(field.Uint()))
- case reflect.String:
- s := field.String()
- out = appendInt(out, len(s))
- out = append(out, s...)
- case reflect.Slice:
- switch t.Elem().Kind() {
- case reflect.Uint8:
- if v.Type().Field(i).Tag.Get("ssh") != "rest" {
- out = appendInt(out, field.Len())
- }
- out = append(out, field.Bytes()...)
- case reflect.String:
- offset := len(out)
- out = appendU32(out, 0)
- if n := field.Len(); n > 0 {
- for j := 0; j < n; j++ {
- f := field.Index(j)
- if j != 0 {
- out = append(out, ',')
- }
- out = append(out, f.String()...)
- }
- // overwrite length value
- binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4))
- }
- default:
- panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface()))
- }
- case reflect.Ptr:
- if t == bigIntType {
- var n *big.Int
- nValue := reflect.ValueOf(&n)
- nValue.Elem().Set(field)
- needed := intLength(n)
- oldLength := len(out)
-
- if cap(out)-len(out) < needed {
- newOut := make([]byte, len(out), 2*(len(out)+needed))
- copy(newOut, out)
- out = newOut
- }
- out = out[:oldLength+needed]
- marshalInt(out[oldLength:], n)
- } else {
- panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface()))
- }
- }
- }
-
- return out
-}
-
-var bigOne = big.NewInt(1)
-
-func parseString(in []byte) (out, rest []byte, ok bool) {
- if len(in) < 4 {
- return
- }
- length := binary.BigEndian.Uint32(in)
- in = in[4:]
- if uint32(len(in)) < length {
- return
- }
- out = in[:length]
- rest = in[length:]
- ok = true
- return
-}
-
-var (
- comma = []byte{','}
- emptyNameList = []string{}
-)
-
-func parseNameList(in []byte) (out []string, rest []byte, ok bool) {
- contents, rest, ok := parseString(in)
- if !ok {
- return
- }
- if len(contents) == 0 {
- out = emptyNameList
- return
- }
- parts := bytes.Split(contents, comma)
- out = make([]string, len(parts))
- for i, part := range parts {
- out[i] = string(part)
- }
- return
-}
-
-func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) {
- contents, rest, ok := parseString(in)
- if !ok {
- return
- }
- out = new(big.Int)
-
- if len(contents) > 0 && contents[0]&0x80 == 0x80 {
- // This is a negative number
- notBytes := make([]byte, len(contents))
- for i := range notBytes {
- notBytes[i] = ^contents[i]
- }
- out.SetBytes(notBytes)
- out.Add(out, bigOne)
- out.Neg(out)
- } else {
- // Positive number
- out.SetBytes(contents)
- }
- ok = true
- return
-}
-
-func parseUint32(in []byte) (uint32, []byte, bool) {
- if len(in) < 4 {
- return 0, nil, false
- }
- return binary.BigEndian.Uint32(in), in[4:], true
-}
-
-func parseUint64(in []byte) (uint64, []byte, bool) {
- if len(in) < 8 {
- return 0, nil, false
- }
- return binary.BigEndian.Uint64(in), in[8:], true
-}
-
-func intLength(n *big.Int) int {
- length := 4 /* length bytes */
- if n.Sign() < 0 {
- nMinus1 := new(big.Int).Neg(n)
- nMinus1.Sub(nMinus1, bigOne)
- bitLen := nMinus1.BitLen()
- if bitLen%8 == 0 {
- // The number will need 0xff padding
- length++
- }
- length += (bitLen + 7) / 8
- } else if n.Sign() == 0 {
- // A zero is the zero length string
- } else {
- bitLen := n.BitLen()
- if bitLen%8 == 0 {
- // The number will need 0x00 padding
- length++
- }
- length += (bitLen + 7) / 8
- }
-
- return length
-}
-
-func marshalUint32(to []byte, n uint32) []byte {
- binary.BigEndian.PutUint32(to, n)
- return to[4:]
-}
-
-func marshalUint64(to []byte, n uint64) []byte {
- binary.BigEndian.PutUint64(to, n)
- return to[8:]
-}
-
-func marshalInt(to []byte, n *big.Int) []byte {
- lengthBytes := to
- to = to[4:]
- length := 0
-
- if n.Sign() < 0 {
- // A negative number has to be converted to two's-complement
- // form. So we'll subtract 1 and invert. If the
- // most-significant-bit isn't set then we'll need to pad the
- // beginning with 0xff in order to keep the number negative.
- nMinus1 := new(big.Int).Neg(n)
- nMinus1.Sub(nMinus1, bigOne)
- bytes := nMinus1.Bytes()
- for i := range bytes {
- bytes[i] ^= 0xff
- }
- if len(bytes) == 0 || bytes[0]&0x80 == 0 {
- to[0] = 0xff
- to = to[1:]
- length++
- }
- nBytes := copy(to, bytes)
- to = to[nBytes:]
- length += nBytes
- } else if n.Sign() == 0 {
- // A zero is the zero length string
- } else {
- bytes := n.Bytes()
- if len(bytes) > 0 && bytes[0]&0x80 != 0 {
- // We'll have to pad this with a 0x00 in order to
- // stop it looking like a negative number.
- to[0] = 0
- to = to[1:]
- length++
- }
- nBytes := copy(to, bytes)
- to = to[nBytes:]
- length += nBytes
- }
-
- lengthBytes[0] = byte(length >> 24)
- lengthBytes[1] = byte(length >> 16)
- lengthBytes[2] = byte(length >> 8)
- lengthBytes[3] = byte(length)
- return to
-}
-
-func writeInt(w io.Writer, n *big.Int) {
- length := intLength(n)
- buf := make([]byte, length)
- marshalInt(buf, n)
- w.Write(buf)
-}
-
-func writeString(w io.Writer, s []byte) {
- var lengthBytes [4]byte
- lengthBytes[0] = byte(len(s) >> 24)
- lengthBytes[1] = byte(len(s) >> 16)
- lengthBytes[2] = byte(len(s) >> 8)
- lengthBytes[3] = byte(len(s))
- w.Write(lengthBytes[:])
- w.Write(s)
-}
-
-func stringLength(n int) int {
- return 4 + n
-}
-
-func marshalString(to []byte, s []byte) []byte {
- to[0] = byte(len(s) >> 24)
- to[1] = byte(len(s) >> 16)
- to[2] = byte(len(s) >> 8)
- to[3] = byte(len(s))
- to = to[4:]
- copy(to, s)
- return to[len(s):]
-}
-
-var bigIntType = reflect.TypeOf((*big.Int)(nil))
-
-// Decode a packet into its corresponding message.
-func decode(packet []byte) (interface{}, error) {
- var msg interface{}
- switch packet[0] {
- case msgDisconnect:
- msg = new(disconnectMsg)
- case msgServiceRequest:
- msg = new(serviceRequestMsg)
- case msgServiceAccept:
- msg = new(serviceAcceptMsg)
- case msgKexInit:
- msg = new(kexInitMsg)
- case msgKexDHInit:
- msg = new(kexDHInitMsg)
- case msgKexDHReply:
- msg = new(kexDHReplyMsg)
- case msgUserAuthRequest:
- msg = new(userAuthRequestMsg)
- case msgUserAuthSuccess:
- return new(userAuthSuccessMsg), nil
- case msgUserAuthFailure:
- msg = new(userAuthFailureMsg)
- case msgUserAuthPubKeyOk:
- msg = new(userAuthPubKeyOkMsg)
- case msgGlobalRequest:
- msg = new(globalRequestMsg)
- case msgRequestSuccess:
- msg = new(globalRequestSuccessMsg)
- case msgRequestFailure:
- msg = new(globalRequestFailureMsg)
- case msgChannelOpen:
- msg = new(channelOpenMsg)
- case msgChannelData:
- msg = new(channelDataMsg)
- case msgChannelOpenConfirm:
- msg = new(channelOpenConfirmMsg)
- case msgChannelOpenFailure:
- msg = new(channelOpenFailureMsg)
- case msgChannelWindowAdjust:
- msg = new(windowAdjustMsg)
- case msgChannelEOF:
- msg = new(channelEOFMsg)
- case msgChannelClose:
- msg = new(channelCloseMsg)
- case msgChannelRequest:
- msg = new(channelRequestMsg)
- case msgChannelSuccess:
- msg = new(channelRequestSuccessMsg)
- case msgChannelFailure:
- msg = new(channelRequestFailureMsg)
- default:
- return nil, unexpectedMessageError(0, packet[0])
- }
- if err := Unmarshal(packet, msg); err != nil {
- return nil, err
- }
- return msg, nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/messages_test.go b/vendor/golang.org/x/crypto/ssh/messages_test.go
deleted file mode 100644
index e790764..0000000
--- a/vendor/golang.org/x/crypto/ssh/messages_test.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "bytes"
- "math/big"
- "math/rand"
- "reflect"
- "testing"
- "testing/quick"
-)
-
-var intLengthTests = []struct {
- val, length int
-}{
- {0, 4 + 0},
- {1, 4 + 1},
- {127, 4 + 1},
- {128, 4 + 2},
- {-1, 4 + 1},
-}
-
-func TestIntLength(t *testing.T) {
- for _, test := range intLengthTests {
- v := new(big.Int).SetInt64(int64(test.val))
- length := intLength(v)
- if length != test.length {
- t.Errorf("For %d, got length %d but expected %d", test.val, length, test.length)
- }
- }
-}
-
-type msgAllTypes struct {
- Bool bool `sshtype:"21"`
- Array [16]byte
- Uint64 uint64
- Uint32 uint32
- Uint8 uint8
- String string
- Strings []string
- Bytes []byte
- Int *big.Int
- Rest []byte `ssh:"rest"`
-}
-
-func (t *msgAllTypes) Generate(rand *rand.Rand, size int) reflect.Value {
- m := &msgAllTypes{}
- m.Bool = rand.Intn(2) == 1
- randomBytes(m.Array[:], rand)
- m.Uint64 = uint64(rand.Int63n(1<<63 - 1))
- m.Uint32 = uint32(rand.Intn((1 << 31) - 1))
- m.Uint8 = uint8(rand.Intn(1 << 8))
- m.String = string(m.Array[:])
- m.Strings = randomNameList(rand)
- m.Bytes = m.Array[:]
- m.Int = randomInt(rand)
- m.Rest = m.Array[:]
- return reflect.ValueOf(m)
-}
-
-func TestMarshalUnmarshal(t *testing.T) {
- rand := rand.New(rand.NewSource(0))
- iface := &msgAllTypes{}
- ty := reflect.ValueOf(iface).Type()
-
- n := 100
- if testing.Short() {
- n = 5
- }
- for j := 0; j < n; j++ {
- v, ok := quick.Value(ty, rand)
- if !ok {
- t.Errorf("failed to create value")
- break
- }
-
- m1 := v.Elem().Interface()
- m2 := iface
-
- marshaled := Marshal(m1)
- if err := Unmarshal(marshaled, m2); err != nil {
- t.Errorf("Unmarshal %#v: %s", m1, err)
- break
- }
-
- if !reflect.DeepEqual(v.Interface(), m2) {
- t.Errorf("got: %#v\nwant:%#v\n%x", m2, m1, marshaled)
- break
- }
- }
-}
-
-func TestUnmarshalEmptyPacket(t *testing.T) {
- var b []byte
- var m channelRequestSuccessMsg
- if err := Unmarshal(b, &m); err == nil {
- t.Fatalf("unmarshal of empty slice succeeded")
- }
-}
-
-func TestUnmarshalUnexpectedPacket(t *testing.T) {
- type S struct {
- I uint32 `sshtype:"43"`
- S string
- B bool
- }
-
- s := S{11, "hello", true}
- packet := Marshal(s)
- packet[0] = 42
- roundtrip := S{}
- err := Unmarshal(packet, &roundtrip)
- if err == nil {
- t.Fatal("expected error, not nil")
- }
-}
-
-func TestMarshalPtr(t *testing.T) {
- s := struct {
- S string
- }{"hello"}
-
- m1 := Marshal(s)
- m2 := Marshal(&s)
- if !bytes.Equal(m1, m2) {
- t.Errorf("got %q, want %q for marshaled pointer", m2, m1)
- }
-}
-
-func TestBareMarshalUnmarshal(t *testing.T) {
- type S struct {
- I uint32
- S string
- B bool
- }
-
- s := S{42, "hello", true}
- packet := Marshal(s)
- roundtrip := S{}
- Unmarshal(packet, &roundtrip)
-
- if !reflect.DeepEqual(s, roundtrip) {
- t.Errorf("got %#v, want %#v", roundtrip, s)
- }
-}
-
-func TestBareMarshal(t *testing.T) {
- type S2 struct {
- I uint32
- }
- s := S2{42}
- packet := Marshal(s)
- i, rest, ok := parseUint32(packet)
- if len(rest) > 0 || !ok {
- t.Errorf("parseInt(%q): parse error", packet)
- }
- if i != s.I {
- t.Errorf("got %d, want %d", i, s.I)
- }
-}
-
-func TestUnmarshalShortKexInitPacket(t *testing.T) {
- // This used to panic.
- // Issue 11348
- packet := []byte{0x14, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xff, 0xff, 0xff, 0xff}
- kim := &kexInitMsg{}
- if err := Unmarshal(packet, kim); err == nil {
- t.Error("truncated packet unmarshaled without error")
- }
-}
-
-func TestMarshalMultiTag(t *testing.T) {
- var res struct {
- A uint32 `sshtype:"1|2"`
- }
-
- good1 := struct {
- A uint32 `sshtype:"1"`
- }{
- 1,
- }
- good2 := struct {
- A uint32 `sshtype:"2"`
- }{
- 1,
- }
-
- if e := Unmarshal(Marshal(good1), &res); e != nil {
- t.Errorf("error unmarshaling multipart tag: %v", e)
- }
-
- if e := Unmarshal(Marshal(good2), &res); e != nil {
- t.Errorf("error unmarshaling multipart tag: %v", e)
- }
-
- bad1 := struct {
- A uint32 `sshtype:"3"`
- }{
- 1,
- }
- if e := Unmarshal(Marshal(bad1), &res); e == nil {
- t.Errorf("bad struct unmarshaled without error")
- }
-}
-
-func randomBytes(out []byte, rand *rand.Rand) {
- for i := 0; i < len(out); i++ {
- out[i] = byte(rand.Int31())
- }
-}
-
-func randomNameList(rand *rand.Rand) []string {
- ret := make([]string, rand.Int31()&15)
- for i := range ret {
- s := make([]byte, 1+(rand.Int31()&15))
- for j := range s {
- s[j] = 'a' + uint8(rand.Int31()&15)
- }
- ret[i] = string(s)
- }
- return ret
-}
-
-func randomInt(rand *rand.Rand) *big.Int {
- return new(big.Int).SetInt64(int64(int32(rand.Uint32())))
-}
-
-func (*kexInitMsg) Generate(rand *rand.Rand, size int) reflect.Value {
- ki := &kexInitMsg{}
- randomBytes(ki.Cookie[:], rand)
- ki.KexAlgos = randomNameList(rand)
- ki.ServerHostKeyAlgos = randomNameList(rand)
- ki.CiphersClientServer = randomNameList(rand)
- ki.CiphersServerClient = randomNameList(rand)
- ki.MACsClientServer = randomNameList(rand)
- ki.MACsServerClient = randomNameList(rand)
- ki.CompressionClientServer = randomNameList(rand)
- ki.CompressionServerClient = randomNameList(rand)
- ki.LanguagesClientServer = randomNameList(rand)
- ki.LanguagesServerClient = randomNameList(rand)
- if rand.Int31()&1 == 1 {
- ki.FirstKexFollows = true
- }
- return reflect.ValueOf(ki)
-}
-
-func (*kexDHInitMsg) Generate(rand *rand.Rand, size int) reflect.Value {
- dhi := &kexDHInitMsg{}
- dhi.X = randomInt(rand)
- return reflect.ValueOf(dhi)
-}
-
-var (
- _kexInitMsg = new(kexInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface()
- _kexDHInitMsg = new(kexDHInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface()
-
- _kexInit = Marshal(_kexInitMsg)
- _kexDHInit = Marshal(_kexDHInitMsg)
-)
-
-func BenchmarkMarshalKexInitMsg(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Marshal(_kexInitMsg)
- }
-}
-
-func BenchmarkUnmarshalKexInitMsg(b *testing.B) {
- m := new(kexInitMsg)
- for i := 0; i < b.N; i++ {
- Unmarshal(_kexInit, m)
- }
-}
-
-func BenchmarkMarshalKexDHInitMsg(b *testing.B) {
- for i := 0; i < b.N; i++ {
- Marshal(_kexDHInitMsg)
- }
-}
-
-func BenchmarkUnmarshalKexDHInitMsg(b *testing.B) {
- m := new(kexDHInitMsg)
- for i := 0; i < b.N; i++ {
- Unmarshal(_kexDHInit, m)
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go
deleted file mode 100644
index f3a3ddd..0000000
--- a/vendor/golang.org/x/crypto/ssh/mux.go
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "encoding/binary"
- "fmt"
- "io"
- "log"
- "sync"
- "sync/atomic"
-)
-
-// debugMux, if set, causes messages in the connection protocol to be
-// logged.
-const debugMux = false
-
-// chanList is a thread safe channel list.
-type chanList struct {
- // protects concurrent access to chans
- sync.Mutex
-
- // chans are indexed by the local id of the channel, which the
- // other side should send in the PeersId field.
- chans []*channel
-
- // This is a debugging aid: it offsets all IDs by this
- // amount. This helps distinguish otherwise identical
- // server/client muxes
- offset uint32
-}
-
-// Assigns a channel ID to the given channel.
-func (c *chanList) add(ch *channel) uint32 {
- c.Lock()
- defer c.Unlock()
- for i := range c.chans {
- if c.chans[i] == nil {
- c.chans[i] = ch
- return uint32(i) + c.offset
- }
- }
- c.chans = append(c.chans, ch)
- return uint32(len(c.chans)-1) + c.offset
-}
-
-// getChan returns the channel for the given ID.
-func (c *chanList) getChan(id uint32) *channel {
- id -= c.offset
-
- c.Lock()
- defer c.Unlock()
- if id < uint32(len(c.chans)) {
- return c.chans[id]
- }
- return nil
-}
-
-func (c *chanList) remove(id uint32) {
- id -= c.offset
- c.Lock()
- if id < uint32(len(c.chans)) {
- c.chans[id] = nil
- }
- c.Unlock()
-}
-
-// dropAll forgets all channels it knows, returning them in a slice.
-func (c *chanList) dropAll() []*channel {
- c.Lock()
- defer c.Unlock()
- var r []*channel
-
- for _, ch := range c.chans {
- if ch == nil {
- continue
- }
- r = append(r, ch)
- }
- c.chans = nil
- return r
-}
-
-// mux represents the state for the SSH connection protocol, which
-// multiplexes many channels onto a single packet transport.
-type mux struct {
- conn packetConn
- chanList chanList
-
- incomingChannels chan NewChannel
-
- globalSentMu sync.Mutex
- globalResponses chan interface{}
- incomingRequests chan *Request
-
- errCond *sync.Cond
- err error
-}
-
-// When debugging, each new chanList instantiation has a different
-// offset.
-var globalOff uint32
-
-func (m *mux) Wait() error {
- m.errCond.L.Lock()
- defer m.errCond.L.Unlock()
- for m.err == nil {
- m.errCond.Wait()
- }
- return m.err
-}
-
-// newMux returns a mux that runs over the given connection.
-func newMux(p packetConn) *mux {
- m := &mux{
- conn: p,
- incomingChannels: make(chan NewChannel, 16),
- globalResponses: make(chan interface{}, 1),
- incomingRequests: make(chan *Request, 16),
- errCond: newCond(),
- }
- if debugMux {
- m.chanList.offset = atomic.AddUint32(&globalOff, 1)
- }
-
- go m.loop()
- return m
-}
-
-func (m *mux) sendMessage(msg interface{}) error {
- p := Marshal(msg)
- if debugMux {
- log.Printf("send global(%d): %#v", m.chanList.offset, msg)
- }
- return m.conn.writePacket(p)
-}
-
-func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) {
- if wantReply {
- m.globalSentMu.Lock()
- defer m.globalSentMu.Unlock()
- }
-
- if err := m.sendMessage(globalRequestMsg{
- Type: name,
- WantReply: wantReply,
- Data: payload,
- }); err != nil {
- return false, nil, err
- }
-
- if !wantReply {
- return false, nil, nil
- }
-
- msg, ok := <-m.globalResponses
- if !ok {
- return false, nil, io.EOF
- }
- switch msg := msg.(type) {
- case *globalRequestFailureMsg:
- return false, msg.Data, nil
- case *globalRequestSuccessMsg:
- return true, msg.Data, nil
- default:
- return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg)
- }
-}
-
-// ackRequest must be called after processing a global request that
-// has WantReply set.
-func (m *mux) ackRequest(ok bool, data []byte) error {
- if ok {
- return m.sendMessage(globalRequestSuccessMsg{Data: data})
- }
- return m.sendMessage(globalRequestFailureMsg{Data: data})
-}
-
-func (m *mux) Close() error {
- return m.conn.Close()
-}
-
-// loop runs the connection machine. It will process packets until an
-// error is encountered. To synchronize on loop exit, use mux.Wait.
-func (m *mux) loop() {
- var err error
- for err == nil {
- err = m.onePacket()
- }
-
- for _, ch := range m.chanList.dropAll() {
- ch.close()
- }
-
- close(m.incomingChannels)
- close(m.incomingRequests)
- close(m.globalResponses)
-
- m.conn.Close()
-
- m.errCond.L.Lock()
- m.err = err
- m.errCond.Broadcast()
- m.errCond.L.Unlock()
-
- if debugMux {
- log.Println("loop exit", err)
- }
-}
-
-// onePacket reads and processes one packet.
-func (m *mux) onePacket() error {
- packet, err := m.conn.readPacket()
- if err != nil {
- return err
- }
-
- if debugMux {
- if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData {
- log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet))
- } else {
- p, _ := decode(packet)
- log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet))
- }
- }
-
- switch packet[0] {
- case msgChannelOpen:
- return m.handleChannelOpen(packet)
- case msgGlobalRequest, msgRequestSuccess, msgRequestFailure:
- return m.handleGlobalPacket(packet)
- }
-
- // assume a channel packet.
- if len(packet) < 5 {
- return parseError(packet[0])
- }
- id := binary.BigEndian.Uint32(packet[1:])
- ch := m.chanList.getChan(id)
- if ch == nil {
- return fmt.Errorf("ssh: invalid channel %d", id)
- }
-
- return ch.handlePacket(packet)
-}
-
-func (m *mux) handleGlobalPacket(packet []byte) error {
- msg, err := decode(packet)
- if err != nil {
- return err
- }
-
- switch msg := msg.(type) {
- case *globalRequestMsg:
- m.incomingRequests <- &Request{
- Type: msg.Type,
- WantReply: msg.WantReply,
- Payload: msg.Data,
- mux: m,
- }
- case *globalRequestSuccessMsg, *globalRequestFailureMsg:
- m.globalResponses <- msg
- default:
- panic(fmt.Sprintf("not a global message %#v", msg))
- }
-
- return nil
-}
-
-// handleChannelOpen schedules a channel to be Accept()ed.
-func (m *mux) handleChannelOpen(packet []byte) error {
- var msg channelOpenMsg
- if err := Unmarshal(packet, &msg); err != nil {
- return err
- }
-
- if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
- failMsg := channelOpenFailureMsg{
- PeersId: msg.PeersId,
- Reason: ConnectionFailed,
- Message: "invalid request",
- Language: "en_US.UTF-8",
- }
- return m.sendMessage(failMsg)
- }
-
- c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData)
- c.remoteId = msg.PeersId
- c.maxRemotePayload = msg.MaxPacketSize
- c.remoteWin.add(msg.PeersWindow)
- m.incomingChannels <- c
- return nil
-}
-
-func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) {
- ch, err := m.openChannel(chanType, extra)
- if err != nil {
- return nil, nil, err
- }
-
- return ch, ch.incomingRequests, nil
-}
-
-func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) {
- ch := m.newChannel(chanType, channelOutbound, extra)
-
- ch.maxIncomingPayload = channelMaxPacket
-
- open := channelOpenMsg{
- ChanType: chanType,
- PeersWindow: ch.myWindow,
- MaxPacketSize: ch.maxIncomingPayload,
- TypeSpecificData: extra,
- PeersId: ch.localId,
- }
- if err := m.sendMessage(open); err != nil {
- return nil, err
- }
-
- switch msg := (<-ch.msg).(type) {
- case *channelOpenConfirmMsg:
- return ch, nil
- case *channelOpenFailureMsg:
- return nil, &OpenChannelError{msg.Reason, msg.Message}
- default:
- return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg)
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/mux_test.go b/vendor/golang.org/x/crypto/ssh/mux_test.go
deleted file mode 100644
index 591aae8..0000000
--- a/vendor/golang.org/x/crypto/ssh/mux_test.go
+++ /dev/null
@@ -1,502 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "io"
- "io/ioutil"
- "sync"
- "testing"
-)
-
-func muxPair() (*mux, *mux) {
- a, b := memPipe()
-
- s := newMux(a)
- c := newMux(b)
-
- return s, c
-}
-
-// Returns both ends of a channel, and the mux for the the 2nd
-// channel.
-func channelPair(t *testing.T) (*channel, *channel, *mux) {
- c, s := muxPair()
-
- res := make(chan *channel, 1)
- go func() {
- newCh, ok := <-s.incomingChannels
- if !ok {
- t.Fatalf("No incoming channel")
- }
- if newCh.ChannelType() != "chan" {
- t.Fatalf("got type %q want chan", newCh.ChannelType())
- }
- ch, _, err := newCh.Accept()
- if err != nil {
- t.Fatalf("Accept %v", err)
- }
- res <- ch.(*channel)
- }()
-
- ch, err := c.openChannel("chan", nil)
- if err != nil {
- t.Fatalf("OpenChannel: %v", err)
- }
-
- return <-res, ch, c
-}
-
-// Test that stderr and stdout can be addressed from different
-// goroutines. This is intended for use with the race detector.
-func TestMuxChannelExtendedThreadSafety(t *testing.T) {
- writer, reader, mux := channelPair(t)
- defer writer.Close()
- defer reader.Close()
- defer mux.Close()
-
- var wr, rd sync.WaitGroup
- magic := "hello world"
-
- wr.Add(2)
- go func() {
- io.WriteString(writer, magic)
- wr.Done()
- }()
- go func() {
- io.WriteString(writer.Stderr(), magic)
- wr.Done()
- }()
-
- rd.Add(2)
- go func() {
- c, err := ioutil.ReadAll(reader)
- if string(c) != magic {
- t.Fatalf("stdout read got %q, want %q (error %s)", c, magic, err)
- }
- rd.Done()
- }()
- go func() {
- c, err := ioutil.ReadAll(reader.Stderr())
- if string(c) != magic {
- t.Fatalf("stderr read got %q, want %q (error %s)", c, magic, err)
- }
- rd.Done()
- }()
-
- wr.Wait()
- writer.CloseWrite()
- rd.Wait()
-}
-
-func TestMuxReadWrite(t *testing.T) {
- s, c, mux := channelPair(t)
- defer s.Close()
- defer c.Close()
- defer mux.Close()
-
- magic := "hello world"
- magicExt := "hello stderr"
- go func() {
- _, err := s.Write([]byte(magic))
- if err != nil {
- t.Fatalf("Write: %v", err)
- }
- _, err = s.Extended(1).Write([]byte(magicExt))
- if err != nil {
- t.Fatalf("Write: %v", err)
- }
- err = s.Close()
- if err != nil {
- t.Fatalf("Close: %v", err)
- }
- }()
-
- var buf [1024]byte
- n, err := c.Read(buf[:])
- if err != nil {
- t.Fatalf("server Read: %v", err)
- }
- got := string(buf[:n])
- if got != magic {
- t.Fatalf("server: got %q want %q", got, magic)
- }
-
- n, err = c.Extended(1).Read(buf[:])
- if err != nil {
- t.Fatalf("server Read: %v", err)
- }
-
- got = string(buf[:n])
- if got != magicExt {
- t.Fatalf("server: got %q want %q", got, magic)
- }
-}
-
-func TestMuxChannelOverflow(t *testing.T) {
- reader, writer, mux := channelPair(t)
- defer reader.Close()
- defer writer.Close()
- defer mux.Close()
-
- wDone := make(chan int, 1)
- go func() {
- if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil {
- t.Errorf("could not fill window: %v", err)
- }
- writer.Write(make([]byte, 1))
- wDone <- 1
- }()
- writer.remoteWin.waitWriterBlocked()
-
- // Send 1 byte.
- packet := make([]byte, 1+4+4+1)
- packet[0] = msgChannelData
- marshalUint32(packet[1:], writer.remoteId)
- marshalUint32(packet[5:], uint32(1))
- packet[9] = 42
-
- if err := writer.mux.conn.writePacket(packet); err != nil {
- t.Errorf("could not send packet")
- }
- if _, err := reader.SendRequest("hello", true, nil); err == nil {
- t.Errorf("SendRequest succeeded.")
- }
- <-wDone
-}
-
-func TestMuxChannelCloseWriteUnblock(t *testing.T) {
- reader, writer, mux := channelPair(t)
- defer reader.Close()
- defer writer.Close()
- defer mux.Close()
-
- wDone := make(chan int, 1)
- go func() {
- if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil {
- t.Errorf("could not fill window: %v", err)
- }
- if _, err := writer.Write(make([]byte, 1)); err != io.EOF {
- t.Errorf("got %v, want EOF for unblock write", err)
- }
- wDone <- 1
- }()
-
- writer.remoteWin.waitWriterBlocked()
- reader.Close()
- <-wDone
-}
-
-func TestMuxConnectionCloseWriteUnblock(t *testing.T) {
- reader, writer, mux := channelPair(t)
- defer reader.Close()
- defer writer.Close()
- defer mux.Close()
-
- wDone := make(chan int, 1)
- go func() {
- if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil {
- t.Errorf("could not fill window: %v", err)
- }
- if _, err := writer.Write(make([]byte, 1)); err != io.EOF {
- t.Errorf("got %v, want EOF for unblock write", err)
- }
- wDone <- 1
- }()
-
- writer.remoteWin.waitWriterBlocked()
- mux.Close()
- <-wDone
-}
-
-func TestMuxReject(t *testing.T) {
- client, server := muxPair()
- defer server.Close()
- defer client.Close()
-
- go func() {
- ch, ok := <-server.incomingChannels
- if !ok {
- t.Fatalf("Accept")
- }
- if ch.ChannelType() != "ch" || string(ch.ExtraData()) != "extra" {
- t.Fatalf("unexpected channel: %q, %q", ch.ChannelType(), ch.ExtraData())
- }
- ch.Reject(RejectionReason(42), "message")
- }()
-
- ch, err := client.openChannel("ch", []byte("extra"))
- if ch != nil {
- t.Fatal("openChannel not rejected")
- }
-
- ocf, ok := err.(*OpenChannelError)
- if !ok {
- t.Errorf("got %#v want *OpenChannelError", err)
- } else if ocf.Reason != 42 || ocf.Message != "message" {
- t.Errorf("got %#v, want {Reason: 42, Message: %q}", ocf, "message")
- }
-
- want := "ssh: rejected: unknown reason 42 (message)"
- if err.Error() != want {
- t.Errorf("got %q, want %q", err.Error(), want)
- }
-}
-
-func TestMuxChannelRequest(t *testing.T) {
- client, server, mux := channelPair(t)
- defer server.Close()
- defer client.Close()
- defer mux.Close()
-
- var received int
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- for r := range server.incomingRequests {
- received++
- r.Reply(r.Type == "yes", nil)
- }
- wg.Done()
- }()
- _, err := client.SendRequest("yes", false, nil)
- if err != nil {
- t.Fatalf("SendRequest: %v", err)
- }
- ok, err := client.SendRequest("yes", true, nil)
- if err != nil {
- t.Fatalf("SendRequest: %v", err)
- }
-
- if !ok {
- t.Errorf("SendRequest(yes): %v", ok)
-
- }
-
- ok, err = client.SendRequest("no", true, nil)
- if err != nil {
- t.Fatalf("SendRequest: %v", err)
- }
- if ok {
- t.Errorf("SendRequest(no): %v", ok)
-
- }
-
- client.Close()
- wg.Wait()
-
- if received != 3 {
- t.Errorf("got %d requests, want %d", received, 3)
- }
-}
-
-func TestMuxGlobalRequest(t *testing.T) {
- clientMux, serverMux := muxPair()
- defer serverMux.Close()
- defer clientMux.Close()
-
- var seen bool
- go func() {
- for r := range serverMux.incomingRequests {
- seen = seen || r.Type == "peek"
- if r.WantReply {
- err := r.Reply(r.Type == "yes",
- append([]byte(r.Type), r.Payload...))
- if err != nil {
- t.Errorf("AckRequest: %v", err)
- }
- }
- }
- }()
-
- _, _, err := clientMux.SendRequest("peek", false, nil)
- if err != nil {
- t.Errorf("SendRequest: %v", err)
- }
-
- ok, data, err := clientMux.SendRequest("yes", true, []byte("a"))
- if !ok || string(data) != "yesa" || err != nil {
- t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v",
- ok, data, err)
- }
- if ok, data, err := clientMux.SendRequest("yes", true, []byte("a")); !ok || string(data) != "yesa" || err != nil {
- t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v",
- ok, data, err)
- }
-
- if ok, data, err := clientMux.SendRequest("no", true, []byte("a")); ok || string(data) != "noa" || err != nil {
- t.Errorf("SendRequest(\"no\", true, \"a\"): %v %v %v",
- ok, data, err)
- }
-
- if !seen {
- t.Errorf("never saw 'peek' request")
- }
-}
-
-func TestMuxGlobalRequestUnblock(t *testing.T) {
- clientMux, serverMux := muxPair()
- defer serverMux.Close()
- defer clientMux.Close()
-
- result := make(chan error, 1)
- go func() {
- _, _, err := clientMux.SendRequest("hello", true, nil)
- result <- err
- }()
-
- <-serverMux.incomingRequests
- serverMux.conn.Close()
- err := <-result
-
- if err != io.EOF {
- t.Errorf("want EOF, got %v", io.EOF)
- }
-}
-
-func TestMuxChannelRequestUnblock(t *testing.T) {
- a, b, connB := channelPair(t)
- defer a.Close()
- defer b.Close()
- defer connB.Close()
-
- result := make(chan error, 1)
- go func() {
- _, err := a.SendRequest("hello", true, nil)
- result <- err
- }()
-
- <-b.incomingRequests
- connB.conn.Close()
- err := <-result
-
- if err != io.EOF {
- t.Errorf("want EOF, got %v", err)
- }
-}
-
-func TestMuxCloseChannel(t *testing.T) {
- r, w, mux := channelPair(t)
- defer mux.Close()
- defer r.Close()
- defer w.Close()
-
- result := make(chan error, 1)
- go func() {
- var b [1024]byte
- _, err := r.Read(b[:])
- result <- err
- }()
- if err := w.Close(); err != nil {
- t.Errorf("w.Close: %v", err)
- }
-
- if _, err := w.Write([]byte("hello")); err != io.EOF {
- t.Errorf("got err %v, want io.EOF after Close", err)
- }
-
- if err := <-result; err != io.EOF {
- t.Errorf("got %v (%T), want io.EOF", err, err)
- }
-}
-
-func TestMuxCloseWriteChannel(t *testing.T) {
- r, w, mux := channelPair(t)
- defer mux.Close()
-
- result := make(chan error, 1)
- go func() {
- var b [1024]byte
- _, err := r.Read(b[:])
- result <- err
- }()
- if err := w.CloseWrite(); err != nil {
- t.Errorf("w.CloseWrite: %v", err)
- }
-
- if _, err := w.Write([]byte("hello")); err != io.EOF {
- t.Errorf("got err %v, want io.EOF after CloseWrite", err)
- }
-
- if err := <-result; err != io.EOF {
- t.Errorf("got %v (%T), want io.EOF", err, err)
- }
-}
-
-func TestMuxInvalidRecord(t *testing.T) {
- a, b := muxPair()
- defer a.Close()
- defer b.Close()
-
- packet := make([]byte, 1+4+4+1)
- packet[0] = msgChannelData
- marshalUint32(packet[1:], 29348723 /* invalid channel id */)
- marshalUint32(packet[5:], 1)
- packet[9] = 42
-
- a.conn.writePacket(packet)
- go a.SendRequest("hello", false, nil)
- // 'a' wrote an invalid packet, so 'b' has exited.
- req, ok := <-b.incomingRequests
- if ok {
- t.Errorf("got request %#v after receiving invalid packet", req)
- }
-}
-
-func TestZeroWindowAdjust(t *testing.T) {
- a, b, mux := channelPair(t)
- defer a.Close()
- defer b.Close()
- defer mux.Close()
-
- go func() {
- io.WriteString(a, "hello")
- // bogus adjust.
- a.sendMessage(windowAdjustMsg{})
- io.WriteString(a, "world")
- a.Close()
- }()
-
- want := "helloworld"
- c, _ := ioutil.ReadAll(b)
- if string(c) != want {
- t.Errorf("got %q want %q", c, want)
- }
-}
-
-func TestMuxMaxPacketSize(t *testing.T) {
- a, b, mux := channelPair(t)
- defer a.Close()
- defer b.Close()
- defer mux.Close()
-
- large := make([]byte, a.maxRemotePayload+1)
- packet := make([]byte, 1+4+4+1+len(large))
- packet[0] = msgChannelData
- marshalUint32(packet[1:], a.remoteId)
- marshalUint32(packet[5:], uint32(len(large)))
- packet[9] = 42
-
- if err := a.mux.conn.writePacket(packet); err != nil {
- t.Errorf("could not send packet")
- }
-
- go a.SendRequest("hello", false, nil)
-
- _, ok := <-b.incomingRequests
- if ok {
- t.Errorf("connection still alive after receiving large packet.")
- }
-}
-
-// Don't ship code with debug=true.
-func TestDebug(t *testing.T) {
- if debugMux {
- t.Error("mux debug switched on")
- }
- if debugHandshake {
- t.Error("handshake debug switched on")
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
deleted file mode 100644
index 37df1b3..0000000
--- a/vendor/golang.org/x/crypto/ssh/server.go
+++ /dev/null
@@ -1,488 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "net"
-)
-
-// The Permissions type holds fine-grained permissions that are
-// specific to a user or a specific authentication method for a
-// user. Permissions, except for "source-address", must be enforced in
-// the server application layer, after successful authentication. The
-// Permissions are passed on in ServerConn so a server implementation
-// can honor them.
-type Permissions struct {
- // Critical options restrict default permissions. Common
- // restrictions are "source-address" and "force-command". If
- // the server cannot enforce the restriction, or does not
- // recognize it, the user should not authenticate.
- CriticalOptions map[string]string
-
- // Extensions are extra functionality that the server may
- // offer on authenticated connections. Common extensions are
- // "permit-agent-forwarding", "permit-X11-forwarding". Lack of
- // support for an extension does not preclude authenticating a
- // user.
- Extensions map[string]string
-}
-
-// ServerConfig holds server specific configuration data.
-type ServerConfig struct {
- // Config contains configuration shared between client and server.
- Config
-
- hostKeys []Signer
-
- // NoClientAuth is true if clients are allowed to connect without
- // authenticating.
- NoClientAuth bool
-
- // PasswordCallback, if non-nil, is called when a user
- // attempts to authenticate using a password.
- PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error)
-
- // PublicKeyCallback, if non-nil, is called when a client attempts public
- // key authentication. It must return true if the given public key is
- // valid for the given user. For example, see CertChecker.Authenticate.
- PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
-
- // KeyboardInteractiveCallback, if non-nil, is called when
- // keyboard-interactive authentication is selected (RFC
- // 4256). The client object's Challenge function should be
- // used to query the user. The callback may offer multiple
- // Challenge rounds. To avoid information leaks, the client
- // should be presented a challenge even if the user is
- // unknown.
- KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error)
-
- // AuthLogCallback, if non-nil, is called to log all authentication
- // attempts.
- AuthLogCallback func(conn ConnMetadata, method string, err error)
-
- // ServerVersion is the version identification string to announce in
- // the public handshake.
- // If empty, a reasonable default is used.
- // Note that RFC 4253 section 4.2 requires that this string start with
- // "SSH-2.0-".
- ServerVersion string
-}
-
-// AddHostKey adds a private key as a host key. If an existing host
-// key exists with the same algorithm, it is overwritten. Each server
-// config must have at least one host key.
-func (s *ServerConfig) AddHostKey(key Signer) {
- for i, k := range s.hostKeys {
- if k.PublicKey().Type() == key.PublicKey().Type() {
- s.hostKeys[i] = key
- return
- }
- }
-
- s.hostKeys = append(s.hostKeys, key)
-}
-
-// cachedPubKey contains the results of querying whether a public key is
-// acceptable for a user.
-type cachedPubKey struct {
- user string
- pubKeyData []byte
- result error
- perms *Permissions
-}
-
-const maxCachedPubKeys = 16
-
-// pubKeyCache caches tests for public keys. Since SSH clients
-// will query whether a public key is acceptable before attempting to
-// authenticate with it, we end up with duplicate queries for public
-// key validity. The cache only applies to a single ServerConn.
-type pubKeyCache struct {
- keys []cachedPubKey
-}
-
-// get returns the result for a given user/algo/key tuple.
-func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
- for _, k := range c.keys {
- if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) {
- return k, true
- }
- }
- return cachedPubKey{}, false
-}
-
-// add adds the given tuple to the cache.
-func (c *pubKeyCache) add(candidate cachedPubKey) {
- if len(c.keys) < maxCachedPubKeys {
- c.keys = append(c.keys, candidate)
- }
-}
-
-// ServerConn is an authenticated SSH connection, as seen from the
-// server
-type ServerConn struct {
- Conn
-
- // If the succeeding authentication callback returned a
- // non-nil Permissions pointer, it is stored here.
- Permissions *Permissions
-}
-
-// NewServerConn starts a new SSH server with c as the underlying
-// transport. It starts with a handshake and, if the handshake is
-// unsuccessful, it closes the connection and returns an error. The
-// Request and NewChannel channels must be serviced, or the connection
-// will hang.
-func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) {
- fullConf := *config
- fullConf.SetDefaults()
- s := &connection{
- sshConn: sshConn{conn: c},
- }
- perms, err := s.serverHandshake(&fullConf)
- if err != nil {
- c.Close()
- return nil, nil, nil, err
- }
- return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil
-}
-
-// signAndMarshal signs the data with the appropriate algorithm,
-// and serializes the result in SSH wire format.
-func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) {
- sig, err := k.Sign(rand, data)
- if err != nil {
- return nil, err
- }
-
- return Marshal(sig), nil
-}
-
-// handshake performs key exchange and user authentication.
-func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) {
- if len(config.hostKeys) == 0 {
- return nil, errors.New("ssh: server has no host keys")
- }
-
- if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil {
- return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
- }
-
- if config.ServerVersion != "" {
- s.serverVersion = []byte(config.ServerVersion)
- } else {
- s.serverVersion = []byte(packageVersion)
- }
- var err error
- s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion)
- if err != nil {
- return nil, err
- }
-
- tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */)
- s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config)
-
- if err := s.transport.requestInitialKeyChange(); err != nil {
- return nil, err
- }
-
- // We just did the key change, so the session ID is established.
- s.sessionID = s.transport.getSessionID()
-
- var packet []byte
- if packet, err = s.transport.readPacket(); err != nil {
- return nil, err
- }
-
- var serviceRequest serviceRequestMsg
- if err = Unmarshal(packet, &serviceRequest); err != nil {
- return nil, err
- }
- if serviceRequest.Service != serviceUserAuth {
- return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating")
- }
- serviceAccept := serviceAcceptMsg{
- Service: serviceUserAuth,
- }
- if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil {
- return nil, err
- }
-
- perms, err := s.serverAuthenticate(config)
- if err != nil {
- return nil, err
- }
- s.mux = newMux(s.transport)
- return perms, err
-}
-
-func isAcceptableAlgo(algo string) bool {
- switch algo {
- case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519,
- CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01:
- return true
- }
- return false
-}
-
-func checkSourceAddress(addr net.Addr, sourceAddr string) error {
- if addr == nil {
- return errors.New("ssh: no address known for client, but source-address match required")
- }
-
- tcpAddr, ok := addr.(*net.TCPAddr)
- if !ok {
- return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr)
- }
-
- if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
- if bytes.Equal(allowedIP, tcpAddr.IP) {
- return nil
- }
- } else {
- _, ipNet, err := net.ParseCIDR(sourceAddr)
- if err != nil {
- return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err)
- }
-
- if ipNet.Contains(tcpAddr.IP) {
- return nil
- }
- }
-
- return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr)
-}
-
-func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
- var err error
- var cache pubKeyCache
- var perms *Permissions
-
-userAuthLoop:
- for {
- var userAuthReq userAuthRequestMsg
- if packet, err := s.transport.readPacket(); err != nil {
- return nil, err
- } else if err = Unmarshal(packet, &userAuthReq); err != nil {
- return nil, err
- }
-
- if userAuthReq.Service != serviceSSH {
- return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service)
- }
-
- s.user = userAuthReq.User
- perms = nil
- authErr := errors.New("no auth passed yet")
-
- switch userAuthReq.Method {
- case "none":
- if config.NoClientAuth {
- authErr = nil
- }
- case "password":
- if config.PasswordCallback == nil {
- authErr = errors.New("ssh: password auth not configured")
- break
- }
- payload := userAuthReq.Payload
- if len(payload) < 1 || payload[0] != 0 {
- return nil, parseError(msgUserAuthRequest)
- }
- payload = payload[1:]
- password, payload, ok := parseString(payload)
- if !ok || len(payload) > 0 {
- return nil, parseError(msgUserAuthRequest)
- }
-
- perms, authErr = config.PasswordCallback(s, password)
- case "keyboard-interactive":
- if config.KeyboardInteractiveCallback == nil {
- authErr = errors.New("ssh: keyboard-interactive auth not configubred")
- break
- }
-
- prompter := &sshClientKeyboardInteractive{s}
- perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge)
- case "publickey":
- if config.PublicKeyCallback == nil {
- authErr = errors.New("ssh: publickey auth not configured")
- break
- }
- payload := userAuthReq.Payload
- if len(payload) < 1 {
- return nil, parseError(msgUserAuthRequest)
- }
- isQuery := payload[0] == 0
- payload = payload[1:]
- algoBytes, payload, ok := parseString(payload)
- if !ok {
- return nil, parseError(msgUserAuthRequest)
- }
- algo := string(algoBytes)
- if !isAcceptableAlgo(algo) {
- authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
- break
- }
-
- pubKeyData, payload, ok := parseString(payload)
- if !ok {
- return nil, parseError(msgUserAuthRequest)
- }
-
- pubKey, err := ParsePublicKey(pubKeyData)
- if err != nil {
- return nil, err
- }
-
- candidate, ok := cache.get(s.user, pubKeyData)
- if !ok {
- candidate.user = s.user
- candidate.pubKeyData = pubKeyData
- candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey)
- if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" {
- candidate.result = checkSourceAddress(
- s.RemoteAddr(),
- candidate.perms.CriticalOptions[sourceAddressCriticalOption])
- }
- cache.add(candidate)
- }
-
- if isQuery {
- // The client can query if the given public key
- // would be okay.
- if len(payload) > 0 {
- return nil, parseError(msgUserAuthRequest)
- }
-
- if candidate.result == nil {
- okMsg := userAuthPubKeyOkMsg{
- Algo: algo,
- PubKey: pubKeyData,
- }
- if err = s.transport.writePacket(Marshal(&okMsg)); err != nil {
- return nil, err
- }
- continue userAuthLoop
- }
- authErr = candidate.result
- } else {
- sig, payload, ok := parseSignature(payload)
- if !ok || len(payload) > 0 {
- return nil, parseError(msgUserAuthRequest)
- }
- // Ensure the public key algo and signature algo
- // are supported. Compare the private key
- // algorithm name that corresponds to algo with
- // sig.Format. This is usually the same, but
- // for certs, the names differ.
- if !isAcceptableAlgo(sig.Format) {
- break
- }
- signedData := buildDataSignedForAuth(s.transport.getSessionID(), userAuthReq, algoBytes, pubKeyData)
-
- if err := pubKey.Verify(signedData, sig); err != nil {
- return nil, err
- }
-
- authErr = candidate.result
- perms = candidate.perms
- }
- default:
- authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method)
- }
-
- if config.AuthLogCallback != nil {
- config.AuthLogCallback(s, userAuthReq.Method, authErr)
- }
-
- if authErr == nil {
- break userAuthLoop
- }
-
- var failureMsg userAuthFailureMsg
- if config.PasswordCallback != nil {
- failureMsg.Methods = append(failureMsg.Methods, "password")
- }
- if config.PublicKeyCallback != nil {
- failureMsg.Methods = append(failureMsg.Methods, "publickey")
- }
- if config.KeyboardInteractiveCallback != nil {
- failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive")
- }
-
- if len(failureMsg.Methods) == 0 {
- return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
- }
-
- if err = s.transport.writePacket(Marshal(&failureMsg)); err != nil {
- return nil, err
- }
- }
-
- if err = s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil {
- return nil, err
- }
- return perms, nil
-}
-
-// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by
-// asking the client on the other side of a ServerConn.
-type sshClientKeyboardInteractive struct {
- *connection
-}
-
-func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
- if len(questions) != len(echos) {
- return nil, errors.New("ssh: echos and questions must have equal length")
- }
-
- var prompts []byte
- for i := range questions {
- prompts = appendString(prompts, questions[i])
- prompts = appendBool(prompts, echos[i])
- }
-
- if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{
- Instruction: instruction,
- NumPrompts: uint32(len(questions)),
- Prompts: prompts,
- })); err != nil {
- return nil, err
- }
-
- packet, err := c.transport.readPacket()
- if err != nil {
- return nil, err
- }
- if packet[0] != msgUserAuthInfoResponse {
- return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0])
- }
- packet = packet[1:]
-
- n, packet, ok := parseUint32(packet)
- if !ok || int(n) != len(questions) {
- return nil, parseError(msgUserAuthInfoResponse)
- }
-
- for i := uint32(0); i < n; i++ {
- ans, rest, ok := parseString(packet)
- if !ok {
- return nil, parseError(msgUserAuthInfoResponse)
- }
-
- answers = append(answers, string(ans))
- packet = rest
- }
- if len(packet) != 0 {
- return nil, errors.New("ssh: junk at end of message")
- }
-
- return answers, nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go
deleted file mode 100644
index 17e2aa8..0000000
--- a/vendor/golang.org/x/crypto/ssh/session.go
+++ /dev/null
@@ -1,627 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-// Session implements an interactive session described in
-// "RFC 4254, section 6".
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "sync"
-)
-
-type Signal string
-
-// POSIX signals as listed in RFC 4254 Section 6.10.
-const (
- SIGABRT Signal = "ABRT"
- SIGALRM Signal = "ALRM"
- SIGFPE Signal = "FPE"
- SIGHUP Signal = "HUP"
- SIGILL Signal = "ILL"
- SIGINT Signal = "INT"
- SIGKILL Signal = "KILL"
- SIGPIPE Signal = "PIPE"
- SIGQUIT Signal = "QUIT"
- SIGSEGV Signal = "SEGV"
- SIGTERM Signal = "TERM"
- SIGUSR1 Signal = "USR1"
- SIGUSR2 Signal = "USR2"
-)
-
-var signals = map[Signal]int{
- SIGABRT: 6,
- SIGALRM: 14,
- SIGFPE: 8,
- SIGHUP: 1,
- SIGILL: 4,
- SIGINT: 2,
- SIGKILL: 9,
- SIGPIPE: 13,
- SIGQUIT: 3,
- SIGSEGV: 11,
- SIGTERM: 15,
-}
-
-type TerminalModes map[uint8]uint32
-
-// POSIX terminal mode flags as listed in RFC 4254 Section 8.
-const (
- tty_OP_END = 0
- VINTR = 1
- VQUIT = 2
- VERASE = 3
- VKILL = 4
- VEOF = 5
- VEOL = 6
- VEOL2 = 7
- VSTART = 8
- VSTOP = 9
- VSUSP = 10
- VDSUSP = 11
- VREPRINT = 12
- VWERASE = 13
- VLNEXT = 14
- VFLUSH = 15
- VSWTCH = 16
- VSTATUS = 17
- VDISCARD = 18
- IGNPAR = 30
- PARMRK = 31
- INPCK = 32
- ISTRIP = 33
- INLCR = 34
- IGNCR = 35
- ICRNL = 36
- IUCLC = 37
- IXON = 38
- IXANY = 39
- IXOFF = 40
- IMAXBEL = 41
- ISIG = 50
- ICANON = 51
- XCASE = 52
- ECHO = 53
- ECHOE = 54
- ECHOK = 55
- ECHONL = 56
- NOFLSH = 57
- TOSTOP = 58
- IEXTEN = 59
- ECHOCTL = 60
- ECHOKE = 61
- PENDIN = 62
- OPOST = 70
- OLCUC = 71
- ONLCR = 72
- OCRNL = 73
- ONOCR = 74
- ONLRET = 75
- CS7 = 90
- CS8 = 91
- PARENB = 92
- PARODD = 93
- TTY_OP_ISPEED = 128
- TTY_OP_OSPEED = 129
-)
-
-// A Session represents a connection to a remote command or shell.
-type Session struct {
- // Stdin specifies the remote process's standard input.
- // If Stdin is nil, the remote process reads from an empty
- // bytes.Buffer.
- Stdin io.Reader
-
- // Stdout and Stderr specify the remote process's standard
- // output and error.
- //
- // If either is nil, Run connects the corresponding file
- // descriptor to an instance of ioutil.Discard. There is a
- // fixed amount of buffering that is shared for the two streams.
- // If either blocks it may eventually cause the remote
- // command to block.
- Stdout io.Writer
- Stderr io.Writer
-
- ch Channel // the channel backing this session
- started bool // true once Start, Run or Shell is invoked.
- copyFuncs []func() error
- errors chan error // one send per copyFunc
-
- // true if pipe method is active
- stdinpipe, stdoutpipe, stderrpipe bool
-
- // stdinPipeWriter is non-nil if StdinPipe has not been called
- // and Stdin was specified by the user; it is the write end of
- // a pipe connecting Session.Stdin to the stdin channel.
- stdinPipeWriter io.WriteCloser
-
- exitStatus chan error
-}
-
-// SendRequest sends an out-of-band channel request on the SSH channel
-// underlying the session.
-func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
- return s.ch.SendRequest(name, wantReply, payload)
-}
-
-func (s *Session) Close() error {
- return s.ch.Close()
-}
-
-// RFC 4254 Section 6.4.
-type setenvRequest struct {
- Name string
- Value string
-}
-
-// Setenv sets an environment variable that will be applied to any
-// command executed by Shell or Run.
-func (s *Session) Setenv(name, value string) error {
- msg := setenvRequest{
- Name: name,
- Value: value,
- }
- ok, err := s.ch.SendRequest("env", true, Marshal(&msg))
- if err == nil && !ok {
- err = errors.New("ssh: setenv failed")
- }
- return err
-}
-
-// RFC 4254 Section 6.2.
-type ptyRequestMsg struct {
- Term string
- Columns uint32
- Rows uint32
- Width uint32
- Height uint32
- Modelist string
-}
-
-// RequestPty requests the association of a pty with the session on the remote host.
-func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error {
- var tm []byte
- for k, v := range termmodes {
- kv := struct {
- Key byte
- Val uint32
- }{k, v}
-
- tm = append(tm, Marshal(&kv)...)
- }
- tm = append(tm, tty_OP_END)
- req := ptyRequestMsg{
- Term: term,
- Columns: uint32(w),
- Rows: uint32(h),
- Width: uint32(w * 8),
- Height: uint32(h * 8),
- Modelist: string(tm),
- }
- ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req))
- if err == nil && !ok {
- err = errors.New("ssh: pty-req failed")
- }
- return err
-}
-
-// RFC 4254 Section 6.5.
-type subsystemRequestMsg struct {
- Subsystem string
-}
-
-// RequestSubsystem requests the association of a subsystem with the session on the remote host.
-// A subsystem is a predefined command that runs in the background when the ssh session is initiated
-func (s *Session) RequestSubsystem(subsystem string) error {
- msg := subsystemRequestMsg{
- Subsystem: subsystem,
- }
- ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg))
- if err == nil && !ok {
- err = errors.New("ssh: subsystem request failed")
- }
- return err
-}
-
-// RFC 4254 Section 6.9.
-type signalMsg struct {
- Signal string
-}
-
-// Signal sends the given signal to the remote process.
-// sig is one of the SIG* constants.
-func (s *Session) Signal(sig Signal) error {
- msg := signalMsg{
- Signal: string(sig),
- }
-
- _, err := s.ch.SendRequest("signal", false, Marshal(&msg))
- return err
-}
-
-// RFC 4254 Section 6.5.
-type execMsg struct {
- Command string
-}
-
-// Start runs cmd on the remote host. Typically, the remote
-// server passes cmd to the shell for interpretation.
-// A Session only accepts one call to Run, Start or Shell.
-func (s *Session) Start(cmd string) error {
- if s.started {
- return errors.New("ssh: session already started")
- }
- req := execMsg{
- Command: cmd,
- }
-
- ok, err := s.ch.SendRequest("exec", true, Marshal(&req))
- if err == nil && !ok {
- err = fmt.Errorf("ssh: command %v failed", cmd)
- }
- if err != nil {
- return err
- }
- return s.start()
-}
-
-// Run runs cmd on the remote host. Typically, the remote
-// server passes cmd to the shell for interpretation.
-// A Session only accepts one call to Run, Start, Shell, Output,
-// or CombinedOutput.
-//
-// The returned error is nil if the command runs, has no problems
-// copying stdin, stdout, and stderr, and exits with a zero exit
-// status.
-//
-// If the remote server does not send an exit status, an error of type
-// *ExitMissingError is returned. If the command completes
-// unsuccessfully or is interrupted by a signal, the error is of type
-// *ExitError. Other error types may be returned for I/O problems.
-func (s *Session) Run(cmd string) error {
- err := s.Start(cmd)
- if err != nil {
- return err
- }
- return s.Wait()
-}
-
-// Output runs cmd on the remote host and returns its standard output.
-func (s *Session) Output(cmd string) ([]byte, error) {
- if s.Stdout != nil {
- return nil, errors.New("ssh: Stdout already set")
- }
- var b bytes.Buffer
- s.Stdout = &b
- err := s.Run(cmd)
- return b.Bytes(), err
-}
-
-type singleWriter struct {
- b bytes.Buffer
- mu sync.Mutex
-}
-
-func (w *singleWriter) Write(p []byte) (int, error) {
- w.mu.Lock()
- defer w.mu.Unlock()
- return w.b.Write(p)
-}
-
-// CombinedOutput runs cmd on the remote host and returns its combined
-// standard output and standard error.
-func (s *Session) CombinedOutput(cmd string) ([]byte, error) {
- if s.Stdout != nil {
- return nil, errors.New("ssh: Stdout already set")
- }
- if s.Stderr != nil {
- return nil, errors.New("ssh: Stderr already set")
- }
- var b singleWriter
- s.Stdout = &b
- s.Stderr = &b
- err := s.Run(cmd)
- return b.b.Bytes(), err
-}
-
-// Shell starts a login shell on the remote host. A Session only
-// accepts one call to Run, Start, Shell, Output, or CombinedOutput.
-func (s *Session) Shell() error {
- if s.started {
- return errors.New("ssh: session already started")
- }
-
- ok, err := s.ch.SendRequest("shell", true, nil)
- if err == nil && !ok {
- return errors.New("ssh: could not start shell")
- }
- if err != nil {
- return err
- }
- return s.start()
-}
-
-func (s *Session) start() error {
- s.started = true
-
- type F func(*Session)
- for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} {
- setupFd(s)
- }
-
- s.errors = make(chan error, len(s.copyFuncs))
- for _, fn := range s.copyFuncs {
- go func(fn func() error) {
- s.errors <- fn()
- }(fn)
- }
- return nil
-}
-
-// Wait waits for the remote command to exit.
-//
-// The returned error is nil if the command runs, has no problems
-// copying stdin, stdout, and stderr, and exits with a zero exit
-// status.
-//
-// If the remote server does not send an exit status, an error of type
-// *ExitMissingError is returned. If the command completes
-// unsuccessfully or is interrupted by a signal, the error is of type
-// *ExitError. Other error types may be returned for I/O problems.
-func (s *Session) Wait() error {
- if !s.started {
- return errors.New("ssh: session not started")
- }
- waitErr := <-s.exitStatus
-
- if s.stdinPipeWriter != nil {
- s.stdinPipeWriter.Close()
- }
- var copyError error
- for _ = range s.copyFuncs {
- if err := <-s.errors; err != nil && copyError == nil {
- copyError = err
- }
- }
- if waitErr != nil {
- return waitErr
- }
- return copyError
-}
-
-func (s *Session) wait(reqs <-chan *Request) error {
- wm := Waitmsg{status: -1}
- // Wait for msg channel to be closed before returning.
- for msg := range reqs {
- switch msg.Type {
- case "exit-status":
- wm.status = int(binary.BigEndian.Uint32(msg.Payload))
- case "exit-signal":
- var sigval struct {
- Signal string
- CoreDumped bool
- Error string
- Lang string
- }
- if err := Unmarshal(msg.Payload, &sigval); err != nil {
- return err
- }
-
- // Must sanitize strings?
- wm.signal = sigval.Signal
- wm.msg = sigval.Error
- wm.lang = sigval.Lang
- default:
- // This handles keepalives and matches
- // OpenSSH's behaviour.
- if msg.WantReply {
- msg.Reply(false, nil)
- }
- }
- }
- if wm.status == 0 {
- return nil
- }
- if wm.status == -1 {
- // exit-status was never sent from server
- if wm.signal == "" {
- // signal was not sent either. RFC 4254
- // section 6.10 recommends against this
- // behavior, but it is allowed, so we let
- // clients handle it.
- return &ExitMissingError{}
- }
- wm.status = 128
- if _, ok := signals[Signal(wm.signal)]; ok {
- wm.status += signals[Signal(wm.signal)]
- }
- }
-
- return &ExitError{wm}
-}
-
-// ExitMissingError is returned if a session is torn down cleanly, but
-// the server sends no confirmation of the exit status.
-type ExitMissingError struct{}
-
-func (e *ExitMissingError) Error() string {
- return "wait: remote command exited without exit status or exit signal"
-}
-
-func (s *Session) stdin() {
- if s.stdinpipe {
- return
- }
- var stdin io.Reader
- if s.Stdin == nil {
- stdin = new(bytes.Buffer)
- } else {
- r, w := io.Pipe()
- go func() {
- _, err := io.Copy(w, s.Stdin)
- w.CloseWithError(err)
- }()
- stdin, s.stdinPipeWriter = r, w
- }
- s.copyFuncs = append(s.copyFuncs, func() error {
- _, err := io.Copy(s.ch, stdin)
- if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF {
- err = err1
- }
- return err
- })
-}
-
-func (s *Session) stdout() {
- if s.stdoutpipe {
- return
- }
- if s.Stdout == nil {
- s.Stdout = ioutil.Discard
- }
- s.copyFuncs = append(s.copyFuncs, func() error {
- _, err := io.Copy(s.Stdout, s.ch)
- return err
- })
-}
-
-func (s *Session) stderr() {
- if s.stderrpipe {
- return
- }
- if s.Stderr == nil {
- s.Stderr = ioutil.Discard
- }
- s.copyFuncs = append(s.copyFuncs, func() error {
- _, err := io.Copy(s.Stderr, s.ch.Stderr())
- return err
- })
-}
-
-// sessionStdin reroutes Close to CloseWrite.
-type sessionStdin struct {
- io.Writer
- ch Channel
-}
-
-func (s *sessionStdin) Close() error {
- return s.ch.CloseWrite()
-}
-
-// StdinPipe returns a pipe that will be connected to the
-// remote command's standard input when the command starts.
-func (s *Session) StdinPipe() (io.WriteCloser, error) {
- if s.Stdin != nil {
- return nil, errors.New("ssh: Stdin already set")
- }
- if s.started {
- return nil, errors.New("ssh: StdinPipe after process started")
- }
- s.stdinpipe = true
- return &sessionStdin{s.ch, s.ch}, nil
-}
-
-// StdoutPipe returns a pipe that will be connected to the
-// remote command's standard output when the command starts.
-// There is a fixed amount of buffering that is shared between
-// stdout and stderr streams. If the StdoutPipe reader is
-// not serviced fast enough it may eventually cause the
-// remote command to block.
-func (s *Session) StdoutPipe() (io.Reader, error) {
- if s.Stdout != nil {
- return nil, errors.New("ssh: Stdout already set")
- }
- if s.started {
- return nil, errors.New("ssh: StdoutPipe after process started")
- }
- s.stdoutpipe = true
- return s.ch, nil
-}
-
-// StderrPipe returns a pipe that will be connected to the
-// remote command's standard error when the command starts.
-// There is a fixed amount of buffering that is shared between
-// stdout and stderr streams. If the StderrPipe reader is
-// not serviced fast enough it may eventually cause the
-// remote command to block.
-func (s *Session) StderrPipe() (io.Reader, error) {
- if s.Stderr != nil {
- return nil, errors.New("ssh: Stderr already set")
- }
- if s.started {
- return nil, errors.New("ssh: StderrPipe after process started")
- }
- s.stderrpipe = true
- return s.ch.Stderr(), nil
-}
-
-// newSession returns a new interactive session on the remote host.
-func newSession(ch Channel, reqs <-chan *Request) (*Session, error) {
- s := &Session{
- ch: ch,
- }
- s.exitStatus = make(chan error, 1)
- go func() {
- s.exitStatus <- s.wait(reqs)
- }()
-
- return s, nil
-}
-
-// An ExitError reports unsuccessful completion of a remote command.
-type ExitError struct {
- Waitmsg
-}
-
-func (e *ExitError) Error() string {
- return e.Waitmsg.String()
-}
-
-// Waitmsg stores the information about an exited remote command
-// as reported by Wait.
-type Waitmsg struct {
- status int
- signal string
- msg string
- lang string
-}
-
-// ExitStatus returns the exit status of the remote command.
-func (w Waitmsg) ExitStatus() int {
- return w.status
-}
-
-// Signal returns the exit signal of the remote command if
-// it was terminated violently.
-func (w Waitmsg) Signal() string {
- return w.signal
-}
-
-// Msg returns the exit message given by the remote command
-func (w Waitmsg) Msg() string {
- return w.msg
-}
-
-// Lang returns the language tag. See RFC 3066
-func (w Waitmsg) Lang() string {
- return w.lang
-}
-
-func (w Waitmsg) String() string {
- str := fmt.Sprintf("Process exited with status %v", w.status)
- if w.signal != "" {
- str += fmt.Sprintf(" from signal %v", w.signal)
- }
- if w.msg != "" {
- str += fmt.Sprintf(". Reason was: %v", w.msg)
- }
- return str
-}
diff --git a/vendor/golang.org/x/crypto/ssh/session_test.go b/vendor/golang.org/x/crypto/ssh/session_test.go
deleted file mode 100644
index f35a378..0000000
--- a/vendor/golang.org/x/crypto/ssh/session_test.go
+++ /dev/null
@@ -1,770 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-// Session tests.
-
-import (
- "bytes"
- crypto_rand "crypto/rand"
- "errors"
- "io"
- "io/ioutil"
- "math/rand"
- "net"
- "testing"
-
- "golang.org/x/crypto/ssh/terminal"
-)
-
-type serverType func(Channel, <-chan *Request, *testing.T)
-
-// dial constructs a new test server and returns a *ClientConn.
-func dial(handler serverType, t *testing.T) *Client {
- c1, c2, err := netPipe()
- if err != nil {
- t.Fatalf("netPipe: %v", err)
- }
-
- go func() {
- defer c1.Close()
- conf := ServerConfig{
- NoClientAuth: true,
- }
- conf.AddHostKey(testSigners["rsa"])
-
- _, chans, reqs, err := NewServerConn(c1, &conf)
- if err != nil {
- t.Fatalf("Unable to handshake: %v", err)
- }
- go DiscardRequests(reqs)
-
- for newCh := range chans {
- if newCh.ChannelType() != "session" {
- newCh.Reject(UnknownChannelType, "unknown channel type")
- continue
- }
-
- ch, inReqs, err := newCh.Accept()
- if err != nil {
- t.Errorf("Accept: %v", err)
- continue
- }
- go func() {
- handler(ch, inReqs, t)
- }()
- }
- }()
-
- config := &ClientConfig{
- User: "testuser",
- }
-
- conn, chans, reqs, err := NewClientConn(c2, "", config)
- if err != nil {
- t.Fatalf("unable to dial remote side: %v", err)
- }
-
- return NewClient(conn, chans, reqs)
-}
-
-// Test a simple string is returned to session.Stdout.
-func TestSessionShell(t *testing.T) {
- conn := dial(shellHandler, t)
- defer conn.Close()
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("Unable to request new session: %v", err)
- }
- defer session.Close()
- stdout := new(bytes.Buffer)
- session.Stdout = stdout
- if err := session.Shell(); err != nil {
- t.Fatalf("Unable to execute command: %s", err)
- }
- if err := session.Wait(); err != nil {
- t.Fatalf("Remote command did not exit cleanly: %v", err)
- }
- actual := stdout.String()
- if actual != "golang" {
- t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual)
- }
-}
-
-// TODO(dfc) add support for Std{in,err}Pipe when the Server supports it.
-
-// Test a simple string is returned via StdoutPipe.
-func TestSessionStdoutPipe(t *testing.T) {
- conn := dial(shellHandler, t)
- defer conn.Close()
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("Unable to request new session: %v", err)
- }
- defer session.Close()
- stdout, err := session.StdoutPipe()
- if err != nil {
- t.Fatalf("Unable to request StdoutPipe(): %v", err)
- }
- var buf bytes.Buffer
- if err := session.Shell(); err != nil {
- t.Fatalf("Unable to execute command: %v", err)
- }
- done := make(chan bool, 1)
- go func() {
- if _, err := io.Copy(&buf, stdout); err != nil {
- t.Errorf("Copy of stdout failed: %v", err)
- }
- done <- true
- }()
- if err := session.Wait(); err != nil {
- t.Fatalf("Remote command did not exit cleanly: %v", err)
- }
- <-done
- actual := buf.String()
- if actual != "golang" {
- t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual)
- }
-}
-
-// Test that a simple string is returned via the Output helper,
-// and that stderr is discarded.
-func TestSessionOutput(t *testing.T) {
- conn := dial(fixedOutputHandler, t)
- defer conn.Close()
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("Unable to request new session: %v", err)
- }
- defer session.Close()
-
- buf, err := session.Output("") // cmd is ignored by fixedOutputHandler
- if err != nil {
- t.Error("Remote command did not exit cleanly:", err)
- }
- w := "this-is-stdout."
- g := string(buf)
- if g != w {
- t.Error("Remote command did not return expected string:")
- t.Logf("want %q", w)
- t.Logf("got %q", g)
- }
-}
-
-// Test that both stdout and stderr are returned
-// via the CombinedOutput helper.
-func TestSessionCombinedOutput(t *testing.T) {
- conn := dial(fixedOutputHandler, t)
- defer conn.Close()
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("Unable to request new session: %v", err)
- }
- defer session.Close()
-
- buf, err := session.CombinedOutput("") // cmd is ignored by fixedOutputHandler
- if err != nil {
- t.Error("Remote command did not exit cleanly:", err)
- }
- const stdout = "this-is-stdout."
- const stderr = "this-is-stderr."
- g := string(buf)
- if g != stdout+stderr && g != stderr+stdout {
- t.Error("Remote command did not return expected string:")
- t.Logf("want %q, or %q", stdout+stderr, stderr+stdout)
- t.Logf("got %q", g)
- }
-}
-
-// Test non-0 exit status is returned correctly.
-func TestExitStatusNonZero(t *testing.T) {
- conn := dial(exitStatusNonZeroHandler, t)
- defer conn.Close()
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("Unable to request new session: %v", err)
- }
- defer session.Close()
- if err := session.Shell(); err != nil {
- t.Fatalf("Unable to execute command: %v", err)
- }
- err = session.Wait()
- if err == nil {
- t.Fatalf("expected command to fail but it didn't")
- }
- e, ok := err.(*ExitError)
- if !ok {
- t.Fatalf("expected *ExitError but got %T", err)
- }
- if e.ExitStatus() != 15 {
- t.Fatalf("expected command to exit with 15 but got %v", e.ExitStatus())
- }
-}
-
-// Test 0 exit status is returned correctly.
-func TestExitStatusZero(t *testing.T) {
- conn := dial(exitStatusZeroHandler, t)
- defer conn.Close()
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("Unable to request new session: %v", err)
- }
- defer session.Close()
-
- if err := session.Shell(); err != nil {
- t.Fatalf("Unable to execute command: %v", err)
- }
- err = session.Wait()
- if err != nil {
- t.Fatalf("expected nil but got %v", err)
- }
-}
-
-// Test exit signal and status are both returned correctly.
-func TestExitSignalAndStatus(t *testing.T) {
- conn := dial(exitSignalAndStatusHandler, t)
- defer conn.Close()
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("Unable to request new session: %v", err)
- }
- defer session.Close()
- if err := session.Shell(); err != nil {
- t.Fatalf("Unable to execute command: %v", err)
- }
- err = session.Wait()
- if err == nil {
- t.Fatalf("expected command to fail but it didn't")
- }
- e, ok := err.(*ExitError)
- if !ok {
- t.Fatalf("expected *ExitError but got %T", err)
- }
- if e.Signal() != "TERM" || e.ExitStatus() != 15 {
- t.Fatalf("expected command to exit with signal TERM and status 15 but got signal %s and status %v", e.Signal(), e.ExitStatus())
- }
-}
-
-// Test exit signal and status are both returned correctly.
-func TestKnownExitSignalOnly(t *testing.T) {
- conn := dial(exitSignalHandler, t)
- defer conn.Close()
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("Unable to request new session: %v", err)
- }
- defer session.Close()
- if err := session.Shell(); err != nil {
- t.Fatalf("Unable to execute command: %v", err)
- }
- err = session.Wait()
- if err == nil {
- t.Fatalf("expected command to fail but it didn't")
- }
- e, ok := err.(*ExitError)
- if !ok {
- t.Fatalf("expected *ExitError but got %T", err)
- }
- if e.Signal() != "TERM" || e.ExitStatus() != 143 {
- t.Fatalf("expected command to exit with signal TERM and status 143 but got signal %s and status %v", e.Signal(), e.ExitStatus())
- }
-}
-
-// Test exit signal and status are both returned correctly.
-func TestUnknownExitSignal(t *testing.T) {
- conn := dial(exitSignalUnknownHandler, t)
- defer conn.Close()
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("Unable to request new session: %v", err)
- }
- defer session.Close()
- if err := session.Shell(); err != nil {
- t.Fatalf("Unable to execute command: %v", err)
- }
- err = session.Wait()
- if err == nil {
- t.Fatalf("expected command to fail but it didn't")
- }
- e, ok := err.(*ExitError)
- if !ok {
- t.Fatalf("expected *ExitError but got %T", err)
- }
- if e.Signal() != "SYS" || e.ExitStatus() != 128 {
- t.Fatalf("expected command to exit with signal SYS and status 128 but got signal %s and status %v", e.Signal(), e.ExitStatus())
- }
-}
-
-func TestExitWithoutStatusOrSignal(t *testing.T) {
- conn := dial(exitWithoutSignalOrStatus, t)
- defer conn.Close()
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("Unable to request new session: %v", err)
- }
- defer session.Close()
- if err := session.Shell(); err != nil {
- t.Fatalf("Unable to execute command: %v", err)
- }
- err = session.Wait()
- if err == nil {
- t.Fatalf("expected command to fail but it didn't")
- }
- if _, ok := err.(*ExitMissingError); !ok {
- t.Fatalf("got %T want *ExitMissingError", err)
- }
-}
-
-// windowTestBytes is the number of bytes that we'll send to the SSH server.
-const windowTestBytes = 16000 * 200
-
-// TestServerWindow writes random data to the server. The server is expected to echo
-// the same data back, which is compared against the original.
-func TestServerWindow(t *testing.T) {
- origBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes))
- io.CopyN(origBuf, crypto_rand.Reader, windowTestBytes)
- origBytes := origBuf.Bytes()
-
- conn := dial(echoHandler, t)
- defer conn.Close()
- session, err := conn.NewSession()
- if err != nil {
- t.Fatal(err)
- }
- defer session.Close()
- result := make(chan []byte)
-
- go func() {
- defer close(result)
- echoedBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes))
- serverStdout, err := session.StdoutPipe()
- if err != nil {
- t.Errorf("StdoutPipe failed: %v", err)
- return
- }
- n, err := copyNRandomly("stdout", echoedBuf, serverStdout, windowTestBytes)
- if err != nil && err != io.EOF {
- t.Errorf("Read only %d bytes from server, expected %d: %v", n, windowTestBytes, err)
- }
- result <- echoedBuf.Bytes()
- }()
-
- serverStdin, err := session.StdinPipe()
- if err != nil {
- t.Fatalf("StdinPipe failed: %v", err)
- }
- written, err := copyNRandomly("stdin", serverStdin, origBuf, windowTestBytes)
- if err != nil {
- t.Fatalf("failed to copy origBuf to serverStdin: %v", err)
- }
- if written != windowTestBytes {
- t.Fatalf("Wrote only %d of %d bytes to server", written, windowTestBytes)
- }
-
- echoedBytes := <-result
-
- if !bytes.Equal(origBytes, echoedBytes) {
- t.Fatalf("Echoed buffer differed from original, orig %d, echoed %d", len(origBytes), len(echoedBytes))
- }
-}
-
-// Verify the client can handle a keepalive packet from the server.
-func TestClientHandlesKeepalives(t *testing.T) {
- conn := dial(channelKeepaliveSender, t)
- defer conn.Close()
- session, err := conn.NewSession()
- if err != nil {
- t.Fatal(err)
- }
- defer session.Close()
- if err := session.Shell(); err != nil {
- t.Fatalf("Unable to execute command: %v", err)
- }
- err = session.Wait()
- if err != nil {
- t.Fatalf("expected nil but got: %v", err)
- }
-}
-
-type exitStatusMsg struct {
- Status uint32
-}
-
-type exitSignalMsg struct {
- Signal string
- CoreDumped bool
- Errmsg string
- Lang string
-}
-
-func handleTerminalRequests(in <-chan *Request) {
- for req := range in {
- ok := false
- switch req.Type {
- case "shell":
- ok = true
- if len(req.Payload) > 0 {
- // We don't accept any commands, only the default shell.
- ok = false
- }
- case "env":
- ok = true
- }
- req.Reply(ok, nil)
- }
-}
-
-func newServerShell(ch Channel, in <-chan *Request, prompt string) *terminal.Terminal {
- term := terminal.NewTerminal(ch, prompt)
- go handleTerminalRequests(in)
- return term
-}
-
-func exitStatusZeroHandler(ch Channel, in <-chan *Request, t *testing.T) {
- defer ch.Close()
- // this string is returned to stdout
- shell := newServerShell(ch, in, "> ")
- readLine(shell, t)
- sendStatus(0, ch, t)
-}
-
-func exitStatusNonZeroHandler(ch Channel, in <-chan *Request, t *testing.T) {
- defer ch.Close()
- shell := newServerShell(ch, in, "> ")
- readLine(shell, t)
- sendStatus(15, ch, t)
-}
-
-func exitSignalAndStatusHandler(ch Channel, in <-chan *Request, t *testing.T) {
- defer ch.Close()
- shell := newServerShell(ch, in, "> ")
- readLine(shell, t)
- sendStatus(15, ch, t)
- sendSignal("TERM", ch, t)
-}
-
-func exitSignalHandler(ch Channel, in <-chan *Request, t *testing.T) {
- defer ch.Close()
- shell := newServerShell(ch, in, "> ")
- readLine(shell, t)
- sendSignal("TERM", ch, t)
-}
-
-func exitSignalUnknownHandler(ch Channel, in <-chan *Request, t *testing.T) {
- defer ch.Close()
- shell := newServerShell(ch, in, "> ")
- readLine(shell, t)
- sendSignal("SYS", ch, t)
-}
-
-func exitWithoutSignalOrStatus(ch Channel, in <-chan *Request, t *testing.T) {
- defer ch.Close()
- shell := newServerShell(ch, in, "> ")
- readLine(shell, t)
-}
-
-func shellHandler(ch Channel, in <-chan *Request, t *testing.T) {
- defer ch.Close()
- // this string is returned to stdout
- shell := newServerShell(ch, in, "golang")
- readLine(shell, t)
- sendStatus(0, ch, t)
-}
-
-// Ignores the command, writes fixed strings to stderr and stdout.
-// Strings are "this-is-stdout." and "this-is-stderr.".
-func fixedOutputHandler(ch Channel, in <-chan *Request, t *testing.T) {
- defer ch.Close()
- _, err := ch.Read(nil)
-
- req, ok := <-in
- if !ok {
- t.Fatalf("error: expected channel request, got: %#v", err)
- return
- }
-
- // ignore request, always send some text
- req.Reply(true, nil)
-
- _, err = io.WriteString(ch, "this-is-stdout.")
- if err != nil {
- t.Fatalf("error writing on server: %v", err)
- }
- _, err = io.WriteString(ch.Stderr(), "this-is-stderr.")
- if err != nil {
- t.Fatalf("error writing on server: %v", err)
- }
- sendStatus(0, ch, t)
-}
-
-func readLine(shell *terminal.Terminal, t *testing.T) {
- if _, err := shell.ReadLine(); err != nil && err != io.EOF {
- t.Errorf("unable to read line: %v", err)
- }
-}
-
-func sendStatus(status uint32, ch Channel, t *testing.T) {
- msg := exitStatusMsg{
- Status: status,
- }
- if _, err := ch.SendRequest("exit-status", false, Marshal(&msg)); err != nil {
- t.Errorf("unable to send status: %v", err)
- }
-}
-
-func sendSignal(signal string, ch Channel, t *testing.T) {
- sig := exitSignalMsg{
- Signal: signal,
- CoreDumped: false,
- Errmsg: "Process terminated",
- Lang: "en-GB-oed",
- }
- if _, err := ch.SendRequest("exit-signal", false, Marshal(&sig)); err != nil {
- t.Errorf("unable to send signal: %v", err)
- }
-}
-
-func discardHandler(ch Channel, t *testing.T) {
- defer ch.Close()
- io.Copy(ioutil.Discard, ch)
-}
-
-func echoHandler(ch Channel, in <-chan *Request, t *testing.T) {
- defer ch.Close()
- if n, err := copyNRandomly("echohandler", ch, ch, windowTestBytes); err != nil {
- t.Errorf("short write, wrote %d, expected %d: %v ", n, windowTestBytes, err)
- }
-}
-
-// copyNRandomly copies n bytes from src to dst. It uses a variable, and random,
-// buffer size to exercise more code paths.
-func copyNRandomly(title string, dst io.Writer, src io.Reader, n int) (int, error) {
- var (
- buf = make([]byte, 32*1024)
- written int
- remaining = n
- )
- for remaining > 0 {
- l := rand.Intn(1 << 15)
- if remaining < l {
- l = remaining
- }
- nr, er := src.Read(buf[:l])
- nw, ew := dst.Write(buf[:nr])
- remaining -= nw
- written += nw
- if ew != nil {
- return written, ew
- }
- if nr != nw {
- return written, io.ErrShortWrite
- }
- if er != nil && er != io.EOF {
- return written, er
- }
- }
- return written, nil
-}
-
-func channelKeepaliveSender(ch Channel, in <-chan *Request, t *testing.T) {
- defer ch.Close()
- shell := newServerShell(ch, in, "> ")
- readLine(shell, t)
- if _, err := ch.SendRequest("keepalive@openssh.com", true, nil); err != nil {
- t.Errorf("unable to send channel keepalive request: %v", err)
- }
- sendStatus(0, ch, t)
-}
-
-func TestClientWriteEOF(t *testing.T) {
- conn := dial(simpleEchoHandler, t)
- defer conn.Close()
-
- session, err := conn.NewSession()
- if err != nil {
- t.Fatal(err)
- }
- defer session.Close()
- stdin, err := session.StdinPipe()
- if err != nil {
- t.Fatalf("StdinPipe failed: %v", err)
- }
- stdout, err := session.StdoutPipe()
- if err != nil {
- t.Fatalf("StdoutPipe failed: %v", err)
- }
-
- data := []byte(`0000`)
- _, err = stdin.Write(data)
- if err != nil {
- t.Fatalf("Write failed: %v", err)
- }
- stdin.Close()
-
- res, err := ioutil.ReadAll(stdout)
- if err != nil {
- t.Fatalf("Read failed: %v", err)
- }
-
- if !bytes.Equal(data, res) {
- t.Fatalf("Read differed from write, wrote: %v, read: %v", data, res)
- }
-}
-
-func simpleEchoHandler(ch Channel, in <-chan *Request, t *testing.T) {
- defer ch.Close()
- data, err := ioutil.ReadAll(ch)
- if err != nil {
- t.Errorf("handler read error: %v", err)
- }
- _, err = ch.Write(data)
- if err != nil {
- t.Errorf("handler write error: %v", err)
- }
-}
-
-func TestSessionID(t *testing.T) {
- c1, c2, err := netPipe()
- if err != nil {
- t.Fatalf("netPipe: %v", err)
- }
- defer c1.Close()
- defer c2.Close()
-
- serverID := make(chan []byte, 1)
- clientID := make(chan []byte, 1)
-
- serverConf := &ServerConfig{
- NoClientAuth: true,
- }
- serverConf.AddHostKey(testSigners["ecdsa"])
- clientConf := &ClientConfig{
- User: "user",
- }
-
- go func() {
- conn, chans, reqs, err := NewServerConn(c1, serverConf)
- if err != nil {
- t.Fatalf("server handshake: %v", err)
- }
- serverID <- conn.SessionID()
- go DiscardRequests(reqs)
- for ch := range chans {
- ch.Reject(Prohibited, "")
- }
- }()
-
- go func() {
- conn, chans, reqs, err := NewClientConn(c2, "", clientConf)
- if err != nil {
- t.Fatalf("client handshake: %v", err)
- }
- clientID <- conn.SessionID()
- go DiscardRequests(reqs)
- for ch := range chans {
- ch.Reject(Prohibited, "")
- }
- }()
-
- s := <-serverID
- c := <-clientID
- if bytes.Compare(s, c) != 0 {
- t.Errorf("server session ID (%x) != client session ID (%x)", s, c)
- } else if len(s) == 0 {
- t.Errorf("client and server SessionID were empty.")
- }
-}
-
-type noReadConn struct {
- readSeen bool
- net.Conn
-}
-
-func (c *noReadConn) Close() error {
- return nil
-}
-
-func (c *noReadConn) Read(b []byte) (int, error) {
- c.readSeen = true
- return 0, errors.New("noReadConn error")
-}
-
-func TestInvalidServerConfiguration(t *testing.T) {
- c1, c2, err := netPipe()
- if err != nil {
- t.Fatalf("netPipe: %v", err)
- }
- defer c1.Close()
- defer c2.Close()
-
- serveConn := noReadConn{Conn: c1}
- serverConf := &ServerConfig{}
-
- NewServerConn(&serveConn, serverConf)
- if serveConn.readSeen {
- t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing host key")
- }
-
- serverConf.AddHostKey(testSigners["ecdsa"])
-
- NewServerConn(&serveConn, serverConf)
- if serveConn.readSeen {
- t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing authentication method")
- }
-}
-
-func TestHostKeyAlgorithms(t *testing.T) {
- serverConf := &ServerConfig{
- NoClientAuth: true,
- }
- serverConf.AddHostKey(testSigners["rsa"])
- serverConf.AddHostKey(testSigners["ecdsa"])
-
- connect := func(clientConf *ClientConfig, want string) {
- var alg string
- clientConf.HostKeyCallback = func(h string, a net.Addr, key PublicKey) error {
- alg = key.Type()
- return nil
- }
- c1, c2, err := netPipe()
- if err != nil {
- t.Fatalf("netPipe: %v", err)
- }
- defer c1.Close()
- defer c2.Close()
-
- go NewServerConn(c1, serverConf)
- _, _, _, err = NewClientConn(c2, "", clientConf)
- if err != nil {
- t.Fatalf("NewClientConn: %v", err)
- }
- if alg != want {
- t.Errorf("selected key algorithm %s, want %s", alg, want)
- }
- }
-
- // By default, we get the preferred algorithm, which is ECDSA 256.
-
- clientConf := &ClientConfig{}
- connect(clientConf, KeyAlgoECDSA256)
-
- // Client asks for RSA explicitly.
- clientConf.HostKeyAlgorithms = []string{KeyAlgoRSA}
- connect(clientConf, KeyAlgoRSA)
-
- c1, c2, err := netPipe()
- if err != nil {
- t.Fatalf("netPipe: %v", err)
- }
- defer c1.Close()
- defer c2.Close()
-
- go NewServerConn(c1, serverConf)
- clientConf.HostKeyAlgorithms = []string{"nonexistent-hostkey-algo"}
- _, _, _, err = NewClientConn(c2, "", clientConf)
- if err == nil {
- t.Fatal("succeeded connecting with unknown hostkey algorithm")
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go
deleted file mode 100644
index 6151241..0000000
--- a/vendor/golang.org/x/crypto/ssh/tcpip.go
+++ /dev/null
@@ -1,407 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "errors"
- "fmt"
- "io"
- "math/rand"
- "net"
- "strconv"
- "strings"
- "sync"
- "time"
-)
-
-// Listen requests the remote peer open a listening socket on
-// addr. Incoming connections will be available by calling Accept on
-// the returned net.Listener. The listener must be serviced, or the
-// SSH connection may hang.
-func (c *Client) Listen(n, addr string) (net.Listener, error) {
- laddr, err := net.ResolveTCPAddr(n, addr)
- if err != nil {
- return nil, err
- }
- return c.ListenTCP(laddr)
-}
-
-// Automatic port allocation is broken with OpenSSH before 6.0. See
-// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In
-// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0,
-// rather than the actual port number. This means you can never open
-// two different listeners with auto allocated ports. We work around
-// this by trying explicit ports until we succeed.
-
-const openSSHPrefix = "OpenSSH_"
-
-var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano()))
-
-// isBrokenOpenSSHVersion returns true if the given version string
-// specifies a version of OpenSSH that is known to have a bug in port
-// forwarding.
-func isBrokenOpenSSHVersion(versionStr string) bool {
- i := strings.Index(versionStr, openSSHPrefix)
- if i < 0 {
- return false
- }
- i += len(openSSHPrefix)
- j := i
- for ; j < len(versionStr); j++ {
- if versionStr[j] < '0' || versionStr[j] > '9' {
- break
- }
- }
- version, _ := strconv.Atoi(versionStr[i:j])
- return version < 6
-}
-
-// autoPortListenWorkaround simulates automatic port allocation by
-// trying random ports repeatedly.
-func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) {
- var sshListener net.Listener
- var err error
- const tries = 10
- for i := 0; i < tries; i++ {
- addr := *laddr
- addr.Port = 1024 + portRandomizer.Intn(60000)
- sshListener, err = c.ListenTCP(&addr)
- if err == nil {
- laddr.Port = addr.Port
- return sshListener, err
- }
- }
- return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err)
-}
-
-// RFC 4254 7.1
-type channelForwardMsg struct {
- addr string
- rport uint32
-}
-
-// ListenTCP requests the remote peer open a listening socket
-// on laddr. Incoming connections will be available by calling
-// Accept on the returned net.Listener.
-func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) {
- if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) {
- return c.autoPortListenWorkaround(laddr)
- }
-
- m := channelForwardMsg{
- laddr.IP.String(),
- uint32(laddr.Port),
- }
- // send message
- ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m))
- if err != nil {
- return nil, err
- }
- if !ok {
- return nil, errors.New("ssh: tcpip-forward request denied by peer")
- }
-
- // If the original port was 0, then the remote side will
- // supply a real port number in the response.
- if laddr.Port == 0 {
- var p struct {
- Port uint32
- }
- if err := Unmarshal(resp, &p); err != nil {
- return nil, err
- }
- laddr.Port = int(p.Port)
- }
-
- // Register this forward, using the port number we obtained.
- ch := c.forwards.add(*laddr)
-
- return &tcpListener{laddr, c, ch}, nil
-}
-
-// forwardList stores a mapping between remote
-// forward requests and the tcpListeners.
-type forwardList struct {
- sync.Mutex
- entries []forwardEntry
-}
-
-// forwardEntry represents an established mapping of a laddr on a
-// remote ssh server to a channel connected to a tcpListener.
-type forwardEntry struct {
- laddr net.TCPAddr
- c chan forward
-}
-
-// forward represents an incoming forwarded tcpip connection. The
-// arguments to add/remove/lookup should be address as specified in
-// the original forward-request.
-type forward struct {
- newCh NewChannel // the ssh client channel underlying this forward
- raddr *net.TCPAddr // the raddr of the incoming connection
-}
-
-func (l *forwardList) add(addr net.TCPAddr) chan forward {
- l.Lock()
- defer l.Unlock()
- f := forwardEntry{
- addr,
- make(chan forward, 1),
- }
- l.entries = append(l.entries, f)
- return f.c
-}
-
-// See RFC 4254, section 7.2
-type forwardedTCPPayload struct {
- Addr string
- Port uint32
- OriginAddr string
- OriginPort uint32
-}
-
-// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr.
-func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) {
- if port == 0 || port > 65535 {
- return nil, fmt.Errorf("ssh: port number out of range: %d", port)
- }
- ip := net.ParseIP(string(addr))
- if ip == nil {
- return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr)
- }
- return &net.TCPAddr{IP: ip, Port: int(port)}, nil
-}
-
-func (l *forwardList) handleChannels(in <-chan NewChannel) {
- for ch := range in {
- var payload forwardedTCPPayload
- if err := Unmarshal(ch.ExtraData(), &payload); err != nil {
- ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error())
- continue
- }
-
- // RFC 4254 section 7.2 specifies that incoming
- // addresses should list the address, in string
- // format. It is implied that this should be an IP
- // address, as it would be impossible to connect to it
- // otherwise.
- laddr, err := parseTCPAddr(payload.Addr, payload.Port)
- if err != nil {
- ch.Reject(ConnectionFailed, err.Error())
- continue
- }
- raddr, err := parseTCPAddr(payload.OriginAddr, payload.OriginPort)
- if err != nil {
- ch.Reject(ConnectionFailed, err.Error())
- continue
- }
-
- if ok := l.forward(*laddr, *raddr, ch); !ok {
- // Section 7.2, implementations MUST reject spurious incoming
- // connections.
- ch.Reject(Prohibited, "no forward for address")
- continue
- }
- }
-}
-
-// remove removes the forward entry, and the channel feeding its
-// listener.
-func (l *forwardList) remove(addr net.TCPAddr) {
- l.Lock()
- defer l.Unlock()
- for i, f := range l.entries {
- if addr.IP.Equal(f.laddr.IP) && addr.Port == f.laddr.Port {
- l.entries = append(l.entries[:i], l.entries[i+1:]...)
- close(f.c)
- return
- }
- }
-}
-
-// closeAll closes and clears all forwards.
-func (l *forwardList) closeAll() {
- l.Lock()
- defer l.Unlock()
- for _, f := range l.entries {
- close(f.c)
- }
- l.entries = nil
-}
-
-func (l *forwardList) forward(laddr, raddr net.TCPAddr, ch NewChannel) bool {
- l.Lock()
- defer l.Unlock()
- for _, f := range l.entries {
- if laddr.IP.Equal(f.laddr.IP) && laddr.Port == f.laddr.Port {
- f.c <- forward{ch, &raddr}
- return true
- }
- }
- return false
-}
-
-type tcpListener struct {
- laddr *net.TCPAddr
-
- conn *Client
- in <-chan forward
-}
-
-// Accept waits for and returns the next connection to the listener.
-func (l *tcpListener) Accept() (net.Conn, error) {
- s, ok := <-l.in
- if !ok {
- return nil, io.EOF
- }
- ch, incoming, err := s.newCh.Accept()
- if err != nil {
- return nil, err
- }
- go DiscardRequests(incoming)
-
- return &tcpChanConn{
- Channel: ch,
- laddr: l.laddr,
- raddr: s.raddr,
- }, nil
-}
-
-// Close closes the listener.
-func (l *tcpListener) Close() error {
- m := channelForwardMsg{
- l.laddr.IP.String(),
- uint32(l.laddr.Port),
- }
-
- // this also closes the listener.
- l.conn.forwards.remove(*l.laddr)
- ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m))
- if err == nil && !ok {
- err = errors.New("ssh: cancel-tcpip-forward failed")
- }
- return err
-}
-
-// Addr returns the listener's network address.
-func (l *tcpListener) Addr() net.Addr {
- return l.laddr
-}
-
-// Dial initiates a connection to the addr from the remote host.
-// The resulting connection has a zero LocalAddr() and RemoteAddr().
-func (c *Client) Dial(n, addr string) (net.Conn, error) {
- // Parse the address into host and numeric port.
- host, portString, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
- port, err := strconv.ParseUint(portString, 10, 16)
- if err != nil {
- return nil, err
- }
- // Use a zero address for local and remote address.
- zeroAddr := &net.TCPAddr{
- IP: net.IPv4zero,
- Port: 0,
- }
- ch, err := c.dial(net.IPv4zero.String(), 0, host, int(port))
- if err != nil {
- return nil, err
- }
- return &tcpChanConn{
- Channel: ch,
- laddr: zeroAddr,
- raddr: zeroAddr,
- }, nil
-}
-
-// DialTCP connects to the remote address raddr on the network net,
-// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used
-// as the local address for the connection.
-func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) {
- if laddr == nil {
- laddr = &net.TCPAddr{
- IP: net.IPv4zero,
- Port: 0,
- }
- }
- ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port)
- if err != nil {
- return nil, err
- }
- return &tcpChanConn{
- Channel: ch,
- laddr: laddr,
- raddr: raddr,
- }, nil
-}
-
-// RFC 4254 7.2
-type channelOpenDirectMsg struct {
- raddr string
- rport uint32
- laddr string
- lport uint32
-}
-
-func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) {
- msg := channelOpenDirectMsg{
- raddr: raddr,
- rport: uint32(rport),
- laddr: laddr,
- lport: uint32(lport),
- }
- ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg))
- if err != nil {
- return nil, err
- }
- go DiscardRequests(in)
- return ch, err
-}
-
-type tcpChan struct {
- Channel // the backing channel
-}
-
-// tcpChanConn fulfills the net.Conn interface without
-// the tcpChan having to hold laddr or raddr directly.
-type tcpChanConn struct {
- Channel
- laddr, raddr net.Addr
-}
-
-// LocalAddr returns the local network address.
-func (t *tcpChanConn) LocalAddr() net.Addr {
- return t.laddr
-}
-
-// RemoteAddr returns the remote network address.
-func (t *tcpChanConn) RemoteAddr() net.Addr {
- return t.raddr
-}
-
-// SetDeadline sets the read and write deadlines associated
-// with the connection.
-func (t *tcpChanConn) SetDeadline(deadline time.Time) error {
- if err := t.SetReadDeadline(deadline); err != nil {
- return err
- }
- return t.SetWriteDeadline(deadline)
-}
-
-// SetReadDeadline sets the read deadline.
-// A zero value for t means Read will not time out.
-// After the deadline, the error from Read will implement net.Error
-// with Timeout() == true.
-func (t *tcpChanConn) SetReadDeadline(deadline time.Time) error {
- return errors.New("ssh: tcpChan: deadline not supported")
-}
-
-// SetWriteDeadline exists to satisfy the net.Conn interface
-// but is not implemented by this type. It always returns an error.
-func (t *tcpChanConn) SetWriteDeadline(deadline time.Time) error {
- return errors.New("ssh: tcpChan: deadline not supported")
-}
diff --git a/vendor/golang.org/x/crypto/ssh/tcpip_test.go b/vendor/golang.org/x/crypto/ssh/tcpip_test.go
deleted file mode 100644
index f1265cb..0000000
--- a/vendor/golang.org/x/crypto/ssh/tcpip_test.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "testing"
-)
-
-func TestAutoPortListenBroken(t *testing.T) {
- broken := "SSH-2.0-OpenSSH_5.9hh11"
- works := "SSH-2.0-OpenSSH_6.1"
- if !isBrokenOpenSSHVersion(broken) {
- t.Errorf("version %q not marked as broken", broken)
- }
- if isBrokenOpenSSHVersion(works) {
- t.Errorf("version %q marked as broken", works)
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go
deleted file mode 100644
index 1d54c4f..0000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package terminal
-
-import (
- "bytes"
- "io"
- "os"
- "testing"
-)
-
-type MockTerminal struct {
- toSend []byte
- bytesPerRead int
- received []byte
-}
-
-func (c *MockTerminal) Read(data []byte) (n int, err error) {
- n = len(data)
- if n == 0 {
- return
- }
- if n > len(c.toSend) {
- n = len(c.toSend)
- }
- if n == 0 {
- return 0, io.EOF
- }
- if c.bytesPerRead > 0 && n > c.bytesPerRead {
- n = c.bytesPerRead
- }
- copy(data, c.toSend[:n])
- c.toSend = c.toSend[n:]
- return
-}
-
-func (c *MockTerminal) Write(data []byte) (n int, err error) {
- c.received = append(c.received, data...)
- return len(data), nil
-}
-
-func TestClose(t *testing.T) {
- c := &MockTerminal{}
- ss := NewTerminal(c, "> ")
- line, err := ss.ReadLine()
- if line != "" {
- t.Errorf("Expected empty line but got: %s", line)
- }
- if err != io.EOF {
- t.Errorf("Error should have been EOF but got: %s", err)
- }
-}
-
-var keyPressTests = []struct {
- in string
- line string
- err error
- throwAwayLines int
-}{
- {
- err: io.EOF,
- },
- {
- in: "\r",
- line: "",
- },
- {
- in: "foo\r",
- line: "foo",
- },
- {
- in: "a\x1b[Cb\r", // right
- line: "ab",
- },
- {
- in: "a\x1b[Db\r", // left
- line: "ba",
- },
- {
- in: "a\177b\r", // backspace
- line: "b",
- },
- {
- in: "\x1b[A\r", // up
- },
- {
- in: "\x1b[B\r", // down
- },
- {
- in: "line\x1b[A\x1b[B\r", // up then down
- line: "line",
- },
- {
- in: "line1\rline2\x1b[A\r", // recall previous line.
- line: "line1",
- throwAwayLines: 1,
- },
- {
- // recall two previous lines and append.
- in: "line1\rline2\rline3\x1b[A\x1b[Axxx\r",
- line: "line1xxx",
- throwAwayLines: 2,
- },
- {
- // Ctrl-A to move to beginning of line followed by ^K to kill
- // line.
- in: "a b \001\013\r",
- line: "",
- },
- {
- // Ctrl-A to move to beginning of line, Ctrl-E to move to end,
- // finally ^K to kill nothing.
- in: "a b \001\005\013\r",
- line: "a b ",
- },
- {
- in: "\027\r",
- line: "",
- },
- {
- in: "a\027\r",
- line: "",
- },
- {
- in: "a \027\r",
- line: "",
- },
- {
- in: "a b\027\r",
- line: "a ",
- },
- {
- in: "a b \027\r",
- line: "a ",
- },
- {
- in: "one two thr\x1b[D\027\r",
- line: "one two r",
- },
- {
- in: "\013\r",
- line: "",
- },
- {
- in: "a\013\r",
- line: "a",
- },
- {
- in: "ab\x1b[D\013\r",
- line: "a",
- },
- {
- in: "Ξεσκεπάζω\r",
- line: "Ξεσκεπάζω",
- },
- {
- in: "£\r\x1b[A\177\r", // non-ASCII char, enter, up, backspace.
- line: "",
- throwAwayLines: 1,
- },
- {
- in: "£\r££\x1b[A\x1b[B\177\r", // non-ASCII char, enter, 2x non-ASCII, up, down, backspace, enter.
- line: "£",
- throwAwayLines: 1,
- },
- {
- // Ctrl-D at the end of the line should be ignored.
- in: "a\004\r",
- line: "a",
- },
- {
- // a, b, left, Ctrl-D should erase the b.
- in: "ab\x1b[D\004\r",
- line: "a",
- },
- {
- // a, b, c, d, left, left, ^U should erase to the beginning of
- // the line.
- in: "abcd\x1b[D\x1b[D\025\r",
- line: "cd",
- },
- {
- // Bracketed paste mode: control sequences should be returned
- // verbatim in paste mode.
- in: "abc\x1b[200~de\177f\x1b[201~\177\r",
- line: "abcde\177",
- },
- {
- // Enter in bracketed paste mode should still work.
- in: "abc\x1b[200~d\refg\x1b[201~h\r",
- line: "efgh",
- throwAwayLines: 1,
- },
- {
- // Lines consisting entirely of pasted data should be indicated as such.
- in: "\x1b[200~a\r",
- line: "a",
- err: ErrPasteIndicator,
- },
-}
-
-func TestKeyPresses(t *testing.T) {
- for i, test := range keyPressTests {
- for j := 1; j < len(test.in); j++ {
- c := &MockTerminal{
- toSend: []byte(test.in),
- bytesPerRead: j,
- }
- ss := NewTerminal(c, "> ")
- for k := 0; k < test.throwAwayLines; k++ {
- _, err := ss.ReadLine()
- if err != nil {
- t.Errorf("Throwaway line %d from test %d resulted in error: %s", k, i, err)
- }
- }
- line, err := ss.ReadLine()
- if line != test.line {
- t.Errorf("Line resulting from test %d (%d bytes per read) was '%s', expected '%s'", i, j, line, test.line)
- break
- }
- if err != test.err {
- t.Errorf("Error resulting from test %d (%d bytes per read) was '%v', expected '%v'", i, j, err, test.err)
- break
- }
- }
- }
-}
-
-func TestPasswordNotSaved(t *testing.T) {
- c := &MockTerminal{
- toSend: []byte("password\r\x1b[A\r"),
- bytesPerRead: 1,
- }
- ss := NewTerminal(c, "> ")
- pw, _ := ss.ReadPassword("> ")
- if pw != "password" {
- t.Fatalf("failed to read password, got %s", pw)
- }
- line, _ := ss.ReadLine()
- if len(line) > 0 {
- t.Fatalf("password was saved in history")
- }
-}
-
-var setSizeTests = []struct {
- width, height int
-}{
- {40, 13},
- {80, 24},
- {132, 43},
-}
-
-func TestTerminalSetSize(t *testing.T) {
- for _, setSize := range setSizeTests {
- c := &MockTerminal{
- toSend: []byte("password\r\x1b[A\r"),
- bytesPerRead: 1,
- }
- ss := NewTerminal(c, "> ")
- ss.SetSize(setSize.width, setSize.height)
- pw, _ := ss.ReadPassword("Password: ")
- if pw != "password" {
- t.Fatalf("failed to read password, got %s", pw)
- }
- if string(c.received) != "Password: \r\n" {
- t.Errorf("failed to set the temporary prompt expected %q, got %q", "Password: ", c.received)
- }
- }
-}
-
-func TestMakeRawState(t *testing.T) {
- fd := int(os.Stdout.Fd())
- if !IsTerminal(fd) {
- t.Skip("stdout is not a terminal; skipping test")
- }
-
- st, err := GetState(fd)
- if err != nil {
- t.Fatalf("failed to get terminal state from GetState: %s", err)
- }
- defer Restore(fd, st)
- raw, err := MakeRaw(fd)
- if err != nil {
- t.Fatalf("failed to get terminal state from MakeRaw: %s", err)
- }
-
- if *st != *raw {
- t.Errorf("states do not match; was %v, expected %v", raw, st)
- }
-}
-
-func TestOutputNewlines(t *testing.T) {
- // \n should be changed to \r\n in terminal output.
- buf := new(bytes.Buffer)
- term := NewTerminal(buf, ">")
-
- term.Write([]byte("1\n2\n"))
- output := string(buf.Bytes())
- const expected = "1\r\n2\r\n"
-
- if output != expected {
- t.Errorf("incorrect output: was %q, expected %q", output, expected)
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go
deleted file mode 100644
index c869213..0000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/util.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd
-
-// Package terminal provides support functions for dealing with terminals, as
-// commonly found on UNIX systems.
-//
-// Putting a terminal into raw mode is the most common requirement:
-//
-// oldState, err := terminal.MakeRaw(0)
-// if err != nil {
-// panic(err)
-// }
-// defer terminal.Restore(0, oldState)
-package terminal // import "golang.org/x/crypto/ssh/terminal"
-
-import (
- "io"
- "syscall"
- "unsafe"
-)
-
-// State contains the state of a terminal.
-type State struct {
- termios syscall.Termios
-}
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd int) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
-
-// MakeRaw put the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-func MakeRaw(fd int) (*State, error) {
- var oldState State
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- newState := oldState.termios
- // This attempts to replicate the behaviour documented for cfmakeraw in
- // the termios(3) manpage.
- newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
- newState.Oflag &^= syscall.OPOST
- newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
- newState.Cflag &^= syscall.CSIZE | syscall.PARENB
- newState.Cflag |= syscall.CS8
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- return &oldState, nil
-}
-
-// GetState returns the current state of a terminal which may be useful to
-// restore the terminal after a signal.
-func GetState(fd int) (*State, error) {
- var oldState State
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- return &oldState, nil
-}
-
-// Restore restores the terminal connected to the given file descriptor to a
-// previous state.
-func Restore(fd int, state *State) error {
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0)
- return err
-}
-
-// GetSize returns the dimensions of the given terminal.
-func GetSize(fd int) (width, height int, err error) {
- var dimensions [4]uint16
-
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
- return -1, -1, err
- }
- return int(dimensions[1]), int(dimensions[0]), nil
-}
-
-// ReadPassword reads a line of input from a terminal without local echo. This
-// is commonly used for inputting passwords and other sensitive data. The slice
-// returned does not include the \n.
-func ReadPassword(fd int) ([]byte, error) {
- var oldState syscall.Termios
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- newState := oldState
- newState.Lflag &^= syscall.ECHO
- newState.Lflag |= syscall.ICANON | syscall.ISIG
- newState.Iflag |= syscall.ICRNL
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- defer func() {
- syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0)
- }()
-
- var buf [16]byte
- var ret []byte
- for {
- n, err := syscall.Read(fd, buf[:])
- if err != nil {
- return nil, err
- }
- if n == 0 {
- if len(ret) == 0 {
- return nil, io.EOF
- }
- break
- }
- if buf[n-1] == '\n' {
- n--
- }
- ret = append(ret, buf[:n]...)
- if n < len(buf) {
- break
- }
- }
-
- return ret, nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
deleted file mode 100644
index 5883b22..0000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package terminal
-
-// These constants are declared here, rather than importing
-// them from the syscall package as some syscall packages, even
-// on linux, for example gccgo, do not declare them.
-const ioctlReadTermios = 0x5401 // syscall.TCGETS
-const ioctlWriteTermios = 0x5402 // syscall.TCSETS
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
deleted file mode 100644
index 799f049..0000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package terminal provides support functions for dealing with terminals, as
-// commonly found on UNIX systems.
-//
-// Putting a terminal into raw mode is the most common requirement:
-//
-// oldState, err := terminal.MakeRaw(0)
-// if err != nil {
-// panic(err)
-// }
-// defer terminal.Restore(0, oldState)
-package terminal
-
-import (
- "fmt"
- "runtime"
-)
-
-type State struct{}
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd int) bool {
- return false
-}
-
-// MakeRaw put the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-func MakeRaw(fd int) (*State, error) {
- return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
-}
-
-// GetState returns the current state of a terminal which may be useful to
-// restore the terminal after a signal.
-func GetState(fd int) (*State, error) {
- return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
-}
-
-// Restore restores the terminal connected to the given file descriptor to a
-// previous state.
-func Restore(fd int, state *State) error {
- return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
-}
-
-// GetSize returns the dimensions of the given terminal.
-func GetSize(fd int) (width, height int, err error) {
- return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
-}
-
-// ReadPassword reads a line of input from a terminal without local echo. This
-// is commonly used for inputting passwords and other sensitive data. The slice
-// returned does not include the \n.
-func ReadPassword(fd int) ([]byte, error) {
- return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
-}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
deleted file mode 100644
index 07eb5ed..0000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build solaris
-
-package terminal // import "golang.org/x/crypto/ssh/terminal"
-
-import (
- "golang.org/x/sys/unix"
- "io"
- "syscall"
-)
-
-// State contains the state of a terminal.
-type State struct {
- termios syscall.Termios
-}
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd int) bool {
- // see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
- var termio unix.Termio
- err := unix.IoctlSetTermio(fd, unix.TCGETA, &termio)
- return err == nil
-}
-
-// ReadPassword reads a line of input from a terminal without local echo. This
-// is commonly used for inputting passwords and other sensitive data. The slice
-// returned does not include the \n.
-func ReadPassword(fd int) ([]byte, error) {
- // see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c
- val, err := unix.IoctlGetTermios(fd, unix.TCGETS)
- if err != nil {
- return nil, err
- }
- oldState := *val
-
- newState := oldState
- newState.Lflag &^= syscall.ECHO
- newState.Lflag |= syscall.ICANON | syscall.ISIG
- newState.Iflag |= syscall.ICRNL
- err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState)
- if err != nil {
- return nil, err
- }
-
- defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState)
-
- var buf [16]byte
- var ret []byte
- for {
- n, err := syscall.Read(fd, buf[:])
- if err != nil {
- return nil, err
- }
- if n == 0 {
- if len(ret) == 0 {
- return nil, io.EOF
- }
- break
- }
- if buf[n-1] == '\n' {
- n--
- }
- ret = append(ret, buf[:n]...)
- if n < len(buf) {
- break
- }
- }
-
- return ret, nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
deleted file mode 100644
index ae9fa9e..0000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-// Package terminal provides support functions for dealing with terminals, as
-// commonly found on UNIX systems.
-//
-// Putting a terminal into raw mode is the most common requirement:
-//
-// oldState, err := terminal.MakeRaw(0)
-// if err != nil {
-// panic(err)
-// }
-// defer terminal.Restore(0, oldState)
-package terminal
-
-import (
- "io"
- "syscall"
- "unsafe"
-)
-
-const (
- enableLineInput = 2
- enableEchoInput = 4
- enableProcessedInput = 1
- enableWindowInput = 8
- enableMouseInput = 16
- enableInsertMode = 32
- enableQuickEditMode = 64
- enableExtendedFlags = 128
- enableAutoPosition = 256
- enableProcessedOutput = 1
- enableWrapAtEolOutput = 2
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
- procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
- procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
- procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
-)
-
-type (
- short int16
- word uint16
-
- coord struct {
- x short
- y short
- }
- smallRect struct {
- left short
- top short
- right short
- bottom short
- }
- consoleScreenBufferInfo struct {
- size coord
- cursorPosition coord
- attributes word
- window smallRect
- maximumWindowSize coord
- }
-)
-
-type State struct {
- mode uint32
-}
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd int) bool {
- var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
-}
-
-// MakeRaw put the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-func MakeRaw(fd int) (*State, error) {
- var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
- }
- raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput)
- _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0)
- if e != 0 {
- return nil, error(e)
- }
- return &State{st}, nil
-}
-
-// GetState returns the current state of a terminal which may be useful to
-// restore the terminal after a signal.
-func GetState(fd int) (*State, error) {
- var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
- }
- return &State{st}, nil
-}
-
-// Restore restores the terminal connected to the given file descriptor to a
-// previous state.
-func Restore(fd int, state *State) error {
- _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0)
- return err
-}
-
-// GetSize returns the dimensions of the given terminal.
-func GetSize(fd int) (width, height int, err error) {
- var info consoleScreenBufferInfo
- _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0)
- if e != 0 {
- return 0, 0, error(e)
- }
- return int(info.size.x), int(info.size.y), nil
-}
-
-// ReadPassword reads a line of input from a terminal without local echo. This
-// is commonly used for inputting passwords and other sensitive data. The slice
-// returned does not include the \n.
-func ReadPassword(fd int) ([]byte, error) {
- var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
- }
- old := st
-
- st &^= (enableEchoInput)
- st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
- _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
- if e != 0 {
- return nil, error(e)
- }
-
- defer func() {
- syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
- }()
-
- var buf [16]byte
- var ret []byte
- for {
- n, err := syscall.Read(syscall.Handle(fd), buf[:])
- if err != nil {
- return nil, err
- }
- if n == 0 {
- if len(ret) == 0 {
- return nil, io.EOF
- }
- break
- }
- if buf[n-1] == '\n' {
- n--
- }
- if n > 0 && buf[n-1] == '\r' {
- n--
- }
- ret = append(ret, buf[:n]...)
- if n < len(buf) {
- break
- }
- }
-
- return ret, nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go
deleted file mode 100644
index f481253..0000000
--- a/vendor/golang.org/x/crypto/ssh/test/agent_unix_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux netbsd openbsd
-
-package test
-
-import (
- "bytes"
- "testing"
-
- "golang.org/x/crypto/ssh"
- "golang.org/x/crypto/ssh/agent"
-)
-
-func TestAgentForward(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- conn := server.Dial(clientConfig())
- defer conn.Close()
-
- keyring := agent.NewKeyring()
- if err := keyring.Add(agent.AddedKey{PrivateKey: testPrivateKeys["dsa"]}); err != nil {
- t.Fatalf("Error adding key: %s", err)
- }
- if err := keyring.Add(agent.AddedKey{
- PrivateKey: testPrivateKeys["dsa"],
- ConfirmBeforeUse: true,
- LifetimeSecs: 3600,
- }); err != nil {
- t.Fatalf("Error adding key with constraints: %s", err)
- }
- pub := testPublicKeys["dsa"]
-
- sess, err := conn.NewSession()
- if err != nil {
- t.Fatalf("NewSession: %v", err)
- }
- if err := agent.RequestAgentForwarding(sess); err != nil {
- t.Fatalf("RequestAgentForwarding: %v", err)
- }
-
- if err := agent.ForwardToAgent(conn, keyring); err != nil {
- t.Fatalf("SetupForwardKeyring: %v", err)
- }
- out, err := sess.CombinedOutput("ssh-add -L")
- if err != nil {
- t.Fatalf("running ssh-add: %v, out %s", err, out)
- }
- key, _, _, _, err := ssh.ParseAuthorizedKey(out)
- if err != nil {
- t.Fatalf("ParseAuthorizedKey(%q): %v", out, err)
- }
-
- if !bytes.Equal(key.Marshal(), pub.Marshal()) {
- t.Fatalf("got key %s, want %s", ssh.MarshalAuthorizedKey(key), ssh.MarshalAuthorizedKey(pub))
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/test/cert_test.go b/vendor/golang.org/x/crypto/ssh/test/cert_test.go
deleted file mode 100644
index 364790f..0000000
--- a/vendor/golang.org/x/crypto/ssh/test/cert_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux netbsd openbsd
-
-package test
-
-import (
- "crypto/rand"
- "testing"
-
- "golang.org/x/crypto/ssh"
-)
-
-func TestCertLogin(t *testing.T) {
- s := newServer(t)
- defer s.Shutdown()
-
- // Use a key different from the default.
- clientKey := testSigners["dsa"]
- caAuthKey := testSigners["ecdsa"]
- cert := &ssh.Certificate{
- Key: clientKey.PublicKey(),
- ValidPrincipals: []string{username()},
- CertType: ssh.UserCert,
- ValidBefore: ssh.CertTimeInfinity,
- }
- if err := cert.SignCert(rand.Reader, caAuthKey); err != nil {
- t.Fatalf("SetSignature: %v", err)
- }
-
- certSigner, err := ssh.NewCertSigner(cert, clientKey)
- if err != nil {
- t.Fatalf("NewCertSigner: %v", err)
- }
-
- conf := &ssh.ClientConfig{
- User: username(),
- }
- conf.Auth = append(conf.Auth, ssh.PublicKeys(certSigner))
- client, err := s.TryDial(conf)
- if err != nil {
- t.Fatalf("TryDial: %v", err)
- }
- client.Close()
-}
diff --git a/vendor/golang.org/x/crypto/ssh/test/doc.go b/vendor/golang.org/x/crypto/ssh/test/doc.go
deleted file mode 100644
index 3f9b334..0000000
--- a/vendor/golang.org/x/crypto/ssh/test/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This package contains integration tests for the
-// golang.org/x/crypto/ssh package.
-package test // import "golang.org/x/crypto/ssh/test"
diff --git a/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go
deleted file mode 100644
index 877a88c..0000000
--- a/vendor/golang.org/x/crypto/ssh/test/forward_unix_test.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux netbsd openbsd
-
-package test
-
-import (
- "bytes"
- "io"
- "io/ioutil"
- "math/rand"
- "net"
- "testing"
- "time"
-)
-
-func TestPortForward(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- conn := server.Dial(clientConfig())
- defer conn.Close()
-
- sshListener, err := conn.Listen("tcp", "localhost:0")
- if err != nil {
- t.Fatal(err)
- }
-
- go func() {
- sshConn, err := sshListener.Accept()
- if err != nil {
- t.Fatalf("listen.Accept failed: %v", err)
- }
-
- _, err = io.Copy(sshConn, sshConn)
- if err != nil && err != io.EOF {
- t.Fatalf("ssh client copy: %v", err)
- }
- sshConn.Close()
- }()
-
- forwardedAddr := sshListener.Addr().String()
- tcpConn, err := net.Dial("tcp", forwardedAddr)
- if err != nil {
- t.Fatalf("TCP dial failed: %v", err)
- }
-
- readChan := make(chan []byte)
- go func() {
- data, _ := ioutil.ReadAll(tcpConn)
- readChan <- data
- }()
-
- // Invent some data.
- data := make([]byte, 100*1000)
- for i := range data {
- data[i] = byte(i % 255)
- }
-
- var sent []byte
- for len(sent) < 1000*1000 {
- // Send random sized chunks
- m := rand.Intn(len(data))
- n, err := tcpConn.Write(data[:m])
- if err != nil {
- break
- }
- sent = append(sent, data[:n]...)
- }
- if err := tcpConn.(*net.TCPConn).CloseWrite(); err != nil {
- t.Errorf("tcpConn.CloseWrite: %v", err)
- }
-
- read := <-readChan
-
- if len(sent) != len(read) {
- t.Fatalf("got %d bytes, want %d", len(read), len(sent))
- }
- if bytes.Compare(sent, read) != 0 {
- t.Fatalf("read back data does not match")
- }
-
- if err := sshListener.Close(); err != nil {
- t.Fatalf("sshListener.Close: %v", err)
- }
-
- // Check that the forward disappeared.
- tcpConn, err = net.Dial("tcp", forwardedAddr)
- if err == nil {
- tcpConn.Close()
- t.Errorf("still listening to %s after closing", forwardedAddr)
- }
-}
-
-func TestAcceptClose(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- conn := server.Dial(clientConfig())
-
- sshListener, err := conn.Listen("tcp", "localhost:0")
- if err != nil {
- t.Fatal(err)
- }
-
- quit := make(chan error, 1)
- go func() {
- for {
- c, err := sshListener.Accept()
- if err != nil {
- quit <- err
- break
- }
- c.Close()
- }
- }()
- sshListener.Close()
-
- select {
- case <-time.After(1 * time.Second):
- t.Errorf("timeout: listener did not close.")
- case err := <-quit:
- t.Logf("quit as expected (error %v)", err)
- }
-}
-
-// Check that listeners exit if the underlying client transport dies.
-func TestPortForwardConnectionClose(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- conn := server.Dial(clientConfig())
-
- sshListener, err := conn.Listen("tcp", "localhost:0")
- if err != nil {
- t.Fatal(err)
- }
-
- quit := make(chan error, 1)
- go func() {
- for {
- c, err := sshListener.Accept()
- if err != nil {
- quit <- err
- break
- }
- c.Close()
- }
- }()
-
- // It would be even nicer if we closed the server side, but it
- // is more involved as the fd for that side is dup()ed.
- server.clientConn.Close()
-
- select {
- case <-time.After(1 * time.Second):
- t.Errorf("timeout: listener did not close.")
- case err := <-quit:
- t.Logf("quit as expected (error %v)", err)
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/test/session_test.go b/vendor/golang.org/x/crypto/ssh/test/session_test.go
deleted file mode 100644
index fc7e471..0000000
--- a/vendor/golang.org/x/crypto/ssh/test/session_test.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !windows
-
-package test
-
-// Session functional tests.
-
-import (
- "bytes"
- "errors"
- "io"
- "strings"
- "testing"
-
- "golang.org/x/crypto/ssh"
-)
-
-func TestRunCommandSuccess(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- conn := server.Dial(clientConfig())
- defer conn.Close()
-
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("session failed: %v", err)
- }
- defer session.Close()
- err = session.Run("true")
- if err != nil {
- t.Fatalf("session failed: %v", err)
- }
-}
-
-func TestHostKeyCheck(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
-
- conf := clientConfig()
- hostDB := hostKeyDB()
- conf.HostKeyCallback = hostDB.Check
-
- // change the keys.
- hostDB.keys[ssh.KeyAlgoRSA][25]++
- hostDB.keys[ssh.KeyAlgoDSA][25]++
- hostDB.keys[ssh.KeyAlgoECDSA256][25]++
-
- conn, err := server.TryDial(conf)
- if err == nil {
- conn.Close()
- t.Fatalf("dial should have failed.")
- } else if !strings.Contains(err.Error(), "host key mismatch") {
- t.Fatalf("'host key mismatch' not found in %v", err)
- }
-}
-
-func TestRunCommandStdin(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- conn := server.Dial(clientConfig())
- defer conn.Close()
-
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("session failed: %v", err)
- }
- defer session.Close()
-
- r, w := io.Pipe()
- defer r.Close()
- defer w.Close()
- session.Stdin = r
-
- err = session.Run("true")
- if err != nil {
- t.Fatalf("session failed: %v", err)
- }
-}
-
-func TestRunCommandStdinError(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- conn := server.Dial(clientConfig())
- defer conn.Close()
-
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("session failed: %v", err)
- }
- defer session.Close()
-
- r, w := io.Pipe()
- defer r.Close()
- session.Stdin = r
- pipeErr := errors.New("closing write end of pipe")
- w.CloseWithError(pipeErr)
-
- err = session.Run("true")
- if err != pipeErr {
- t.Fatalf("expected %v, found %v", pipeErr, err)
- }
-}
-
-func TestRunCommandFailed(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- conn := server.Dial(clientConfig())
- defer conn.Close()
-
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("session failed: %v", err)
- }
- defer session.Close()
- err = session.Run(`bash -c "kill -9 $$"`)
- if err == nil {
- t.Fatalf("session succeeded: %v", err)
- }
-}
-
-func TestRunCommandWeClosed(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- conn := server.Dial(clientConfig())
- defer conn.Close()
-
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("session failed: %v", err)
- }
- err = session.Shell()
- if err != nil {
- t.Fatalf("shell failed: %v", err)
- }
- err = session.Close()
- if err != nil {
- t.Fatalf("shell failed: %v", err)
- }
-}
-
-func TestFuncLargeRead(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- conn := server.Dial(clientConfig())
- defer conn.Close()
-
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("unable to create new session: %s", err)
- }
-
- stdout, err := session.StdoutPipe()
- if err != nil {
- t.Fatalf("unable to acquire stdout pipe: %s", err)
- }
-
- err = session.Start("dd if=/dev/urandom bs=2048 count=1024")
- if err != nil {
- t.Fatalf("unable to execute remote command: %s", err)
- }
-
- buf := new(bytes.Buffer)
- n, err := io.Copy(buf, stdout)
- if err != nil {
- t.Fatalf("error reading from remote stdout: %s", err)
- }
-
- if n != 2048*1024 {
- t.Fatalf("Expected %d bytes but read only %d from remote command", 2048, n)
- }
-}
-
-func TestKeyChange(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- conf := clientConfig()
- hostDB := hostKeyDB()
- conf.HostKeyCallback = hostDB.Check
- conf.RekeyThreshold = 1024
- conn := server.Dial(conf)
- defer conn.Close()
-
- for i := 0; i < 4; i++ {
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("unable to create new session: %s", err)
- }
-
- stdout, err := session.StdoutPipe()
- if err != nil {
- t.Fatalf("unable to acquire stdout pipe: %s", err)
- }
-
- err = session.Start("dd if=/dev/urandom bs=1024 count=1")
- if err != nil {
- t.Fatalf("unable to execute remote command: %s", err)
- }
- buf := new(bytes.Buffer)
- n, err := io.Copy(buf, stdout)
- if err != nil {
- t.Fatalf("error reading from remote stdout: %s", err)
- }
-
- want := int64(1024)
- if n != want {
- t.Fatalf("Expected %d bytes but read only %d from remote command", want, n)
- }
- }
-
- if changes := hostDB.checkCount; changes < 4 {
- t.Errorf("got %d key changes, want 4", changes)
- }
-}
-
-func TestInvalidTerminalMode(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- conn := server.Dial(clientConfig())
- defer conn.Close()
-
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("session failed: %v", err)
- }
- defer session.Close()
-
- if err = session.RequestPty("vt100", 80, 40, ssh.TerminalModes{255: 1984}); err == nil {
- t.Fatalf("req-pty failed: successful request with invalid mode")
- }
-}
-
-func TestValidTerminalMode(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- conn := server.Dial(clientConfig())
- defer conn.Close()
-
- session, err := conn.NewSession()
- if err != nil {
- t.Fatalf("session failed: %v", err)
- }
- defer session.Close()
-
- stdout, err := session.StdoutPipe()
- if err != nil {
- t.Fatalf("unable to acquire stdout pipe: %s", err)
- }
-
- stdin, err := session.StdinPipe()
- if err != nil {
- t.Fatalf("unable to acquire stdin pipe: %s", err)
- }
-
- tm := ssh.TerminalModes{ssh.ECHO: 0}
- if err = session.RequestPty("xterm", 80, 40, tm); err != nil {
- t.Fatalf("req-pty failed: %s", err)
- }
-
- err = session.Shell()
- if err != nil {
- t.Fatalf("session failed: %s", err)
- }
-
- stdin.Write([]byte("stty -a && exit\n"))
-
- var buf bytes.Buffer
- if _, err := io.Copy(&buf, stdout); err != nil {
- t.Fatalf("reading failed: %s", err)
- }
-
- if sttyOutput := buf.String(); !strings.Contains(sttyOutput, "-echo ") {
- t.Fatalf("terminal mode failure: expected -echo in stty output, got %s", sttyOutput)
- }
-}
-
-func TestCiphers(t *testing.T) {
- var config ssh.Config
- config.SetDefaults()
- cipherOrder := config.Ciphers
- // These ciphers will not be tested when commented out in cipher.go it will
- // fallback to the next available as per line 292.
- cipherOrder = append(cipherOrder, "aes128-cbc", "3des-cbc")
-
- for _, ciph := range cipherOrder {
- server := newServer(t)
- defer server.Shutdown()
- conf := clientConfig()
- conf.Ciphers = []string{ciph}
- // Don't fail if sshd doesn't have the cipher.
- conf.Ciphers = append(conf.Ciphers, cipherOrder...)
- conn, err := server.TryDial(conf)
- if err == nil {
- conn.Close()
- } else {
- t.Fatalf("failed for cipher %q", ciph)
- }
- }
-}
-
-func TestMACs(t *testing.T) {
- var config ssh.Config
- config.SetDefaults()
- macOrder := config.MACs
-
- for _, mac := range macOrder {
- server := newServer(t)
- defer server.Shutdown()
- conf := clientConfig()
- conf.MACs = []string{mac}
- // Don't fail if sshd doesn't have the MAC.
- conf.MACs = append(conf.MACs, macOrder...)
- if conn, err := server.TryDial(conf); err == nil {
- conn.Close()
- } else {
- t.Fatalf("failed for MAC %q", mac)
- }
- }
-}
-
-func TestKeyExchanges(t *testing.T) {
- var config ssh.Config
- config.SetDefaults()
- kexOrder := config.KeyExchanges
- for _, kex := range kexOrder {
- server := newServer(t)
- defer server.Shutdown()
- conf := clientConfig()
- // Don't fail if sshd doesn't have the kex.
- conf.KeyExchanges = append([]string{kex}, kexOrder...)
- conn, err := server.TryDial(conf)
- if err == nil {
- conn.Close()
- } else {
- t.Errorf("failed for kex %q", kex)
- }
- }
-}
-
-func TestClientAuthAlgorithms(t *testing.T) {
- for _, key := range []string{
- "rsa",
- "dsa",
- "ecdsa",
- "ed25519",
- } {
- server := newServer(t)
- conf := clientConfig()
- conf.SetDefaults()
- conf.Auth = []ssh.AuthMethod{
- ssh.PublicKeys(testSigners[key]),
- }
-
- conn, err := server.TryDial(conf)
- if err == nil {
- conn.Close()
- } else {
- t.Errorf("failed for key %q", key)
- }
-
- server.Shutdown()
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/test/tcpip_test.go b/vendor/golang.org/x/crypto/ssh/test/tcpip_test.go
deleted file mode 100644
index a2eb935..0000000
--- a/vendor/golang.org/x/crypto/ssh/test/tcpip_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !windows
-
-package test
-
-// direct-tcpip functional tests
-
-import (
- "io"
- "net"
- "testing"
-)
-
-func TestDial(t *testing.T) {
- server := newServer(t)
- defer server.Shutdown()
- sshConn := server.Dial(clientConfig())
- defer sshConn.Close()
-
- l, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("Listen: %v", err)
- }
- defer l.Close()
-
- go func() {
- for {
- c, err := l.Accept()
- if err != nil {
- break
- }
-
- io.WriteString(c, c.RemoteAddr().String())
- c.Close()
- }
- }()
-
- conn, err := sshConn.Dial("tcp", l.Addr().String())
- if err != nil {
- t.Fatalf("Dial: %v", err)
- }
- defer conn.Close()
-}
diff --git a/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go
deleted file mode 100644
index 3bfd881..0000000
--- a/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux netbsd openbsd plan9
-
-package test
-
-// functional test harness for unix.
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "log"
- "net"
- "os"
- "os/exec"
- "os/user"
- "path/filepath"
- "testing"
- "text/template"
-
- "golang.org/x/crypto/ssh"
- "golang.org/x/crypto/ssh/testdata"
-)
-
-const sshd_config = `
-Protocol 2
-HostKey {{.Dir}}/id_rsa
-HostKey {{.Dir}}/id_dsa
-HostKey {{.Dir}}/id_ecdsa
-Pidfile {{.Dir}}/sshd.pid
-#UsePrivilegeSeparation no
-KeyRegenerationInterval 3600
-ServerKeyBits 768
-SyslogFacility AUTH
-LogLevel DEBUG2
-LoginGraceTime 120
-PermitRootLogin no
-StrictModes no
-RSAAuthentication yes
-PubkeyAuthentication yes
-AuthorizedKeysFile {{.Dir}}/authorized_keys
-TrustedUserCAKeys {{.Dir}}/id_ecdsa.pub
-IgnoreRhosts yes
-RhostsRSAAuthentication no
-HostbasedAuthentication no
-PubkeyAcceptedKeyTypes=*
-`
-
-var configTmpl = template.Must(template.New("").Parse(sshd_config))
-
-type server struct {
- t *testing.T
- cleanup func() // executed during Shutdown
- configfile string
- cmd *exec.Cmd
- output bytes.Buffer // holds stderr from sshd process
-
- // Client half of the network connection.
- clientConn net.Conn
-}
-
-func username() string {
- var username string
- if user, err := user.Current(); err == nil {
- username = user.Username
- } else {
- // user.Current() currently requires cgo. If an error is
- // returned attempt to get the username from the environment.
- log.Printf("user.Current: %v; falling back on $USER", err)
- username = os.Getenv("USER")
- }
- if username == "" {
- panic("Unable to get username")
- }
- return username
-}
-
-type storedHostKey struct {
- // keys map from an algorithm string to binary key data.
- keys map[string][]byte
-
- // checkCount counts the Check calls. Used for testing
- // rekeying.
- checkCount int
-}
-
-func (k *storedHostKey) Add(key ssh.PublicKey) {
- if k.keys == nil {
- k.keys = map[string][]byte{}
- }
- k.keys[key.Type()] = key.Marshal()
-}
-
-func (k *storedHostKey) Check(addr string, remote net.Addr, key ssh.PublicKey) error {
- k.checkCount++
- algo := key.Type()
-
- if k.keys == nil || bytes.Compare(key.Marshal(), k.keys[algo]) != 0 {
- return fmt.Errorf("host key mismatch. Got %q, want %q", key, k.keys[algo])
- }
- return nil
-}
-
-func hostKeyDB() *storedHostKey {
- keyChecker := &storedHostKey{}
- keyChecker.Add(testPublicKeys["ecdsa"])
- keyChecker.Add(testPublicKeys["rsa"])
- keyChecker.Add(testPublicKeys["dsa"])
- return keyChecker
-}
-
-func clientConfig() *ssh.ClientConfig {
- config := &ssh.ClientConfig{
- User: username(),
- Auth: []ssh.AuthMethod{
- ssh.PublicKeys(testSigners["user"]),
- },
- HostKeyCallback: hostKeyDB().Check,
- }
- return config
-}
-
-// unixConnection creates two halves of a connected net.UnixConn. It
-// is used for connecting the Go SSH client with sshd without opening
-// ports.
-func unixConnection() (*net.UnixConn, *net.UnixConn, error) {
- dir, err := ioutil.TempDir("", "unixConnection")
- if err != nil {
- return nil, nil, err
- }
- defer os.Remove(dir)
-
- addr := filepath.Join(dir, "ssh")
- listener, err := net.Listen("unix", addr)
- if err != nil {
- return nil, nil, err
- }
- defer listener.Close()
- c1, err := net.Dial("unix", addr)
- if err != nil {
- return nil, nil, err
- }
-
- c2, err := listener.Accept()
- if err != nil {
- c1.Close()
- return nil, nil, err
- }
-
- return c1.(*net.UnixConn), c2.(*net.UnixConn), nil
-}
-
-func (s *server) TryDial(config *ssh.ClientConfig) (*ssh.Client, error) {
- sshd, err := exec.LookPath("sshd")
- if err != nil {
- s.t.Skipf("skipping test: %v", err)
- }
-
- c1, c2, err := unixConnection()
- if err != nil {
- s.t.Fatalf("unixConnection: %v", err)
- }
-
- s.cmd = exec.Command(sshd, "-f", s.configfile, "-i", "-e")
- f, err := c2.File()
- if err != nil {
- s.t.Fatalf("UnixConn.File: %v", err)
- }
- defer f.Close()
- s.cmd.Stdin = f
- s.cmd.Stdout = f
- s.cmd.Stderr = &s.output
- if err := s.cmd.Start(); err != nil {
- s.t.Fail()
- s.Shutdown()
- s.t.Fatalf("s.cmd.Start: %v", err)
- }
- s.clientConn = c1
- conn, chans, reqs, err := ssh.NewClientConn(c1, "", config)
- if err != nil {
- return nil, err
- }
- return ssh.NewClient(conn, chans, reqs), nil
-}
-
-func (s *server) Dial(config *ssh.ClientConfig) *ssh.Client {
- conn, err := s.TryDial(config)
- if err != nil {
- s.t.Fail()
- s.Shutdown()
- s.t.Fatalf("ssh.Client: %v", err)
- }
- return conn
-}
-
-func (s *server) Shutdown() {
- if s.cmd != nil && s.cmd.Process != nil {
- // Don't check for errors; if it fails it's most
- // likely "os: process already finished", and we don't
- // care about that. Use os.Interrupt, so child
- // processes are killed too.
- s.cmd.Process.Signal(os.Interrupt)
- s.cmd.Wait()
- }
- if s.t.Failed() {
- // log any output from sshd process
- s.t.Logf("sshd: %s", s.output.String())
- }
- s.cleanup()
-}
-
-func writeFile(path string, contents []byte) {
- f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600)
- if err != nil {
- panic(err)
- }
- defer f.Close()
- if _, err := f.Write(contents); err != nil {
- panic(err)
- }
-}
-
-// newServer returns a new mock ssh server.
-func newServer(t *testing.T) *server {
- if testing.Short() {
- t.Skip("skipping test due to -short")
- }
- dir, err := ioutil.TempDir("", "sshtest")
- if err != nil {
- t.Fatal(err)
- }
- f, err := os.Create(filepath.Join(dir, "sshd_config"))
- if err != nil {
- t.Fatal(err)
- }
- err = configTmpl.Execute(f, map[string]string{
- "Dir": dir,
- })
- if err != nil {
- t.Fatal(err)
- }
- f.Close()
-
- for k, v := range testdata.PEMBytes {
- filename := "id_" + k
- writeFile(filepath.Join(dir, filename), v)
- writeFile(filepath.Join(dir, filename+".pub"), ssh.MarshalAuthorizedKey(testPublicKeys[k]))
- }
-
- var authkeys bytes.Buffer
- for k, _ := range testdata.PEMBytes {
- authkeys.Write(ssh.MarshalAuthorizedKey(testPublicKeys[k]))
- }
- writeFile(filepath.Join(dir, "authorized_keys"), authkeys.Bytes())
-
- return &server{
- t: t,
- configfile: f.Name(),
- cleanup: func() {
- if err := os.RemoveAll(dir); err != nil {
- t.Error(err)
- }
- },
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/test/testdata_test.go b/vendor/golang.org/x/crypto/ssh/test/testdata_test.go
deleted file mode 100644
index a053f67..0000000
--- a/vendor/golang.org/x/crypto/ssh/test/testdata_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// IMPLEMENTATION NOTE: To avoid a package loop, this file is in three places:
-// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three
-// instances.
-
-package test
-
-import (
- "crypto/rand"
- "fmt"
-
- "golang.org/x/crypto/ssh"
- "golang.org/x/crypto/ssh/testdata"
-)
-
-var (
- testPrivateKeys map[string]interface{}
- testSigners map[string]ssh.Signer
- testPublicKeys map[string]ssh.PublicKey
-)
-
-func init() {
- var err error
-
- n := len(testdata.PEMBytes)
- testPrivateKeys = make(map[string]interface{}, n)
- testSigners = make(map[string]ssh.Signer, n)
- testPublicKeys = make(map[string]ssh.PublicKey, n)
- for t, k := range testdata.PEMBytes {
- testPrivateKeys[t], err = ssh.ParseRawPrivateKey(k)
- if err != nil {
- panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err))
- }
- testSigners[t], err = ssh.NewSignerFromKey(testPrivateKeys[t])
- if err != nil {
- panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err))
- }
- testPublicKeys[t] = testSigners[t].PublicKey()
- }
-
- // Create a cert and sign it for use in tests.
- testCert := &ssh.Certificate{
- Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
- ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage
- ValidAfter: 0, // unix epoch
- ValidBefore: ssh.CertTimeInfinity, // The end of currently representable time.
- Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
- Key: testPublicKeys["ecdsa"],
- SignatureKey: testPublicKeys["rsa"],
- Permissions: ssh.Permissions{
- CriticalOptions: map[string]string{},
- Extensions: map[string]string{},
- },
- }
- testCert.SignCert(rand.Reader, testSigners["rsa"])
- testPrivateKeys["cert"] = testPrivateKeys["ecdsa"]
- testSigners["cert"], err = ssh.NewCertSigner(testCert, testSigners["ecdsa"])
- if err != nil {
- panic(fmt.Sprintf("Unable to create certificate signer: %v", err))
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/testdata/doc.go b/vendor/golang.org/x/crypto/ssh/testdata/doc.go
deleted file mode 100644
index fcae47c..0000000
--- a/vendor/golang.org/x/crypto/ssh/testdata/doc.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This package contains test data shared between the various subpackages of
-// the golang.org/x/crypto/ssh package. Under no circumstance should
-// this data be used for production code.
-package testdata // import "golang.org/x/crypto/ssh/testdata"
diff --git a/vendor/golang.org/x/crypto/ssh/testdata/keys.go b/vendor/golang.org/x/crypto/ssh/testdata/keys.go
deleted file mode 100644
index 736dad9..0000000
--- a/vendor/golang.org/x/crypto/ssh/testdata/keys.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package testdata
-
-var PEMBytes = map[string][]byte{
- "dsa": []byte(`-----BEGIN DSA PRIVATE KEY-----
-MIIBuwIBAAKBgQD6PDSEyXiI9jfNs97WuM46MSDCYlOqWw80ajN16AohtBncs1YB
-lHk//dQOvCYOsYaE+gNix2jtoRjwXhDsc25/IqQbU1ahb7mB8/rsaILRGIbA5WH3
-EgFtJmXFovDz3if6F6TzvhFpHgJRmLYVR8cqsezL3hEZOvvs2iH7MorkxwIVAJHD
-nD82+lxh2fb4PMsIiaXudAsBAoGAQRf7Q/iaPRn43ZquUhd6WwvirqUj+tkIu6eV
-2nZWYmXLlqFQKEy4Tejl7Wkyzr2OSYvbXLzo7TNxLKoWor6ips0phYPPMyXld14r
-juhT24CrhOzuLMhDduMDi032wDIZG4Y+K7ElU8Oufn8Sj5Wge8r6ANmmVgmFfynr
-FhdYCngCgYEA3ucGJ93/Mx4q4eKRDxcWD3QzWyqpbRVRRV1Vmih9Ha/qC994nJFz
-DQIdjxDIT2Rk2AGzMqFEB68Zc3O+Wcsmz5eWWzEwFxaTwOGWTyDqsDRLm3fD+QYj
-nOwuxb0Kce+gWI8voWcqC9cyRm09jGzu2Ab3Bhtpg8JJ8L7gS3MRZK4CFEx4UAfY
-Fmsr0W6fHB9nhS4/UXM8
------END DSA PRIVATE KEY-----
-`),
- "ecdsa": []byte(`-----BEGIN EC PRIVATE KEY-----
-MHcCAQEEINGWx0zo6fhJ/0EAfrPzVFyFC9s18lBt3cRoEDhS3ARooAoGCCqGSM49
-AwEHoUQDQgAEi9Hdw6KvZcWxfg2IDhA7UkpDtzzt6ZqJXSsFdLd+Kx4S3Sx4cVO+
-6/ZOXRnPmNAlLUqjShUsUBBngG0u2fqEqA==
------END EC PRIVATE KEY-----
-`),
- "rsa": []byte(`-----BEGIN RSA PRIVATE KEY-----
-MIICXAIBAAKBgQC8A6FGHDiWCSREAXCq6yBfNVr0xCVG2CzvktFNRpue+RXrGs/2
-a6ySEJQb3IYquw7HlJgu6fg3WIWhOmHCjfpG0PrL4CRwbqQ2LaPPXhJErWYejcD8
-Di00cF3677+G10KMZk9RXbmHtuBFZT98wxg8j+ZsBMqGM1+7yrWUvynswQIDAQAB
-AoGAJMCk5vqfSRzyXOTXLGIYCuR4Kj6pdsbNSeuuRGfYBeR1F2c/XdFAg7D/8s5R
-38p/Ih52/Ty5S8BfJtwtvgVY9ecf/JlU/rl/QzhG8/8KC0NG7KsyXklbQ7gJT8UT
-Ojmw5QpMk+rKv17ipDVkQQmPaj+gJXYNAHqImke5mm/K/h0CQQDciPmviQ+DOhOq
-2ZBqUfH8oXHgFmp7/6pXw80DpMIxgV3CwkxxIVx6a8lVH9bT/AFySJ6vXq4zTuV9
-6QmZcZzDAkEA2j/UXJPIs1fQ8z/6sONOkU/BjtoePFIWJlRxdN35cZjXnBraX5UR
-fFHkePv4YwqmXNqrBOvSu+w2WdSDci+IKwJAcsPRc/jWmsrJW1q3Ha0hSf/WG/Bu
-X7MPuXaKpP/DkzGoUmb8ks7yqj6XWnYkPNLjCc8izU5vRwIiyWBRf4mxMwJBAILa
-NDvRS0rjwt6lJGv7zPZoqDc65VfrK2aNyHx2PgFyzwrEOtuF57bu7pnvEIxpLTeM
-z26i6XVMeYXAWZMTloMCQBbpGgEERQpeUknLBqUHhg/wXF6+lFA+vEGnkY+Dwab2
-KCXFGd+SQ5GdUcEMe9isUH6DYj/6/yCDoFrXXmpQb+M=
------END RSA PRIVATE KEY-----
-`),
- "ed25519": []byte(`-----BEGIN OPENSSH PRIVATE KEY-----
-b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
-QyNTUxOQAAACA+3f7hS7g5UWwXOGVTrMfhmxyrjqz7Sxxbx7I1j8DvvwAAAJhAFfkOQBX5
-DgAAAAtzc2gtZWQyNTUxOQAAACA+3f7hS7g5UWwXOGVTrMfhmxyrjqz7Sxxbx7I1j8Dvvw
-AAAEAaYmXltfW6nhRo3iWGglRB48lYq0z0Q3I3KyrdutEr6j7d/uFLuDlRbBc4ZVOsx+Gb
-HKuOrPtLHFvHsjWPwO+/AAAAE2dhcnRvbm1AZ2FydG9ubS14cHMBAg==
------END OPENSSH PRIVATE KEY-----
-`),
- "user": []byte(`-----BEGIN EC PRIVATE KEY-----
-MHcCAQEEILYCAeq8f7V4vSSypRw7pxy8yz3V5W4qg8kSC3zJhqpQoAoGCCqGSM49
-AwEHoUQDQgAEYcO2xNKiRUYOLEHM7VYAp57HNyKbOdYtHD83Z4hzNPVC4tM5mdGD
-PLL8IEwvYu2wq+lpXfGQnNMbzYf9gspG0w==
------END EC PRIVATE KEY-----
-`),
-}
-
-var PEMEncryptedKeys = []struct {
- Name string
- EncryptionKey string
- PEMBytes []byte
-}{
- 0: {
- Name: "rsa-encrypted",
- EncryptionKey: "r54-G0pher_t3st$",
- PEMBytes: []byte(`-----BEGIN RSA PRIVATE KEY-----
-Proc-Type: 4,ENCRYPTED
-DEK-Info: AES-128-CBC,3E1714DE130BC5E81327F36564B05462
-
-MqW88sud4fnWk/Jk3fkjh7ydu51ZkHLN5qlQgA4SkAXORPPMj2XvqZOv1v2LOgUV
-dUevUn8PZK7a9zbZg4QShUSzwE5k6wdB7XKPyBgI39mJ79GBd2U4W3h6KT6jIdWA
-goQpluxkrzr2/X602IaxLEre97FT9mpKC6zxKCLvyFWVIP9n3OSFS47cTTXyFr+l
-7PdRhe60nn6jSBgUNk/Q1lAvEQ9fufdPwDYY93F1wyJ6lOr0F1+mzRrMbH67NyKs
-rG8J1Fa7cIIre7ueKIAXTIne7OAWqpU9UDgQatDtZTbvA7ciqGsSFgiwwW13N+Rr
-hN8MkODKs9cjtONxSKi05s206A3NDU6STtZ3KuPDjFE1gMJODotOuqSM+cxKfyFq
-wxpk/CHYCDdMAVBSwxb/vraOHamylL4uCHpJdBHypzf2HABt+lS8Su23uAmL87DR
-yvyCS/lmpuNTndef6qHPRkoW2EV3xqD3ovosGf7kgwGJUk2ZpCLVteqmYehKlZDK
-r/Jy+J26ooI2jIg9bjvD1PZq+Mv+2dQ1RlDrPG3PB+rEixw6vBaL9x3jatCd4ej7
-XG7lb3qO9xFpLsx89tkEcvpGR+broSpUJ6Mu5LBCVmrvqHjvnDhrZVz1brMiQtU9
-iMZbgXqDLXHd6ERWygk7OTU03u+l1gs+KGMfmS0h0ZYw6KGVLgMnsoxqd6cFSKNB
-8Ohk9ZTZGCiovlXBUepyu8wKat1k8YlHSfIHoRUJRhhcd7DrmojC+bcbMIZBU22T
-Pl2ftVRGtcQY23lYd0NNKfebF7ncjuLWQGy+vZW+7cgfI6wPIbfYfP6g7QAutk6W
-KQx0AoX5woZ6cNxtpIrymaVjSMRRBkKQrJKmRp3pC/lul5E5P2cueMs1fj4OHTbJ
-lAUv88ywr+R+mRgYQlFW/XQ653f6DT4t6+njfO9oBcPrQDASZel3LjXLpjjYG/N5
-+BWnVexuJX9ika8HJiFl55oqaKb+WknfNhk5cPY+x7SDV9ywQeMiDZpr0ffeYAEP
-LlwwiWRDYpO+uwXHSFF3+JjWwjhs8m8g99iFb7U93yKgBB12dCEPPa2ZeH9wUHMJ
-sreYhNuq6f4iWWSXpzN45inQqtTi8jrJhuNLTT543ErW7DtntBO2rWMhff3aiXbn
-Uy3qzZM1nPbuCGuBmP9L2dJ3Z5ifDWB4JmOyWY4swTZGt9AVmUxMIKdZpRONx8vz
-I9u9nbVPGZBcou50Pa0qTLbkWsSL94MNXrARBxzhHC9Zs6XNEtwN7mOuii7uMkVc
-adrxgknBH1J1N+NX/eTKzUwJuPvDtA+Z5ILWNN9wpZT/7ed8zEnKHPNUexyeT5g3
-uw9z9jH7ffGxFYlx87oiVPHGOrCXYZYW5uoZE31SCBkbtNuffNRJRKIFeipmpJ3P
-7bpAG+kGHMelQH6b+5K1Qgsv4tpuSyKeTKpPFH9Av5nN4P1ZBm9N80tzbNWqjSJm
-S7rYdHnuNEVnUGnRmEUMmVuYZnNBEVN/fP2m2SEwXcP3Uh7TiYlcWw10ygaGmOr7
-MvMLGkYgQ4Utwnd98mtqa0jr0hK2TcOSFir3AqVvXN3XJj4cVULkrXe4Im1laWgp
------END RSA PRIVATE KEY-----
-`),
- },
-
- 1: {
- Name: "dsa-encrypted",
- EncryptionKey: "qG0pher-dsa_t3st$",
- PEMBytes: []byte(`-----BEGIN DSA PRIVATE KEY-----
-Proc-Type: 4,ENCRYPTED
-DEK-Info: AES-128-CBC,7CE7A6E4A647DC01AF860210B15ADE3E
-
-hvnBpI99Hceq/55pYRdOzBLntIEis02JFNXuLEydWL+RJBFDn7tA+vXec0ERJd6J
-G8JXlSOAhmC2H4uK3q2xR8/Y3yL95n6OIcjvCBiLsV+o3jj1MYJmErxP6zRtq4w3
-JjIjGHWmaYFSxPKQ6e8fs74HEqaeMV9ONUoTtB+aISmgaBL15Fcoayg245dkBvVl
-h5Kqspe7yvOBmzA3zjRuxmSCqKJmasXM7mqs3vIrMxZE3XPo1/fWKcPuExgpVQoT
-HkJZEoIEIIPnPMwT2uYbFJSGgPJVMDT84xz7yvjCdhLmqrsXgs5Qw7Pw0i0c0BUJ
-b7fDJ2UhdiwSckWGmIhTLlJZzr8K+JpjCDlP+REYBI5meB7kosBnlvCEHdw2EJkH
-0QDc/2F4xlVrHOLbPRFyu1Oi2Gvbeoo9EsM/DThpd1hKAlb0sF5Y0y0d+owv0PnE
-R/4X3HWfIdOHsDUvJ8xVWZ4BZk9Zk9qol045DcFCehpr/3hslCrKSZHakLt9GI58
-vVQJ4L0aYp5nloLfzhViZtKJXRLkySMKdzYkIlNmW1oVGl7tce5UCNI8Nok4j6yn
-IiHM7GBn+0nJoKTXsOGMIBe3ulKlKVxLjEuk9yivh/8=
------END DSA PRIVATE KEY-----
-`),
- },
-}
diff --git a/vendor/golang.org/x/crypto/ssh/testdata_test.go b/vendor/golang.org/x/crypto/ssh/testdata_test.go
deleted file mode 100644
index 2da8c79..0000000
--- a/vendor/golang.org/x/crypto/ssh/testdata_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// IMPLEMENTATION NOTE: To avoid a package loop, this file is in three places:
-// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three
-// instances.
-
-package ssh
-
-import (
- "crypto/rand"
- "fmt"
-
- "golang.org/x/crypto/ssh/testdata"
-)
-
-var (
- testPrivateKeys map[string]interface{}
- testSigners map[string]Signer
- testPublicKeys map[string]PublicKey
-)
-
-func init() {
- var err error
-
- n := len(testdata.PEMBytes)
- testPrivateKeys = make(map[string]interface{}, n)
- testSigners = make(map[string]Signer, n)
- testPublicKeys = make(map[string]PublicKey, n)
- for t, k := range testdata.PEMBytes {
- testPrivateKeys[t], err = ParseRawPrivateKey(k)
- if err != nil {
- panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err))
- }
- testSigners[t], err = NewSignerFromKey(testPrivateKeys[t])
- if err != nil {
- panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err))
- }
- testPublicKeys[t] = testSigners[t].PublicKey()
- }
-
- // Create a cert and sign it for use in tests.
- testCert := &Certificate{
- Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
- ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage
- ValidAfter: 0, // unix epoch
- ValidBefore: CertTimeInfinity, // The end of currently representable time.
- Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
- Key: testPublicKeys["ecdsa"],
- SignatureKey: testPublicKeys["rsa"],
- Permissions: Permissions{
- CriticalOptions: map[string]string{},
- Extensions: map[string]string{},
- },
- }
- testCert.SignCert(rand.Reader, testSigners["rsa"])
- testPrivateKeys["cert"] = testPrivateKeys["ecdsa"]
- testSigners["cert"], err = NewCertSigner(testCert, testSigners["ecdsa"])
- if err != nil {
- panic(fmt.Sprintf("Unable to create certificate signer: %v", err))
- }
-}
diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go
deleted file mode 100644
index 62fba62..0000000
--- a/vendor/golang.org/x/crypto/ssh/transport.go
+++ /dev/null
@@ -1,333 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "bufio"
- "errors"
- "io"
-)
-
-const (
- gcmCipherID = "aes128-gcm@openssh.com"
- aes128cbcID = "aes128-cbc"
- tripledescbcID = "3des-cbc"
-)
-
-// packetConn represents a transport that implements packet based
-// operations.
-type packetConn interface {
- // Encrypt and send a packet of data to the remote peer.
- writePacket(packet []byte) error
-
- // Read a packet from the connection
- readPacket() ([]byte, error)
-
- // Close closes the write-side of the connection.
- Close() error
-}
-
-// transport is the keyingTransport that implements the SSH packet
-// protocol.
-type transport struct {
- reader connectionState
- writer connectionState
-
- bufReader *bufio.Reader
- bufWriter *bufio.Writer
- rand io.Reader
-
- io.Closer
-}
-
-// packetCipher represents a combination of SSH encryption/MAC
-// protocol. A single instance should be used for one direction only.
-type packetCipher interface {
- // writePacket encrypts the packet and writes it to w. The
- // contents of the packet are generally scrambled.
- writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error
-
- // readPacket reads and decrypts a packet of data. The
- // returned packet may be overwritten by future calls of
- // readPacket.
- readPacket(seqnum uint32, r io.Reader) ([]byte, error)
-}
-
-// connectionState represents one side (read or write) of the
-// connection. This is necessary because each direction has its own
-// keys, and can even have its own algorithms
-type connectionState struct {
- packetCipher
- seqNum uint32
- dir direction
- pendingKeyChange chan packetCipher
-}
-
-// prepareKeyChange sets up key material for a keychange. The key changes in
-// both directions are triggered by reading and writing a msgNewKey packet
-// respectively.
-func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error {
- if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil {
- return err
- } else {
- t.reader.pendingKeyChange <- ciph
- }
-
- if ciph, err := newPacketCipher(t.writer.dir, algs.w, kexResult); err != nil {
- return err
- } else {
- t.writer.pendingKeyChange <- ciph
- }
-
- return nil
-}
-
-// Read and decrypt next packet.
-func (t *transport) readPacket() ([]byte, error) {
- return t.reader.readPacket(t.bufReader)
-}
-
-func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
- packet, err := s.packetCipher.readPacket(s.seqNum, r)
- s.seqNum++
- if err == nil && len(packet) == 0 {
- err = errors.New("ssh: zero length packet")
- }
-
- if len(packet) > 0 {
- switch packet[0] {
- case msgNewKeys:
- select {
- case cipher := <-s.pendingKeyChange:
- s.packetCipher = cipher
- default:
- return nil, errors.New("ssh: got bogus newkeys message.")
- }
-
- case msgDisconnect:
- // Transform a disconnect message into an
- // error. Since this is lowest level at which
- // we interpret message types, doing it here
- // ensures that we don't have to handle it
- // elsewhere.
- var msg disconnectMsg
- if err := Unmarshal(packet, &msg); err != nil {
- return nil, err
- }
- return nil, &msg
- }
- }
-
- // The packet may point to an internal buffer, so copy the
- // packet out here.
- fresh := make([]byte, len(packet))
- copy(fresh, packet)
-
- return fresh, err
-}
-
-func (t *transport) writePacket(packet []byte) error {
- return t.writer.writePacket(t.bufWriter, t.rand, packet)
-}
-
-func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error {
- changeKeys := len(packet) > 0 && packet[0] == msgNewKeys
-
- err := s.packetCipher.writePacket(s.seqNum, w, rand, packet)
- if err != nil {
- return err
- }
- if err = w.Flush(); err != nil {
- return err
- }
- s.seqNum++
- if changeKeys {
- select {
- case cipher := <-s.pendingKeyChange:
- s.packetCipher = cipher
- default:
- panic("ssh: no key material for msgNewKeys")
- }
- }
- return err
-}
-
-func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport {
- t := &transport{
- bufReader: bufio.NewReader(rwc),
- bufWriter: bufio.NewWriter(rwc),
- rand: rand,
- reader: connectionState{
- packetCipher: &streamPacketCipher{cipher: noneCipher{}},
- pendingKeyChange: make(chan packetCipher, 1),
- },
- writer: connectionState{
- packetCipher: &streamPacketCipher{cipher: noneCipher{}},
- pendingKeyChange: make(chan packetCipher, 1),
- },
- Closer: rwc,
- }
- if isClient {
- t.reader.dir = serverKeys
- t.writer.dir = clientKeys
- } else {
- t.reader.dir = clientKeys
- t.writer.dir = serverKeys
- }
-
- return t
-}
-
-type direction struct {
- ivTag []byte
- keyTag []byte
- macKeyTag []byte
-}
-
-var (
- serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}}
- clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}}
-)
-
-// generateKeys generates key material for IV, MAC and encryption.
-func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) {
- cipherMode := cipherModes[algs.Cipher]
- macMode := macModes[algs.MAC]
-
- iv = make([]byte, cipherMode.ivSize)
- key = make([]byte, cipherMode.keySize)
- macKey = make([]byte, macMode.keySize)
-
- generateKeyMaterial(iv, d.ivTag, kex)
- generateKeyMaterial(key, d.keyTag, kex)
- generateKeyMaterial(macKey, d.macKeyTag, kex)
- return
-}
-
-// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as
-// described in RFC 4253, section 6.4. direction should either be serverKeys
-// (to setup server->client keys) or clientKeys (for client->server keys).
-func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
- iv, key, macKey := generateKeys(d, algs, kex)
-
- if algs.Cipher == gcmCipherID {
- return newGCMCipher(iv, key, macKey)
- }
-
- if algs.Cipher == aes128cbcID {
- return newAESCBCCipher(iv, key, macKey, algs)
- }
-
- if algs.Cipher == tripledescbcID {
- return newTripleDESCBCCipher(iv, key, macKey, algs)
- }
-
- c := &streamPacketCipher{
- mac: macModes[algs.MAC].new(macKey),
- }
- c.macResult = make([]byte, c.mac.Size())
-
- var err error
- c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv)
- if err != nil {
- return nil, err
- }
-
- return c, nil
-}
-
-// generateKeyMaterial fills out with key material generated from tag, K, H
-// and sessionId, as specified in RFC 4253, section 7.2.
-func generateKeyMaterial(out, tag []byte, r *kexResult) {
- var digestsSoFar []byte
-
- h := r.Hash.New()
- for len(out) > 0 {
- h.Reset()
- h.Write(r.K)
- h.Write(r.H)
-
- if len(digestsSoFar) == 0 {
- h.Write(tag)
- h.Write(r.SessionID)
- } else {
- h.Write(digestsSoFar)
- }
-
- digest := h.Sum(nil)
- n := copy(out, digest)
- out = out[n:]
- if len(out) > 0 {
- digestsSoFar = append(digestsSoFar, digest...)
- }
- }
-}
-
-const packageVersion = "SSH-2.0-Go"
-
-// Sends and receives a version line. The versionLine string should
-// be US ASCII, start with "SSH-2.0-", and should not include a
-// newline. exchangeVersions returns the other side's version line.
-func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) {
- // Contrary to the RFC, we do not ignore lines that don't
- // start with "SSH-2.0-" to make the library usable with
- // nonconforming servers.
- for _, c := range versionLine {
- // The spec disallows non US-ASCII chars, and
- // specifically forbids null chars.
- if c < 32 {
- return nil, errors.New("ssh: junk character in version line")
- }
- }
- if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil {
- return
- }
-
- them, err = readVersion(rw)
- return them, err
-}
-
-// maxVersionStringBytes is the maximum number of bytes that we'll
-// accept as a version string. RFC 4253 section 4.2 limits this at 255
-// chars
-const maxVersionStringBytes = 255
-
-// Read version string as specified by RFC 4253, section 4.2.
-func readVersion(r io.Reader) ([]byte, error) {
- versionString := make([]byte, 0, 64)
- var ok bool
- var buf [1]byte
-
- for len(versionString) < maxVersionStringBytes {
- _, err := io.ReadFull(r, buf[:])
- if err != nil {
- return nil, err
- }
- // The RFC says that the version should be terminated with \r\n
- // but several SSH servers actually only send a \n.
- if buf[0] == '\n' {
- ok = true
- break
- }
-
- // non ASCII chars are disallowed, but we are lenient,
- // since Go doesn't use null-terminated strings.
-
- // The RFC allows a comment after a space, however,
- // all of it (version and comments) goes into the
- // session hash.
- versionString = append(versionString, buf[0])
- }
-
- if !ok {
- return nil, errors.New("ssh: overflow reading version string")
- }
-
- // There might be a '\r' on the end which we should remove.
- if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' {
- versionString = versionString[:len(versionString)-1]
- }
- return versionString, nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/transport_test.go b/vendor/golang.org/x/crypto/ssh/transport_test.go
deleted file mode 100644
index 92d83ab..0000000
--- a/vendor/golang.org/x/crypto/ssh/transport_test.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "bytes"
- "crypto/rand"
- "encoding/binary"
- "strings"
- "testing"
-)
-
-func TestReadVersion(t *testing.T) {
- longversion := strings.Repeat("SSH-2.0-bla", 50)[:253]
- cases := map[string]string{
- "SSH-2.0-bla\r\n": "SSH-2.0-bla",
- "SSH-2.0-bla\n": "SSH-2.0-bla",
- longversion + "\r\n": longversion,
- }
-
- for in, want := range cases {
- result, err := readVersion(bytes.NewBufferString(in))
- if err != nil {
- t.Errorf("readVersion(%q): %s", in, err)
- }
- got := string(result)
- if got != want {
- t.Errorf("got %q, want %q", got, want)
- }
- }
-}
-
-func TestReadVersionError(t *testing.T) {
- longversion := strings.Repeat("SSH-2.0-bla", 50)[:253]
- cases := []string{
- longversion + "too-long\r\n",
- }
- for _, in := range cases {
- if _, err := readVersion(bytes.NewBufferString(in)); err == nil {
- t.Errorf("readVersion(%q) should have failed", in)
- }
- }
-}
-
-func TestExchangeVersionsBasic(t *testing.T) {
- v := "SSH-2.0-bla"
- buf := bytes.NewBufferString(v + "\r\n")
- them, err := exchangeVersions(buf, []byte("xyz"))
- if err != nil {
- t.Errorf("exchangeVersions: %v", err)
- }
-
- if want := "SSH-2.0-bla"; string(them) != want {
- t.Errorf("got %q want %q for our version", them, want)
- }
-}
-
-func TestExchangeVersions(t *testing.T) {
- cases := []string{
- "not\x000allowed",
- "not allowed\n",
- }
- for _, c := range cases {
- buf := bytes.NewBufferString("SSH-2.0-bla\r\n")
- if _, err := exchangeVersions(buf, []byte(c)); err == nil {
- t.Errorf("exchangeVersions(%q): should have failed", c)
- }
- }
-}
-
-type closerBuffer struct {
- bytes.Buffer
-}
-
-func (b *closerBuffer) Close() error {
- return nil
-}
-
-func TestTransportMaxPacketWrite(t *testing.T) {
- buf := &closerBuffer{}
- tr := newTransport(buf, rand.Reader, true)
- huge := make([]byte, maxPacket+1)
- err := tr.writePacket(huge)
- if err == nil {
- t.Errorf("transport accepted write for a huge packet.")
- }
-}
-
-func TestTransportMaxPacketReader(t *testing.T) {
- var header [5]byte
- huge := make([]byte, maxPacket+128)
- binary.BigEndian.PutUint32(header[0:], uint32(len(huge)))
- // padding.
- header[4] = 0
-
- buf := &closerBuffer{}
- buf.Write(header[:])
- buf.Write(huge)
-
- tr := newTransport(buf, rand.Reader, true)
- _, err := tr.readPacket()
- if err == nil {
- t.Errorf("transport succeeded reading huge packet.")
- } else if !strings.Contains(err.Error(), "large") {
- t.Errorf("got %q, should mention %q", err.Error(), "large")
- }
-}
diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/golang.org/x/net/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/golang.org/x/net/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/snappy/LICENSE b/vendor/golang.org/x/net/LICENSE
similarity index 95%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/snappy/LICENSE
rename to vendor/golang.org/x/net/LICENSE
index 6050c10..6a66aea 100644
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/snappy/LICENSE
+++ b/vendor/golang.org/x/net/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
index f143ed6..a3c021d 100644
--- a/vendor/golang.org/x/net/context/context.go
+++ b/vendor/golang.org/x/net/context/context.go
@@ -5,6 +5,8 @@
// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
+// As of Go 1.7 this package is available in the standard library under the
+// name context. https://golang.org/pkg/context.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
@@ -36,103 +38,6 @@
// Contexts.
package context // import "golang.org/x/net/context"
-import "time"
-
-// A Context carries a deadline, a cancelation signal, and other values across
-// API boundaries.
-//
-// Context's methods may be called by multiple goroutines simultaneously.
-type Context interface {
- // Deadline returns the time when work done on behalf of this context
- // should be canceled. Deadline returns ok==false when no deadline is
- // set. Successive calls to Deadline return the same results.
- Deadline() (deadline time.Time, ok bool)
-
- // Done returns a channel that's closed when work done on behalf of this
- // context should be canceled. Done may return nil if this context can
- // never be canceled. Successive calls to Done return the same value.
- //
- // WithCancel arranges for Done to be closed when cancel is called;
- // WithDeadline arranges for Done to be closed when the deadline
- // expires; WithTimeout arranges for Done to be closed when the timeout
- // elapses.
- //
- // Done is provided for use in select statements:
- //
- // // Stream generates values with DoSomething and sends them to out
- // // until DoSomething returns an error or ctx.Done is closed.
- // func Stream(ctx context.Context, out chan<- Value) error {
- // for {
- // v, err := DoSomething(ctx)
- // if err != nil {
- // return err
- // }
- // select {
- // case <-ctx.Done():
- // return ctx.Err()
- // case out <- v:
- // }
- // }
- // }
- //
- // See http://blog.golang.org/pipelines for more examples of how to use
- // a Done channel for cancelation.
- Done() <-chan struct{}
-
- // Err returns a non-nil error value after Done is closed. Err returns
- // Canceled if the context was canceled or DeadlineExceeded if the
- // context's deadline passed. No other values for Err are defined.
- // After Done is closed, successive calls to Err return the same value.
- Err() error
-
- // Value returns the value associated with this context for key, or nil
- // if no value is associated with key. Successive calls to Value with
- // the same key returns the same result.
- //
- // Use context values only for request-scoped data that transits
- // processes and API boundaries, not for passing optional parameters to
- // functions.
- //
- // A key identifies a specific value in a Context. Functions that wish
- // to store values in Context typically allocate a key in a global
- // variable then use that key as the argument to context.WithValue and
- // Context.Value. A key can be any type that supports equality;
- // packages should define keys as an unexported type to avoid
- // collisions.
- //
- // Packages that define a Context key should provide type-safe accessors
- // for the values stores using that key:
- //
- // // Package user defines a User type that's stored in Contexts.
- // package user
- //
- // import "golang.org/x/net/context"
- //
- // // User is the type of value stored in the Contexts.
- // type User struct {...}
- //
- // // key is an unexported type for keys defined in this package.
- // // This prevents collisions with keys defined in other packages.
- // type key int
- //
- // // userKey is the key for user.User values in Contexts. It is
- // // unexported; clients use user.NewContext and user.FromContext
- // // instead of using this key directly.
- // var userKey key = 0
- //
- // // NewContext returns a new Context that carries value u.
- // func NewContext(ctx context.Context, u *User) context.Context {
- // return context.WithValue(ctx, userKey, u)
- // }
- //
- // // FromContext returns the User value stored in ctx, if any.
- // func FromContext(ctx context.Context) (*User, bool) {
- // u, ok := ctx.Value(userKey).(*User)
- // return u, ok
- // }
- Value(key interface{}) interface{}
-}
-
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
@@ -149,8 +54,3 @@ func Background() Context {
func TODO() Context {
return todo
}
-
-// A CancelFunc tells an operation to abandon its work.
-// A CancelFunc does not wait for the work to stop.
-// After the first call, subsequent calls to a CancelFunc do nothing.
-type CancelFunc func()
diff --git a/vendor/golang.org/x/net/context/context_test.go b/vendor/golang.org/x/net/context/context_test.go
deleted file mode 100644
index 6284413..0000000
--- a/vendor/golang.org/x/net/context/context_test.go
+++ /dev/null
@@ -1,583 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.7
-
-package context
-
-import (
- "fmt"
- "math/rand"
- "runtime"
- "strings"
- "sync"
- "testing"
- "time"
-)
-
-// otherContext is a Context that's not one of the types defined in context.go.
-// This lets us test code paths that differ based on the underlying type of the
-// Context.
-type otherContext struct {
- Context
-}
-
-func TestBackground(t *testing.T) {
- c := Background()
- if c == nil {
- t.Fatalf("Background returned nil")
- }
- select {
- case x := <-c.Done():
- t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
- default:
- }
- if got, want := fmt.Sprint(c), "context.Background"; got != want {
- t.Errorf("Background().String() = %q want %q", got, want)
- }
-}
-
-func TestTODO(t *testing.T) {
- c := TODO()
- if c == nil {
- t.Fatalf("TODO returned nil")
- }
- select {
- case x := <-c.Done():
- t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
- default:
- }
- if got, want := fmt.Sprint(c), "context.TODO"; got != want {
- t.Errorf("TODO().String() = %q want %q", got, want)
- }
-}
-
-func TestWithCancel(t *testing.T) {
- c1, cancel := WithCancel(Background())
-
- if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want {
- t.Errorf("c1.String() = %q want %q", got, want)
- }
-
- o := otherContext{c1}
- c2, _ := WithCancel(o)
- contexts := []Context{c1, o, c2}
-
- for i, c := range contexts {
- if d := c.Done(); d == nil {
- t.Errorf("c[%d].Done() == %v want non-nil", i, d)
- }
- if e := c.Err(); e != nil {
- t.Errorf("c[%d].Err() == %v want nil", i, e)
- }
-
- select {
- case x := <-c.Done():
- t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
- default:
- }
- }
-
- cancel()
- time.Sleep(100 * time.Millisecond) // let cancelation propagate
-
- for i, c := range contexts {
- select {
- case <-c.Done():
- default:
- t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
- }
- if e := c.Err(); e != Canceled {
- t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled)
- }
- }
-}
-
-func TestParentFinishesChild(t *testing.T) {
- // Context tree:
- // parent -> cancelChild
- // parent -> valueChild -> timerChild
- parent, cancel := WithCancel(Background())
- cancelChild, stop := WithCancel(parent)
- defer stop()
- valueChild := WithValue(parent, "key", "value")
- timerChild, stop := WithTimeout(valueChild, 10000*time.Hour)
- defer stop()
-
- select {
- case x := <-parent.Done():
- t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
- case x := <-cancelChild.Done():
- t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x)
- case x := <-timerChild.Done():
- t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x)
- case x := <-valueChild.Done():
- t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x)
- default:
- }
-
- // The parent's children should contain the two cancelable children.
- pc := parent.(*cancelCtx)
- cc := cancelChild.(*cancelCtx)
- tc := timerChild.(*timerCtx)
- pc.mu.Lock()
- if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] {
- t.Errorf("bad linkage: pc.children = %v, want %v and %v",
- pc.children, cc, tc)
- }
- pc.mu.Unlock()
-
- if p, ok := parentCancelCtx(cc.Context); !ok || p != pc {
- t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc)
- }
- if p, ok := parentCancelCtx(tc.Context); !ok || p != pc {
- t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc)
- }
-
- cancel()
-
- pc.mu.Lock()
- if len(pc.children) != 0 {
- t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children)
- }
- pc.mu.Unlock()
-
- // parent and children should all be finished.
- check := func(ctx Context, name string) {
- select {
- case <-ctx.Done():
- default:
- t.Errorf("<-%s.Done() blocked, but shouldn't have", name)
- }
- if e := ctx.Err(); e != Canceled {
- t.Errorf("%s.Err() == %v want %v", name, e, Canceled)
- }
- }
- check(parent, "parent")
- check(cancelChild, "cancelChild")
- check(valueChild, "valueChild")
- check(timerChild, "timerChild")
-
- // WithCancel should return a canceled context on a canceled parent.
- precanceledChild := WithValue(parent, "key", "value")
- select {
- case <-precanceledChild.Done():
- default:
- t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have")
- }
- if e := precanceledChild.Err(); e != Canceled {
- t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled)
- }
-}
-
-func TestChildFinishesFirst(t *testing.T) {
- cancelable, stop := WithCancel(Background())
- defer stop()
- for _, parent := range []Context{Background(), cancelable} {
- child, cancel := WithCancel(parent)
-
- select {
- case x := <-parent.Done():
- t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
- case x := <-child.Done():
- t.Errorf("<-child.Done() == %v want nothing (it should block)", x)
- default:
- }
-
- cc := child.(*cancelCtx)
- pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background()
- if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) {
- t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok)
- }
-
- if pcok {
- pc.mu.Lock()
- if len(pc.children) != 1 || !pc.children[cc] {
- t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc)
- }
- pc.mu.Unlock()
- }
-
- cancel()
-
- if pcok {
- pc.mu.Lock()
- if len(pc.children) != 0 {
- t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children)
- }
- pc.mu.Unlock()
- }
-
- // child should be finished.
- select {
- case <-child.Done():
- default:
- t.Errorf("<-child.Done() blocked, but shouldn't have")
- }
- if e := child.Err(); e != Canceled {
- t.Errorf("child.Err() == %v want %v", e, Canceled)
- }
-
- // parent should not be finished.
- select {
- case x := <-parent.Done():
- t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
- default:
- }
- if e := parent.Err(); e != nil {
- t.Errorf("parent.Err() == %v want nil", e)
- }
- }
-}
-
-func testDeadline(c Context, wait time.Duration, t *testing.T) {
- select {
- case <-time.After(wait):
- t.Fatalf("context should have timed out")
- case <-c.Done():
- }
- if e := c.Err(); e != DeadlineExceeded {
- t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded)
- }
-}
-
-func TestDeadline(t *testing.T) {
- t.Parallel()
- const timeUnit = 500 * time.Millisecond
- c, _ := WithDeadline(Background(), time.Now().Add(1*timeUnit))
- if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
- t.Errorf("c.String() = %q want prefix %q", got, prefix)
- }
- testDeadline(c, 2*timeUnit, t)
-
- c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit))
- o := otherContext{c}
- testDeadline(o, 2*timeUnit, t)
-
- c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit))
- o = otherContext{c}
- c, _ = WithDeadline(o, time.Now().Add(3*timeUnit))
- testDeadline(c, 2*timeUnit, t)
-}
-
-func TestTimeout(t *testing.T) {
- t.Parallel()
- const timeUnit = 500 * time.Millisecond
- c, _ := WithTimeout(Background(), 1*timeUnit)
- if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
- t.Errorf("c.String() = %q want prefix %q", got, prefix)
- }
- testDeadline(c, 2*timeUnit, t)
-
- c, _ = WithTimeout(Background(), 1*timeUnit)
- o := otherContext{c}
- testDeadline(o, 2*timeUnit, t)
-
- c, _ = WithTimeout(Background(), 1*timeUnit)
- o = otherContext{c}
- c, _ = WithTimeout(o, 3*timeUnit)
- testDeadline(c, 2*timeUnit, t)
-}
-
-func TestCanceledTimeout(t *testing.T) {
- t.Parallel()
- const timeUnit = 500 * time.Millisecond
- c, _ := WithTimeout(Background(), 2*timeUnit)
- o := otherContext{c}
- c, cancel := WithTimeout(o, 4*timeUnit)
- cancel()
- time.Sleep(1 * timeUnit) // let cancelation propagate
- select {
- case <-c.Done():
- default:
- t.Errorf("<-c.Done() blocked, but shouldn't have")
- }
- if e := c.Err(); e != Canceled {
- t.Errorf("c.Err() == %v want %v", e, Canceled)
- }
-}
-
-type key1 int
-type key2 int
-
-var k1 = key1(1)
-var k2 = key2(1) // same int as k1, different type
-var k3 = key2(3) // same type as k2, different int
-
-func TestValues(t *testing.T) {
- check := func(c Context, nm, v1, v2, v3 string) {
- if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 {
- t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0)
- }
- if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 {
- t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0)
- }
- if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 {
- t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0)
- }
- }
-
- c0 := Background()
- check(c0, "c0", "", "", "")
-
- c1 := WithValue(Background(), k1, "c1k1")
- check(c1, "c1", "c1k1", "", "")
-
- if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want {
- t.Errorf("c.String() = %q want %q", got, want)
- }
-
- c2 := WithValue(c1, k2, "c2k2")
- check(c2, "c2", "c1k1", "c2k2", "")
-
- c3 := WithValue(c2, k3, "c3k3")
- check(c3, "c2", "c1k1", "c2k2", "c3k3")
-
- c4 := WithValue(c3, k1, nil)
- check(c4, "c4", "", "c2k2", "c3k3")
-
- o0 := otherContext{Background()}
- check(o0, "o0", "", "", "")
-
- o1 := otherContext{WithValue(Background(), k1, "c1k1")}
- check(o1, "o1", "c1k1", "", "")
-
- o2 := WithValue(o1, k2, "o2k2")
- check(o2, "o2", "c1k1", "o2k2", "")
-
- o3 := otherContext{c4}
- check(o3, "o3", "", "c2k2", "c3k3")
-
- o4 := WithValue(o3, k3, nil)
- check(o4, "o4", "", "c2k2", "")
-}
-
-func TestAllocs(t *testing.T) {
- bg := Background()
- for _, test := range []struct {
- desc string
- f func()
- limit float64
- gccgoLimit float64
- }{
- {
- desc: "Background()",
- f: func() { Background() },
- limit: 0,
- gccgoLimit: 0,
- },
- {
- desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1),
- f: func() {
- c := WithValue(bg, k1, nil)
- c.Value(k1)
- },
- limit: 3,
- gccgoLimit: 3,
- },
- {
- desc: "WithTimeout(bg, 15*time.Millisecond)",
- f: func() {
- c, _ := WithTimeout(bg, 15*time.Millisecond)
- <-c.Done()
- },
- limit: 8,
- gccgoLimit: 16,
- },
- {
- desc: "WithCancel(bg)",
- f: func() {
- c, cancel := WithCancel(bg)
- cancel()
- <-c.Done()
- },
- limit: 5,
- gccgoLimit: 8,
- },
- {
- desc: "WithTimeout(bg, 100*time.Millisecond)",
- f: func() {
- c, cancel := WithTimeout(bg, 100*time.Millisecond)
- cancel()
- <-c.Done()
- },
- limit: 8,
- gccgoLimit: 25,
- },
- } {
- limit := test.limit
- if runtime.Compiler == "gccgo" {
- // gccgo does not yet do escape analysis.
- // TODO(iant): Remove this when gccgo does do escape analysis.
- limit = test.gccgoLimit
- }
- if n := testing.AllocsPerRun(100, test.f); n > limit {
- t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit))
- }
- }
-}
-
-func TestSimultaneousCancels(t *testing.T) {
- root, cancel := WithCancel(Background())
- m := map[Context]CancelFunc{root: cancel}
- q := []Context{root}
- // Create a tree of contexts.
- for len(q) != 0 && len(m) < 100 {
- parent := q[0]
- q = q[1:]
- for i := 0; i < 4; i++ {
- ctx, cancel := WithCancel(parent)
- m[ctx] = cancel
- q = append(q, ctx)
- }
- }
- // Start all the cancels in a random order.
- var wg sync.WaitGroup
- wg.Add(len(m))
- for _, cancel := range m {
- go func(cancel CancelFunc) {
- cancel()
- wg.Done()
- }(cancel)
- }
- // Wait on all the contexts in a random order.
- for ctx := range m {
- select {
- case <-ctx.Done():
- case <-time.After(1 * time.Second):
- buf := make([]byte, 10<<10)
- n := runtime.Stack(buf, true)
- t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n])
- }
- }
- // Wait for all the cancel functions to return.
- done := make(chan struct{})
- go func() {
- wg.Wait()
- close(done)
- }()
- select {
- case <-done:
- case <-time.After(1 * time.Second):
- buf := make([]byte, 10<<10)
- n := runtime.Stack(buf, true)
- t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n])
- }
-}
-
-func TestInterlockedCancels(t *testing.T) {
- parent, cancelParent := WithCancel(Background())
- child, cancelChild := WithCancel(parent)
- go func() {
- parent.Done()
- cancelChild()
- }()
- cancelParent()
- select {
- case <-child.Done():
- case <-time.After(1 * time.Second):
- buf := make([]byte, 10<<10)
- n := runtime.Stack(buf, true)
- t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n])
- }
-}
-
-func TestLayersCancel(t *testing.T) {
- testLayers(t, time.Now().UnixNano(), false)
-}
-
-func TestLayersTimeout(t *testing.T) {
- testLayers(t, time.Now().UnixNano(), true)
-}
-
-func testLayers(t *testing.T, seed int64, testTimeout bool) {
- rand.Seed(seed)
- errorf := func(format string, a ...interface{}) {
- t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...)
- }
- const (
- timeout = 200 * time.Millisecond
- minLayers = 30
- )
- type value int
- var (
- vals []*value
- cancels []CancelFunc
- numTimers int
- ctx = Background()
- )
- for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ {
- switch rand.Intn(3) {
- case 0:
- v := new(value)
- ctx = WithValue(ctx, v, v)
- vals = append(vals, v)
- case 1:
- var cancel CancelFunc
- ctx, cancel = WithCancel(ctx)
- cancels = append(cancels, cancel)
- case 2:
- var cancel CancelFunc
- ctx, cancel = WithTimeout(ctx, timeout)
- cancels = append(cancels, cancel)
- numTimers++
- }
- }
- checkValues := func(when string) {
- for _, key := range vals {
- if val := ctx.Value(key).(*value); key != val {
- errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key)
- }
- }
- }
- select {
- case <-ctx.Done():
- errorf("ctx should not be canceled yet")
- default:
- }
- if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) {
- t.Errorf("ctx.String() = %q want prefix %q", s, prefix)
- }
- t.Log(ctx)
- checkValues("before cancel")
- if testTimeout {
- select {
- case <-ctx.Done():
- case <-time.After(timeout + 100*time.Millisecond):
- errorf("ctx should have timed out")
- }
- checkValues("after timeout")
- } else {
- cancel := cancels[rand.Intn(len(cancels))]
- cancel()
- select {
- case <-ctx.Done():
- default:
- errorf("ctx should be canceled")
- }
- checkValues("after cancel")
- }
-}
-
-func TestCancelRemoves(t *testing.T) {
- checkChildren := func(when string, ctx Context, want int) {
- if got := len(ctx.(*cancelCtx).children); got != want {
- t.Errorf("%s: context has %d children, want %d", when, got, want)
- }
- }
-
- ctx, _ := WithCancel(Background())
- checkChildren("after creation", ctx, 0)
- _, cancel := WithCancel(ctx)
- checkChildren("with WithCancel child ", ctx, 1)
- cancel()
- checkChildren("after cancelling WithCancel child", ctx, 0)
-
- ctx, _ = WithCancel(Background())
- checkChildren("after creation", ctx, 0)
- _, cancel = WithTimeout(ctx, 60*time.Minute)
- checkChildren("with WithTimeout child ", ctx, 1)
- cancel()
- checkChildren("after cancelling WithTimeout child", ctx, 0)
-}
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
deleted file mode 100644
index 606cf1f..0000000
--- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.7
-
-// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
-package ctxhttp // import "golang.org/x/net/context/ctxhttp"
-
-import (
- "io"
- "net/http"
- "net/url"
- "strings"
-
- "golang.org/x/net/context"
-)
-
-// Do sends an HTTP request with the provided http.Client and returns
-// an HTTP response.
-//
-// If the client is nil, http.DefaultClient is used.
-//
-// The provided ctx must be non-nil. If it is canceled or times out,
-// ctx.Err() will be returned.
-func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
- if client == nil {
- client = http.DefaultClient
- }
- resp, err := client.Do(req.WithContext(ctx))
- // If we got an error, and the context has been canceled,
- // the context's error is probably more useful.
- if err != nil {
- select {
- case <-ctx.Done():
- err = ctx.Err()
- default:
- }
- }
- return resp, err
-}
-
-// Get issues a GET request via the Do function.
-func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
- req, err := http.NewRequest("GET", url, nil)
- if err != nil {
- return nil, err
- }
- return Do(ctx, client, req)
-}
-
-// Head issues a HEAD request via the Do function.
-func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
- req, err := http.NewRequest("HEAD", url, nil)
- if err != nil {
- return nil, err
- }
- return Do(ctx, client, req)
-}
-
-// Post issues a POST request via the Do function.
-func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
- req, err := http.NewRequest("POST", url, body)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", bodyType)
- return Do(ctx, client, req)
-}
-
-// PostForm issues a POST request via the Do function.
-func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
- return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go
deleted file mode 100644
index 9f0f90f..0000000
--- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9,go1.7
-
-package ctxhttp
-
-import (
- "io"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "context"
-)
-
-func TestGo17Context(t *testing.T) {
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, "ok")
- }))
- ctx := context.Background()
- resp, err := Get(ctx, http.DefaultClient, ts.URL)
- if resp == nil || err != nil {
- t.Fatalf("error received from client: %v %v", err, resp)
- }
- resp.Body.Close()
-}
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
deleted file mode 100644
index 926870c..0000000
--- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.7
-
-package ctxhttp // import "golang.org/x/net/context/ctxhttp"
-
-import (
- "io"
- "net/http"
- "net/url"
- "strings"
-
- "golang.org/x/net/context"
-)
-
-func nop() {}
-
-var (
- testHookContextDoneBeforeHeaders = nop
- testHookDoReturned = nop
- testHookDidBodyClose = nop
-)
-
-// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
-// If the client is nil, http.DefaultClient is used.
-// If the context is canceled or times out, ctx.Err() will be returned.
-func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
- if client == nil {
- client = http.DefaultClient
- }
-
- // TODO(djd): Respect any existing value of req.Cancel.
- cancel := make(chan struct{})
- req.Cancel = cancel
-
- type responseAndError struct {
- resp *http.Response
- err error
- }
- result := make(chan responseAndError, 1)
-
- // Make local copies of test hooks closed over by goroutines below.
- // Prevents data races in tests.
- testHookDoReturned := testHookDoReturned
- testHookDidBodyClose := testHookDidBodyClose
-
- go func() {
- resp, err := client.Do(req)
- testHookDoReturned()
- result <- responseAndError{resp, err}
- }()
-
- var resp *http.Response
-
- select {
- case <-ctx.Done():
- testHookContextDoneBeforeHeaders()
- close(cancel)
- // Clean up after the goroutine calling client.Do:
- go func() {
- if r := <-result; r.resp != nil {
- testHookDidBodyClose()
- r.resp.Body.Close()
- }
- }()
- return nil, ctx.Err()
- case r := <-result:
- var err error
- resp, err = r.resp, r.err
- if err != nil {
- return resp, err
- }
- }
-
- c := make(chan struct{})
- go func() {
- select {
- case <-ctx.Done():
- close(cancel)
- case <-c:
- // The response's Body is closed.
- }
- }()
- resp.Body = ¬ifyingReader{resp.Body, c}
-
- return resp, nil
-}
-
-// Get issues a GET request via the Do function.
-func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
- req, err := http.NewRequest("GET", url, nil)
- if err != nil {
- return nil, err
- }
- return Do(ctx, client, req)
-}
-
-// Head issues a HEAD request via the Do function.
-func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
- req, err := http.NewRequest("HEAD", url, nil)
- if err != nil {
- return nil, err
- }
- return Do(ctx, client, req)
-}
-
-// Post issues a POST request via the Do function.
-func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
- req, err := http.NewRequest("POST", url, body)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", bodyType)
- return Do(ctx, client, req)
-}
-
-// PostForm issues a POST request via the Do function.
-func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
- return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
-
-// notifyingReader is an io.ReadCloser that closes the notify channel after
-// Close is called or a Read fails on the underlying ReadCloser.
-type notifyingReader struct {
- io.ReadCloser
- notify chan<- struct{}
-}
-
-func (r *notifyingReader) Read(p []byte) (int, error) {
- n, err := r.ReadCloser.Read(p)
- if err != nil && r.notify != nil {
- close(r.notify)
- r.notify = nil
- }
- return n, err
-}
-
-func (r *notifyingReader) Close() error {
- err := r.ReadCloser.Close()
- if r.notify != nil {
- close(r.notify)
- r.notify = nil
- }
- return err
-}
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go
deleted file mode 100644
index 9159cf0..0000000
--- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9,!go1.7
-
-package ctxhttp
-
-import (
- "net"
- "net/http"
- "net/http/httptest"
- "sync"
- "testing"
- "time"
-
- "golang.org/x/net/context"
-)
-
-// golang.org/issue/14065
-func TestClosesResponseBodyOnCancel(t *testing.T) {
- defer func() { testHookContextDoneBeforeHeaders = nop }()
- defer func() { testHookDoReturned = nop }()
- defer func() { testHookDidBodyClose = nop }()
-
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
- defer ts.Close()
-
- ctx, cancel := context.WithCancel(context.Background())
-
- // closed when Do enters select case <-ctx.Done()
- enteredDonePath := make(chan struct{})
-
- testHookContextDoneBeforeHeaders = func() {
- close(enteredDonePath)
- }
-
- testHookDoReturned = func() {
- // We now have the result (the Flush'd headers) at least,
- // so we can cancel the request.
- cancel()
-
- // But block the client.Do goroutine from sending
- // until Do enters into the <-ctx.Done() path, since
- // otherwise if both channels are readable, select
- // picks a random one.
- <-enteredDonePath
- }
-
- sawBodyClose := make(chan struct{})
- testHookDidBodyClose = func() { close(sawBodyClose) }
-
- tr := &http.Transport{}
- defer tr.CloseIdleConnections()
- c := &http.Client{Transport: tr}
- req, _ := http.NewRequest("GET", ts.URL, nil)
- _, doErr := Do(ctx, c, req)
-
- select {
- case <-sawBodyClose:
- case <-time.After(5 * time.Second):
- t.Fatal("timeout waiting for body to close")
- }
-
- if doErr != ctx.Err() {
- t.Errorf("Do error = %v; want %v", doErr, ctx.Err())
- }
-}
-
-type noteCloseConn struct {
- net.Conn
- onceClose sync.Once
- closefn func()
-}
-
-func (c *noteCloseConn) Close() error {
- c.onceClose.Do(c.closefn)
- return c.Conn.Close()
-}
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go
deleted file mode 100644
index 1e41551..0000000
--- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9
-
-package ctxhttp
-
-import (
- "io"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "testing"
- "time"
-
- "golang.org/x/net/context"
-)
-
-const (
- requestDuration = 100 * time.Millisecond
- requestBody = "ok"
-)
-
-func okHandler(w http.ResponseWriter, r *http.Request) {
- time.Sleep(requestDuration)
- io.WriteString(w, requestBody)
-}
-
-func TestNoTimeout(t *testing.T) {
- ts := httptest.NewServer(http.HandlerFunc(okHandler))
- defer ts.Close()
-
- ctx := context.Background()
- res, err := Get(ctx, nil, ts.URL)
- if err != nil {
- t.Fatal(err)
- }
- defer res.Body.Close()
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- t.Fatal(err)
- }
- if string(slurp) != requestBody {
- t.Errorf("body = %q; want %q", slurp, requestBody)
- }
-}
-
-func TestCancelBeforeHeaders(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
-
- blockServer := make(chan struct{})
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- cancel()
- <-blockServer
- io.WriteString(w, requestBody)
- }))
- defer ts.Close()
- defer close(blockServer)
-
- res, err := Get(ctx, nil, ts.URL)
- if err == nil {
- res.Body.Close()
- t.Fatal("Get returned unexpected nil error")
- }
- if err != context.Canceled {
- t.Errorf("err = %v; want %v", err, context.Canceled)
- }
-}
-
-func TestCancelAfterHangingRequest(t *testing.T) {
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(http.StatusOK)
- w.(http.Flusher).Flush()
- <-w.(http.CloseNotifier).CloseNotify()
- }))
- defer ts.Close()
-
- ctx, cancel := context.WithCancel(context.Background())
- resp, err := Get(ctx, nil, ts.URL)
- if err != nil {
- t.Fatalf("unexpected error in Get: %v", err)
- }
-
- // Cancel befer reading the body.
- // Reading Request.Body should fail, since the request was
- // canceled before anything was written.
- cancel()
-
- done := make(chan struct{})
-
- go func() {
- b, err := ioutil.ReadAll(resp.Body)
- if len(b) != 0 || err == nil {
- t.Errorf(`Read got (%q, %v); want ("", error)`, b, err)
- }
- close(done)
- }()
-
- select {
- case <-time.After(1 * time.Second):
- t.Errorf("Test timed out")
- case <-done:
- }
-}
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
index d20f52b..344bd14 100644
--- a/vendor/golang.org/x/net/context/go17.go
+++ b/vendor/golang.org/x/net/context/go17.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build go1.7
// +build go1.7
package context
diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go
new file mode 100644
index 0000000..64d31ec
--- /dev/null
+++ b/vendor/golang.org/x/net/context/go19.go
@@ -0,0 +1,21 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.9
+// +build go1.9
+
+package context
+
+import "context" // standard library's context, as of Go 1.7
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context = context.Context
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc = context.CancelFunc
diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go
index 0f35592..5270db5 100644
--- a/vendor/golang.org/x/net/context/pre_go17.go
+++ b/vendor/golang.org/x/net/context/pre_go17.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build !go1.7
// +build !go1.7
package context
diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go
new file mode 100644
index 0000000..1f97153
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go19.go
@@ -0,0 +1,110 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.9
+// +build !go1.9
+
+package context
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ //
+ // WithCancel arranges for Done to be closed when cancel is called;
+ // WithDeadline arranges for Done to be closed when the deadline
+ // expires; WithTimeout arranges for Done to be closed when the timeout
+ // elapses.
+ //
+ // Done is provided for use in select statements:
+ //
+ // // Stream generates values with DoSomething and sends them to out
+ // // until DoSomething returns an error or ctx.Done is closed.
+ // func Stream(ctx context.Context, out chan<- Value) error {
+ // for {
+ // v, err := DoSomething(ctx)
+ // if err != nil {
+ // return err
+ // }
+ // select {
+ // case <-ctx.Done():
+ // return ctx.Err()
+ // case out <- v:
+ // }
+ // }
+ // }
+ //
+ // See http://blog.golang.org/pipelines for more examples of how to use
+ // a Done channel for cancelation.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key returns the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ //
+ // A key identifies a specific value in a Context. Functions that wish
+ // to store values in Context typically allocate a key in a global
+ // variable then use that key as the argument to context.WithValue and
+ // Context.Value. A key can be any type that supports equality;
+ // packages should define keys as an unexported type to avoid
+ // collisions.
+ //
+ // Packages that define a Context key should provide type-safe accessors
+ // for the values stores using that key:
+ //
+ // // Package user defines a User type that's stored in Contexts.
+ // package user
+ //
+ // import "golang.org/x/net/context"
+ //
+ // // User is the type of value stored in the Contexts.
+ // type User struct {...}
+ //
+ // // key is an unexported type for keys defined in this package.
+ // // This prevents collisions with keys defined in other packages.
+ // type key int
+ //
+ // // userKey is the key for user.User values in Contexts. It is
+ // // unexported; clients use user.NewContext and user.FromContext
+ // // instead of using this key directly.
+ // var userKey key = 0
+ //
+ // // NewContext returns a new Context that carries value u.
+ // func NewContext(ctx context.Context, u *User) context.Context {
+ // return context.WithValue(ctx, userKey, u)
+ // }
+ //
+ // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) {
+ // u, ok := ctx.Value(userKey).(*User)
+ // return u, ok
+ // }
+ Value(key interface{}) interface{}
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
diff --git a/vendor/golang.org/x/net/context/withtimeout_test.go b/vendor/golang.org/x/net/context/withtimeout_test.go
deleted file mode 100644
index a6754dc..0000000
--- a/vendor/golang.org/x/net/context/withtimeout_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package context_test
-
-import (
- "fmt"
- "time"
-
- "golang.org/x/net/context"
-)
-
-func ExampleWithTimeout() {
- // Pass a context with a timeout to tell a blocking function that it
- // should abandon its work after the timeout elapses.
- ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
- select {
- case <-time.After(200 * time.Millisecond):
- fmt.Println("overslept")
- case <-ctx.Done():
- fmt.Println(ctx.Err()) // prints "context deadline exceeded"
- }
- // Output:
- // context deadline exceeded
-}
diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/golang.org/x/sys/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/golang.org/x/sys/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/filelock/LICENSE b/vendor/golang.org/x/sys/LICENSE
similarity index 95%
rename from vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/filelock/LICENSE
rename to vendor/golang.org/x/sys/LICENSE
index fec05ce..6a66aea 100644
--- a/vendor/github.com/siddontang/go-mysql/vendor/github.com/siddontang/go/filelock/LICENSE
+++ b/vendor/golang.org/x/sys/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2011 The LevelDB-Go Authors. All rights reserved.
+Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/sys/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go
new file mode 100644
index 0000000..e07899b
--- /dev/null
+++ b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go
@@ -0,0 +1,30 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package unsafeheader contains header declarations for the Go runtime's
+// slice and string implementations.
+//
+// This package allows x/sys to use types equivalent to
+// reflect.SliceHeader and reflect.StringHeader without introducing
+// a dependency on the (relatively heavy) "reflect" package.
+package unsafeheader
+
+import (
+ "unsafe"
+)
+
+// Slice is the runtime representation of a slice.
+// It cannot be used safely or portably and its representation may change in a later release.
+type Slice struct {
+ Data unsafe.Pointer
+ Len int
+ Cap int
+}
+
+// String is the runtime representation of a string.
+// It cannot be used safely or portably and its representation may change in a later release.
+type String struct {
+ Data unsafe.Pointer
+ Len int
+}
diff --git a/vendor/golang.org/x/sys/plan9/asm.s b/vendor/golang.org/x/sys/plan9/asm.s
new file mode 100644
index 0000000..06449eb
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/asm.s
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·use(SB),NOSPLIT,$0
+ RET
diff --git a/vendor/golang.org/x/sys/plan9/asm_plan9_386.s b/vendor/golang.org/x/sys/plan9/asm_plan9_386.s
new file mode 100644
index 0000000..bc5cab1
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/asm_plan9_386.s
@@ -0,0 +1,30 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+//
+// System call support for 386, Plan 9
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-32
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-44
+ JMP syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-36
+ JMP syscall·seek(SB)
+
+TEXT ·exit(SB),NOSPLIT,$4-4
+ JMP syscall·exit(SB)
diff --git a/vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s b/vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s
new file mode 100644
index 0000000..d3448e6
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s
@@ -0,0 +1,30 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+//
+// System call support for amd64, Plan 9
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-64
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-88
+ JMP syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-56
+ JMP syscall·seek(SB)
+
+TEXT ·exit(SB),NOSPLIT,$8-8
+ JMP syscall·exit(SB)
diff --git a/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s b/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s
new file mode 100644
index 0000000..afb7c0a
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/asm_plan9_arm.s
@@ -0,0 +1,25 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// System call support for plan9 on arm
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-32
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-44
+ JMP syscall·Syscall6(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-36
+ JMP syscall·exit(SB)
diff --git a/vendor/golang.org/x/sys/plan9/const_plan9.go b/vendor/golang.org/x/sys/plan9/const_plan9.go
new file mode 100644
index 0000000..b4e85a3
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/const_plan9.go
@@ -0,0 +1,70 @@
+package plan9
+
+// Plan 9 Constants
+
+// Open modes
+const (
+ O_RDONLY = 0
+ O_WRONLY = 1
+ O_RDWR = 2
+ O_TRUNC = 16
+ O_CLOEXEC = 32
+ O_EXCL = 0x1000
+)
+
+// Rfork flags
+const (
+ RFNAMEG = 1 << 0
+ RFENVG = 1 << 1
+ RFFDG = 1 << 2
+ RFNOTEG = 1 << 3
+ RFPROC = 1 << 4
+ RFMEM = 1 << 5
+ RFNOWAIT = 1 << 6
+ RFCNAMEG = 1 << 10
+ RFCENVG = 1 << 11
+ RFCFDG = 1 << 12
+ RFREND = 1 << 13
+ RFNOMNT = 1 << 14
+)
+
+// Qid.Type bits
+const (
+ QTDIR = 0x80
+ QTAPPEND = 0x40
+ QTEXCL = 0x20
+ QTMOUNT = 0x10
+ QTAUTH = 0x08
+ QTTMP = 0x04
+ QTFILE = 0x00
+)
+
+// Dir.Mode bits
+const (
+ DMDIR = 0x80000000
+ DMAPPEND = 0x40000000
+ DMEXCL = 0x20000000
+ DMMOUNT = 0x10000000
+ DMAUTH = 0x08000000
+ DMTMP = 0x04000000
+ DMREAD = 0x4
+ DMWRITE = 0x2
+ DMEXEC = 0x1
+)
+
+const (
+ STATMAX = 65535
+ ERRMAX = 128
+ STATFIXLEN = 49
+)
+
+// Mount and bind flags
+const (
+ MREPL = 0x0000
+ MBEFORE = 0x0001
+ MAFTER = 0x0002
+ MORDER = 0x0003
+ MCREATE = 0x0004
+ MCACHE = 0x0010
+ MMASK = 0x0017
+)
diff --git a/vendor/golang.org/x/sys/plan9/dir_plan9.go b/vendor/golang.org/x/sys/plan9/dir_plan9.go
new file mode 100644
index 0000000..0955e0c
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/dir_plan9.go
@@ -0,0 +1,212 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Plan 9 directory marshalling. See intro(5).
+
+package plan9
+
+import "errors"
+
+var (
+ ErrShortStat = errors.New("stat buffer too short")
+ ErrBadStat = errors.New("malformed stat buffer")
+ ErrBadName = errors.New("bad character in file name")
+)
+
+// A Qid represents a 9P server's unique identification for a file.
+type Qid struct {
+ Path uint64 // the file server's unique identification for the file
+ Vers uint32 // version number for given Path
+ Type uint8 // the type of the file (plan9.QTDIR for example)
+}
+
+// A Dir contains the metadata for a file.
+type Dir struct {
+ // system-modified data
+ Type uint16 // server type
+ Dev uint32 // server subtype
+
+ // file data
+ Qid Qid // unique id from server
+ Mode uint32 // permissions
+ Atime uint32 // last read time
+ Mtime uint32 // last write time
+ Length int64 // file length
+ Name string // last element of path
+ Uid string // owner name
+ Gid string // group name
+ Muid string // last modifier name
+}
+
+var nullDir = Dir{
+ Type: ^uint16(0),
+ Dev: ^uint32(0),
+ Qid: Qid{
+ Path: ^uint64(0),
+ Vers: ^uint32(0),
+ Type: ^uint8(0),
+ },
+ Mode: ^uint32(0),
+ Atime: ^uint32(0),
+ Mtime: ^uint32(0),
+ Length: ^int64(0),
+}
+
+// Null assigns special "don't touch" values to members of d to
+// avoid modifying them during plan9.Wstat.
+func (d *Dir) Null() { *d = nullDir }
+
+// Marshal encodes a 9P stat message corresponding to d into b
+//
+// If there isn't enough space in b for a stat message, ErrShortStat is returned.
+func (d *Dir) Marshal(b []byte) (n int, err error) {
+ n = STATFIXLEN + len(d.Name) + len(d.Uid) + len(d.Gid) + len(d.Muid)
+ if n > len(b) {
+ return n, ErrShortStat
+ }
+
+ for _, c := range d.Name {
+ if c == '/' {
+ return n, ErrBadName
+ }
+ }
+
+ b = pbit16(b, uint16(n)-2)
+ b = pbit16(b, d.Type)
+ b = pbit32(b, d.Dev)
+ b = pbit8(b, d.Qid.Type)
+ b = pbit32(b, d.Qid.Vers)
+ b = pbit64(b, d.Qid.Path)
+ b = pbit32(b, d.Mode)
+ b = pbit32(b, d.Atime)
+ b = pbit32(b, d.Mtime)
+ b = pbit64(b, uint64(d.Length))
+ b = pstring(b, d.Name)
+ b = pstring(b, d.Uid)
+ b = pstring(b, d.Gid)
+ b = pstring(b, d.Muid)
+
+ return n, nil
+}
+
+// UnmarshalDir decodes a single 9P stat message from b and returns the resulting Dir.
+//
+// If b is too small to hold a valid stat message, ErrShortStat is returned.
+//
+// If the stat message itself is invalid, ErrBadStat is returned.
+func UnmarshalDir(b []byte) (*Dir, error) {
+ if len(b) < STATFIXLEN {
+ return nil, ErrShortStat
+ }
+ size, buf := gbit16(b)
+ if len(b) != int(size)+2 {
+ return nil, ErrBadStat
+ }
+ b = buf
+
+ var d Dir
+ d.Type, b = gbit16(b)
+ d.Dev, b = gbit32(b)
+ d.Qid.Type, b = gbit8(b)
+ d.Qid.Vers, b = gbit32(b)
+ d.Qid.Path, b = gbit64(b)
+ d.Mode, b = gbit32(b)
+ d.Atime, b = gbit32(b)
+ d.Mtime, b = gbit32(b)
+
+ n, b := gbit64(b)
+ d.Length = int64(n)
+
+ var ok bool
+ if d.Name, b, ok = gstring(b); !ok {
+ return nil, ErrBadStat
+ }
+ if d.Uid, b, ok = gstring(b); !ok {
+ return nil, ErrBadStat
+ }
+ if d.Gid, b, ok = gstring(b); !ok {
+ return nil, ErrBadStat
+ }
+ if d.Muid, b, ok = gstring(b); !ok {
+ return nil, ErrBadStat
+ }
+
+ return &d, nil
+}
+
+// pbit8 copies the 8-bit number v to b and returns the remaining slice of b.
+func pbit8(b []byte, v uint8) []byte {
+ b[0] = byte(v)
+ return b[1:]
+}
+
+// pbit16 copies the 16-bit number v to b in little-endian order and returns the remaining slice of b.
+func pbit16(b []byte, v uint16) []byte {
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ return b[2:]
+}
+
+// pbit32 copies the 32-bit number v to b in little-endian order and returns the remaining slice of b.
+func pbit32(b []byte, v uint32) []byte {
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+ return b[4:]
+}
+
+// pbit64 copies the 64-bit number v to b in little-endian order and returns the remaining slice of b.
+func pbit64(b []byte, v uint64) []byte {
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+ b[4] = byte(v >> 32)
+ b[5] = byte(v >> 40)
+ b[6] = byte(v >> 48)
+ b[7] = byte(v >> 56)
+ return b[8:]
+}
+
+// pstring copies the string s to b, prepending it with a 16-bit length in little-endian order, and
+// returning the remaining slice of b..
+func pstring(b []byte, s string) []byte {
+ b = pbit16(b, uint16(len(s)))
+ n := copy(b, s)
+ return b[n:]
+}
+
+// gbit8 reads an 8-bit number from b and returns it with the remaining slice of b.
+func gbit8(b []byte) (uint8, []byte) {
+ return uint8(b[0]), b[1:]
+}
+
+// gbit16 reads a 16-bit number in little-endian order from b and returns it with the remaining slice of b.
+func gbit16(b []byte) (uint16, []byte) {
+ return uint16(b[0]) | uint16(b[1])<<8, b[2:]
+}
+
+// gbit32 reads a 32-bit number in little-endian order from b and returns it with the remaining slice of b.
+func gbit32(b []byte) (uint32, []byte) {
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24, b[4:]
+}
+
+// gbit64 reads a 64-bit number in little-endian order from b and returns it with the remaining slice of b.
+func gbit64(b []byte) (uint64, []byte) {
+ lo := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ hi := uint32(b[4]) | uint32(b[5])<<8 | uint32(b[6])<<16 | uint32(b[7])<<24
+ return uint64(lo) | uint64(hi)<<32, b[8:]
+}
+
+// gstring reads a string from b, prefixed with a 16-bit length in little-endian order.
+// It returns the string with the remaining slice of b and a boolean. If the length is
+// greater than the number of bytes in b, the boolean will be false.
+func gstring(b []byte) (string, []byte, bool) {
+ n, b := gbit16(b)
+ if int(n) > len(b) {
+ return "", b, false
+ }
+ return string(b[:n]), b[n:], true
+}
diff --git a/vendor/golang.org/x/sys/plan9/env_plan9.go b/vendor/golang.org/x/sys/plan9/env_plan9.go
new file mode 100644
index 0000000..8f19180
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/env_plan9.go
@@ -0,0 +1,31 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Plan 9 environment variables.
+
+package plan9
+
+import (
+ "syscall"
+)
+
+func Getenv(key string) (value string, found bool) {
+ return syscall.Getenv(key)
+}
+
+func Setenv(key, value string) error {
+ return syscall.Setenv(key, value)
+}
+
+func Clearenv() {
+ syscall.Clearenv()
+}
+
+func Environ() []string {
+ return syscall.Environ()
+}
+
+func Unsetenv(key string) error {
+ return syscall.Unsetenv(key)
+}
diff --git a/vendor/golang.org/x/sys/plan9/errors_plan9.go b/vendor/golang.org/x/sys/plan9/errors_plan9.go
new file mode 100644
index 0000000..65fe74d
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/errors_plan9.go
@@ -0,0 +1,50 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package plan9
+
+import "syscall"
+
+// Constants
+const (
+ // Invented values to support what package os expects.
+ O_CREAT = 0x02000
+ O_APPEND = 0x00400
+ O_NOCTTY = 0x00000
+ O_NONBLOCK = 0x00000
+ O_SYNC = 0x00000
+ O_ASYNC = 0x00000
+
+ S_IFMT = 0x1f000
+ S_IFIFO = 0x1000
+ S_IFCHR = 0x2000
+ S_IFDIR = 0x4000
+ S_IFBLK = 0x6000
+ S_IFREG = 0x8000
+ S_IFLNK = 0xa000
+ S_IFSOCK = 0xc000
+)
+
+// Errors
+var (
+ EINVAL = syscall.NewError("bad arg in system call")
+ ENOTDIR = syscall.NewError("not a directory")
+ EISDIR = syscall.NewError("file is a directory")
+ ENOENT = syscall.NewError("file does not exist")
+ EEXIST = syscall.NewError("file already exists")
+ EMFILE = syscall.NewError("no free file descriptors")
+ EIO = syscall.NewError("i/o error")
+ ENAMETOOLONG = syscall.NewError("file name too long")
+ EINTR = syscall.NewError("interrupted")
+ EPERM = syscall.NewError("permission denied")
+ EBUSY = syscall.NewError("no free devices")
+ ETIMEDOUT = syscall.NewError("connection timed out")
+ EPLAN9 = syscall.NewError("not supported by plan 9")
+
+ // The following errors do not correspond to any
+ // Plan 9 system messages. Invented to support
+ // what package os and others expect.
+ EACCES = syscall.NewError("access permission denied")
+ EAFNOSUPPORT = syscall.NewError("address family not supported by protocol")
+)
diff --git a/vendor/golang.org/x/sys/plan9/mkall.sh b/vendor/golang.org/x/sys/plan9/mkall.sh
new file mode 100644
index 0000000..1650fbc
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/mkall.sh
@@ -0,0 +1,150 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# The plan9 package provides access to the raw system call
+# interface of the underlying operating system. Porting Go to
+# a new architecture/operating system combination requires
+# some manual effort, though there are tools that automate
+# much of the process. The auto-generated files have names
+# beginning with z.
+#
+# This script runs or (given -n) prints suggested commands to generate z files
+# for the current system. Running those commands is not automatic.
+# This script is documentation more than anything else.
+#
+# * asm_${GOOS}_${GOARCH}.s
+#
+# This hand-written assembly file implements system call dispatch.
+# There are three entry points:
+#
+# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
+# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr);
+# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
+#
+# The first and second are the standard ones; they differ only in
+# how many arguments can be passed to the kernel.
+# The third is for low-level use by the ForkExec wrapper;
+# unlike the first two, it does not call into the scheduler to
+# let it know that a system call is running.
+#
+# * syscall_${GOOS}.go
+#
+# This hand-written Go file implements system calls that need
+# special handling and lists "//sys" comments giving prototypes
+# for ones that can be auto-generated. Mksyscall reads those
+# comments to generate the stubs.
+#
+# * syscall_${GOOS}_${GOARCH}.go
+#
+# Same as syscall_${GOOS}.go except that it contains code specific
+# to ${GOOS} on one particular architecture.
+#
+# * types_${GOOS}.c
+#
+# This hand-written C file includes standard C headers and then
+# creates typedef or enum names beginning with a dollar sign
+# (use of $ in variable names is a gcc extension). The hardest
+# part about preparing this file is figuring out which headers to
+# include and which symbols need to be #defined to get the
+# actual data structures that pass through to the kernel system calls.
+# Some C libraries present alternate versions for binary compatibility
+# and translate them on the way in and out of system calls, but
+# there is almost always a #define that can get the real ones.
+# See types_darwin.c and types_linux.c for examples.
+#
+# * zerror_${GOOS}_${GOARCH}.go
+#
+# This machine-generated file defines the system's error numbers,
+# error strings, and signal numbers. The generator is "mkerrors.sh".
+# Usually no arguments are needed, but mkerrors.sh will pass its
+# arguments on to godefs.
+#
+# * zsyscall_${GOOS}_${GOARCH}.go
+#
+# Generated by mksyscall.pl; see syscall_${GOOS}.go above.
+#
+# * zsysnum_${GOOS}_${GOARCH}.go
+#
+# Generated by mksysnum_${GOOS}.
+#
+# * ztypes_${GOOS}_${GOARCH}.go
+#
+# Generated by godefs; see types_${GOOS}.c above.
+
+GOOSARCH="${GOOS}_${GOARCH}"
+
+# defaults
+mksyscall="go run mksyscall.go"
+mkerrors="./mkerrors.sh"
+zerrors="zerrors_$GOOSARCH.go"
+mksysctl=""
+zsysctl="zsysctl_$GOOSARCH.go"
+mksysnum=
+mktypes=
+run="sh"
+
+case "$1" in
+-syscalls)
+ for i in zsyscall*go
+ do
+ sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
+ rm _$i
+ done
+ exit 0
+ ;;
+-n)
+ run="cat"
+ shift
+esac
+
+case "$#" in
+0)
+ ;;
+*)
+ echo 'usage: mkall.sh [-n]' 1>&2
+ exit 2
+esac
+
+case "$GOOSARCH" in
+_* | *_ | _)
+ echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+plan9_386)
+ mkerrors=
+ mksyscall="go run mksyscall.go -l32 -plan9 -tags plan9,386"
+ mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h"
+ mktypes="XXX"
+ ;;
+plan9_amd64)
+ mkerrors=
+ mksyscall="go run mksyscall.go -l32 -plan9 -tags plan9,amd64"
+ mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h"
+ mktypes="XXX"
+ ;;
+plan9_arm)
+ mkerrors=
+ mksyscall="go run mksyscall.go -l32 -plan9 -tags plan9,arm"
+ mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h"
+ mktypes="XXX"
+ ;;
+*)
+ echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+esac
+
+(
+ if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
+ case "$GOOS" in
+ plan9)
+ syscall_goos="syscall_$GOOS.go"
+ if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos |gofmt >zsyscall_$GOOSARCH.go"; fi
+ ;;
+ esac
+ if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
+ if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
+ if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go |gofmt >ztypes_$GOOSARCH.go"; fi
+) | $run
diff --git a/vendor/golang.org/x/sys/plan9/mkerrors.sh b/vendor/golang.org/x/sys/plan9/mkerrors.sh
new file mode 100644
index 0000000..85309c4
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/mkerrors.sh
@@ -0,0 +1,246 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Generate Go code listing errors and other #defined constant
+# values (ENAMETOOLONG etc.), by asking the preprocessor
+# about the definitions.
+
+unset LANG
+export LC_ALL=C
+export LC_CTYPE=C
+
+CC=${CC:-gcc}
+
+uname=$(uname)
+
+includes='
+#include <sys/types.h>
+#include <sys/file.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip6.h>
+#include <netinet/icmp6.h>
+#include <netinet/tcp.h>
+#include <sys/param.h>
+#include <sys/ptrace.h>
+'
+
+ccflags="$@"
+
+# Write go tool cgo -godefs input.
+(
+ echo package plan9
+ echo
+ echo '/*'
+ indirect="includes_$(uname)"
+ echo "${!indirect} $includes"
+ echo '*/'
+ echo 'import "C"'
+ echo
+ echo 'const ('
+
+ # The gcc command line prints all the #defines
+ # it encounters while processing the input
+ echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags |
+ awk '
+ $1 != "#define" || $2 ~ /\(/ || $3 == "" {next}
+
+ $2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers
+ $2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next}
+ $2 ~ /^(SCM_SRCRT)$/ {next}
+ $2 ~ /^(MAP_FAILED)$/ {next}
+
+ $2 !~ /^ETH_/ &&
+ $2 !~ /^EPROC_/ &&
+ $2 !~ /^EQUIV_/ &&
+ $2 !~ /^EXPR_/ &&
+ $2 ~ /^E[A-Z0-9_]+$/ ||
+ $2 ~ /^B[0-9_]+$/ ||
+ $2 ~ /^V[A-Z0-9]+$/ ||
+ $2 ~ /^CS[A-Z0-9]/ ||
+ $2 ~ /^I(SIG|CANON|CRNL|EXTEN|MAXBEL|STRIP|UTF8)$/ ||
+ $2 ~ /^IGN/ ||
+ $2 ~ /^IX(ON|ANY|OFF)$/ ||
+ $2 ~ /^IN(LCR|PCK)$/ ||
+ $2 ~ /(^FLU?SH)|(FLU?SH$)/ ||
+ $2 ~ /^C(LOCAL|READ)$/ ||
+ $2 == "BRKINT" ||
+ $2 == "HUPCL" ||
+ $2 == "PENDIN" ||
+ $2 == "TOSTOP" ||
+ $2 ~ /^PAR/ ||
+ $2 ~ /^SIG[^_]/ ||
+ $2 ~ /^O[CNPFP][A-Z]+[^_][A-Z]+$/ ||
+ $2 ~ /^IN_/ ||
+ $2 ~ /^LOCK_(SH|EX|NB|UN)$/ ||
+ $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ ||
+ $2 == "ICMPV6_FILTER" ||
+ $2 == "SOMAXCONN" ||
+ $2 == "NAME_MAX" ||
+ $2 == "IFNAMSIZ" ||
+ $2 ~ /^CTL_(MAXNAME|NET|QUERY)$/ ||
+ $2 ~ /^SYSCTL_VERS/ ||
+ $2 ~ /^(MS|MNT)_/ ||
+ $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ ||
+ $2 ~ /^(O|F|FD|NAME|S|PTRACE|PT)_/ ||
+ $2 ~ /^LINUX_REBOOT_CMD_/ ||
+ $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ ||
+ $2 !~ "NLA_TYPE_MASK" &&
+ $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ ||
+ $2 ~ /^SIOC/ ||
+ $2 ~ /^TIOC/ ||
+ $2 !~ "RTF_BITS" &&
+ $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ ||
+ $2 ~ /^BIOC/ ||
+ $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ ||
+ $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|NOFILE|STACK)|RLIM_INFINITY/ ||
+ $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ ||
+ $2 ~ /^CLONE_[A-Z_]+/ ||
+ $2 !~ /^(BPF_TIMEVAL)$/ &&
+ $2 ~ /^(BPF|DLT)_/ ||
+ $2 !~ "WMESGLEN" &&
+ $2 ~ /^W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", $2, $2)}
+ $2 ~ /^__WCOREFLAG$/ {next}
+ $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}
+
+ {next}
+ ' | sort
+
+ echo ')'
+) >_const.go
+
+# Pull out the error names for later.
+errors=$(
+ echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' |
+ sort
+)
+
+# Pull out the signal names for later.
+signals=$(
+ echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
+ egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' |
+ sort
+)
+
+# Again, writing regexps to a file.
+echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' |
+ sort >_error.grep
+echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
+ awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
+ egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' |
+ sort >_signal.grep
+
+echo '// mkerrors.sh' "$@"
+echo '// Code generated by the command above; DO NOT EDIT.'
+echo
+go tool cgo -godefs -- "$@" _const.go >_error.out
+cat _error.out | grep -vf _error.grep | grep -vf _signal.grep
+echo
+echo '// Errors'
+echo 'const ('
+cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= Errno(\1)/'
+echo ')'
+
+echo
+echo '// Signals'
+echo 'const ('
+cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= Signal(\1)/'
+echo ')'
+
+# Run C program to print error and syscall strings.
+(
+ echo -E "
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <ctype.h>
+#include <string.h>
+#include <signal.h>
+
+#define nelem(x) (sizeof(x)/sizeof((x)[0]))
+
+enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below
+
+int errors[] = {
+"
+ for i in $errors
+ do
+ echo -E ' '$i,
+ done
+
+ echo -E "
+};
+
+int signals[] = {
+"
+ for i in $signals
+ do
+ echo -E ' '$i,
+ done
+
+ # Use -E because on some systems bash builtin interprets \n itself.
+ echo -E '
+};
+
+static int
+intcmp(const void *a, const void *b)
+{
+ return *(int*)a - *(int*)b;
+}
+
+int
+main(void)
+{
+ int i, j, e;
+ char buf[1024], *p;
+
+ printf("\n\n// Error table\n");
+ printf("var errors = [...]string {\n");
+ qsort(errors, nelem(errors), sizeof errors[0], intcmp);
+ for(i=0; i<nelem(errors); i++) {
+ e = errors[i];
+ if(i > 0 && errors[i-1] == e)
+ continue;
+ strcpy(buf, strerror(e));
+ // lowercase first letter: Bad -> bad, but STREAM -> STREAM.
+ if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
+ buf[0] += a - A;
+ printf("\t%d: \"%s\",\n", e, buf);
+ }
+ printf("}\n\n");
+
+ printf("\n\n// Signal table\n");
+ printf("var signals = [...]string {\n");
+ qsort(signals, nelem(signals), sizeof signals[0], intcmp);
+ for(i=0; i<nelem(signals); i++) {
+ e = signals[i];
+ if(i > 0 && signals[i-1] == e)
+ continue;
+ strcpy(buf, strsignal(e));
+ // lowercase first letter: Bad -> bad, but STREAM -> STREAM.
+ if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
+ buf[0] += a - A;
+ // cut trailing : number.
+ p = strrchr(buf, ":"[0]);
+ if(p)
+ *p = '\0';
+ printf("\t%d: \"%s\",\n", e, buf);
+ }
+ printf("}\n\n");
+
+ return 0;
+}
+
+'
+) >_errors.c
+
+$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out
diff --git a/vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh b/vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh
new file mode 100644
index 0000000..3c3ab05
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+COMMAND="mksysnum_plan9.sh $@"
+
+cat <<EOF
+// $COMMAND
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build plan9
+
+package plan9
+
+const(
+EOF
+
+SP='[ 	]' # space or tab
+sed "s/^#define${SP}*\([A-Z0-9_][A-Z0-9_]*\)${SP}*\([0-9][0-9]*\)/SYS_\1=\2/g" \
+ < $1 | grep -v SYS__ | tr a-z A-Z
+
+echo ')'
diff --git a/vendor/golang.org/x/sys/plan9/str.go b/vendor/golang.org/x/sys/plan9/str.go
new file mode 100644
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/str.go
@@ -0,0 +1,22 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build plan9
+
+package plan9
+
+func itoa(val int) string { // do it here rather than with fmt to avoid dependency
+ if val < 0 {
+ return "-" + itoa(-val)
+ }
+ var buf [32]byte // big enough for int64
+ i := len(buf) - 1
+ for val >= 10 {
+ buf[i] = byte(val%10 + '0')
+ i--
+ val /= 10
+ }
+ buf[i] = byte(val + '0')
+ return string(buf[i:])
+}
diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go
new file mode 100644
index 0000000..e7363a2
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/syscall.go
@@ -0,0 +1,116 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build plan9
+
+// Package plan9 contains an interface to the low-level operating system
+// primitives. OS details vary depending on the underlying system, and
+// by default, godoc will display the OS-specific documentation for the current
+// system. If you want godoc to display documentation for another
+// system, set $GOOS and $GOARCH to the desired system. For example, if
+// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
+// to freebsd and $GOARCH to arm.
+//
+// The primary use of this package is inside other packages that provide a more
+// portable interface to the system, such as "os", "time" and "net". Use
+// those packages rather than this one if you can.
+//
+// For details of the functions and data types in this package consult
+// the manuals for the appropriate operating system.
+//
+// These calls return err == nil to indicate success; otherwise
+// err represents an operating system error describing the failure and
+// holds a value of type syscall.ErrorString.
+package plan9 // import "golang.org/x/sys/plan9"
+
+import (
+ "bytes"
+ "strings"
+ "unsafe"
+
+ "golang.org/x/sys/internal/unsafeheader"
+)
+
+// ByteSliceFromString returns a NUL-terminated slice of bytes
+// containing the text of s. If s contains a NUL byte at any
+// location, it returns (nil, EINVAL).
+func ByteSliceFromString(s string) ([]byte, error) {
+ if strings.IndexByte(s, 0) != -1 {
+ return nil, EINVAL
+ }
+ a := make([]byte, len(s)+1)
+ copy(a, s)
+ return a, nil
+}
+
+// BytePtrFromString returns a pointer to a NUL-terminated array of
+// bytes containing the text of s. If s contains a NUL byte at any
+// location, it returns (nil, EINVAL).
+func BytePtrFromString(s string) (*byte, error) {
+ a, err := ByteSliceFromString(s)
+ if err != nil {
+ return nil, err
+ }
+ return &a[0], nil
+}
+
+// ByteSliceToString returns a string form of the text represented by the slice s, with a terminating NUL and any
+// bytes after the NUL removed.
+func ByteSliceToString(s []byte) string {
+ if i := bytes.IndexByte(s, 0); i != -1 {
+ s = s[:i]
+ }
+ return string(s)
+}
+
+// BytePtrToString takes a pointer to a sequence of text and returns the corresponding string.
+// If the pointer is nil, it returns the empty string. It assumes that the text sequence is terminated
+// at a zero byte; if the zero byte is not present, the program may crash.
+func BytePtrToString(p *byte) string {
+ if p == nil {
+ return ""
+ }
+ if *p == 0 {
+ return ""
+ }
+
+ // Find NUL terminator.
+ n := 0
+ for ptr := unsafe.Pointer(p); *(*byte)(ptr) != 0; n++ {
+ ptr = unsafe.Pointer(uintptr(ptr) + 1)
+ }
+
+ var s []byte
+ h := (*unsafeheader.Slice)(unsafe.Pointer(&s))
+ h.Data = unsafe.Pointer(p)
+ h.Len = n
+ h.Cap = n
+
+ return string(s)
+}
+
+// Single-word zero for use when we need a valid pointer to 0 bytes.
+// See mksyscall.pl.
+var _zero uintptr
+
+func (ts *Timespec) Unix() (sec int64, nsec int64) {
+ return int64(ts.Sec), int64(ts.Nsec)
+}
+
+func (tv *Timeval) Unix() (sec int64, nsec int64) {
+ return int64(tv.Sec), int64(tv.Usec) * 1000
+}
+
+func (ts *Timespec) Nano() int64 {
+ return int64(ts.Sec)*1e9 + int64(ts.Nsec)
+}
+
+func (tv *Timeval) Nano() int64 {
+ return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
+}
+
+// use is a no-op, but the compiler cannot see that it is.
+// Calling use(p) ensures that p is kept live until that point.
+//go:noescape
+func use(p unsafe.Pointer)
diff --git a/vendor/golang.org/x/sys/plan9/syscall_plan9.go b/vendor/golang.org/x/sys/plan9/syscall_plan9.go
new file mode 100644
index 0000000..84e1471
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/syscall_plan9.go
@@ -0,0 +1,349 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Plan 9 system calls.
+// This file is compiled as ordinary Go code,
+// but it is also input to mksyscall,
+// which parses the //sys lines and generates system call stubs.
+// Note that sometimes we use a lowercase //sys name and
+// wrap it in our own nicer implementation.
+
+package plan9
+
+import (
+ "bytes"
+ "syscall"
+ "unsafe"
+)
+
+// A Note is a string describing a process note.
+// It implements the os.Signal interface.
+type Note string
+
+func (n Note) Signal() {}
+
+func (n Note) String() string {
+ return string(n)
+}
+
+var (
+ Stdin = 0
+ Stdout = 1
+ Stderr = 2
+)
+
+// For testing: clients can set this flag to force
+// creation of IPv6 sockets to return EAFNOSUPPORT.
+var SocketDisableIPv6 bool
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.ErrorString)
+func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.ErrorString)
+func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
+func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
+
+func atoi(b []byte) (n uint) {
+ n = 0
+ for i := 0; i < len(b); i++ {
+ n = n*10 + uint(b[i]-'0')
+ }
+ return
+}
+
+func cstring(s []byte) string {
+ i := bytes.IndexByte(s, 0)
+ if i == -1 {
+ i = len(s)
+ }
+ return string(s[:i])
+}
+
+func errstr() string {
+ var buf [ERRMAX]byte
+
+ RawSyscall(SYS_ERRSTR, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)), 0)
+
+ buf[len(buf)-1] = 0
+ return cstring(buf[:])
+}
+
+// Implemented in assembly to import from runtime.
+func exit(code int)
+
+func Exit(code int) { exit(code) }
+
+func readnum(path string) (uint, error) {
+ var b [12]byte
+
+ fd, e := Open(path, O_RDONLY)
+ if e != nil {
+ return 0, e
+ }
+ defer Close(fd)
+
+ n, e := Pread(fd, b[:], 0)
+
+ if e != nil {
+ return 0, e
+ }
+
+ m := 0
+ for ; m < n && b[m] == ' '; m++ {
+ }
+
+ return atoi(b[m : n-1]), nil
+}
+
+func Getpid() (pid int) {
+ n, _ := readnum("#c/pid")
+ return int(n)
+}
+
+func Getppid() (ppid int) {
+ n, _ := readnum("#c/ppid")
+ return int(n)
+}
+
+func Read(fd int, p []byte) (n int, err error) {
+ return Pread(fd, p, -1)
+}
+
+func Write(fd int, p []byte) (n int, err error) {
+ return Pwrite(fd, p, -1)
+}
+
+var ioSync int64
+
+//sys fd2path(fd int, buf []byte) (err error)
+func Fd2path(fd int) (path string, err error) {
+ var buf [512]byte
+
+ e := fd2path(fd, buf[:])
+ if e != nil {
+ return "", e
+ }
+ return cstring(buf[:]), nil
+}
+
+//sys pipe(p *[2]int32) (err error)
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return syscall.ErrorString("bad arg in system call")
+ }
+ var pp [2]int32
+ err = pipe(&pp)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+// Underlying system call writes to newoffset via pointer.
+// Implemented in assembly to avoid allocation.
+func seek(placeholder uintptr, fd int, offset int64, whence int) (newoffset int64, err string)
+
+func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
+ newoffset, e := seek(0, fd, offset, whence)
+
+ if newoffset == -1 {
+ err = syscall.ErrorString(e)
+ }
+ return
+}
+
+func Mkdir(path string, mode uint32) (err error) {
+ fd, err := Create(path, O_RDONLY, DMDIR|mode)
+
+ if fd != -1 {
+ Close(fd)
+ }
+
+ return
+}
+
+type Waitmsg struct {
+ Pid int
+ Time [3]uint32
+ Msg string
+}
+
+func (w Waitmsg) Exited() bool { return true }
+func (w Waitmsg) Signaled() bool { return false }
+
+func (w Waitmsg) ExitStatus() int {
+ if len(w.Msg) == 0 {
+ // a normal exit returns no message
+ return 0
+ }
+ return 1
+}
+
+//sys await(s []byte) (n int, err error)
+func Await(w *Waitmsg) (err error) {
+ var buf [512]byte
+ var f [5][]byte
+
+ n, err := await(buf[:])
+
+ if err != nil || w == nil {
+ return
+ }
+
+ nf := 0
+ p := 0
+ for i := 0; i < n && nf < len(f)-1; i++ {
+ if buf[i] == ' ' {
+ f[nf] = buf[p:i]
+ p = i + 1
+ nf++
+ }
+ }
+ f[nf] = buf[p:]
+ nf++
+
+ if nf != len(f) {
+ return syscall.ErrorString("invalid wait message")
+ }
+ w.Pid = int(atoi(f[0]))
+ w.Time[0] = uint32(atoi(f[1]))
+ w.Time[1] = uint32(atoi(f[2]))
+ w.Time[2] = uint32(atoi(f[3]))
+ w.Msg = cstring(f[4])
+ if w.Msg == "''" {
+ // await() returns '' for no error
+ w.Msg = ""
+ }
+ return
+}
+
+func Unmount(name, old string) (err error) {
+ fixwd()
+ oldp, err := BytePtrFromString(old)
+ if err != nil {
+ return err
+ }
+ oldptr := uintptr(unsafe.Pointer(oldp))
+
+ var r0 uintptr
+ var e syscall.ErrorString
+
+ // bind(2) man page: If name is zero, everything bound or mounted upon old is unbound or unmounted.
+ if name == "" {
+ r0, _, e = Syscall(SYS_UNMOUNT, _zero, oldptr, 0)
+ } else {
+ namep, err := BytePtrFromString(name)
+ if err != nil {
+ return err
+ }
+ r0, _, e = Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(namep)), oldptr, 0)
+ }
+
+ if int32(r0) == -1 {
+ err = e
+ }
+ return
+}
+
+func Fchdir(fd int) (err error) {
+ path, err := Fd2path(fd)
+
+ if err != nil {
+ return
+ }
+
+ return Chdir(path)
+}
+
+type Timespec struct {
+ Sec int32
+ Nsec int32
+}
+
+type Timeval struct {
+ Sec int32
+ Usec int32
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ tv.Sec = int32(nsec / 1e9)
+ return
+}
+
+func nsec() int64 {
+ var scratch int64
+
+ r0, _, _ := Syscall(SYS_NSEC, uintptr(unsafe.Pointer(&scratch)), 0, 0)
+ // TODO(aram): remove hack after I fix _nsec in the pc64 kernel.
+ if r0 == 0 {
+ return scratch
+ }
+ return int64(r0)
+}
+
+func Gettimeofday(tv *Timeval) error {
+ nsec := nsec()
+ *tv = NsecToTimeval(nsec)
+ return nil
+}
+
+func Getpagesize() int { return 0x1000 }
+
+func Getegid() (egid int) { return -1 }
+func Geteuid() (euid int) { return -1 }
+func Getgid() (gid int) { return -1 }
+func Getuid() (uid int) { return -1 }
+
+func Getgroups() (gids []int, err error) {
+ return make([]int, 0), nil
+}
+
+//sys open(path string, mode int) (fd int, err error)
+func Open(path string, mode int) (fd int, err error) {
+ fixwd()
+ return open(path, mode)
+}
+
+//sys create(path string, mode int, perm uint32) (fd int, err error)
+func Create(path string, mode int, perm uint32) (fd int, err error) {
+ fixwd()
+ return create(path, mode, perm)
+}
+
+//sys remove(path string) (err error)
+func Remove(path string) error {
+ fixwd()
+ return remove(path)
+}
+
+//sys stat(path string, edir []byte) (n int, err error)
+func Stat(path string, edir []byte) (n int, err error) {
+ fixwd()
+ return stat(path, edir)
+}
+
+//sys bind(name string, old string, flag int) (err error)
+func Bind(name string, old string, flag int) (err error) {
+ fixwd()
+ return bind(name, old, flag)
+}
+
+//sys mount(fd int, afd int, old string, flag int, aname string) (err error)
+func Mount(fd int, afd int, old string, flag int, aname string) (err error) {
+ fixwd()
+ return mount(fd, afd, old, flag, aname)
+}
+
+//sys wstat(path string, edir []byte) (err error)
+func Wstat(path string, edir []byte) (err error) {
+ fixwd()
+ return wstat(path, edir)
+}
+
+//sys chdir(path string) (err error)
+//sys Dup(oldfd int, newfd int) (fd int, err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error)
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys Close(fd int) (err error)
+//sys Fstat(fd int, edir []byte) (n int, err error)
+//sys Fwstat(fd int, edir []byte) (err error)
diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go
new file mode 100644
index 0000000..6819bc2
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go
@@ -0,0 +1,284 @@
+// go run mksyscall.go -l32 -plan9 -tags plan9,386 syscall_plan9.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build plan9,386
+
+package plan9
+
+import "unsafe"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fd2path(fd int, buf []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe(p *[2]int32) (err error) {
+ r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func await(s []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(s) > 0 {
+ _p0 = unsafe.Pointer(&s[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func open(path string, mode int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func create(path string, mode int, perm uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func remove(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func stat(path string, edir []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(edir) > 0 {
+ _p1 = unsafe.Pointer(&edir[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(name string, old string, flag int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(old)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mount(fd int, afd int, old string, flag int, aname string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(old)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(aname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wstat(path string, edir []byte) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(edir) > 0 {
+ _p1 = unsafe.Pointer(&edir[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(oldfd int, newfd int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0)
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ r0, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, edir []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(edir) > 0 {
+ _p0 = unsafe.Pointer(&edir[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fwstat(fd int, edir []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(edir) > 0 {
+ _p0 = unsafe.Pointer(&edir[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go
new file mode 100644
index 0000000..418abbb
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go
@@ -0,0 +1,284 @@
+// go run mksyscall.go -l32 -plan9 -tags plan9,amd64 syscall_plan9.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build plan9,amd64
+
+package plan9
+
+import "unsafe"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fd2path(fd int, buf []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe(p *[2]int32) (err error) {
+ r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func await(s []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(s) > 0 {
+ _p0 = unsafe.Pointer(&s[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func open(path string, mode int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func create(path string, mode int, perm uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func remove(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func stat(path string, edir []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(edir) > 0 {
+ _p1 = unsafe.Pointer(&edir[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(name string, old string, flag int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(old)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mount(fd int, afd int, old string, flag int, aname string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(old)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(aname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wstat(path string, edir []byte) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(edir) > 0 {
+ _p1 = unsafe.Pointer(&edir[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(oldfd int, newfd int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0)
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ r0, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, edir []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(edir) > 0 {
+ _p0 = unsafe.Pointer(&edir[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fwstat(fd int, edir []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(edir) > 0 {
+ _p0 = unsafe.Pointer(&edir[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go
new file mode 100644
index 0000000..3e8a1a5
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go
@@ -0,0 +1,284 @@
+// go run mksyscall.go -l32 -plan9 -tags plan9,arm syscall_plan9.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build plan9,arm
+
+package plan9
+
+import "unsafe"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fd2path(fd int, buf []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe(p *[2]int32) (err error) {
+ r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func await(s []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(s) > 0 {
+ _p0 = unsafe.Pointer(&s[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func open(path string, mode int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func create(path string, mode int, perm uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func remove(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func stat(path string, edir []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(edir) > 0 {
+ _p1 = unsafe.Pointer(&edir[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(name string, old string, flag int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(name)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(old)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mount(fd int, afd int, old string, flag int, aname string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(old)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(aname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wstat(path string, edir []byte) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(edir) > 0 {
+ _p1 = unsafe.Pointer(&edir[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(oldfd int, newfd int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0)
+ fd = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ r0, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, edir []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(edir) > 0 {
+ _p0 = unsafe.Pointer(&edir[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
+ n = int(r0)
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fwstat(fd int, edir []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(edir) > 0 {
+ _p0 = unsafe.Pointer(&edir[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
+ if int32(r0) == -1 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/plan9/zsysnum_plan9.go b/vendor/golang.org/x/sys/plan9/zsysnum_plan9.go
new file mode 100644
index 0000000..22e8abd
--- /dev/null
+++ b/vendor/golang.org/x/sys/plan9/zsysnum_plan9.go
@@ -0,0 +1,49 @@
+// mksysnum_plan9.sh /opt/plan9/sys/src/libc/9syscall/sys.h
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+package plan9
+
+const (
+ SYS_SYSR1 = 0
+ SYS_BIND = 2
+ SYS_CHDIR = 3
+ SYS_CLOSE = 4
+ SYS_DUP = 5
+ SYS_ALARM = 6
+ SYS_EXEC = 7
+ SYS_EXITS = 8
+ SYS_FAUTH = 10
+ SYS_SEGBRK = 12
+ SYS_OPEN = 14
+ SYS_OSEEK = 16
+ SYS_SLEEP = 17
+ SYS_RFORK = 19
+ SYS_PIPE = 21
+ SYS_CREATE = 22
+ SYS_FD2PATH = 23
+ SYS_BRK_ = 24
+ SYS_REMOVE = 25
+ SYS_NOTIFY = 28
+ SYS_NOTED = 29
+ SYS_SEGATTACH = 30
+ SYS_SEGDETACH = 31
+ SYS_SEGFREE = 32
+ SYS_SEGFLUSH = 33
+ SYS_RENDEZVOUS = 34
+ SYS_UNMOUNT = 35
+ SYS_SEMACQUIRE = 37
+ SYS_SEMRELEASE = 38
+ SYS_SEEK = 39
+ SYS_FVERSION = 40
+ SYS_ERRSTR = 41
+ SYS_STAT = 42
+ SYS_FSTAT = 43
+ SYS_WSTAT = 44
+ SYS_FWSTAT = 45
+ SYS_MOUNT = 46
+ SYS_AWAIT = 47
+ SYS_PREAD = 50
+ SYS_PWRITE = 51
+ SYS_TSEMACQUIRE = 52
+ SYS_NSEC = 53
+)
diff --git a/vendor/golang.org/x/sys/unix/.gitignore b/vendor/golang.org/x/sys/unix/.gitignore
new file mode 100644
index 0000000..e3e0fc6
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/.gitignore
@@ -0,0 +1,2 @@
+_obj/
+unix.test
diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md
new file mode 100644
index 0000000..474efad
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/README.md
@@ -0,0 +1,184 @@
+# Building `sys/unix`
+
+The sys/unix package provides access to the raw system call interface of the
+underlying operating system. See: https://godoc.org/golang.org/x/sys/unix
+
+Porting Go to a new architecture/OS combination or adding syscalls, types, or
+constants to an existing architecture/OS pair requires some manual effort;
+however, there are tools that automate much of the process.
+
+## Build Systems
+
+There are currently two ways we generate the necessary files. We are currently
+migrating the build system to use containers so the builds are reproducible.
+This is being done on an OS-by-OS basis. Please update this documentation as
+components of the build system change.
+
+### Old Build System (currently for `GOOS != "linux"`)
+
+The old build system generates the Go files based on the C header files
+present on your system. This means that files
+for a given GOOS/GOARCH pair must be generated on a system with that OS and
+architecture. This also means that the generated code can differ from system
+to system, based on differences in the header files.
+
+To avoid this, if you are using the old build system, only generate the Go
+files on an installation with unmodified header files. It is also important to
+keep track of which version of the OS the files were generated from (ex.
+Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes
+and have each OS upgrade correspond to a single change.
+
+To build the files for your current OS and architecture, make sure GOOS and
+GOARCH are set correctly and run `mkall.sh`. This will generate the files for
+your specific system. Running `mkall.sh -n` shows the commands that will be run.
+
+Requirements: bash, go
+
+### New Build System (currently for `GOOS == "linux"`)
+
+The new build system uses a Docker container to generate the go files directly
+from source checkouts of the kernel and various system libraries. This means
+that on any platform that supports Docker, all the files using the new build
+system can be generated at once, and generated files will not change based on
+what the person running the scripts has installed on their computer.
+
+The OS specific files for the new build system are located in the `${GOOS}`
+directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When
+the kernel or system library updates, modify the Dockerfile at
+`${GOOS}/Dockerfile` to checkout the new release of the source.
+
+To build all the files under the new build system, you must be on an amd64/Linux
+system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
+then generate all of the files for all of the GOOS/GOARCH pairs in the new build
+system. Running `mkall.sh -n` shows the commands that will be run.
+
+Requirements: bash, go, docker
+
+## Component files
+
+This section describes the various files used in the code generation process.
+It also contains instructions on how to modify these files to add a new
+architecture/OS or to add additional syscalls, types, or constants. Note that
+if you are using the new build system, the scripts/programs cannot be called normally.
+They must be called from within the docker container.
+
+### asm files
+
+The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system
+call dispatch. There are three entry points:
+```
+ func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
+ func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
+ func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
+```
+The first and second are the standard ones; they differ only in how many
+arguments can be passed to the kernel. The third is for low-level use by the
+ForkExec wrapper. Unlike the first two, it does not call into the scheduler to
+let it know that a system call is running.
+
+When porting Go to a new architecture/OS, this file must be implemented for
+each GOOS/GOARCH pair.
+
+### mksysnum
+
+Mksysnum is a Go program located at `${GOOS}/mksysnum.go` (or `mksysnum_${GOOS}.go`
+for the old system). This program takes in a list of header files containing the
+syscall number declarations and parses them to produce the corresponding list of
+Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
+constants.
+
+Adding new syscall numbers is mostly done by running the build on a sufficiently
+new installation of the target OS (or updating the source checkouts for the
+new build system). However, depending on the OS, you may need to update the
+parsing in mksysnum.
+
+### mksyscall.go
+
+The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are
+hand-written Go files which implement system calls (for unix, the specific OS,
+or the specific OS/Architecture pair respectively) that need special handling
+and list `//sys` comments giving prototypes for ones that can be generated.
+
+The mksyscall.go program takes the `//sys` and `//sysnb` comments and converts
+them into syscalls. This requires the name of the prototype in the comment to
+match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
+prototype can be exported (capitalized) or not.
+
+Adding a new syscall often just requires adding a new `//sys` function prototype
+with the desired arguments and a capitalized name so it is exported. However, if
+you want the interface to the syscall to be different, often one will make an
+unexported `//sys` prototype, and then write a custom wrapper in
+`syscall_${GOOS}.go`.
+
+### types files
+
+For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or
+`types_${GOOS}.go` on the old system). This file includes standard C headers and
+creates Go type aliases to the corresponding C types. The file is then fed
+through godef to get the Go compatible definitions. Finally, the generated code
+is fed though mkpost.go to format the code correctly and remove any hidden or
+private identifiers. This cleaned-up code is written to
+`ztypes_${GOOS}_${GOARCH}.go`.
+
+The hardest part about preparing this file is figuring out which headers to
+include and which symbols need to be `#define`d to get the actual data
+structures that pass through to the kernel system calls. Some C libraries
+preset alternate versions for binary compatibility and translate them on the
+way in and out of system calls, but there is almost always a `#define` that can
+get the real ones.
+See `types_darwin.go` and `linux/types.go` for examples.
+
+To add a new type, add in the necessary include statement at the top of the
+file (if it is not already there) and add in a type alias line. Note that if
+your type is significantly different on different architectures, you may need
+some `#if/#elif` macros in your include statements.
+
+### mkerrors.sh
+
+This script is used to generate the system's various constants. This doesn't
+just include the error numbers and error strings, but also the signal numbers
+and a wide variety of miscellaneous constants. The constants come from the list
+of include files in the `includes_${uname}` variable. A regex then picks out
+the desired `#define` statements, and generates the corresponding Go constants.
+The error numbers and strings are generated from `#include `, and the
+signal numbers and strings are generated from `#include `. All of
+these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program,
+`_errors.c`, which prints out all the constants.
+
+To add a constant, add the header that includes it to the appropriate variable.
+Then, edit the regex (if necessary) to match the desired constant. Avoid making
+the regex too broad to avoid matching unintended constants.
+
+### mkmerge.go
+
+This program is used to extract duplicate const, func, and type declarations
+from the generated architecture-specific files listed below, and merge these
+into a common file for each OS.
+
+The merge is performed in the following steps:
+1. Construct the set of common code that is idential in all architecture-specific files.
+2. Write this common code to the merged file.
+3. Remove the common code from all architecture-specific files.
+
+
+## Generated files
+
+### `zerrors_${GOOS}_${GOARCH}.go`
+
+A file containing all of the system's generated error numbers, error strings,
+signal numbers, and constants. Generated by `mkerrors.sh` (see above).
+
+### `zsyscall_${GOOS}_${GOARCH}.go`
+
+A file containing all the generated syscalls for a specific GOOS and GOARCH.
+Generated by `mksyscall.go` (see above).
+
+### `zsysnum_${GOOS}_${GOARCH}.go`
+
+A list of numeric constants for all the syscall number of the specific GOOS
+and GOARCH. Generated by mksysnum (see above).
+
+### `ztypes_${GOOS}_${GOARCH}.go`
+
+A file containing Go types for passing into (or returning from) syscalls.
+Generated by godefs and the types file (see above).
diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go
new file mode 100644
index 0000000..6e5c81a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -0,0 +1,86 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// CPU affinity functions
+
+package unix
+
+import (
+ "math/bits"
+ "unsafe"
+)
+
+const cpuSetSize = _CPU_SETSIZE / _NCPUBITS
+
+// CPUSet represents a CPU affinity mask.
+type CPUSet [cpuSetSize]cpuMask
+
+func schedAffinity(trap uintptr, pid int, set *CPUSet) error {
+ _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set)))
+ if e != 0 {
+ return errnoErr(e)
+ }
+ return nil
+}
+
+// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid.
+// If pid is 0 the calling thread is used.
+func SchedGetaffinity(pid int, set *CPUSet) error {
+ return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set)
+}
+
+// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid.
+// If pid is 0 the calling thread is used.
+func SchedSetaffinity(pid int, set *CPUSet) error {
+ return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set)
+}
+
+// Zero clears the set s, so that it contains no CPUs.
+func (s *CPUSet) Zero() {
+ for i := range s {
+ s[i] = 0
+ }
+}
+
+func cpuBitsIndex(cpu int) int {
+ return cpu / _NCPUBITS
+}
+
+func cpuBitsMask(cpu int) cpuMask {
+ return cpuMask(1 << (uint(cpu) % _NCPUBITS))
+}
+
+// Set adds cpu to the set s.
+func (s *CPUSet) Set(cpu int) {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ s[i] |= cpuBitsMask(cpu)
+ }
+}
+
+// Clear removes cpu from the set s.
+func (s *CPUSet) Clear(cpu int) {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ s[i] &^= cpuBitsMask(cpu)
+ }
+}
+
+// IsSet reports whether cpu is in the set s.
+func (s *CPUSet) IsSet(cpu int) bool {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ return s[i]&cpuBitsMask(cpu) != 0
+ }
+ return false
+}
+
+// Count returns the number of CPUs in the set s.
+func (s *CPUSet) Count() int {
+ c := 0
+ for _, b := range s {
+ c += bits.OnesCount64(uint64(b))
+ }
+ return c
+}
diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go
new file mode 100644
index 0000000..abc89c1
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/aliases.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+// +build go1.9
+
+package unix
+
+import "syscall"
+
+type Signal = syscall.Signal
+type Errno = syscall.Errno
+type SysProcAttr = syscall.SysProcAttr
diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
new file mode 100644
index 0000000..db9171c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
@@ -0,0 +1,18 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
+//
+
+TEXT ·syscall6(SB),NOSPLIT,$0-88
+ JMP syscall·syscall6(SB)
+
+TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
+ JMP syscall·rawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s
new file mode 100644
index 0000000..e0fcd9b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (freebsd || netbsd || openbsd) && gc
+// +build freebsd netbsd openbsd
+// +build gc
+
+#include "textflag.h"
+
+// System call support for 386 BSD
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
new file mode 100644
index 0000000..2b99c34
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc
+// +build darwin dragonfly freebsd netbsd openbsd
+// +build gc
+
+#include "textflag.h"
+
+// System call support for AMD64 BSD
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s
new file mode 100644
index 0000000..d702d4a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (freebsd || netbsd || openbsd) && gc
+// +build freebsd netbsd openbsd
+// +build gc
+
+#include "textflag.h"
+
+// System call support for ARM BSD
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
new file mode 100644
index 0000000..fe36a73
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (darwin || freebsd || netbsd || openbsd) && gc
+// +build darwin freebsd netbsd openbsd
+// +build gc
+
+#include "textflag.h"
+
+// System call support for ARM64 BSD
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s
new file mode 100644
index 0000000..8fd101d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s
@@ -0,0 +1,66 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for 386, Linux
+//
+
+// See ../runtime/sys_linux_386.s for the reason why we always use int 0x80
+// instead of the glibc-specific "CALL 0x10(GS)".
+#define INVOKE_SYSCALL INT $0x80
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ CALL runtime·entersyscall(SB)
+ MOVL trap+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL a2+8(FP), CX
+ MOVL a3+12(FP), DX
+ MOVL $0, SI
+ MOVL $0, DI
+ INVOKE_SYSCALL
+ MOVL AX, r1+16(FP)
+ MOVL DX, r2+20(FP)
+ CALL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVL trap+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL a2+8(FP), CX
+ MOVL a3+12(FP), DX
+ MOVL $0, SI
+ MOVL $0, DI
+ INVOKE_SYSCALL
+ MOVL AX, r1+16(FP)
+ MOVL DX, r2+20(FP)
+ RET
+
+TEXT ·socketcall(SB),NOSPLIT,$0-36
+ JMP syscall·socketcall(SB)
+
+TEXT ·rawsocketcall(SB),NOSPLIT,$0-36
+ JMP syscall·rawsocketcall(SB)
+
+TEXT ·seek(SB),NOSPLIT,$0-28
+ JMP syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
new file mode 100644
index 0000000..7ed38e4
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
@@ -0,0 +1,58 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for AMD64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ CALL runtime·entersyscall(SB)
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ $0, R10
+ MOVQ $0, R8
+ MOVQ $0, R9
+ MOVQ trap+0(FP), AX // syscall entry
+ SYSCALL
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ CALL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ $0, R10
+ MOVQ $0, R8
+ MOVQ $0, R9
+ MOVQ trap+0(FP), AX // syscall entry
+ SYSCALL
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ RET
+
+TEXT ·gettimeofday(SB),NOSPLIT,$0-16
+ JMP syscall·gettimeofday(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
new file mode 100644
index 0000000..8ef1d51
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
@@ -0,0 +1,57 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for arm, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ BL runtime·entersyscall(SB)
+ MOVW trap+0(FP), R7
+ MOVW a1+4(FP), R0
+ MOVW a2+8(FP), R1
+ MOVW a3+12(FP), R2
+ MOVW $0, R3
+ MOVW $0, R4
+ MOVW $0, R5
+ SWI $0
+ MOVW R0, r1+16(FP)
+ MOVW $0, R0
+ MOVW R0, r2+20(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVW trap+0(FP), R7 // syscall entry
+ MOVW a1+4(FP), R0
+ MOVW a2+8(FP), R1
+ MOVW a3+12(FP), R2
+ SWI $0
+ MOVW R0, r1+16(FP)
+ MOVW $0, R0
+ MOVW R0, r2+20(FP)
+ RET
+
+TEXT ·seek(SB),NOSPLIT,$0-28
+ B syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
new file mode 100644
index 0000000..98ae027
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
@@ -0,0 +1,53 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && arm64 && gc
+// +build linux
+// +build arm64
+// +build gc
+
+#include "textflag.h"
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ B syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD $0, R3
+ MOVD $0, R4
+ MOVD $0, R5
+ MOVD trap+0(FP), R8 // syscall entry
+ SVC
+ MOVD R0, r1+32(FP) // r1
+ MOVD R1, r2+40(FP) // r2
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ B syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD $0, R3
+ MOVD $0, R4
+ MOVD $0, R5
+ MOVD trap+0(FP), R8 // syscall entry
+ SVC
+ MOVD R0, r1+32(FP)
+ MOVD R1, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
new file mode 100644
index 0000000..21231d2
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
@@ -0,0 +1,57 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (mips64 || mips64le) && gc
+// +build linux
+// +build mips64 mips64le
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for mips64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ JAL runtime·entersyscall(SB)
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVV R2, r1+32(FP)
+ MOVV R3, r2+40(FP)
+ JAL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVV R2, r1+32(FP)
+ MOVV R3, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
new file mode 100644
index 0000000..6783b26
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
@@ -0,0 +1,55 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (mips || mipsle) && gc
+// +build linux
+// +build mips mipsle
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for mips, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ JAL runtime·entersyscall(SB)
+ MOVW a1+4(FP), R4
+ MOVW a2+8(FP), R5
+ MOVW a3+12(FP), R6
+ MOVW R0, R7
+ MOVW trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVW R2, r1+16(FP) // r1
+ MOVW R3, r2+20(FP) // r2
+ JAL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVW a1+4(FP), R4
+ MOVW a2+8(FP), R5
+ MOVW a3+12(FP), R6
+ MOVW trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVW R2, r1+16(FP)
+ MOVW R3, r2+20(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
new file mode 100644
index 0000000..19d4989
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
@@ -0,0 +1,45 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (ppc64 || ppc64le) && gc
+// +build linux
+// +build ppc64 ppc64le
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for ppc64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R3
+ MOVD a2+16(FP), R4
+ MOVD a3+24(FP), R5
+ MOVD R0, R6
+ MOVD R0, R7
+ MOVD R0, R8
+ MOVD trap+0(FP), R9 // syscall entry
+ SYSCALL R9
+ MOVD R3, r1+32(FP)
+ MOVD R4, r2+40(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R3
+ MOVD a2+16(FP), R4
+ MOVD a3+24(FP), R5
+ MOVD R0, R6
+ MOVD R0, R7
+ MOVD R0, R8
+ MOVD trap+0(FP), R9 // syscall entry
+ SYSCALL R9
+ MOVD R3, r1+32(FP)
+ MOVD R4, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
new file mode 100644
index 0000000..e42eb81
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
@@ -0,0 +1,49 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build riscv64 && gc
+// +build riscv64
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for linux/riscv64.
+//
+// Where available, just jump to package syscall's implementation of
+// these functions.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ CALL runtime·entersyscall(SB)
+ MOV a1+8(FP), A0
+ MOV a2+16(FP), A1
+ MOV a3+24(FP), A2
+ MOV trap+0(FP), A7 // syscall entry
+ ECALL
+ MOV A0, r1+32(FP) // r1
+ MOV A1, r2+40(FP) // r2
+ CALL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOV a1+8(FP), A0
+ MOV a2+16(FP), A1
+ MOV a3+24(FP), A2
+ MOV trap+0(FP), A7 // syscall entry
+ ECALL
+ MOV A0, r1+32(FP)
+ MOV A1, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
new file mode 100644
index 0000000..c46aab3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
@@ -0,0 +1,57 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && s390x && gc
+// +build linux
+// +build s390x
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for s390x, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ BR syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ BR syscall·Syscall6(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD R2, r1+32(FP)
+ MOVD R3, r2+40(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ BR syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ BR syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD R2, r1+32(FP)
+ MOVD R3, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
new file mode 100644
index 0000000..5e7a116
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
@@ -0,0 +1,30 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+#include "textflag.h"
+
+//
+// System call support for mips64, OpenBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
new file mode 100644
index 0000000..f8c5394
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
@@ -0,0 +1,18 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+#include "textflag.h"
+
+//
+// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go
+//
+
+TEXT ·sysvicall6(SB),NOSPLIT,$0-88
+ JMP syscall·sysvicall6(SB)
+
+TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88
+ JMP syscall·rawSysvicall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
new file mode 100644
index 0000000..3b54e18
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
@@ -0,0 +1,426 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x && gc
+// +build zos
+// +build s390x
+// +build gc
+
+#include "textflag.h"
+
+#define PSALAA 1208(R0)
+#define GTAB64(x) 80(x)
+#define LCA64(x) 88(x)
+#define CAA(x) 8(x)
+#define EDCHPXV(x) 1016(x) // in the CAA
+#define SAVSTACK_ASYNC(x) 336(x) // in the LCA
+
+// SS_*, where x=SAVSTACK_ASYNC
+#define SS_LE(x) 0(x)
+#define SS_GO(x) 8(x)
+#define SS_ERRNO(x) 16(x)
+#define SS_ERRNOJR(x) 20(x)
+
+#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6
+
+TEXT ·clearErrno(SB),NOSPLIT,$0-0
+ BL addrerrno<>(SB)
+ MOVD $0, 0(R3)
+ RET
+
+// Returns the address of errno in R3.
+TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get __errno FuncDesc.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ ADD $(0x156*16), R9
+ LMG 0(R9), R5, R6
+
+ // Switch to saved LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Call __errno function.
+ LE_CALL
+ NOPH
+
+ // Switch back to Go stack.
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+ RET
+
+TEXT ·syscall_syscall(SB),NOSPLIT,$0-56
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R1
+ MOVD a2+16(FP), R2
+ MOVD a3+24(FP), R3
+
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get function.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ MOVD trap+0(FP), R5
+ SLD $4, R5
+ ADD R5, R9
+ LMG 0(R9), R5, R6
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Call function.
+ LE_CALL
+ NOPH
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+
+ MOVD R3, r1+32(FP)
+ MOVD R0, r2+40(FP)
+ MOVD R0, err+48(FP)
+ MOVW R3, R4
+ CMP R4, $-1
+ BNE done
+ BL addrerrno<>(SB)
+ MOVWZ 0(R3), R3
+ MOVD R3, err+48(FP)
+done:
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56
+ MOVD a1+8(FP), R1
+ MOVD a2+16(FP), R2
+ MOVD a3+24(FP), R3
+
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get function.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ MOVD trap+0(FP), R5
+ SLD $4, R5
+ ADD R5, R9
+ LMG 0(R9), R5, R6
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Call function.
+ LE_CALL
+ NOPH
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+
+ MOVD R3, r1+32(FP)
+ MOVD R0, r2+40(FP)
+ MOVD R0, err+48(FP)
+ MOVW R3, R4
+ CMP R4, $-1
+ BNE done
+ BL addrerrno<>(SB)
+ MOVWZ 0(R3), R3
+ MOVD R3, err+48(FP)
+done:
+ RET
+
+TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R1
+ MOVD a2+16(FP), R2
+ MOVD a3+24(FP), R3
+
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get function.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ MOVD trap+0(FP), R5
+ SLD $4, R5
+ ADD R5, R9
+ LMG 0(R9), R5, R6
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Fill in parameter list.
+ MOVD a4+32(FP), R12
+ MOVD R12, (2176+24)(R4)
+ MOVD a5+40(FP), R12
+ MOVD R12, (2176+32)(R4)
+ MOVD a6+48(FP), R12
+ MOVD R12, (2176+40)(R4)
+
+ // Call function.
+ LE_CALL
+ NOPH
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+
+ MOVD R3, r1+56(FP)
+ MOVD R0, r2+64(FP)
+ MOVD R0, err+72(FP)
+ MOVW R3, R4
+ CMP R4, $-1
+ BNE done
+ BL addrerrno<>(SB)
+ MOVWZ 0(R3), R3
+ MOVD R3, err+72(FP)
+done:
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80
+ MOVD a1+8(FP), R1
+ MOVD a2+16(FP), R2
+ MOVD a3+24(FP), R3
+
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get function.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ MOVD trap+0(FP), R5
+ SLD $4, R5
+ ADD R5, R9
+ LMG 0(R9), R5, R6
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Fill in parameter list.
+ MOVD a4+32(FP), R12
+ MOVD R12, (2176+24)(R4)
+ MOVD a5+40(FP), R12
+ MOVD R12, (2176+32)(R4)
+ MOVD a6+48(FP), R12
+ MOVD R12, (2176+40)(R4)
+
+ // Call function.
+ LE_CALL
+ NOPH
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+
+ MOVD R3, r1+56(FP)
+ MOVD R0, r2+64(FP)
+ MOVD R0, err+72(FP)
+ MOVW R3, R4
+ CMP R4, $-1
+ BNE done
+ BL addrerrno<>(SB)
+ MOVWZ 0(R3), R3
+ MOVD R3, err+72(FP)
+done:
+ RET
+
+TEXT ·syscall_syscall9(SB),NOSPLIT,$0
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R1
+ MOVD a2+16(FP), R2
+ MOVD a3+24(FP), R3
+
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get function.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ MOVD trap+0(FP), R5
+ SLD $4, R5
+ ADD R5, R9
+ LMG 0(R9), R5, R6
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Fill in parameter list.
+ MOVD a4+32(FP), R12
+ MOVD R12, (2176+24)(R4)
+ MOVD a5+40(FP), R12
+ MOVD R12, (2176+32)(R4)
+ MOVD a6+48(FP), R12
+ MOVD R12, (2176+40)(R4)
+ MOVD a7+56(FP), R12
+ MOVD R12, (2176+48)(R4)
+ MOVD a8+64(FP), R12
+ MOVD R12, (2176+56)(R4)
+ MOVD a9+72(FP), R12
+ MOVD R12, (2176+64)(R4)
+
+ // Call function.
+ LE_CALL
+ NOPH
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+
+ MOVD R3, r1+80(FP)
+ MOVD R0, r2+88(FP)
+ MOVD R0, err+96(FP)
+ MOVW R3, R4
+ CMP R4, $-1
+ BNE done
+ BL addrerrno<>(SB)
+ MOVWZ 0(R3), R3
+ MOVD R3, err+96(FP)
+done:
+ BL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0
+ MOVD a1+8(FP), R1
+ MOVD a2+16(FP), R2
+ MOVD a3+24(FP), R3
+
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get function.
+ MOVD CAA(R8), R9
+ MOVD EDCHPXV(R9), R9
+ MOVD trap+0(FP), R5
+ SLD $4, R5
+ ADD R5, R9
+ LMG 0(R9), R5, R6
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R4
+ MOVD $0, 0(R9)
+
+ // Fill in parameter list.
+ MOVD a4+32(FP), R12
+ MOVD R12, (2176+24)(R4)
+ MOVD a5+40(FP), R12
+ MOVD R12, (2176+32)(R4)
+ MOVD a6+48(FP), R12
+ MOVD R12, (2176+40)(R4)
+ MOVD a7+56(FP), R12
+ MOVD R12, (2176+48)(R4)
+ MOVD a8+64(FP), R12
+ MOVD R12, (2176+56)(R4)
+ MOVD a9+72(FP), R12
+ MOVD R12, (2176+64)(R4)
+
+ // Call function.
+ LE_CALL
+ NOPH
+ XOR R0, R0 // Restore R0 to $0.
+ MOVD R4, 0(R9) // Save stack pointer.
+
+ MOVD R3, r1+80(FP)
+ MOVD R0, r2+88(FP)
+ MOVD R0, err+96(FP)
+ MOVW R3, R4
+ CMP R4, $-1
+ BNE done
+ BL addrerrno<>(SB)
+ MOVWZ 0(R3), R3
+ MOVD R3, err+96(FP)
+done:
+ RET
+
+// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64)
+TEXT ·svcCall(SB),NOSPLIT,$0
+ BL runtime·save_g(SB) // Save g and stack pointer
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD R15, 0(R9)
+
+ MOVD argv+8(FP), R1 // Move function arguments into registers
+ MOVD dsa+16(FP), g
+ MOVD fnptr+0(FP), R15
+
+ BYTE $0x0D // Branch to function
+ BYTE $0xEF
+
+ BL runtime·load_g(SB) // Restore g and stack pointer
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+ MOVD SAVSTACK_ASYNC(R8), R9
+ MOVD 0(R9), R15
+
+ RET
+
+// func svcLoad(name *byte) unsafe.Pointer
+TEXT ·svcLoad(SB),NOSPLIT,$0
+ MOVD R15, R2 // Save go stack pointer
+ MOVD name+0(FP), R0 // Move SVC args into registers
+ MOVD $0x80000000, R1
+ MOVD $0, R15
+ BYTE $0x0A // SVC 08 LOAD
+ BYTE $0x08
+ MOVW R15, R3 // Save return code from SVC
+ MOVD R2, R15 // Restore go stack pointer
+ CMP R3, $0 // Check SVC return code
+ BNE error
+
+ MOVD $-2, R3 // Reset last bit of entry point to zero
+ AND R0, R3
+ MOVD R3, addr+8(FP) // Return entry point returned by SVC
+ CMP R0, R3 // Check if last bit of entry point was set
+ BNE done
+
+ MOVD R15, R2 // Save go stack pointer
+ MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08)
+ BYTE $0x0A // SVC 09 DELETE
+ BYTE $0x09
+ MOVD R2, R15 // Restore go stack pointer
+
+error:
+ MOVD $0, addr+8(FP) // Return 0 on failure
+done:
+ XOR R0, R0 // Reset r0 to 0
+ RET
+
+// func svcUnload(name *byte, fnptr unsafe.Pointer) int64
+TEXT ·svcUnload(SB),NOSPLIT,$0
+ MOVD R15, R2 // Save go stack pointer
+ MOVD name+0(FP), R0 // Move SVC args into registers
+ MOVD addr+8(FP), R15
+ BYTE $0x0A // SVC 09
+ BYTE $0x09
+ XOR R0, R0 // Reset r0 to 0
+ MOVD R15, R1 // Save SVC return code
+ MOVD R2, R15 // Restore go stack pointer
+ MOVD R1, rc+0(FP) // Return SVC return code
+ RET
+
+// func gettid() uint64
+TEXT ·gettid(SB), NOSPLIT, $0
+ // Get library control area (LCA).
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+
+ // Get CEECAATHDID
+ MOVD CAA(R8), R9
+ MOVD 0x3D0(R9), R9
+ MOVD R9, ret+0(FP)
+
+ RET
diff --git a/vendor/golang.org/x/sys/unix/bluetooth_linux.go b/vendor/golang.org/x/sys/unix/bluetooth_linux.go
new file mode 100644
index 0000000..a178a61
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/bluetooth_linux.go
@@ -0,0 +1,36 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Bluetooth sockets and messages
+
+package unix
+
+// Bluetooth Protocols
+const (
+ BTPROTO_L2CAP = 0
+ BTPROTO_HCI = 1
+ BTPROTO_SCO = 2
+ BTPROTO_RFCOMM = 3
+ BTPROTO_BNEP = 4
+ BTPROTO_CMTP = 5
+ BTPROTO_HIDP = 6
+ BTPROTO_AVDTP = 7
+)
+
+const (
+ HCI_CHANNEL_RAW = 0
+ HCI_CHANNEL_USER = 1
+ HCI_CHANNEL_MONITOR = 2
+ HCI_CHANNEL_CONTROL = 3
+ HCI_CHANNEL_LOGGING = 4
+)
+
+// Socketoption Level
+const (
+ SOL_BLUETOOTH = 0x112
+ SOL_HCI = 0x0
+ SOL_L2CAP = 0x6
+ SOL_RFCOMM = 0x12
+ SOL_SCO = 0x11
+)
diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go
new file mode 100644
index 0000000..0b7c6ad
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go
@@ -0,0 +1,196 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd
+// +build freebsd
+
+package unix
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Go implementation of C mostly found in /usr/src/sys/kern/subr_capability.c
+
+const (
+ // This is the version of CapRights this package understands. See C implementation for parallels.
+ capRightsGoVersion = CAP_RIGHTS_VERSION_00
+ capArSizeMin = CAP_RIGHTS_VERSION_00 + 2
+ capArSizeMax = capRightsGoVersion + 2
+)
+
+var (
+ bit2idx = []int{
+ -1, 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1,
+ 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ }
+)
+
+func capidxbit(right uint64) int {
+ return int((right >> 57) & 0x1f)
+}
+
+func rightToIndex(right uint64) (int, error) {
+ idx := capidxbit(right)
+ if idx < 0 || idx >= len(bit2idx) {
+ return -2, fmt.Errorf("index for right 0x%x out of range", right)
+ }
+ return bit2idx[idx], nil
+}
+
+func caprver(right uint64) int {
+ return int(right >> 62)
+}
+
+func capver(rights *CapRights) int {
+ return caprver(rights.Rights[0])
+}
+
+func caparsize(rights *CapRights) int {
+ return capver(rights) + 2
+}
+
+// CapRightsSet sets the permissions in setrights in rights.
+func CapRightsSet(rights *CapRights, setrights []uint64) error {
+ // This is essentially a copy of cap_rights_vset()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return errors.New("bad rights size")
+ }
+
+ for _, right := range setrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return errors.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return err
+ }
+ if i >= n {
+ return errors.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch")
+ }
+ rights.Rights[i] |= right
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch (after assign)")
+ }
+ }
+
+ return nil
+}
+
+// CapRightsClear clears the permissions in clearrights from rights.
+func CapRightsClear(rights *CapRights, clearrights []uint64) error {
+ // This is essentially a copy of cap_rights_vclear()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return errors.New("bad rights size")
+ }
+
+ for _, right := range clearrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return errors.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return err
+ }
+ if i >= n {
+ return errors.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch")
+ }
+ rights.Rights[i] &= ^(right & 0x01FFFFFFFFFFFFFF)
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errors.New("index mismatch (after assign)")
+ }
+ }
+
+ return nil
+}
+
+// CapRightsIsSet checks whether all the permissions in setrights are present in rights.
+func CapRightsIsSet(rights *CapRights, setrights []uint64) (bool, error) {
+ // This is essentially a copy of cap_rights_is_vset()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return false, fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return false, errors.New("bad rights size")
+ }
+
+ for _, right := range setrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return false, errors.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return false, err
+ }
+ if i >= n {
+ return false, errors.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return false, errors.New("index mismatch")
+ }
+ if (rights.Rights[i] & right) != right {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func capright(idx uint64, bit uint64) uint64 {
+ return ((1 << (57 + idx)) | bit)
+}
+
+// CapRightsInit returns a pointer to an initialised CapRights structure filled with rights.
+// See man cap_rights_init(3) and rights(4).
+func CapRightsInit(rights []uint64) (*CapRights, error) {
+ var r CapRights
+ r.Rights[0] = (capRightsGoVersion << 62) | capright(0, 0)
+ r.Rights[1] = capright(1, 0)
+
+ err := CapRightsSet(&r, rights)
+ if err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
+
+// CapRightsLimit reduces the operations permitted on fd to at most those contained in rights.
+// The capability rights on fd can never be increased by CapRightsLimit.
+// See man cap_rights_limit(2) and rights(4).
+func CapRightsLimit(fd uintptr, rights *CapRights) error {
+ return capRightsLimit(int(fd), rights)
+}
+
+// CapRightsGet returns a CapRights structure containing the operations permitted on fd.
+// See man cap_rights_get(3) and rights(4).
+func CapRightsGet(fd uintptr) (*CapRights, error) {
+ r, err := CapRightsInit(nil)
+ if err != nil {
+ return nil, err
+ }
+ err = capRightsGet(capRightsGoVersion, int(fd), r)
+ if err != nil {
+ return nil, err
+ }
+ return r, nil
+}
diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go
new file mode 100644
index 0000000..394a396
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/constants.go
@@ -0,0 +1,14 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+package unix
+
+const (
+ R_OK = 0x4
+ W_OK = 0x2
+ X_OK = 0x1
+)
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
new file mode 100644
index 0000000..65a9985
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix && ppc
+// +build aix,ppc
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by AIX.
+
+package unix
+
+// Major returns the major component of a Linux device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 16) & 0xffff)
+}
+
+// Minor returns the minor component of a Linux device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff)
+}
+
+// Mkdev returns a Linux device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ return uint64(((major) << 16) | (minor))
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
new file mode 100644
index 0000000..8fc08ad
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix && ppc64
+// +build aix,ppc64
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used AIX.
+
+package unix
+
+// Major returns the major component of a Linux device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x3fffffff00000000) >> 32)
+}
+
+// Minor returns the minor component of a Linux device number.
+func Minor(dev uint64) uint32 {
+ return uint32((dev & 0x00000000ffffffff) >> 0)
+}
+
+// Mkdev returns a Linux device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ var DEVNO64 uint64
+ DEVNO64 = 0x8000000000000000
+ return ((uint64(major) << 32) | (uint64(minor) & 0x00000000FFFFFFFF) | DEVNO64)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_darwin.go b/vendor/golang.org/x/sys/unix/dev_darwin.go
new file mode 100644
index 0000000..8d1dc0f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_darwin.go
@@ -0,0 +1,24 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in Darwin's sys/types.h header.
+
+package unix
+
+// Major returns the major component of a Darwin device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 24) & 0xff)
+}
+
+// Minor returns the minor component of a Darwin device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffffff)
+}
+
+// Mkdev returns a Darwin device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 24) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_dragonfly.go b/vendor/golang.org/x/sys/unix/dev_dragonfly.go
new file mode 100644
index 0000000..8502f20
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_dragonfly.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in Dragonfly's sys/types.h header.
+//
+// The information below is extracted and adapted from sys/types.h:
+//
+// Minor gives a cookie instead of an index since in order to avoid changing the
+// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
+// devices that don't use them.
+
+package unix
+
+// Major returns the major component of a DragonFlyBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 8) & 0xff)
+}
+
+// Minor returns the minor component of a DragonFlyBSD device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff00ff)
+}
+
+// Mkdev returns a DragonFlyBSD device number generated from the given major and
+// minor components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 8) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_freebsd.go b/vendor/golang.org/x/sys/unix/dev_freebsd.go
new file mode 100644
index 0000000..eba3b4b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_freebsd.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in FreeBSD's sys/types.h header.
+//
+// The information below is extracted and adapted from sys/types.h:
+//
+// Minor gives a cookie instead of an index since in order to avoid changing the
+// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
+// devices that don't use them.
+
+package unix
+
+// Major returns the major component of a FreeBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 8) & 0xff)
+}
+
+// Minor returns the minor component of a FreeBSD device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff00ff)
+}
+
+// Mkdev returns a FreeBSD device number generated from the given major and
+// minor components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 8) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_linux.go b/vendor/golang.org/x/sys/unix/dev_linux.go
new file mode 100644
index 0000000..d165d6f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_linux.go
@@ -0,0 +1,42 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by the Linux kernel and glibc.
+//
+// The information below is extracted and adapted from bits/sysmacros.h in the
+// glibc sources:
+//
+// dev_t in glibc is 64-bit, with 32-bit major and minor numbers. glibc's
+// default encoding is MMMM Mmmm mmmM MMmm, where M is a hex digit of the major
+// number and m is a hex digit of the minor number. This is backward compatible
+// with legacy systems where dev_t is 16 bits wide, encoded as MMmm. It is also
+// backward compatible with the Linux kernel, which for some architectures uses
+// 32-bit dev_t, encoded as mmmM MMmm.
+
+package unix
+
+// Major returns the major component of a Linux device number.
+func Major(dev uint64) uint32 {
+ major := uint32((dev & 0x00000000000fff00) >> 8)
+ major |= uint32((dev & 0xfffff00000000000) >> 32)
+ return major
+}
+
+// Minor returns the minor component of a Linux device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x00000000000000ff) >> 0)
+ minor |= uint32((dev & 0x00000ffffff00000) >> 12)
+ return minor
+}
+
+// Mkdev returns a Linux device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) & 0x00000fff) << 8
+ dev |= (uint64(major) & 0xfffff000) << 32
+ dev |= (uint64(minor) & 0x000000ff) << 0
+ dev |= (uint64(minor) & 0xffffff00) << 12
+ return dev
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_netbsd.go b/vendor/golang.org/x/sys/unix/dev_netbsd.go
new file mode 100644
index 0000000..b4a203d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_netbsd.go
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in NetBSD's sys/types.h header.
+
+package unix
+
+// Major returns the major component of a NetBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x000fff00) >> 8)
+}
+
+// Minor returns the minor component of a NetBSD device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x000000ff) >> 0)
+ minor |= uint32((dev & 0xfff00000) >> 12)
+ return minor
+}
+
+// Mkdev returns a NetBSD device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) << 8) & 0x000fff00
+ dev |= (uint64(minor) << 12) & 0xfff00000
+ dev |= (uint64(minor) << 0) & 0x000000ff
+ return dev
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_openbsd.go b/vendor/golang.org/x/sys/unix/dev_openbsd.go
new file mode 100644
index 0000000..f3430c4
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_openbsd.go
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in OpenBSD's sys/types.h header.
+
+package unix
+
+// Major returns the major component of an OpenBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x0000ff00) >> 8)
+}
+
+// Minor returns the minor component of an OpenBSD device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x000000ff) >> 0)
+ minor |= uint32((dev & 0xffff0000) >> 8)
+ return minor
+}
+
+// Mkdev returns an OpenBSD device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) << 8) & 0x0000ff00
+ dev |= (uint64(minor) << 8) & 0xffff0000
+ dev |= (uint64(minor) << 0) & 0x000000ff
+ return dev
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go
new file mode 100644
index 0000000..a388e59
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_zos.go
@@ -0,0 +1,29 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x
+// +build zos,s390x
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by z/OS.
+//
+// The information below is extracted and adapted from macros.
+
+package unix
+
+// Major returns the major component of a z/OS device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 16) & 0x0000FFFF)
+}
+
+// Minor returns the minor component of a z/OS device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0x0000FFFF)
+}
+
+// Mkdev returns a z/OS device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 16) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go
new file mode 100644
index 0000000..e74e5ea
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dirent.go
@@ -0,0 +1,103 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+import "unsafe"
+
+// readInt returns the size-bytes unsigned integer in native byte order at offset off.
+func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
+ if len(b) < int(off+size) {
+ return 0, false
+ }
+ if isBigEndian {
+ return readIntBE(b[off:], size), true
+ }
+ return readIntLE(b[off:], size), true
+}
+
+func readIntBE(b []byte, size uintptr) uint64 {
+ switch size {
+ case 1:
+ return uint64(b[0])
+ case 2:
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[1]) | uint64(b[0])<<8
+ case 4:
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
+ case 8:
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+ default:
+ panic("syscall: readInt with unsupported size")
+ }
+}
+
+func readIntLE(b []byte, size uintptr) uint64 {
+ switch size {
+ case 1:
+ return uint64(b[0])
+ case 2:
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8
+ case 4:
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
+ case 8:
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ default:
+ panic("syscall: readInt with unsupported size")
+ }
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ count = 0
+ for max != 0 && len(buf) > 0 {
+ reclen, ok := direntReclen(buf)
+ if !ok || reclen > uint64(len(buf)) {
+ return origlen, count, names
+ }
+ rec := buf[:reclen]
+ buf = buf[reclen:]
+ ino, ok := direntIno(rec)
+ if !ok {
+ break
+ }
+ if ino == 0 { // File absent in directory.
+ continue
+ }
+ const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
+ namlen, ok := direntNamlen(rec)
+ if !ok || namoff+namlen > uint64(len(rec)) {
+ break
+ }
+ name := rec[namoff : namoff+namlen]
+ for i, c := range name {
+ if c == 0 {
+ name = name[:i]
+ break
+ }
+ }
+ // Check for useless names before allocating a string.
+ if string(name) == "." || string(name) == ".." {
+ continue
+ }
+ max--
+ count++
+ names = append(names, string(name))
+ }
+ return origlen - len(buf), count, names
+}
diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go
new file mode 100644
index 0000000..a520265
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/endian_big.go
@@ -0,0 +1,10 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64
+// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64
+
+package unix
+
+const isBigEndian = true
diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go
new file mode 100644
index 0000000..4362f47
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/endian_little.go
@@ -0,0 +1,10 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
+// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh
+
+package unix
+
+const isBigEndian = false
diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go
new file mode 100644
index 0000000..29ccc4d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/env_unix.go
@@ -0,0 +1,32 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+// Unix environment variables.
+
+package unix
+
+import "syscall"
+
+func Getenv(key string) (value string, found bool) {
+ return syscall.Getenv(key)
+}
+
+func Setenv(key, value string) error {
+ return syscall.Setenv(key, value)
+}
+
+func Clearenv() {
+ syscall.Clearenv()
+}
+
+func Environ() []string {
+ return syscall.Environ()
+}
+
+func Unsetenv(key string) error {
+ return syscall.Unsetenv(key)
+}
diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go
new file mode 100644
index 0000000..cedaf7e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/epoll_zos.go
@@ -0,0 +1,221 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x
+// +build zos,s390x
+
+package unix
+
+import (
+ "sync"
+)
+
+// This file simulates epoll on z/OS using poll.
+
+// Analogous to epoll_event on Linux.
+// TODO(neeilan): Pad is because the Linux kernel expects a 96-bit struct. We never pass this to the kernel; remove?
+type EpollEvent struct {
+ Events uint32
+ Fd int32
+ Pad int32
+}
+
+const (
+ EPOLLERR = 0x8
+ EPOLLHUP = 0x10
+ EPOLLIN = 0x1
+ EPOLLMSG = 0x400
+ EPOLLOUT = 0x4
+ EPOLLPRI = 0x2
+ EPOLLRDBAND = 0x80
+ EPOLLRDNORM = 0x40
+ EPOLLWRBAND = 0x200
+ EPOLLWRNORM = 0x100
+ EPOLL_CTL_ADD = 0x1
+ EPOLL_CTL_DEL = 0x2
+ EPOLL_CTL_MOD = 0x3
+ // The following constants are part of the epoll API, but represent
+ // currently unsupported functionality on z/OS.
+ // EPOLL_CLOEXEC = 0x80000
+ // EPOLLET = 0x80000000
+ // EPOLLONESHOT = 0x40000000
+ // EPOLLRDHUP = 0x2000 // Typically used with edge-triggered notis
+ // EPOLLEXCLUSIVE = 0x10000000 // Exclusive wake-up mode
+ // EPOLLWAKEUP = 0x20000000 // Relies on Linux's BLOCK_SUSPEND capability
+)
+
+// TODO(neeilan): We can eliminate these epToPoll / pToEpoll calls by using identical mask values for POLL/EPOLL
+// constants where possible The lower 16 bits of epoll events (uint32) can fit any system poll event (int16).
+
+// epToPollEvt converts epoll event field to poll equivalent.
+// In epoll, Events is a 32-bit field, while poll uses 16 bits.
+func epToPollEvt(events uint32) int16 {
+ var ep2p = map[uint32]int16{
+ EPOLLIN: POLLIN,
+ EPOLLOUT: POLLOUT,
+ EPOLLHUP: POLLHUP,
+ EPOLLPRI: POLLPRI,
+ EPOLLERR: POLLERR,
+ }
+
+ var pollEvts int16 = 0
+ for epEvt, pEvt := range ep2p {
+ if (events & epEvt) != 0 {
+ pollEvts |= pEvt
+ }
+ }
+
+ return pollEvts
+}
+
+// pToEpollEvt converts 16 bit poll event bitfields to 32-bit epoll event fields.
+func pToEpollEvt(revents int16) uint32 {
+ var p2ep = map[int16]uint32{
+ POLLIN: EPOLLIN,
+ POLLOUT: EPOLLOUT,
+ POLLHUP: EPOLLHUP,
+ POLLPRI: EPOLLPRI,
+ POLLERR: EPOLLERR,
+ }
+
+ var epollEvts uint32 = 0
+ for pEvt, epEvt := range p2ep {
+ if (revents & pEvt) != 0 {
+ epollEvts |= epEvt
+ }
+ }
+
+ return epollEvts
+}
+
+// Per-process epoll implementation.
+type epollImpl struct {
+ mu sync.Mutex
+ epfd2ep map[int]*eventPoll
+ nextEpfd int
+}
+
+// eventPoll holds a set of file descriptors being watched by the process. A process can have multiple epoll instances.
+// On Linux, this is an in-kernel data structure accessed through a fd.
+type eventPoll struct {
+ mu sync.Mutex
+ fds map[int]*EpollEvent
+}
+
+// epoll impl for this process.
+var impl epollImpl = epollImpl{
+ epfd2ep: make(map[int]*eventPoll),
+ nextEpfd: 0,
+}
+
+func (e *epollImpl) epollcreate(size int) (epfd int, err error) {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ epfd = e.nextEpfd
+ e.nextEpfd++
+
+ e.epfd2ep[epfd] = &eventPoll{
+ fds: make(map[int]*EpollEvent),
+ }
+ return epfd, nil
+}
+
+func (e *epollImpl) epollcreate1(flag int) (fd int, err error) {
+ return e.epollcreate(4)
+}
+
+func (e *epollImpl) epollctl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ ep, ok := e.epfd2ep[epfd]
+ if !ok {
+
+ return EBADF
+ }
+
+ switch op {
+ case EPOLL_CTL_ADD:
+ // TODO(neeilan): When we make epfds and fds disjoint, detect epoll
+ // loops here (instances watching each other) and return ELOOP.
+ if _, ok := ep.fds[fd]; ok {
+ return EEXIST
+ }
+ ep.fds[fd] = event
+ case EPOLL_CTL_MOD:
+ if _, ok := ep.fds[fd]; !ok {
+ return ENOENT
+ }
+ ep.fds[fd] = event
+ case EPOLL_CTL_DEL:
+ if _, ok := ep.fds[fd]; !ok {
+ return ENOENT
+ }
+ delete(ep.fds, fd)
+
+ }
+ return nil
+}
+
+// Must be called while holding ep.mu
+func (ep *eventPoll) getFds() []int {
+ fds := make([]int, len(ep.fds))
+ for fd := range ep.fds {
+ fds = append(fds, fd)
+ }
+ return fds
+}
+
+func (e *epollImpl) epollwait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+ e.mu.Lock() // in [rare] case of concurrent epollcreate + epollwait
+ ep, ok := e.epfd2ep[epfd]
+
+ if !ok {
+ e.mu.Unlock()
+ return 0, EBADF
+ }
+
+ pollfds := make([]PollFd, 4)
+ for fd, epollevt := range ep.fds {
+ pollfds = append(pollfds, PollFd{Fd: int32(fd), Events: epToPollEvt(epollevt.Events)})
+ }
+ e.mu.Unlock()
+
+ n, err = Poll(pollfds, msec)
+ if err != nil {
+ return n, err
+ }
+
+ i := 0
+ for _, pFd := range pollfds {
+ if pFd.Revents != 0 {
+ events[i] = EpollEvent{Fd: pFd.Fd, Events: pToEpollEvt(pFd.Revents)}
+ i++
+ }
+
+ if i == n {
+ break
+ }
+ }
+
+ return n, nil
+}
+
+func EpollCreate(size int) (fd int, err error) {
+ return impl.epollcreate(size)
+}
+
+func EpollCreate1(flag int) (fd int, err error) {
+ return impl.epollcreate1(flag)
+}
+
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+ return impl.epollctl(epfd, op, fd, event)
+}
+
+// Because EpollWait mutates events, the caller is expected to coordinate
+// concurrent access if calling with the same epfd from multiple goroutines.
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+ return impl.epollwait(epfd, events, msec)
+}
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go
new file mode 100644
index 0000000..761db66
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go
@@ -0,0 +1,233 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep
+// them here for backwards compatibility.
+
+package unix
+
+const (
+ DLT_HHDLC = 0x79
+ IFF_SMART = 0x20
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BSC = 0x53
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ENC = 0xf4
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAITH = 0xf2
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_IPXIP = 0xf9
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf6
+ IFT_PFSYNC = 0xf7
+ IFT_PLC = 0xae
+ IFT_POS = 0xab
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf1
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_STF = 0xd7
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VOICEEM = 0x64
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+ IPPROTO_MAXID = 0x34
+ IPV6_FAITH = 0x1d
+ IPV6_MIN_MEMBERSHIPS = 0x1f
+ IP_FAITH = 0x16
+ IP_MAX_SOURCE_FILTER = 0x400
+ IP_MIN_MEMBERSHIPS = 0x1f
+ MAP_NORESERVE = 0x40
+ MAP_RENAME = 0x20
+ NET_RT_MAXID = 0x6
+ RTF_PRCLONING = 0x10000
+ RTM_OLDADD = 0x9
+ RTM_OLDDEL = 0xa
+ RT_CACHING_CONTEXT = 0x1
+ RT_NORTREF = 0x2
+ SIOCADDRT = 0x8030720a
+ SIOCALIFADDR = 0x8118691b
+ SIOCDELRT = 0x8030720b
+ SIOCDLIFADDR = 0x8118691d
+ SIOCGLIFADDR = 0xc118691c
+ SIOCGLIFPHYADDR = 0xc118694b
+ SIOCSLIFPHYADDR = 0x8118694a
+)
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
new file mode 100644
index 0000000..070f44b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
@@ -0,0 +1,233 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep
+// them here for backwards compatibility.
+
+package unix
+
+const (
+ DLT_HHDLC = 0x79
+ IFF_SMART = 0x20
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BSC = 0x53
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ENC = 0xf4
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAITH = 0xf2
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_IPXIP = 0xf9
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf6
+ IFT_PFSYNC = 0xf7
+ IFT_PLC = 0xae
+ IFT_POS = 0xab
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf1
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_STF = 0xd7
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VOICEEM = 0x64
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+ IPPROTO_MAXID = 0x34
+ IPV6_FAITH = 0x1d
+ IPV6_MIN_MEMBERSHIPS = 0x1f
+ IP_FAITH = 0x16
+ IP_MAX_SOURCE_FILTER = 0x400
+ IP_MIN_MEMBERSHIPS = 0x1f
+ MAP_NORESERVE = 0x40
+ MAP_RENAME = 0x20
+ NET_RT_MAXID = 0x6
+ RTF_PRCLONING = 0x10000
+ RTM_OLDADD = 0x9
+ RTM_OLDDEL = 0xa
+ RT_CACHING_CONTEXT = 0x1
+ RT_NORTREF = 0x2
+ SIOCADDRT = 0x8040720a
+ SIOCALIFADDR = 0x8118691b
+ SIOCDELRT = 0x8040720b
+ SIOCDLIFADDR = 0x8118691d
+ SIOCGLIFADDR = 0xc118691c
+ SIOCGLIFPHYADDR = 0xc118694b
+ SIOCSLIFPHYADDR = 0x8118694a
+)
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
new file mode 100644
index 0000000..856dca3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
@@ -0,0 +1,226 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+const (
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BSC = 0x53
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ENC = 0xf4
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf6
+ IFT_PFSYNC = 0xf7
+ IFT_PLC = 0xae
+ IFT_POS = 0xab
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf1
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_STF = 0xd7
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VOICEEM = 0x64
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+
+ // missing constants on FreeBSD-11.1-RELEASE, copied from old values in ztypes_freebsd_arm.go
+ IFF_SMART = 0x20
+ IFT_FAITH = 0xf2
+ IFT_IPXIP = 0xf9
+ IPPROTO_MAXID = 0x34
+ IPV6_FAITH = 0x1d
+ IP_FAITH = 0x16
+ MAP_NORESERVE = 0x40
+ MAP_RENAME = 0x20
+ NET_RT_MAXID = 0x6
+ RTF_PRCLONING = 0x10000
+ RTM_OLDADD = 0x9
+ RTM_OLDDEL = 0xa
+ SIOCADDRT = 0x8030720a
+ SIOCALIFADDR = 0x8118691b
+ SIOCDELRT = 0x8030720b
+ SIOCDLIFADDR = 0x8118691d
+ SIOCGLIFADDR = 0xc118691c
+ SIOCGLIFPHYADDR = 0xc118694b
+ SIOCSLIFPHYADDR = 0x8118694a
+)
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go
new file mode 100644
index 0000000..946dcf3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep
+// them here for backwards compatibility.
+
+package unix
+
+const (
+ DLT_HHDLC = 0x79
+ IPV6_MIN_MEMBERSHIPS = 0x1f
+ IP_MAX_SOURCE_FILTER = 0x400
+ IP_MIN_MEMBERSHIPS = 0x1f
+ RT_CACHING_CONTEXT = 0x1
+ RT_NORTREF = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go
new file mode 100644
index 0000000..e9b9912
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fcntl.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build dragonfly || freebsd || linux || netbsd || openbsd
+// +build dragonfly freebsd linux netbsd openbsd
+
+package unix
+
+import "unsafe"
+
+// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux
+// systems by fcntl_linux_32bit.go to be SYS_FCNTL64.
+var fcntl64Syscall uintptr = SYS_FCNTL
+
+func fcntl(fd int, cmd, arg int) (int, error) {
+ valptr, _, errno := Syscall(fcntl64Syscall, uintptr(fd), uintptr(cmd), uintptr(arg))
+ var err error
+ if errno != 0 {
+ err = errno
+ }
+ return int(valptr), err
+}
+
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return fcntl(int(fd), cmd, arg)
+}
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+ _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
+ if errno == 0 {
+ return nil
+ }
+ return errno
+}
diff --git a/vendor/golang.org/x/sys/unix/fcntl_darwin.go b/vendor/golang.org/x/sys/unix/fcntl_darwin.go
new file mode 100644
index 0000000..a9911c7
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fcntl_darwin.go
@@ -0,0 +1,24 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "unsafe"
+
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return fcntl(int(fd), cmd, arg)
+}
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+ _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(lk))))
+ return err
+}
+
+// FcntlFstore performs a fcntl syscall for the F_PREALLOCATE command.
+func FcntlFstore(fd uintptr, cmd int, fstore *Fstore_t) error {
+ _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(fstore))))
+ return err
+}
diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
new file mode 100644
index 0000000..29d4480
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
@@ -0,0 +1,14 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc)
+// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc
+
+package unix
+
+func init() {
+ // On 32-bit Linux systems, the fcntl syscall that matches Go's
+ // Flock_t type is SYS_FCNTL64, not SYS_FCNTL.
+ fcntl64Syscall = SYS_FCNTL64
+}
diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go
new file mode 100644
index 0000000..a8068f9
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fdset.go
@@ -0,0 +1,30 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+package unix
+
+// Set adds fd to the set fds.
+func (fds *FdSet) Set(fd int) {
+ fds.Bits[fd/NFDBITS] |= (1 << (uintptr(fd) % NFDBITS))
+}
+
+// Clear removes fd from the set fds.
+func (fds *FdSet) Clear(fd int) {
+ fds.Bits[fd/NFDBITS] &^= (1 << (uintptr(fd) % NFDBITS))
+}
+
+// IsSet returns whether fd is in the set fds.
+func (fds *FdSet) IsSet(fd int) bool {
+ return fds.Bits[fd/NFDBITS]&(1<<(uintptr(fd)%NFDBITS)) != 0
+}
+
+// Zero clears the set fds.
+func (fds *FdSet) Zero() {
+ for i := range fds.Bits {
+ fds.Bits[i] = 0
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go
new file mode 100644
index 0000000..e377cc9
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/fstatfs_zos.go
@@ -0,0 +1,164 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x
+// +build zos,s390x
+
+package unix
+
+import (
+ "unsafe"
+)
+
+// This file simulates fstatfs on z/OS using fstatvfs and w_getmntent.
+
+func Fstatfs(fd int, stat *Statfs_t) (err error) {
+ var stat_v Statvfs_t
+ err = Fstatvfs(fd, &stat_v)
+ if err == nil {
+ // populate stat
+ stat.Type = 0
+ stat.Bsize = stat_v.Bsize
+ stat.Blocks = stat_v.Blocks
+ stat.Bfree = stat_v.Bfree
+ stat.Bavail = stat_v.Bavail
+ stat.Files = stat_v.Files
+ stat.Ffree = stat_v.Ffree
+ stat.Fsid = stat_v.Fsid
+ stat.Namelen = stat_v.Namemax
+ stat.Frsize = stat_v.Frsize
+ stat.Flags = stat_v.Flag
+ for passn := 0; passn < 5; passn++ {
+ switch passn {
+ case 0:
+ err = tryGetmntent64(stat)
+ break
+ case 1:
+ err = tryGetmntent128(stat)
+ break
+ case 2:
+ err = tryGetmntent256(stat)
+ break
+ case 3:
+ err = tryGetmntent512(stat)
+ break
+ case 4:
+ err = tryGetmntent1024(stat)
+ break
+ default:
+ break
+ }
+ //proceed to return if: err is nil (found), err is nonnil but not ERANGE (another error occurred)
+ if err == nil || err != nil && err != ERANGE {
+ break
+ }
+ }
+ }
+ return err
+}
+
+func tryGetmntent64(stat *Statfs_t) (err error) {
+ var mnt_ent_buffer struct {
+ header W_Mnth
+ filesys_info [64]W_Mntent
+ }
+ var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
+ fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
+ if err != nil {
+ return err
+ }
+ err = ERANGE //return ERANGE if no match is found in this batch
+ for i := 0; i < fs_count; i++ {
+ if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
+ stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
+ err = nil
+ break
+ }
+ }
+ return err
+}
+
+func tryGetmntent128(stat *Statfs_t) (err error) {
+ var mnt_ent_buffer struct {
+ header W_Mnth
+ filesys_info [128]W_Mntent
+ }
+ var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
+ fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
+ if err != nil {
+ return err
+ }
+ err = ERANGE //return ERANGE if no match is found in this batch
+ for i := 0; i < fs_count; i++ {
+ if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
+ stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
+ err = nil
+ break
+ }
+ }
+ return err
+}
+
+func tryGetmntent256(stat *Statfs_t) (err error) {
+ var mnt_ent_buffer struct {
+ header W_Mnth
+ filesys_info [256]W_Mntent
+ }
+ var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
+ fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
+ if err != nil {
+ return err
+ }
+ err = ERANGE //return ERANGE if no match is found in this batch
+ for i := 0; i < fs_count; i++ {
+ if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
+ stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
+ err = nil
+ break
+ }
+ }
+ return err
+}
+
+func tryGetmntent512(stat *Statfs_t) (err error) {
+ var mnt_ent_buffer struct {
+ header W_Mnth
+ filesys_info [512]W_Mntent
+ }
+ var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
+ fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
+ if err != nil {
+ return err
+ }
+ err = ERANGE //return ERANGE if no match is found in this batch
+ for i := 0; i < fs_count; i++ {
+ if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
+ stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
+ err = nil
+ break
+ }
+ }
+ return err
+}
+
+func tryGetmntent1024(stat *Statfs_t) (err error) {
+ var mnt_ent_buffer struct {
+ header W_Mnth
+ filesys_info [1024]W_Mntent
+ }
+ var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer))
+ fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size)
+ if err != nil {
+ return err
+ }
+ err = ERANGE //return ERANGE if no match is found in this batch
+ for i := 0; i < fs_count; i++ {
+ if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) {
+ stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0])
+ err = nil
+ break
+ }
+ }
+ return err
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go
new file mode 100644
index 0000000..0dee232
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo && !aix
+// +build gccgo,!aix
+
+package unix
+
+import "syscall"
+
+// We can't use the gc-syntax .s files for gccgo. On the plus side
+// much of the functionality can be written directly in Go.
+
+func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr)
+
+func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)
+
+func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
+ syscall.Entersyscall()
+ r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0
+}
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ syscall.Entersyscall()
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9)
+ syscall.Exitsyscall()
+ return r, 0, syscall.Errno(errno)
+}
+
+func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
+ r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ return r, 0
+}
+
+func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ return r, 0, syscall.Errno(errno)
+}
+
+func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
+ return r, 0, syscall.Errno(errno)
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c
new file mode 100644
index 0000000..2cb1fef
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo_c.c
@@ -0,0 +1,45 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+// +build !aix
+
+#include
+#include
+#include
+
+#define _STRINGIFY2_(x) #x
+#define _STRINGIFY_(x) _STRINGIFY2_(x)
+#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
+
+// Call syscall from C code because the gccgo support for calling from
+// Go to C does not support varargs functions.
+
+struct ret {
+ uintptr_t r;
+ uintptr_t err;
+};
+
+struct ret gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+ __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscall");
+
+struct ret
+gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+{
+ struct ret r;
+
+ errno = 0;
+ r.r = syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+ r.err = errno;
+ return r;
+}
+
+uintptr_t gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+ __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscallNoError");
+
+uintptr_t
+gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+{
+ return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+}
diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
new file mode 100644
index 0000000..e60e49a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
@@ -0,0 +1,21 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gccgo && linux && amd64
+// +build gccgo,linux,amd64
+
+package unix
+
+import "syscall"
+
+//extern gettimeofday
+func realGettimeofday(*Timeval, *byte) int32
+
+func gettimeofday(tv *Timeval) (err syscall.Errno) {
+ r := realGettimeofday(tv, nil)
+ if r < 0 {
+ return syscall.GetErrno()
+ }
+ return 0
+}
diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go
new file mode 100644
index 0000000..6c7ad05
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ioctl.go
@@ -0,0 +1,75 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+// ioctl itself should not be exposed directly, but additional get/set
+// functions for specific types are permissible.
+
+// IoctlSetInt performs an ioctl operation which sets an integer value
+// on fd, using the specified request number.
+func IoctlSetInt(fd int, req uint, value int) error {
+ return ioctl(fd, req, uintptr(value))
+}
+
+// IoctlSetPointerInt performs an ioctl operation which sets an
+// integer value on fd, using the specified request number. The ioctl
+// argument is called with a pointer to the integer value, rather than
+// passing the integer value directly.
+func IoctlSetPointerInt(fd int, req uint, value int) error {
+ v := int32(value)
+ return ioctl(fd, req, uintptr(unsafe.Pointer(&v)))
+}
+
+// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
+//
+// To change fd's window size, the req argument should be TIOCSWINSZ.
+func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
+ // TODO: if we get the chance, remove the req parameter and
+ // hardcode TIOCSWINSZ.
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlSetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value will usually be TCSETA or TIOCSETA.
+func IoctlSetTermios(fd int, req uint, value *Termios) error {
+ // TODO: if we get the chance, remove the req parameter.
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlGetInt performs an ioctl operation which gets an integer value
+// from fd, using the specified request number.
+//
+// A few ioctl requests use the return value as an output parameter;
+// for those, IoctlRetInt should be used instead of this function.
+func IoctlGetInt(fd int, req uint) (int, error) {
+ var value int
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return value, err
+}
+
+func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
+ var value Winsize
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+func IoctlGetTermios(fd int, req uint) (*Termios, error) {
+ var value Termios
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go
new file mode 100644
index 0000000..48773f7
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go
@@ -0,0 +1,196 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+// IoctlRetInt performs an ioctl operation specified by req on a device
+// associated with opened file descriptor fd, and returns a non-negative
+// integer that is returned by the ioctl syscall.
+func IoctlRetInt(fd int, req uint) (int, error) {
+ ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0)
+ if err != 0 {
+ return 0, err
+ }
+ return int(ret), nil
+}
+
+func IoctlGetUint32(fd int, req uint) (uint32, error) {
+ var value uint32
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return value, err
+}
+
+func IoctlGetRTCTime(fd int) (*RTCTime, error) {
+ var value RTCTime
+ err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+func IoctlSetRTCTime(fd int, value *RTCTime) error {
+ err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) {
+ var value RTCWkAlrm
+ err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error {
+ err := ioctl(fd, RTC_WKALM_SET, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+type ifreqEthtool struct {
+ name [IFNAMSIZ]byte
+ data unsafe.Pointer
+}
+
+// IoctlGetEthtoolDrvinfo fetches ethtool driver information for the network
+// device specified by ifname.
+func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
+ // Leave room for terminating NULL byte.
+ if len(ifname) >= IFNAMSIZ {
+ return nil, EINVAL
+ }
+
+ value := EthtoolDrvinfo{
+ Cmd: ETHTOOL_GDRVINFO,
+ }
+ ifreq := ifreqEthtool{
+ data: unsafe.Pointer(&value),
+ }
+ copy(ifreq.name[:], ifname)
+ err := ioctl(fd, SIOCETHTOOL, uintptr(unsafe.Pointer(&ifreq)))
+ runtime.KeepAlive(ifreq)
+ return &value, err
+}
+
+// IoctlGetWatchdogInfo fetches information about a watchdog device from the
+// Linux watchdog API. For more information, see:
+// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
+func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) {
+ var value WatchdogInfo
+ err := ioctl(fd, WDIOC_GETSUPPORT, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+// IoctlWatchdogKeepalive issues a keepalive ioctl to a watchdog device. For
+// more information, see:
+// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
+func IoctlWatchdogKeepalive(fd int) error {
+ return ioctl(fd, WDIOC_KEEPALIVE, 0)
+}
+
+// IoctlFileCloneRange performs an FICLONERANGE ioctl operation to clone the
+// range of data conveyed in value to the file associated with the file
+// descriptor destFd. See the ioctl_ficlonerange(2) man page for details.
+func IoctlFileCloneRange(destFd int, value *FileCloneRange) error {
+ err := ioctl(destFd, FICLONERANGE, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlFileClone performs an FICLONE ioctl operation to clone the entire file
+// associated with the file description srcFd to the file associated with the
+// file descriptor destFd. See the ioctl_ficlone(2) man page for details.
+func IoctlFileClone(destFd, srcFd int) error {
+ return ioctl(destFd, FICLONE, uintptr(srcFd))
+}
+
+type FileDedupeRange struct {
+ Src_offset uint64
+ Src_length uint64
+ Reserved1 uint16
+ Reserved2 uint32
+ Info []FileDedupeRangeInfo
+}
+
+type FileDedupeRangeInfo struct {
+ Dest_fd int64
+ Dest_offset uint64
+ Bytes_deduped uint64
+ Status int32
+ Reserved uint32
+}
+
+// IoctlFileDedupeRange performs an FIDEDUPERANGE ioctl operation to share the
+// range of data conveyed in value from the file associated with the file
+// descriptor srcFd to the value.Info destinations. See the
+// ioctl_fideduperange(2) man page for details.
+func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error {
+ buf := make([]byte, SizeofRawFileDedupeRange+
+ len(value.Info)*SizeofRawFileDedupeRangeInfo)
+ rawrange := (*RawFileDedupeRange)(unsafe.Pointer(&buf[0]))
+ rawrange.Src_offset = value.Src_offset
+ rawrange.Src_length = value.Src_length
+ rawrange.Dest_count = uint16(len(value.Info))
+ rawrange.Reserved1 = value.Reserved1
+ rawrange.Reserved2 = value.Reserved2
+
+ for i := range value.Info {
+ rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer(
+ uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) +
+ uintptr(i*SizeofRawFileDedupeRangeInfo)))
+ rawinfo.Dest_fd = value.Info[i].Dest_fd
+ rawinfo.Dest_offset = value.Info[i].Dest_offset
+ rawinfo.Bytes_deduped = value.Info[i].Bytes_deduped
+ rawinfo.Status = value.Info[i].Status
+ rawinfo.Reserved = value.Info[i].Reserved
+ }
+
+ err := ioctl(srcFd, FIDEDUPERANGE, uintptr(unsafe.Pointer(&buf[0])))
+
+ // Output
+ for i := range value.Info {
+ rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer(
+ uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) +
+ uintptr(i*SizeofRawFileDedupeRangeInfo)))
+ value.Info[i].Dest_fd = rawinfo.Dest_fd
+ value.Info[i].Dest_offset = rawinfo.Dest_offset
+ value.Info[i].Bytes_deduped = rawinfo.Bytes_deduped
+ value.Info[i].Status = rawinfo.Status
+ value.Info[i].Reserved = rawinfo.Reserved
+ }
+
+ return err
+}
+
+func IoctlHIDGetDesc(fd int, value *HIDRawReportDescriptor) error {
+ err := ioctl(fd, HIDIOCGRDESC, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+func IoctlHIDGetRawInfo(fd int) (*HIDRawDevInfo, error) {
+ var value HIDRawDevInfo
+ err := ioctl(fd, HIDIOCGRAWINFO, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+func IoctlHIDGetRawName(fd int) (string, error) {
+ var value [_HIDIOCGRAWNAME_LEN]byte
+ err := ioctl(fd, _HIDIOCGRAWNAME, uintptr(unsafe.Pointer(&value[0])))
+ return ByteSliceToString(value[:]), err
+}
+
+func IoctlHIDGetRawPhys(fd int) (string, error) {
+ var value [_HIDIOCGRAWPHYS_LEN]byte
+ err := ioctl(fd, _HIDIOCGRAWPHYS, uintptr(unsafe.Pointer(&value[0])))
+ return ByteSliceToString(value[:]), err
+}
+
+func IoctlHIDGetRawUniq(fd int) (string, error) {
+ var value [_HIDIOCGRAWUNIQ_LEN]byte
+ err := ioctl(fd, _HIDIOCGRAWUNIQ, uintptr(unsafe.Pointer(&value[0])))
+ return ByteSliceToString(value[:]), err
+}
diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go
new file mode 100644
index 0000000..5384e7d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go
@@ -0,0 +1,74 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x
+// +build zos,s390x
+
+package unix
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+// ioctl itself should not be exposed directly, but additional get/set
+// functions for specific types are permissible.
+
+// IoctlSetInt performs an ioctl operation which sets an integer value
+// on fd, using the specified request number.
+func IoctlSetInt(fd int, req uint, value int) error {
+ return ioctl(fd, req, uintptr(value))
+}
+
+// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
+//
+// To change fd's window size, the req argument should be TIOCSWINSZ.
+func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
+ // TODO: if we get the chance, remove the req parameter and
+ // hardcode TIOCSWINSZ.
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlSetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value is expected to be TCSETS, TCSETSW, or TCSETSF
+func IoctlSetTermios(fd int, req uint, value *Termios) error {
+ if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) {
+ return ENOSYS
+ }
+ err := Tcsetattr(fd, int(req), value)
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlGetInt performs an ioctl operation which gets an integer value
+// from fd, using the specified request number.
+//
+// A few ioctl requests use the return value as an output parameter;
+// for those, IoctlRetInt should be used instead of this function.
+func IoctlGetInt(fd int, req uint) (int, error) {
+ var value int
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return value, err
+}
+
+func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
+ var value Winsize
+ err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
+// IoctlGetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value is expected to be TCGETS
+func IoctlGetTermios(fd int, req uint) (*Termios, error) {
+ var value Termios
+ if req != TCGETS {
+ return &value, ENOSYS
+ }
+ err := Tcgetattr(fd, &value)
+ return &value, err
+}
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
new file mode 100644
index 0000000..396aadf
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -0,0 +1,231 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# This script runs or (given -n) prints suggested commands to generate files for
+# the Architecture/OS specified by the GOARCH and GOOS environment variables.
+# See README.md for more information about how the build system works.
+
+GOOSARCH="${GOOS}_${GOARCH}"
+
+# defaults
+mksyscall="go run mksyscall.go"
+mkerrors="./mkerrors.sh"
+zerrors="zerrors_$GOOSARCH.go"
+mksysctl=""
+zsysctl="zsysctl_$GOOSARCH.go"
+mksysnum=
+mktypes=
+mkasm=
+run="sh"
+cmd=""
+
+case "$1" in
+-syscalls)
+ for i in zsyscall*go
+ do
+ # Run the command line that appears in the first line
+ # of the generated file to regenerate it.
+ sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
+ rm _$i
+ done
+ exit 0
+ ;;
+-n)
+ run="cat"
+ cmd="echo"
+ shift
+esac
+
+case "$#" in
+0)
+ ;;
+*)
+ echo 'usage: mkall.sh [-n]' 1>&2
+ exit 2
+esac
+
+if [[ "$GOOS" = "linux" ]]; then
+ # Use the Docker-based build system
+ # Files generated through docker (use $cmd so you can Ctl-C the build or run)
+ $cmd docker build --tag generate:$GOOS $GOOS
+ $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")" && /bin/pwd):/build generate:$GOOS
+ exit
+fi
+
+GOOSARCH_in=syscall_$GOOSARCH.go
+case "$GOOSARCH" in
+_* | *_ | _)
+ echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+aix_ppc)
+ mkerrors="$mkerrors -maix32"
+ mksyscall="go run mksyscall_aix_ppc.go -aix"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+aix_ppc64)
+ mkerrors="$mkerrors -maix64"
+ mksyscall="go run mksyscall_aix_ppc64.go -aix"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+darwin_amd64)
+ mkerrors="$mkerrors -m64"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
+ ;;
+darwin_arm64)
+ mkerrors="$mkerrors -m64"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
+ ;;
+dragonfly_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -dragonfly"
+ mksysnum="go run mksysnum.go 'https://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -arm"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+freebsd_arm64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+netbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32 -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -netbsd -arm"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+netbsd_arm64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="go run mksyscall.go -l32 -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="go run mksyscall.go -l32 -openbsd -arm"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+openbsd_arm64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+openbsd_mips64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+solaris_amd64)
+ mksyscall="go run mksyscall_solaris.go"
+ mkerrors="$mkerrors -m64"
+ mksysnum=
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+illumos_amd64)
+ mksyscall="go run mksyscall_solaris.go"
+ mkerrors=
+ mksysnum=
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+*)
+ echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+esac
+
+(
+ if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
+ case "$GOOS" in
+ *)
+ syscall_goos="syscall_$GOOS.go"
+ case "$GOOS" in
+ darwin | dragonfly | freebsd | netbsd | openbsd)
+ syscall_goos="syscall_bsd.go $syscall_goos"
+ ;;
+ esac
+ if [ -n "$mksyscall" ]; then
+ if [ "$GOOSARCH" == "aix_ppc64" ]; then
+ # aix/ppc64 script generates files instead of writing to stdin.
+ echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ;
+ elif [ "$GOOS" == "darwin" ]; then
+ # 1.12 and later, syscalls via libSystem
+ echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
+ # 1.13 and later, syscalls via libSystem (including syscallPtr)
+ echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go";
+ elif [ "$GOOS" == "illumos" ]; then
+ # illumos code generation requires a --illumos switch
+ echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go";
+ # illumos implies solaris, so solaris code generation is also required
+ echo "$mksyscall -tags solaris,$GOARCH syscall_solaris.go syscall_solaris_$GOARCH.go |gofmt >zsyscall_solaris_$GOARCH.go";
+ else
+ echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
+ fi
+ fi
+ esac
+ if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
+ if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
+ if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi
+ if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi
+) | $run
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
new file mode 100644
index 0000000..3f670fa
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -0,0 +1,758 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Generate Go code listing errors and other #defined constant
+# values (ENAMETOOLONG etc.), by asking the preprocessor
+# about the definitions.
+
+unset LANG
+export LC_ALL=C
+export LC_CTYPE=C
+
+if test -z "$GOARCH" -o -z "$GOOS"; then
+ echo 1>&2 "GOARCH or GOOS not defined in environment"
+ exit 1
+fi
+
+# Check that we are using the new build system if we should
+if [[ "$GOOS" = "linux" ]] && [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then
+ echo 1>&2 "In the Docker based build system, mkerrors should not be called directly."
+ echo 1>&2 "See README.md"
+ exit 1
+fi
+
+if [[ "$GOOS" = "aix" ]]; then
+ CC=${CC:-gcc}
+else
+ CC=${CC:-cc}
+fi
+
+if [[ "$GOOS" = "solaris" ]]; then
+ # Assumes GNU versions of utilities in PATH.
+ export PATH=/usr/gnu/bin:$PATH
+fi
+
+uname=$(uname)
+
+includes_AIX='
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define AF_LOCAL AF_UNIX
+'
+
+includes_Darwin='
+#define _DARWIN_C_SOURCE
+#define KERNEL
+#define _DARWIN_USE_64_BIT_INODE
+#define __APPLE_USE_RFC_3542
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include