Merge branch 'master' into tests-varbinary

commit 4fd4be1308

.github/workflows/ci.yml (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
name: CI

on: [pull_request]

jobs:
  build:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@master

    - name: Set up Go 1.12
      uses: actions/setup-go@v1
      with:
        version: 1.12
      id: go

    - name: Build
      run: script/cibuild
.github/workflows/replica-tests.yml (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
name: migration tests

on: [pull_request]

jobs:
  build:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@master

    - name: Set up Go 1.12
      uses: actions/setup-go@v1
      with:
        version: 1.12
      id: go

    - name: migration tests
      run: script/cibuild-gh-ost-replica-tests
.travis.yml (deleted file, 31 lines)
@@ -1,31 +0,0 @@
# http://docs.travis-ci.com/user/languages/go/
language: go

go:
  - "1.9"
  - "1.10"

os:
  - linux

env:
  - MYSQL_USER=root
  - CURRENT_CI_ENV=travis

addons:
  apt:
    packages:
    - git
    - numactl
    - libaio1

before_install:
  - mysql -e 'CREATE DATABASE IF NOT EXISTS test;'

install: true

script:
  - script/cibuild

notifications:
  email: false
Dockerfile.packaging (new file, 22 lines)
@@ -0,0 +1,22 @@
#

FROM golang:1.12.6

RUN apt-get update
RUN apt-get install -y ruby ruby-dev rubygems build-essential
RUN gem install --no-ri --no-rdoc fpm
ENV GOPATH=/tmp/go

RUN apt-get install -y curl
RUN apt-get install -y rsync
RUN apt-get install -y gcc
RUN apt-get install -y g++
RUN apt-get install -y bash
RUN apt-get install -y git
RUN apt-get install -y tar
RUN apt-get install -y rpm

RUN mkdir -p $GOPATH/src/github.com/github/gh-ost
WORKDIR $GOPATH/src/github.com/github/gh-ost
COPY . .
RUN bash build.sh
Dockerfile.test (new file, 11 lines)
@@ -0,0 +1,11 @@
FROM golang:1.12.1
LABEL maintainer="github@github.com"

RUN apt-get update
RUN apt-get install -y lsb-release
RUN rm -rf /var/lib/apt/lists/*

COPY . /go/src/github.com/github/gh-ost
WORKDIR /go/src/github.com/github/gh-ost

CMD ["script/test"]
@@ -94,7 +94,7 @@ Please see [Coding gh-ost](doc/coding-ghost.md) for a guide to getting started d

[Download latest release here](https://github.com/github/gh-ost/releases/latest)

`gh-ost` is a Go project; it is built with Go `1.9` and above. To build on your own, use either:
`gh-ost` is a Go project; it is built with Go `1.12` and above. To build on your own, use either:
- [script/build](https://github.com/github/gh-ost/blob/master/script/build) - this is the same build script used by CI hence the authoritative; artifact is `./bin/gh-ost` binary.
- [build.sh](https://github.com/github/gh-ost/blob/master/build.sh) for building `tar.gz` artifacts in `/tmp/gh-ost`

@@ -1 +1 @@
1.0.47
1.0.49
build.sh (14 changed lines)
@@ -18,10 +18,8 @@ function build {
  GOOS=$3
  GOARCH=$4


  if ! go version | egrep -q 'go(1[.]9|1[.]1[0-9])' ; then
    echo "go version is too low. Must use 1.9 or above"
  if ! go version | egrep -q 'go(1\.1[234])' ; then
    echo "go version must be 1.12 or above"
    exit 1
  fi

@@ -42,8 +40,8 @@ function build {
    builddir=$(setuptree)
    cp $buildpath/$target $builddir/gh-ost/usr/bin
    cd $buildpath
    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m shlomi-noach --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm .
    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m shlomi-noach --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm .
    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
  fi
}

@@ -63,11 +61,11 @@ main() {

  mkdir -p ${buildpath}
  rm -rf ${buildpath:?}/*
  build macOS osx darwin amd64
  build GNU/Linux linux linux amd64
  # build macOS osx darwin amd64

  echo "Binaries found in:"
  ls -1 $buildpath/gh-ost-binary*${timestamp}.tar.gz
  find $buildpath/gh-ost* -type f -maxdepth 1
}

main "$@"
@@ -18,7 +18,7 @@ If, for some reason, you do not wish `gh-ost` to connect to a replica, you may c

### approve-renamed-columns

When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try an associate the old column name with new column name. Otherwise the new structure may also look like some column was dropped and another was added.
When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise the new structure may also look like some column was dropped and another was added.

`gh-ost` will print out what it thinks the _rename_ implied, but will not issue the migration unless you provide with `--approve-renamed-columns`.

@@ -69,6 +69,10 @@ This is somewhat similar to a Nagios `n`-times test, where `n` in our case is al

Optional. Default is `safe`. See more discussion in [`cut-over`](cut-over.md)

### cut-over-lock-timeout-seconds

Default `3`. Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout).

### discard-foreign-keys

**Danger**: this flag will _silently_ discard any foreign keys existing on your table.
@@ -107,6 +111,18 @@ While the ongoing estimated number of rows is still heuristic, it's almost exact

Without this parameter, migration is a _noop_: testing table creation and validity of migration, but not touching data.

### force-named-cut-over

If given, a `cut-over` command must name the migrated table, or else ignored.

### force-named-panic

If given, a `panic` command must name the migrated table, or else ignored.

### force-table-names

Table name prefix to be used on the temporary tables.

### gcp

Add this flag when executing on a 1st generation Google Cloud Platform (GCP).
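For a concrete picture of what the named-command requirement means in practice: interactive commands are written to gh-ost's server socket, and with `--force-named-cut-over` or `--force-named-panic` the command must carry the migrated table's name. A minimal Go sketch of sending such a command follows; the socket path and table name are assumptions for the example, not values from this commit:

```go
package main

import (
	"fmt"
	"io"
	"net"
	"os"
)

func main() {
	// Path of the gh-ost interactive command socket (assumed; see --serve-socket-file).
	conn, err := net.Dial("unix", "/tmp/gh-ost.test.sample_table.sock")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer conn.Close()

	// With --force-named-cut-over, a bare "cut-over" command is ignored;
	// the command must name the migrated table explicitly.
	fmt.Fprintln(conn, "cut-over=sample_table")

	// Echo whatever status gh-ost writes back on the same connection.
	io.Copy(os.Stdout, conn)
}
```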
@@ -125,6 +141,10 @@ We think `gh-ost` should not take chances or make assumptions about the user's t

See [`initially-drop-ghost-table`](#initially-drop-ghost-table)

### initially-drop-socket-file

Default False. Should `gh-ost` forcibly delete an existing socket file. Be careful: this might drop the socket file of a running migration!

### max-lag-millis

On a replication topology, this is perhaps the most important migration throttling factor: the maximum lag allowed for migration to work. If lag exceeds this value, migration throttles.
@@ -141,7 +161,7 @@ List of metrics and threshold values; topping the threshold of any will cause th

### migrate-on-replica

Typically `gh-ost` is used to migrate tables on a master. If you wish to only perform the migration in full on a replica, connect `gh-ost` to said replica and pass `--migrate-on-replica`. `gh-ost` will briefly connect to the master but other issue no changes on the master. Migration will be fully executed on the replica, while making sure to maintain a small replication lag.
Typically `gh-ost` is used to migrate tables on a master. If you wish to only perform the migration in full on a replica, connect `gh-ost` to said replica and pass `--migrate-on-replica`. `gh-ost` will briefly connect to the master but otherwise will make no changes on the master. Migration will be fully executed on the replica, while making sure to maintain a small replication lag.

### postpone-cut-over-flag-file

@@ -161,14 +181,42 @@ See also: [`concurrent-migrations`](cheatsheet.md#concurrent-migrations) on the

By default `gh-ost` verifies no foreign keys exist on the migrated table. On servers with large number of tables this check can take a long time. If you're absolutely certain no foreign keys exist (table does not reference other table nor is referenced by other tables) and wish to save the check time, provide with `--skip-foreign-key-checks`.

### skip-strict-mode

By default `gh-ost` enforces STRICT_ALL_TABLES sql_mode as a safety measure. In some cases this changes the behaviour of other modes (namely ERROR_FOR_DIVISION_BY_ZERO, NO_ZERO_DATE, and NO_ZERO_IN_DATE) which may lead to errors during migration. Use `--skip-strict-mode` to explicitly tell `gh-ost` not to enforce this. **Danger** This may have some unexpected disastrous side effects.

### skip-renamed-columns

See [`approve-renamed-columns`](#approve-renamed-columns)

### ssl

By default `gh-ost` does not use ssl/tls connections to the database servers when performing migrations. This flag instructs `gh-ost` to use encrypted connections. If enabled, `gh-ost` will use the system's ca certificate pool for server certificate verification. If a different certificate is needed for server verification, see `--ssl-ca`. If you wish to skip server verification, but still use encrypted connections, use with `--ssl-allow-insecure`.

### ssl-allow-insecure

Allows `gh-ost` to connect to the MySQL servers using encrypted connections, but without verifying the validity of the certificate provided by the server during the connection. Requires `--ssl`.

### ssl-ca

`--ssl-ca=/path/to/ca-cert.pem`: ca certificate file (in PEM format) to use for server certificate verification. If specified, the default system ca cert pool will not be used for verification, only the ca cert provided here. Requires `--ssl`.

### ssl-cert

`--ssl-cert=/path/to/ssl-cert.crt`: SSL public key certificate file (in PEM format).

### ssl-key

`--ssl-key=/path/to/ssl-key.key`: SSL private key file (in PEM format).

### test-on-replica

Issue the migration on a replica; do not modify data on master. Useful for validating, testing and benchmarking. See [`testing-on-replica`](testing-on-replica.md)

### test-on-replica-skip-replica-stop

Default `False`. When `--test-on-replica` is enabled, do not issue commands stop replication (requires `--test-on-replica`).

### throttle-control-replicas

Provide a command delimited list of replicas; `gh-ost` will throttle when any of the given replicas lag beyond [`--max-lag-millis`](#max-lag-millis). The list can be queried and updated dynamically via [interactive commands](interactive-commands.md)
@@ -65,10 +65,14 @@ The following variables are available on all hooks:
- `GH_OST_ELAPSED_COPY_SECONDS` - row-copy time (excluding startup, row-count and postpone time)
- `GH_OST_ESTIMATED_ROWS` - estimated total rows in table
- `GH_OST_COPIED_ROWS` - number of rows copied by `gh-ost`
- `GH_OST_INSPECTED_LAG` - lag in seconds (floating point) of inspected server
- `GH_OST_PROGRESS` - progress pct ([0..100], floating point) of migration
- `GH_OST_MIGRATED_HOST`
- `GH_OST_INSPECTED_HOST`
- `GH_OST_EXECUTING_HOST`
- `GH_OST_HOOKS_HINT` - copy of `--hooks-hint` value
- `GH_OST_HOOKS_HINT_OWNER` - copy of `--hooks-hint-owner` value
- `GH_OST_HOOKS_HINT_TOKEN` - copy of `--hooks-hint-token` value
- `GH_OST_DRY_RUN` - whether or not the `gh-ost` run is a dry run

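Hooks are plain executables, so the variables listed above can be read from any language. A minimal sketch of a hook written in Go that reports a few of the documented values; the file name you would save it under and where you send the output are up to you:

```go
package main

import (
	"fmt"
	"os"
)

// A gh-ost hook is just an executable file; gh-ost passes context via environment variables.
func main() {
	progress := os.Getenv("GH_OST_PROGRESS")  // migration progress pct, [0..100], floating point
	lag := os.Getenv("GH_OST_INSPECTED_LAG")  // replication lag of the inspected server, in seconds
	dryRun := os.Getenv("GH_OST_DRY_RUN")     // "true" or "false"
	hint := os.Getenv("GH_OST_HOOKS_HINT")    // copy of --hooks-hint

	fmt.Printf("progress=%s%% lag=%ss dry-run=%s hint=%q\n", progress, lag, dryRun, hint)
}
```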
The following variable are available on particular hooks:
@@ -28,3 +28,9 @@ It is therefore unlikely that `gh-ost` will support this behavior.
Yes. TL;DR if running all on same replica/master, make sure to provide `--replica-server-id`. [Read more](cheatsheet.md#concurrent-migrations)

# Why

### Why Is the "Connect to Replica" mode preferred?

To avoid placing extra load on the master. `gh-ost` connects as a replication client. Each additional replica adds some load to the master.

To monitor replication lag from a replica. This makes the replication lag throttle, `--max-lag-millis`, more representative of the lag experienced by other replicas following the master (perhaps N levels deep in a tree of replicas).
doc/rds.md (10 changed lines)
@@ -26,6 +26,14 @@ If you use `pt-table-checksum` as a part of your data integrity checks, you migh
This tool requires binlog_format=STATEMENT, but the current binlog_format is set to ROW and an error occurred while attempting to change it. If running MySQL 5.1.29 or newer, setting binlog_format requires the SUPER privilege. You will need to manually set binlog_format to 'STATEMENT' before running this tool.
```

#### Binlog filtering

In Aurora, the [binlog filtering feature][aws_replication_docs_bin_log_filtering] is enabled by default. This becomes an issue when gh-ost tries to do the cut-over, because gh-ost waits for an entry in the binlog to proceed but this entry will never end up in the binlog because it gets filtered out by the binlog filtering feature.
You need to turn this feature off during the migration process.
Set the `aurora_enable_repl_bin_log_filtering` parameter to 0 in the Parameter Group for your cluster.
When the migration is done, set it back to 1 (default).


#### Preflight checklist

Before trying to run any `gh-ost` migrations you will want to confirm the following:
@@ -35,6 +43,7 @@ Before trying to run any `gh-ost` migrations you will want to confirm the follow
- [ ] Executing `SHOW SLAVE STATUS\G` on your replica cluster displays the correct master host, binlog position, etc.
- [ ] Database backup retention is greater than 1 day to enable binlogs
- [ ] You have setup [`hooks`][ghost_hooks] to issue RDS procedures for stopping and starting replication. (see [github/gh-ost#163][ghost_rds_issue_tracking] for examples)
- [ ] The parameter `aurora_enable_repl_bin_log_filtering` is set to 0

[new_issue]: https://github.com/github/gh-ost/issues/new
[assume_rbr_docs]: https://github.com/github/gh-ost/blob/master/doc/command-line-flags.md#assume-rbr
@@ -43,3 +52,4 @@ Before trying to run any `gh-ost` migrations you will want to confirm the follow
[percona_toolkit_patch]: https://github.com/jacobbednarz/percona-toolkit/commit/0271ba6a094da446a5e5bb8d99b5c26f1777f2b9
[ghost_hooks]: https://github.com/github/gh-ost/blob/master/doc/hooks.md
[ghost_rds_issue_tracking]: https://github.com/github/gh-ost/issues/163
[aws_replication_docs_bin_log_filtering]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Replication.html#AuroraMySQL.Replication.Performance
@@ -22,7 +22,7 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th

### Limitations

- Foreign keys not supported. They may be supported in the future, to some extent.
- Foreign key constraints are not supported. They may be supported in the future, to some extent.

- Triggers are not supported. They may be supported in the future.

@@ -38,7 +38,7 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
- It is not allowed to migrate a table where another table exists with same name and different upper/lower case.
  - For example, you may not migrate `MyTable` if another table called `MYtable` exists in the same schema.

- Amazon RDS works, but has it's own [limitations](rds.md).
- Amazon RDS works, but has its own [limitations](rds.md).
- Google Cloud SQL works, `--gcp` flag required.
- Aliyun RDS works, `--aliyun-rds` flag required.

@@ -1,10 +1,10 @@
# Shared key

A requirement for a migration to run is that the two _before_ and _after_ tables have a shared unique key. This is to elaborate and illustrate on the matter.
gh-ost requires for every migration that both the _before_ and _after_ versions of the table share the same unique not-null key columns. This page illustrates this rule.

### Introduction

Consider a classic, simple migration. The table is any normal:
Consider a simple migration, with a normal table,

```sql
CREATE TABLE tbl (
@@ -15,54 +15,72 @@ CREATE TABLE tbl (
)
```

And the migration is a simple `add column ts timestamp`.
and the migration `add column ts timestamp`. The _after_ table version would be:

In such migration there is no change in indexes, and in particular no change to any unique key, and specifically no change to the `PRIMARY KEY`. To run this migration, `gh-ost` would iterate the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` by order of `id`, and then apply binlog events onto `_tbl_gho`.
```sql
CREATE TABLE tbl (
  id bigint unsigned not null auto_increment,
  data varchar(255),
  more_data int,
  ts timestamp,
  PRIMARY KEY(id)
)
```

Applying the binlog events assumes the existence of a shared unique key. For example, an `UPDATE` statement in the binary log translate to a `REPLACE` statement which `gh-ost` applies to the _ghost_ table. Such statement expects to add or replace an existing row based on given row data. In particular, it would _replace_ an existing row if a unique key violation is met.
(This is also the definition of the _ghost_ table, except that that table would be called `_tbl_gho`).

So `gh-ost` correlates `tbl` and `_tbl_gho` rows using a unique key. In the above example that would be the `PRIMARY KEY`.
In this migration, the _before_ and _after_ versions contain the same unique not-null key (the PRIMARY KEY). To run this migration, `gh-ost` would iterate through the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` in primary key order, while also applying the binlog event writes from `tble` onto `_tbl_gho`.

### Rules
The applying of the binlog events is what requires the shared unique key. For example, an `UPDATE` statement to `tbl` translates to a `REPLACE` statement which `gh-ost` applies to `_tbl_gho`. A `REPLACE` statement expects to insert or replace an existing row based on its row's values and the table's unique key constraints. In particular, if inserting that row would result in a unique key violation (e.g., a row with that primary key already exists), it would _replace_ that existing row with the new values.

There must be a shared set of not-null columns for which there is a unique constraint in both the original table and the migration (_ghost_) table.
So `gh-ost` correlates `tbl` and `_tbl_gho` rows one to one using a unique key. In the above example that would be the `PRIMARY KEY`.

### Interpreting the rules
### Interpreting the rule

The same columns must be covered by a unique key in both tables. This doesn't have to be the `PRIMARY KEY`. This doesn't have to be a key of the same name.
The _before_ and _after_ versions of the table share the same unique not-null key, but:
- the key doesn't have to be the PRIMARY KEY
- the key can have a different name between the _before_ and _after_ versions (e.g., renamed via DROP INDEX and ADD INDEX) so long as it contains the exact same column(s)

Upon migration, `gh-ost` inspects both the original and _ghost_ table and attempts to find at least one such unique key (or rather, a set of columns) that is shared between the two. Typically this would just be the `PRIMARY KEY`, but sometimes you may change the `PRIMARY KEY` itself, in which case `gh-ost` will look for other options.
At the start of the migration, `gh-ost` inspects both the original and _ghost_ table it created, and attempts to find at least one such unique key (or rather, a set of columns) that is shared between the two. Typically this would just be the `PRIMARY KEY`, but some tables don't have primary keys, or sometimes it is the primary key that is being modified by the migration. In these cases `gh-ost` will look for other options.

`gh-ost` expects unique keys where no `NULL` values are found, i.e. all columns covered by the unique key are defined as `NOT NULL`. This is implicitly true for `PRIMARY KEY`s. If no such key can be found, `gh-ost` bails out. In the event there is no such key, but you happen to _know_ your columns have no `NULL` values even though they're `NULL`-able, you may take responsibility and pass the `--allow-nullable-unique-key`. The migration will run well as long as no `NULL` values are found in the unique key's columns. Any actual `NULL`s may corrupt the migration.
`gh-ost` expects unique keys where no `NULL` values are found, i.e. all columns contained in the unique key are defined as `NOT NULL`. This is implicitly true for primary keys. If no such key can be found, `gh-ost` bails out.

### Examples: allowed and not allowed
If the table contains a unique key with nullable columns, but you know your columns contain no `NULL` values, use the `--allow-nullable-unique-key` option. The migration will run well as long as no `NULL` values are found in the unique key's columns. **Any actual `NULL`s may corrupt the migration.**

### Examples: Allowed and Not Allowed

```sql
create table some_table (
  id int auto_increment,
  id int not null auto_increment,
  ts timestamp,
  name varchar(128) not null,
  owner_id int not null,
  loc_id int,
  loc_id int not null,
  primary key(id),
  unique key name_uidx(name)
)
```

Following are examples of migrations that are _good to run_:
Note the two unique, not-null indexes: the primary key and `name_uidx`.

Allowed migrations:

- `add column i int`
- `add key owner_idx(owner_id)`
- `add unique key owner_name_idx(owner_id, name)` - though you need to make sure to not write conflicting rows while this migration runs
- `add key owner_idx (owner_id)`
- `add unique key owner_name_idx (owner_id, name)` - **be careful not to write conflicting rows while this migration runs**
- `drop key name_uidx` - `primary key` is shared between the tables
- `drop primary key, add primary key(owner_id, loc_id)` - `name_uidx` is shared between the tables and is used for migration
- `change id bigint unsigned` - the `'primary key` is used. The change of type still makes the `primary key` workable.
- `drop primary key, drop key name_uidx, create primary key(name), create unique key id_uidx(id)` - swapping the two keys. `gh-ost` is still happy because `id` is still unique in both tables. So is `name`.
- `drop primary key, add primary key(owner_id, loc_id)` - `name_uidx` is shared between the tables
- `change id bigint unsigned not null auto_increment` - the `primary key` changes datatype but not value, and can be used
- `drop primary key, drop key name_uidx, add primary key(name), add unique key id_uidx(id)` - swapping the two keys. Either `id` or `name` could be used

Not allowed:

- `drop primary key, drop key name_uidx` - the _ghost_ table has no unique key
- `drop primary key, drop key name_uidx, create primary key(name, owner_id)` - no shared columns to the unique keys on both tables. Even though `name` exists in the _ghost_ table's `primary key`, it is only part of the key and in itself does not guarantee uniqueness in the _ghost_ table.


Following are examples of migrations that _cannot run_:
### Workarounds

- `drop primary key, drop key name_uidx` - no unique key to _ghost_ table, so clearly cannot run
- `drop primary key, drop key name_uidx, create primary key(name, owner_id)` - no shared columns to both tables. Even though `name` exists in the _ghost_ table's `primary key`, it is only part of the key and in itself does not guarantee uniqueness in the _ghost_ table.

Also, you cannot run a migration on a table that doesn't have some form of `unique key` in the first place, such as `some_table (id int, ts timestamp)`
If you need to change your primary key or only not-null unique index to use different columns, you will want to do it as two separate migrations:
1. `ADD UNIQUE KEY temp_pk (temp_pk_column,...)`
1. `DROP PRIMARY KEY, DROP KEY temp_pk, ADD PRIMARY KEY (temp_pk_column,...)`
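The rule described in this document can be pictured as a set comparison over candidate unique keys. The sketch below is an illustration only, not gh-ost's inspector code: it assumes each candidate key is described by its ordered column names plus a nullability flag, and picks the first key whose exact columns also back a unique not-null key on the _after_ (ghost) table.

```go
package main

import "fmt"

// UniqueKey is a simplified stand-in for a table's unique constraint:
// the ordered column names plus whether any of them is nullable.
type UniqueKey struct {
	Name     string
	Columns  []string
	Nullable bool
}

func sameColumns(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

// sharedUniqueKey returns the first before-table key whose exact column set
// also backs a unique, not-null key on the after (ghost) table.
func sharedUniqueKey(before, after []UniqueKey) *UniqueKey {
	for _, b := range before {
		if b.Nullable {
			continue // would need --allow-nullable-unique-key
		}
		for _, a := range after {
			if !a.Nullable && sameColumns(b.Columns, a.Columns) {
				return &b
			}
		}
	}
	return nil
}

func main() {
	before := []UniqueKey{{"PRIMARY", []string{"id"}, false}, {"name_uidx", []string{"name"}, false}}
	// e.g. after `drop key name_uidx`: only the primary key remains on the ghost table.
	after := []UniqueKey{{"PRIMARY", []string{"id"}, false}}
	if key := sharedUniqueKey(before, after); key != nil {
		fmt.Println("migration can iterate by key:", key.Name)
	} else {
		fmt.Println("no shared unique key; gh-ost would bail out")
	}
}
```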
@@ -46,6 +46,14 @@ Note that you may dynamically change both `--max-lag-millis` and the `throttle-c

An example query could be: `--throttle-query="select hour(now()) between 8 and 17"` which implies throttling auto-starts `8:00am` and migration auto-resumes at `18:00pm`.

#### HTTP Throttle

The `--throttle-http` flag allows for throttling via HTTP. Every 100ms `gh-ost` issues a `HEAD` request to the provided URL. If the response status code is not `200` throttling will kick in until a `200` response status code is returned.

If no URL is provided or the URL provided doesn't contain the scheme then the HTTP check will be disabled. For example `--throttle-http="http://1.2.3.4:6789/throttle"` will enable the HTTP check/throttling, but `--throttle-http="1.2.3.4:6789/throttle"` will not.

The URL can be queried and updated dynamically via [interactive interface](interactive-commands.md).

#### Manual control

In addition to the above, you are able to take control and throttle the operation any time you like.
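Because the check is just a `HEAD` request that must come back `200` for the migration to keep running, any small web service can act as the throttler. A minimal sketch in Go; the port, path and the condition being checked are assumptions for the example:

```go
package main

import (
	"net/http"
	"sync/atomic"
)

// throttled is flipped by whatever signal you trust (load checks, an operator, etc.).
var throttled int32

func main() {
	http.HandleFunc("/throttle", func(w http.ResponseWriter, r *http.Request) {
		if atomic.LoadInt32(&throttled) == 1 {
			// Anything other than 200 makes gh-ost throttle until 200 returns.
			w.WriteHeader(http.StatusServiceUnavailable)
			return
		}
		w.WriteHeader(http.StatusOK)
	})
	// gh-ost would then be started with --throttle-http="http://this-host:6789/throttle"
	http.ListenAndServe(":6789", nil)
}
```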
docker-compose.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
version: "3.5"
services:
  app:
    image: app
    build:
      context: .
      dockerfile: Dockerfile.test
@@ -7,6 +7,7 @@ package base

import (
	"fmt"
	"math"
	"os"
	"regexp"
	"strings"
@@ -86,6 +87,7 @@ type MigrationContext struct {
	SwitchToRowBinlogFormat bool
	AssumeRBR bool
	SkipForeignKeyChecks bool
	SkipStrictMode bool
	NullableUniqueKeyAllowed bool
	ApproveRenamedColumns bool
	SkipRenamedColumns bool
@@ -99,6 +101,11 @@ type MigrationContext struct {
	ConfigFile string
	CliUser string
	CliPassword string
	UseTLS bool
	TLSAllowInsecure bool
	TLSCACertificate string
	TLSCertificate string
	TLSKey string
	CliMasterUser string
	CliMasterPassword string

@@ -123,9 +130,12 @@ type MigrationContext struct {
	CutOverExponentialBackoff bool
	ExponentialBackoffMaxInterval int64
	ForceNamedCutOverCommand bool
	ForceNamedPanicCommand bool
	PanicFlagFile string
	HooksPath string
	HooksHintMessage string
	HooksHintOwner string
	HooksHintToken string

	DropServeSocket bool
	ServeSocketFile string
@@ -165,6 +175,7 @@ type MigrationContext struct {
	pointOfInterestTime time.Time
	pointOfInterestTimeMutex *sync.Mutex
	CurrentLag int64
	currentProgress uint64
	ThrottleHTTPStatusCode int64
	controlReplicasLagResult mysql.ReplicationLagResult
	TotalRowsCopied int64
@@ -419,6 +430,20 @@ func (this *MigrationContext) MarkRowCopyEndTime() {
	this.RowCopyEndTime = time.Now()
}

func (this *MigrationContext) GetCurrentLagDuration() time.Duration {
	return time.Duration(atomic.LoadInt64(&this.CurrentLag))
}

func (this *MigrationContext) GetProgressPct() float64 {
	return math.Float64frombits(atomic.LoadUint64(&this.currentProgress))
}

func (this *MigrationContext) SetProgressPct(progressPct float64) {
	atomic.StoreUint64(&this.currentProgress, math.Float64bits(progressPct))
}

// math.Float64bits([f=0..100])

// GetTotalRowsCopied returns the accurate number of rows being copied (affected)
// This is not exactly the same as the rows being iterated via chunks, but potentially close enough
func (this *MigrationContext) GetTotalRowsCopied() int64 {
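For context on the `currentProgress uint64` field and the `math.Float64bits` round-trip above: the progress percentage is stored as the raw bit pattern of a float64 inside a uint64 so it can be read and written through `sync/atomic`, which has no float operations. A standalone sketch of the same pattern:

```go
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// progressBits holds a float64 encoded via math.Float64bits so that
// concurrent readers and writers can use atomic uint64 operations.
var progressBits uint64

func setProgressPct(pct float64) {
	atomic.StoreUint64(&progressBits, math.Float64bits(pct))
}

func getProgressPct() float64 {
	return math.Float64frombits(atomic.LoadUint64(&progressBits))
}

func main() {
	setProgressPct(42.5)
	fmt.Printf("progress: %.1f%%\n", getProgressPct()) // progress: 42.5%
}
```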
@@ -695,6 +720,13 @@ func (this *MigrationContext) ApplyCredentials() {
	}
}

func (this *MigrationContext) SetupTLS() error {
	if this.UseTLS {
		return this.InspectorConnectionConfig.UseTLS(this.TLSCACertificate, this.TLSCertificate, this.TLSKey, this.TLSAllowInsecure)
	}
	return nil
}

// ReadConfigFile attempts to read the config file, if it exists
func (this *MigrationContext) ReadConfigFile() error {
	this.configMutex.Lock()
@@ -26,7 +26,7 @@ func NewBinlogEntry(logFile string, logPos uint64) *BinlogEntry {
	return binlogEntry
}

// NewBinlogEntry creates an empty, ready to go BinlogEntry object
// NewBinlogEntryAt creates an empty, ready to go BinlogEntry object
func NewBinlogEntryAt(coordinates mysql.BinlogCoordinates) *BinlogEntry {
	binlogEntry := &BinlogEntry{
		Coordinates: coordinates,
@@ -41,7 +41,7 @@ func (this *BinlogEntry) Duplicate() *BinlogEntry {
	return binlogEntry
}

// Duplicate creates and returns a new binlog entry, with some of the attributes pre-assigned
// String() returns a string representation of this binlog entry
func (this *BinlogEntry) String() string {
	return fmt.Sprintf("[BinlogEntry at %+v; dml:%+v]", this.Coordinates, this.DmlEvent)
}
@@ -40,12 +40,14 @@ func NewGoMySQLReader(migrationContext *base.MigrationContext) (binlogReader *Go
	serverId := uint32(migrationContext.ReplicaServerId)

	binlogSyncerConfig := replication.BinlogSyncerConfig{
		ServerID: serverId,
		Flavor:   "mysql",
		Host:     binlogReader.connectionConfig.Key.Hostname,
		Port:     uint16(binlogReader.connectionConfig.Key.Port),
		User:     binlogReader.connectionConfig.User,
		Password: binlogReader.connectionConfig.Password,
		ServerID:   serverId,
		Flavor:     "mysql",
		Host:       binlogReader.connectionConfig.Key.Hostname,
		Port:       uint16(binlogReader.connectionConfig.Key.Port),
		User:       binlogReader.connectionConfig.User,
		Password:   binlogReader.connectionConfig.Password,
		TLSConfig:  binlogReader.connectionConfig.TLSConfig(),
		UseDecimal: true,
	}
	binlogReader.binlogSyncer = replication.NewBinlogSyncer(binlogSyncerConfig)

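As background on what `replication.NewBinlogSyncer` returns: the syncer is pointed at binlog coordinates and then yields a stream of events. A minimal, self-contained sketch of that flow with the go-mysql library as vendored at the time; the import paths, host, credentials and binlog position are assumptions for the example:

```go
package main

import (
	"context"
	"fmt"

	"github.com/siddontang/go-mysql/mysql"
	"github.com/siddontang/go-mysql/replication"
)

func main() {
	cfg := replication.BinlogSyncerConfig{
		ServerID: 99999, // same role as gh-ost's --replica-server-id
		Flavor:   "mysql",
		Host:     "replica.example.com",
		Port:     3306,
		User:     "gh-ost",
		Password: "secret",
	}
	syncer := replication.NewBinlogSyncer(cfg)

	// Start streaming from known coordinates (binlog file name + position).
	streamer, err := syncer.StartSync(mysql.Position{Name: "mysql-bin.000123", Pos: 4})
	if err != nil {
		panic(err)
	}
	for {
		ev, err := streamer.GetEvent(context.Background())
		if err != nil {
			panic(err)
		}
		// gh-ost filters these down to row events on the migrated table.
		fmt.Println(ev.Header.EventType)
	}
}
```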
@@ -111,7 +113,7 @@ func (this *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEven
				binlogEntry.DmlEvent.WhereColumnValues = sql.ToColumnValues(row)
			}
		}
		// The channel will do the throttling. Whoever is reding from the channel
		// The channel will do the throttling. Whoever is reading from the channel
		// decides whether action is taken synchronously (meaning we wait before
		// next iteration) or asynchronously (we keep pushing more events)
		// In reality, reads will be synchronous
@@ -14,7 +14,7 @@ import (

	"github.com/github/gh-ost/go/base"
	"github.com/github/gh-ost/go/logic"
	_ "github.com/go-sql-driver/mysql"
	"github.com/outbrain/golib/log"

	"golang.org/x/crypto/ssh/terminal"
@@ -54,6 +55,12 @@ func main() {
	flag.StringVar(&migrationContext.ConfigFile, "conf", "", "Config file")
	askPass := flag.Bool("ask-pass", false, "prompt for MySQL password")

	flag.BoolVar(&migrationContext.UseTLS, "ssl", false, "Enable SSL encrypted connections to MySQL hosts")
	flag.StringVar(&migrationContext.TLSCACertificate, "ssl-ca", "", "CA certificate in PEM format for TLS connections to MySQL hosts. Requires --ssl")
	flag.StringVar(&migrationContext.TLSCertificate, "ssl-cert", "", "Certificate in PEM format for TLS connections to MySQL hosts. Requires --ssl")
	flag.StringVar(&migrationContext.TLSKey, "ssl-key", "", "Key in PEM format for TLS connections to MySQL hosts. Requires --ssl")
	flag.BoolVar(&migrationContext.TLSAllowInsecure, "ssl-allow-insecure", false, "Skips verification of MySQL hosts' certificate chain and host name. Requires --ssl")

	flag.StringVar(&migrationContext.DatabaseName, "database", "", "database name (mandatory)")
	flag.StringVar(&migrationContext.OriginalTableName, "table", "", "table name (mandatory)")
	flag.StringVar(&migrationContext.AlterStatement, "alter", "", "alter statement (mandatory)")
@@ -67,6 +74,7 @@ func main() {
	flag.BoolVar(&migrationContext.IsTungsten, "tungsten", false, "explicitly let gh-ost know that you are running on a tungsten-replication based topology (you are likely to also provide --assume-master-host)")
	flag.BoolVar(&migrationContext.DiscardForeignKeys, "discard-foreign-keys", false, "DANGER! This flag will migrate a table that has foreign keys and will NOT create foreign keys on the ghost table, thus your altered table will have NO foreign keys. This is useful for intentional dropping of foreign keys")
	flag.BoolVar(&migrationContext.SkipForeignKeyChecks, "skip-foreign-key-checks", false, "set to 'true' when you know for certain there are no foreign keys on your table, and wish to skip the time it takes for gh-ost to verify that")
	flag.BoolVar(&migrationContext.SkipStrictMode, "skip-strict-mode", false, "explicitly tell gh-ost binlog applier not to enforce strict sql mode")
	flag.BoolVar(&migrationContext.AliyunRDS, "aliyun-rds", false, "set to 'true' when you execute on Aliyun RDS.")
	flag.BoolVar(&migrationContext.GoogleCloudPlatform, "gcp", false, "set to 'true' when you execute on a 1st generation Google Cloud Platform (GCP).")

@@ -81,6 +89,7 @@ func main() {
	flag.BoolVar(&migrationContext.TimestampOldTable, "timestamp-old-table", false, "Use a timestamp in old table name. This makes old table names unique and non conflicting cross migrations")
	cutOver := flag.String("cut-over", "atomic", "choose cut-over type (default|atomic, two-step)")
	flag.BoolVar(&migrationContext.ForceNamedCutOverCommand, "force-named-cut-over", false, "When true, the 'unpostpone|cut-over' interactive command must name the migrated table")
	flag.BoolVar(&migrationContext.ForceNamedPanicCommand, "force-named-panic", false, "When true, the 'panic' interactive command must name the migrated table")

	flag.BoolVar(&migrationContext.SwitchToRowBinlogFormat, "switch-to-rbr", false, "let this tool automatically switch binary log format to 'ROW' on the replica, if needed. The format will NOT be switched back. I'm too scared to do that, and wish to protect you if you happen to execute another migration while this one is running")
	flag.BoolVar(&migrationContext.AssumeRBR, "assume-rbr", false, "set to 'true' when you know for certain your server uses 'ROW' binlog_format. gh-ost is unable to tell, event after reading binlog_format, whether the replication process does indeed use 'ROW', and restarts replication to be certain RBR setting is applied. Such operation requires SUPER privileges which you might not have. Setting this flag avoids restarting replication and you can proceed to use gh-ost without SUPER privileges")
@@ -109,6 +118,8 @@ func main() {

	flag.StringVar(&migrationContext.HooksPath, "hooks-path", "", "directory where hook files are found (default: empty, ie. hooks disabled). Hook files found on this path, and conforming to hook naming conventions will be executed")
	flag.StringVar(&migrationContext.HooksHintMessage, "hooks-hint", "", "arbitrary message to be injected to hooks via GH_OST_HOOKS_HINT, for your convenience")
	flag.StringVar(&migrationContext.HooksHintOwner, "hooks-hint-owner", "", "arbitrary name of owner to be injected to hooks via GH_OST_HOOKS_HINT_OWNER, for your convenience")
	flag.StringVar(&migrationContext.HooksHintToken, "hooks-hint-token", "", "arbitrary token to be injected to hooks via GH_OST_HOOKS_HINT_TOKEN, for your convenience")

	flag.UintVar(&migrationContext.ReplicaServerId, "replica-server-id", 99999, "server id used by gh-ost process. Default: 99999")

@@ -194,6 +205,18 @@ func main() {
	if migrationContext.CliMasterPassword != "" && migrationContext.AssumeMasterHostname == "" {
		log.Fatalf("--master-password requires --assume-master-host")
	}
	if migrationContext.TLSCACertificate != "" && !migrationContext.UseTLS {
		log.Fatalf("--ssl-ca requires --ssl")
	}
	if migrationContext.TLSCertificate != "" && !migrationContext.UseTLS {
		log.Fatalf("--ssl-cert requires --ssl")
	}
	if migrationContext.TLSKey != "" && !migrationContext.UseTLS {
		log.Fatalf("--ssl-key requires --ssl")
	}
	if migrationContext.TLSAllowInsecure && !migrationContext.UseTLS {
		log.Fatalf("--ssl-allow-insecure requires --ssl")
	}
	if *replicationLagQuery != "" {
		log.Warningf("--replication-lag-query is deprecated")
	}
@@ -238,6 +261,9 @@ func main() {
	migrationContext.SetThrottleHTTP(*throttleHTTP)
	migrationContext.SetDefaultNumRetries(*defaultRetries)
	migrationContext.ApplyCredentials()
	if err := migrationContext.SetupTLS(); err != nil {
		log.Fatale(err)
	}
	if err := migrationContext.SetCutOverLockTimeoutSeconds(*cutOverLockTimeoutSeconds); err != nil {
		log.Errore(err)
	}
@@ -73,7 +73,7 @@ func (this *Applier) InitDBConnections() (err error) {
	if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, applierUri); err != nil {
		return err
	}
	singletonApplierUri := fmt.Sprintf("%s?timeout=0", applierUri)
	singletonApplierUri := fmt.Sprintf("%s&timeout=0", applierUri)
	if this.singletonDB, _, err = mysql.GetDB(this.migrationContext.Uuid, singletonApplierUri); err != nil {
		return err
	}
@@ -126,7 +126,6 @@ func (this *Applier) readTableColumns() (err error) {

// showTableStatus returns the output of `show table status like '...'` command
func (this *Applier) showTableStatus(tableName string) (rowMap sqlutils.RowMap) {
	rowMap = nil
	query := fmt.Sprintf(`show /* gh-ost */ table status from %s like '%s'`, sql.EscapeName(this.migrationContext.DatabaseName), tableName)
	sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
		rowMap = m
@@ -482,10 +481,14 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected
	if err != nil {
		return nil, err
	}
	sessionQuery := fmt.Sprintf(`SET
		SESSION time_zone = '%s',
		sql_mode = CONCAT(@@session.sql_mode, ',STRICT_ALL_TABLES')
		`, this.migrationContext.ApplierTimeZone)
	defer tx.Rollback()
	sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
	sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
	if !this.migrationContext.SkipStrictMode {
		sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
	}
	sessionQuery = fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)

	if _, err := tx.Exec(sessionQuery); err != nil {
		return nil, err
	}
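To make the effect of `--skip-strict-mode` on the session setup above concrete, here is a standalone sketch that rebuilds the same session query string both ways and prints it; the time zone value is only an example:

```go
package main

import "fmt"

func buildSessionQuery(timeZone string, skipStrictMode bool) string {
	sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, timeZone)
	sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
	if !skipStrictMode {
		sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
	}
	return fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)
}

func main() {
	// Default: strict mode is enforced on the applier session.
	fmt.Println(buildSessionQuery("+00:00", false))
	// With --skip-strict-mode: only NO_AUTO_VALUE_ON_ZERO is appended.
	fmt.Println(buildSessionQuery("+00:00", true))
}
```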
@@ -977,59 +980,6 @@ func (this *Applier) buildDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) (result
	return append(results, newDmlBuildResultError(fmt.Errorf("Unknown dml event type: %+v", dmlEvent.DML)))
}

// ApplyDMLEventQuery writes an entry to the ghost table, in response to an intercepted
// original-table binlog event
func (this *Applier) ApplyDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) error {
	for _, buildResult := range this.buildDMLEventQuery(dmlEvent) {
		if buildResult.err != nil {
			return buildResult.err
		}
		// TODO The below is in preparation for transactional writes on the ghost tables.
		// Such writes would be, for example:
		// - prepended with sql_mode setup
		// - prepended with time zone setup
		// - prepended with SET SQL_LOG_BIN=0
		// - prepended with SET FK_CHECKS=0
		// etc.
		//
		// a known problem: https://github.com/golang/go/issues/9373 -- bitint unsigned values, not supported in database/sql
		// is solved by silently converting unsigned bigints to string values.
		//

		err := func() error {
			tx, err := this.db.Begin()
			if err != nil {
				return err
			}
			sessionQuery := `SET
				SESSION time_zone = '+00:00',
				sql_mode = CONCAT(@@session.sql_mode, ',STRICT_ALL_TABLES')
				`
			if _, err := tx.Exec(sessionQuery); err != nil {
				return err
			}
			if _, err := tx.Exec(buildResult.query, buildResult.args...); err != nil {
				return err
			}
			if err := tx.Commit(); err != nil {
				return err
			}
			return nil
		}()

		if err != nil {
			err = fmt.Errorf("%s; query=%s; args=%+v", err.Error(), buildResult.query, buildResult.args)
			return log.Errore(err)
		}
		// no error
		atomic.AddInt64(&this.migrationContext.TotalDMLEventsApplied, 1)
		if this.migrationContext.CountTableRows {
			atomic.AddInt64(&this.migrationContext.RowsDeltaEstimate, buildResult.rowsDelta)
		}
	}
	return nil
}

// ApplyDMLEventQueries applies multiple DML queries onto the _ghost_ table
func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) error {

@@ -1046,10 +996,14 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
		return err
	}

	sessionQuery := `SET
		SESSION time_zone = '+00:00',
		sql_mode = CONCAT(@@session.sql_mode, ',STRICT_ALL_TABLES')
		`
	sessionQuery := "SET SESSION time_zone = '+00:00'"

	sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
	if !this.migrationContext.SkipStrictMode {
		sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
	}
	sessionQuery = fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)

	if _, err := tx.Exec(sessionQuery); err != nil {
		return rollback(err)
	}
@@ -63,7 +63,11 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [
	env = append(env, fmt.Sprintf("GH_OST_MIGRATED_HOST=%s", this.migrationContext.GetApplierHostname()))
	env = append(env, fmt.Sprintf("GH_OST_INSPECTED_HOST=%s", this.migrationContext.GetInspectorHostname()))
	env = append(env, fmt.Sprintf("GH_OST_EXECUTING_HOST=%s", this.migrationContext.Hostname))
	env = append(env, fmt.Sprintf("GH_OST_INSPECTED_LAG=%f", this.migrationContext.GetCurrentLagDuration().Seconds()))
	env = append(env, fmt.Sprintf("GH_OST_PROGRESS=%f", this.migrationContext.GetProgressPct()))
	env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT=%s", this.migrationContext.HooksHintMessage))
	env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_OWNER=%s", this.migrationContext.HooksHintOwner))
	env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_TOKEN=%s", this.migrationContext.HooksHintToken))
	env = append(env, fmt.Sprintf("GH_OST_DRY_RUN=%t", this.migrationContext.Noop))

	for _, variable := range extraVariables {
@@ -173,8 +173,7 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
	// This additional step looks at which columns are unsigned. We could have merged this within
	// the `getTableColumns()` function, but it's a later patch and introduces some complexity; I feel
	// comfortable in doing this as a separate step.
	this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns)
	this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &this.migrationContext.UniqueKey.Columns)
	this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns, &this.migrationContext.UniqueKey.Columns)
	this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.GetGhostTableName(), this.migrationContext.GhostTableColumns, this.migrationContext.MappedSharedColumns)

	for i := range this.migrationContext.SharedColumns.Columns() {
@@ -233,6 +232,9 @@ func (this *Inspector) validateGrants() error {
		if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", this.migrationContext.DatabaseName)) {
			foundDBAll = true
		}
		if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", strings.Replace(this.migrationContext.DatabaseName, "_", "\\_", -1))) {
			foundDBAll = true
		}
		if base.StringContainsAll(grant, `ALTER`, `CREATE`, `DELETE`, `DROP`, `INDEX`, `INSERT`, `LOCK TABLES`, `SELECT`, `TRIGGER`, `UPDATE`, ` ON *.*`) {
			foundDBAll = true
		}
@@ -552,44 +554,35 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
	err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
		columnName := m.GetString("COLUMN_NAME")
		columnType := m.GetString("COLUMN_TYPE")
		if strings.Contains(columnType, "unsigned") {
			for _, columnsList := range columnsLists {
				columnsList.SetUnsigned(columnName)
		for _, columnsList := range columnsLists {
			column := columnsList.GetColumn(columnName)
			if column == nil {
				continue
			}
		}
		if strings.Contains(columnType, "mediumint") {
			for _, columnsList := range columnsLists {
				columnsList.GetColumn(columnName).Type = sql.MediumIntColumnType

			if strings.Contains(columnType, "unsigned") {
				column.IsUnsigned = true
			}
		}
		if strings.Contains(columnType, "timestamp") {
			for _, columnsList := range columnsLists {
				columnsList.GetColumn(columnName).Type = sql.TimestampColumnType
			if strings.Contains(columnType, "mediumint") {
				column.Type = sql.MediumIntColumnType
			}
		}
		if strings.Contains(columnType, "datetime") {
			for _, columnsList := range columnsLists {
				columnsList.GetColumn(columnName).Type = sql.DateTimeColumnType
			if strings.Contains(columnType, "timestamp") {
				column.Type = sql.TimestampColumnType
			}
		}
		if strings.Contains(columnType, "json") {
			for _, columnsList := range columnsLists {
				columnsList.GetColumn(columnName).Type = sql.JSONColumnType
			if strings.Contains(columnType, "datetime") {
				column.Type = sql.DateTimeColumnType
			}
		}
		if strings.Contains(columnType, "float") {
			for _, columnsList := range columnsLists {
				columnsList.GetColumn(columnName).Type = sql.FloatColumnType
			if strings.Contains(columnType, "json") {
				column.Type = sql.JSONColumnType
			}
		}
		if strings.HasPrefix(columnType, "enum") {
			for _, columnsList := range columnsLists {
				columnsList.GetColumn(columnName).Type = sql.EnumColumnType
			if strings.Contains(columnType, "float") {
				column.Type = sql.FloatColumnType
			}
		}
		if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
			for _, columnsList := range columnsLists {
				columnsList.SetCharset(columnName, charset)
			if strings.HasPrefix(columnType, "enum") {
				column.Type = sql.EnumColumnType
			}
		}
			if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
				column.Charset = charset
			}
		}
		return nil
@@ -629,8 +622,6 @@ func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*
			GROUP BY TABLE_SCHEMA, TABLE_NAME, INDEX_NAME
		) AS UNIQUES
		ON (
			COLUMNS.TABLE_SCHEMA = UNIQUES.TABLE_SCHEMA AND
			COLUMNS.TABLE_NAME = UNIQUES.TABLE_NAME AND
			COLUMNS.COLUMN_NAME = UNIQUES.FIRST_COLUMN_NAME
		)
		WHERE
@@ -699,14 +690,17 @@ func (this *Inspector) getSharedColumns(originalColumns, ghostColumns *sql.Colum
		for _, ghostColumn := range ghostColumns.Names() {
			if strings.EqualFold(originalColumn, ghostColumn) {
				isSharedColumn = true
				break
			}
			if strings.EqualFold(columnRenameMap[originalColumn], ghostColumn) {
				isSharedColumn = true
				break
			}
		}
		for droppedColumn := range this.migrationContext.DroppedColumnsMap {
			if strings.EqualFold(originalColumn, droppedColumn) {
				isSharedColumn = false
				break
			}
		}
		for _, virtualColumn := range originalVirtualColumns.Names() {
@@ -765,9 +759,8 @@ func (this *Inspector) getMasterConnectionConfig() (applierConfig *mysql.Connect
}

func (this *Inspector) getReplicationLag() (replicationLag time.Duration, err error) {
	replicationLag, err = mysql.GetReplicationLag(
	replicationLag, err = mysql.GetReplicationLagFromSlaveStatus(
		this.informationSchemaDb,
		this.migrationContext.InspectorConnectionConfig,
	)
	return replicationLag, err
}
@@ -78,7 +78,7 @@ type Migrator struct {

	rowCopyCompleteFlag int64
	// copyRowsQueue should not be buffered; if buffered some non-damaging but
	// excessive work happens at the end of the iteration as new copy-jobs arrive befroe realizing the copy is complete
	// excessive work happens at the end of the iteration as new copy-jobs arrive before realizing the copy is complete
	copyRowsQueue    chan tableWriteFunc
	applyEventsQueue chan *applyEventStruct

@@ -895,6 +895,8 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
	} else {
		progressPct = 100.0 * float64(totalRowsCopied) / float64(rowsEstimate)
	}
	// we take the opportunity to update migration context with progressPct
	this.migrationContext.SetProgressPct(progressPct)
	// Before status, let's see if we should print a nice reminder for what exactly we're doing here.
	shouldPrintMigrationStatusHint := (elapsedSeconds%600 == 0)
	if rule == ForcePrintStatusAndHintRule {
@@ -911,7 +913,7 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
	eta := "N/A"
	if progressPct >= 100.0 {
		eta = "due"
	} else if progressPct >= 1.0 {
	} else if progressPct >= 0.1 {
		elapsedRowCopySeconds := this.migrationContext.ElapsedRowCopyTime().Seconds()
		totalExpectedSeconds := elapsedRowCopySeconds * float64(rowsEstimate) / float64(totalRowsCopied)
		etaSeconds = totalExpectedSeconds - elapsedRowCopySeconds
@@ -958,12 +960,13 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {

	currentBinlogCoordinates := *this.eventsStreamer.GetCurrentBinlogCoordinates()

	status := fmt.Sprintf("Copy: %d/%d %.1f%%; Applied: %d; Backlog: %d/%d; Time: %+v(total), %+v(copy); streamer: %+v; State: %s; ETA: %s",
	status := fmt.Sprintf("Copy: %d/%d %.1f%%; Applied: %d; Backlog: %d/%d; Time: %+v(total), %+v(copy); streamer: %+v; Lag: %.2fs, State: %s; ETA: %s",
		totalRowsCopied, rowsEstimate, progressPct,
		atomic.LoadInt64(&this.migrationContext.TotalDMLEventsApplied),
		len(this.applyEventsQueue), cap(this.applyEventsQueue),
		base.PrettifyDurationOutput(elapsedTime), base.PrettifyDurationOutput(this.migrationContext.ElapsedRowCopyTime()),
		currentBinlogCoordinates,
		this.migrationContext.GetCurrentLagDuration().Seconds(),
		state,
		eta,
	)
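The ETA in that status line is plain proportional extrapolation from the copy progress so far. A small self-contained sketch of the same arithmetic, with made-up numbers:

```go
package main

import "fmt"

func main() {
	// Example figures: 1.2M of 4M estimated rows copied after 600s of row-copy time.
	totalRowsCopied := 1200000.0
	rowsEstimate := 4000000.0
	elapsedRowCopySeconds := 600.0

	progressPct := 100.0 * totalRowsCopied / rowsEstimate
	// Only meaningful once some progress exists (the code above requires >= 0.1%).
	totalExpectedSeconds := elapsedRowCopySeconds * rowsEstimate / totalRowsCopied
	etaSeconds := totalExpectedSeconds - elapsedRowCopySeconds

	fmt.Printf("progress: %.1f%%, ETA: %.0fs\n", progressPct, etaSeconds) // progress: 30.0%, ETA: 1400s
}
```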
@@ -144,7 +144,7 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
	switch command {
	case "help":
		{
			fmt.Fprintln(writer, `available commands:
			fmt.Fprint(writer, `available commands:
status # Print a detailed status message
sup # Print a short status message
coordinates # Print the currently inspected coordinates
@@ -292,12 +292,22 @@ help # This message
		}
	case "throttle", "pause", "suspend":
		{
			if arg != "" && arg != this.migrationContext.OriginalTableName {
				// User explicitly provided table name. This is a courtesy protection mechanism
				err := fmt.Errorf("User commanded 'throttle' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
				return NoPrintStatusRule, err
			}
			atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 1)
			fmt.Fprintf(writer, throttleHint)
			return ForcePrintStatusAndHintRule, nil
		}
	case "no-throttle", "unthrottle", "resume", "continue":
		{
			if arg != "" && arg != this.migrationContext.OriginalTableName {
				// User explicitly provided table name. This is a courtesy protection mechanism
				err := fmt.Errorf("User commanded 'no-throttle' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
				return NoPrintStatusRule, err
			}
			atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 0)
			return ForcePrintStatusAndHintRule, nil
		}
@@ -322,7 +332,16 @@ help # This message
		}
	case "panic":
		{
			err := fmt.Errorf("User commanded 'panic'. I will now panic, without cleanup. PANIC!")
			if arg == "" && this.migrationContext.ForceNamedPanicCommand {
				err := fmt.Errorf("User commanded 'panic' without specifying table name, but --force-named-panic is set")
				return NoPrintStatusRule, err
			}
			if arg != "" && arg != this.migrationContext.OriginalTableName {
				// User explicitly provided table name. This is a courtesy protection mechanism
				err := fmt.Errorf("User commanded 'panic' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
				return NoPrintStatusRule, err
			}
			err := fmt.Errorf("User commanded 'panic'. The migration will be aborted without cleanup. Please drop the gh-ost tables before trying again.")
			this.migrationContext.PanicAbort <- err
			return NoPrintStatusRule, err
		}
@ -140,8 +140,8 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
|
||||
if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica {
|
||||
// when running on replica, the heartbeat injection is also done on the replica.
|
||||
// This means we will always get a good heartbeat value.
|
||||
// When runnign on replica, we should instead check the `SHOW SLAVE STATUS` output.
|
||||
if lag, err := mysql.GetReplicationLag(this.inspector.informationSchemaDb, this.inspector.connectionConfig); err != nil {
|
||||
// When running on replica, we should instead check the `SHOW SLAVE STATUS` output.
|
||||
if lag, err := mysql.GetReplicationLagFromSlaveStatus(this.inspector.informationSchemaDb); err != nil {
|
||||
return log.Errore(err)
|
||||
} else {
|
||||
atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag))
|
||||
|
@ -6,8 +6,18 @@
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
|
||||
"github.com/go-sql-driver/mysql"
|
||||
)
|
||||
|
||||
const (
|
||||
TLS_CONFIG_KEY = "ghost"
|
||||
)
|
||||
|
||||
// ConnectionConfig is the minimal configuration required to connect to a MySQL server
|
||||
@ -16,6 +26,7 @@ type ConnectionConfig struct {
|
||||
User string
|
||||
Password string
|
||||
ImpliedKey *InstanceKey
|
||||
tlsConfig *tls.Config
|
||||
}
|
||||
|
||||
func NewConnectionConfig() *ConnectionConfig {
|
||||
@ -29,9 +40,10 @@ func NewConnectionConfig() *ConnectionConfig {
|
||||
// DuplicateCredentials creates a new connection config with given key and with same credentials as this config
|
||||
func (this *ConnectionConfig) DuplicateCredentials(key InstanceKey) *ConnectionConfig {
|
||||
config := &ConnectionConfig{
|
||||
Key: key,
|
||||
User: this.User,
|
||||
Password: this.Password,
|
||||
Key: key,
|
||||
User: this.User,
|
||||
Password: this.Password,
|
||||
tlsConfig: this.tlsConfig,
|
||||
}
|
||||
config.ImpliedKey = &config.Key
|
||||
return config
|
||||
@ -42,13 +54,54 @@ func (this *ConnectionConfig) Duplicate() *ConnectionConfig {
|
||||
}
|
||||
|
||||
func (this *ConnectionConfig) String() string {
|
||||
return fmt.Sprintf("%s, user=%s", this.Key.DisplayString(), this.User)
|
||||
return fmt.Sprintf("%s, user=%s, usingTLS=%t", this.Key.DisplayString(), this.User, this.tlsConfig != nil)
|
||||
}
|
||||
|
||||
func (this *ConnectionConfig) Equals(other *ConnectionConfig) bool {
|
||||
return this.Key.Equals(&other.Key) || this.ImpliedKey.Equals(other.ImpliedKey)
|
||||
}
|
||||
|
||||
func (this *ConnectionConfig) UseTLS(caCertificatePath, clientCertificate, clientKey string, allowInsecure bool) error {
|
||||
var rootCertPool *x509.CertPool
|
||||
var certs []tls.Certificate
|
||||
var err error
|
||||
|
||||
if caCertificatePath == "" {
|
||||
rootCertPool, err = x509.SystemCertPool()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
rootCertPool = x509.NewCertPool()
|
||||
pem, err := ioutil.ReadFile(caCertificatePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
|
||||
return errors.New("could not add ca certificate to cert pool")
|
||||
}
|
||||
}
|
||||
if clientCertificate != "" || clientKey != "" {
|
||||
cert, err := tls.LoadX509KeyPair(clientCertificate, clientKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
certs = []tls.Certificate{cert}
|
||||
}
|
||||
|
||||
this.tlsConfig = &tls.Config{
|
||||
Certificates: certs,
|
||||
RootCAs: rootCertPool,
|
||||
InsecureSkipVerify: allowInsecure,
|
||||
}
|
||||
|
||||
return mysql.RegisterTLSConfig(TLS_CONFIG_KEY, this.tlsConfig)
|
||||
}
|
||||
|
||||
func (this *ConnectionConfig) TLSConfig() *tls.Config {
|
||||
return this.tlsConfig
|
||||
}
|
||||
|
||||
func (this *ConnectionConfig) GetDBUri(databaseName string) string {
|
||||
hostname := this.Key.Hostname
|
||||
var ip = net.ParseIP(hostname)
|
||||
@ -57,5 +110,11 @@ func (this *ConnectionConfig) GetDBUri(databaseName string) string {
|
||||
hostname = fmt.Sprintf("[%s]", hostname)
|
||||
}
|
||||
interpolateParams := true
|
||||
return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?interpolateParams=%t&autocommit=true&charset=utf8mb4,utf8,latin1", this.User, this.Password, hostname, this.Key.Port, databaseName, interpolateParams)
|
||||
// go-mysql-driver defaults to false if tls param is not provided; explicitly setting here to
|
||||
// simplify construction of the DSN below.
|
||||
tlsOption := "false"
|
||||
if this.tlsConfig != nil {
|
||||
tlsOption = TLS_CONFIG_KEY
|
||||
}
|
||||
return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?interpolateParams=%t&autocommit=true&charset=utf8mb4,utf8,latin1&tls=%s", this.User, this.Password, hostname, this.Key.Port, databaseName, interpolateParams, tlsOption)
|
||||
}
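A minimal usage sketch of the TLS plumbing added above (not part of this change): the hostname, credentials and CA path are placeholders, and the import path assumes gh-ost's `go/mysql` package shown in this diff. An empty CA path falls back to the system certificate pool, per `UseTLS` above.

```go
package main

import (
	gosql "database/sql"
	"log"

	"github.com/github/gh-ost/go/mysql"
)

func main() {
	config := mysql.NewConnectionConfig()
	config.Key = mysql.InstanceKey{Hostname: "db.example.com", Port: 3306}
	config.User = "gh-ost"
	config.Password = "secret"

	// UseTLS registers the "ghost" TLS config with go-sql-driver.
	if err := config.UseTLS("/path/to/ca.pem", "", "", false); err != nil {
		log.Fatal(err)
	}

	// With a tls.Config registered, GetDBUri appends tls=ghost to the DSN.
	db, err := gosql.Open("mysql", config.GetDBUri("test"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```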
|
||||
|
@ -6,6 +6,7 @@
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"testing"
|
||||
|
||||
"github.com/outbrain/golib/log"
|
||||
@ -31,6 +32,10 @@ func TestDuplicateCredentials(t *testing.T) {
|
||||
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
|
||||
c.User = "gromit"
|
||||
c.Password = "penguin"
|
||||
c.tlsConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
ServerName: "feathers",
|
||||
}
|
||||
|
||||
dup := c.DuplicateCredentials(InstanceKey{Hostname: "otherhost", Port: 3310})
|
||||
test.S(t).ExpectEquals(dup.Key.Hostname, "otherhost")
|
||||
@ -39,6 +44,7 @@ func TestDuplicateCredentials(t *testing.T) {
|
||||
test.S(t).ExpectEquals(dup.ImpliedKey.Port, 3310)
|
||||
test.S(t).ExpectEquals(dup.User, "gromit")
|
||||
test.S(t).ExpectEquals(dup.Password, "penguin")
|
||||
test.S(t).ExpectEquals(dup.tlsConfig, c.tlsConfig)
|
||||
}
|
||||
|
||||
func TestDuplicate(t *testing.T) {
|
||||
@ -63,5 +69,16 @@ func TestGetDBUri(t *testing.T) {
|
||||
c.Password = "penguin"
|
||||
|
||||
uri := c.GetDBUri("test")
|
||||
test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1")
|
||||
test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1&tls=false")
|
||||
}
|
||||
|
||||
func TestGetDBUriWithTLSSetup(t *testing.T) {
|
||||
c := NewConnectionConfig()
|
||||
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
|
||||
c.User = "gromit"
|
||||
c.Password = "penguin"
|
||||
c.tlsConfig = &tls.Config{}
|
||||
|
||||
uri := c.GetDBUri("test")
|
||||
test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1&tls=ghost")
|
||||
}
|
||||
|
@ -58,9 +58,8 @@ func GetDB(migrationUuid string, mysql_uri string) (*gosql.DB, bool, error) {
|
||||
return knownDBs[cacheKey], exists, nil
|
||||
}
|
||||
|
||||
// GetReplicationLag returns replication lag for a given connection config; either by explicit query
|
||||
// or via SHOW SLAVE STATUS
|
||||
func GetReplicationLag(informationSchemaDb *gosql.DB, connectionConfig *ConnectionConfig) (replicationLag time.Duration, err error) {
|
||||
// GetReplicationLagFromSlaveStatus returns replication lag for a given db; via SHOW SLAVE STATUS
|
||||
func GetReplicationLagFromSlaveStatus(informationSchemaDb *gosql.DB) (replicationLag time.Duration, err error) {
|
||||
err = sqlutils.QueryRowsMap(informationSchemaDb, `show slave status`, func(m sqlutils.RowMap) error {
|
||||
slaveIORunning := m.GetString("Slave_IO_Running")
|
||||
slaveSQLRunning := m.GetString("Slave_SQL_Running")
|
||||
@ -84,9 +83,6 @@ func GetMasterKeyFromSlaveStatus(connectionConfig *ConnectionConfig) (masterKey
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = sqlutils.QueryRowsMap(db, `show slave status`, func(rowMap sqlutils.RowMap) error {
|
||||
// We wish to recognize the case where the topology's master actually has replication configuration.
|
||||
// This can happen when a DBA issues a `RESET SLAVE` instead of `RESET SLAVE ALL`.
|
||||
@ -99,7 +95,6 @@ func GetMasterKeyFromSlaveStatus(connectionConfig *ConnectionConfig) (masterKey
|
||||
slaveIORunning := rowMap.GetString("Slave_IO_Running")
|
||||
slaveSQLRunning := rowMap.GetString("Slave_SQL_Running")
|
||||
|
||||
//
|
||||
if slaveIORunning != "Yes" || slaveSQLRunning != "Yes" {
|
||||
return fmt.Errorf("Replication on %+v is broken: Slave_IO_Running: %s, Slave_SQL_Running: %s. Please make sure replication runs before using gh-ost.",
|
||||
connectionConfig.Key,
|
||||
|
@ -140,13 +140,12 @@ func BuildRangeComparison(columns []string, values []string, args []interface{},
|
||||
comparisons := []string{}
|
||||
|
||||
for i, column := range columns {
|
||||
//
|
||||
value := values[i]
|
||||
rangeComparison, err := BuildValueComparison(column, value, comparisonSign)
|
||||
if err != nil {
|
||||
return "", explodedArgs, err
|
||||
}
|
||||
if len(columns[0:i]) > 0 {
|
||||
if i > 0 {
|
||||
equalitiesComparison, err := BuildEqualsComparison(columns[0:i], values[0:i])
|
||||
if err != nil {
|
||||
return "", explodedArgs, err
|
||||
|
9
localtests/autoinc-zero-value/create.sql
Normal file
@ -0,0 +1,9 @@
|
||||
drop table if exists gh_ost_test;
|
||||
create table gh_ost_test (
|
||||
id int auto_increment,
|
||||
i int not null,
|
||||
primary key(id)
|
||||
) auto_increment=1;
|
||||
|
||||
set session sql_mode='NO_AUTO_VALUE_ON_ZERO';
|
||||
insert into gh_ost_test values (0, 23);
|
20
localtests/bit-add/create.sql
Normal file
@ -0,0 +1,20 @@
|
||||
drop table if exists gh_ost_test;
|
||||
create table gh_ost_test (
|
||||
id int auto_increment,
|
||||
i int not null,
|
||||
primary key(id)
|
||||
) auto_increment=1;
|
||||
|
||||
drop event if exists gh_ost_test;
|
||||
delimiter ;;
|
||||
create event gh_ost_test
|
||||
on schedule every 1 second
|
||||
starts current_timestamp
|
||||
ends current_timestamp + interval 60 second
|
||||
on completion not preserve
|
||||
enable
|
||||
do
|
||||
begin
|
||||
insert into gh_ost_test values (null, 11);
|
||||
insert into gh_ost_test values (null, 13);
|
||||
end ;;
|
1
localtests/bit-add/extra_args
Normal file
@ -0,0 +1 @@
|
||||
--alter="add column is_good bit null default 0"
|
1
localtests/bit-add/ghost_columns
Normal file
@ -0,0 +1 @@
|
||||
id, i
|
1
localtests/bit-add/orig_columns
Normal file
@ -0,0 +1 @@
|
||||
id, i
|
24
localtests/bit-dml/create.sql
Normal file
@ -0,0 +1,24 @@
|
||||
drop table if exists gh_ost_test;
|
||||
create table gh_ost_test (
|
||||
id int auto_increment,
|
||||
i int not null,
|
||||
is_good bit null default 0,
|
||||
primary key(id)
|
||||
) auto_increment=1;
|
||||
|
||||
drop event if exists gh_ost_test;
|
||||
delimiter ;;
|
||||
create event gh_ost_test
|
||||
on schedule every 1 second
|
||||
starts current_timestamp
|
||||
ends current_timestamp + interval 60 second
|
||||
on completion not preserve
|
||||
enable
|
||||
do
|
||||
begin
|
||||
insert into gh_ost_test values (null, 11, 0);
|
||||
insert into gh_ost_test values (null, 13, 1);
|
||||
insert into gh_ost_test values (null, 17, 1);
|
||||
|
||||
update gh_ost_test set is_good=0 where i=13 order by id desc limit 1;
|
||||
end ;;
|
1
localtests/bit-dml/extra_args
Normal file
@ -0,0 +1 @@
|
||||
--alter="modify column is_good bit not null default 0" --approve-renamed-columns
|
31
localtests/convert-utf8mb4/create.sql
Normal file
@ -0,0 +1,31 @@
|
||||
drop table if exists gh_ost_test;
|
||||
create table gh_ost_test (
|
||||
id int auto_increment,
|
||||
t varchar(128) charset utf8 collate utf8_general_ci,
|
||||
tl varchar(128) charset latin1 not null,
|
||||
ta varchar(128) charset ascii not null,
|
||||
primary key(id)
|
||||
) auto_increment=1;
|
||||
|
||||
insert into gh_ost_test values (null, 'átesting');
|
||||
|
||||
|
||||
insert into gh_ost_test values (null, 'Hello world, Καλημέρα κόσμε, コンニチハ', 'átesting0', 'initial');
|
||||
|
||||
drop event if exists gh_ost_test;
|
||||
delimiter ;;
|
||||
create event gh_ost_test
|
||||
on schedule every 1 second
|
||||
starts current_timestamp
|
||||
ends current_timestamp + interval 60 second
|
||||
on completion not preserve
|
||||
enable
|
||||
do
|
||||
begin
|
||||
insert into gh_ost_test values (null, md5(rand()), 'átesting-a', 'a');
|
||||
insert into gh_ost_test values (null, 'novo proprietário', 'átesting-b', 'b');
|
||||
insert into gh_ost_test values (null, '2H₂ + O₂ ⇌ 2H₂O, R = 4.7 kΩ, ⌀ 200 mm', 'átesting-c', 'c');
|
||||
insert into gh_ost_test values (null, 'usuário', 'átesting-x', 'x');
|
||||
|
||||
delete from gh_ost_test where ta='x' order by id desc limit 1;
|
||||
end ;;
|
1
localtests/convert-utf8mb4/extra_args
Normal file
@ -0,0 +1 @@
|
||||
--alter='convert to character set utf8mb4'
|
@ -17,7 +17,7 @@ create event gh_ost_test
|
||||
starts current_timestamp
|
||||
ends current_timestamp + interval 60 second
|
||||
on completion not preserve
|
||||
enable
|
||||
disable on slave
|
||||
do
|
||||
begin
|
||||
insert into gh_ost_test values (null, 11, now(), now(), now(), 0);
|
||||
|
23
localtests/decimal/create.sql
Normal file
@ -0,0 +1,23 @@
|
||||
drop table if exists gh_ost_test;
|
||||
create table gh_ost_test (
|
||||
id int auto_increment,
|
||||
dec0 decimal(65,30) unsigned NOT NULL DEFAULT '0.000000000000000000000000000000',
|
||||
dec1 decimal(65,30) unsigned NOT NULL DEFAULT '1.000000000000000000000000000000',
|
||||
primary key(id)
|
||||
) auto_increment=1;
|
||||
|
||||
drop event if exists gh_ost_test;
|
||||
delimiter ;;
|
||||
create event gh_ost_test
|
||||
on schedule every 1 second
|
||||
starts current_timestamp
|
||||
ends current_timestamp + interval 60 second
|
||||
on completion not preserve
|
||||
enable
|
||||
do
|
||||
begin
|
||||
insert into gh_ost_test values (null, 0.0, 0.0);
|
||||
insert into gh_ost_test values (null, 2.0, 4.0);
|
||||
insert into gh_ost_test values (null, 99999999999999999999999999999999999.000, 6.0);
|
||||
update gh_ost_test set dec1=4.5 where dec2=4.0 order by id desc limit 1;
|
||||
end ;;
|
@ -1,8 +1,8 @@
|
||||
drop table if exists gh_ost_test;
|
||||
create table gh_ost_test (
|
||||
id bigint,
|
||||
id bigint not null,
|
||||
i int not null,
|
||||
ts timestamp(6),
|
||||
ts timestamp(6) not null,
|
||||
unique key id_uidx(id),
|
||||
unique key its_uidx(i, ts)
|
||||
) ;
|
||||
|
@ -50,6 +50,9 @@ verify_master_and_replica() {
|
||||
original_sql_mode="$(gh-ost-test-mysql-master -e "select @@global.sql_mode" -s -s)"
|
||||
echo "sql_mode on master is ${original_sql_mode}"
|
||||
|
||||
echo "Gracefully sleeping for 3 seconds while replica is setting up..."
|
||||
sleep 3
|
||||
|
||||
if [ "$(gh-ost-test-mysql-replica -e "select 1" -ss)" != "1" ] ; then
|
||||
echo "Cannot verify gh-ost-test-mysql-replica"
|
||||
exit 1
|
||||
@ -152,7 +155,7 @@ test_single() {
|
||||
--serve-socket-file=/tmp/gh-ost.test.sock \
|
||||
--initially-drop-socket-file \
|
||||
--test-on-replica \
|
||||
--default-retries=1 \
|
||||
--default-retries=3 \
|
||||
--chunk-size=10 \
|
||||
--verbose \
|
||||
--debug \
|
||||
@ -213,6 +216,7 @@ test_single() {
|
||||
diff $orig_content_output_file $ghost_content_output_file
|
||||
|
||||
echo "diff $orig_content_output_file $ghost_content_output_file"
|
||||
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
@ -4,6 +4,7 @@ set -e
|
||||
|
||||
# Make sure we have the version of Go we want to depend on, either from the
|
||||
# system or one we grab ourselves.
|
||||
# If executing from within the Dockerfile, this assumption is inherently true, since we use a `golang` docker image.
|
||||
. script/ensure-go-installed
|
||||
|
||||
# Since we want to be able to build this outside of GOPATH, we set it
|
||||
|
35
script/build-deploy-tarball
Executable file
@ -0,0 +1,35 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
|
||||
script/build
|
||||
|
||||
# Get a fresh directory and make sure to delete it afterwards
|
||||
build_dir=tmp/build
|
||||
rm -rf $build_dir
|
||||
mkdir -p $build_dir
|
||||
trap "rm -rf $build_dir" EXIT
|
||||
|
||||
commit_sha=$(git rev-parse HEAD)
|
||||
|
||||
if [ $(uname -s) = "Darwin" ]; then
|
||||
build_arch="$(uname -sr | tr -d ' ' | tr '[:upper:]' '[:lower:]')-$(uname -m)"
|
||||
else
|
||||
build_arch="$(lsb_release -sc | tr -d ' ' | tr '[:upper:]' '[:lower:]')-$(uname -m)"
|
||||
fi
|
||||
|
||||
tarball=$build_dir/${commit_sha}-${build_arch}.tar
|
||||
|
||||
# Create the tarball
|
||||
tar cvf $tarball --mode="ugo=rx" bin/
|
||||
|
||||
# Compress it and copy it to the directory for the CI to upload it
|
||||
gzip $tarball
|
||||
mkdir -p "$BUILD_ARTIFACT_DIR"/gh-ost
|
||||
cp ${tarball}.gz "$BUILD_ARTIFACT_DIR"/gh-ost/
|
||||
|
||||
### HACK HACK HACK ###
|
||||
# blame @carlosmn and @mattr-
|
||||
# Allow builds on stretch to also be used for jessie
|
||||
jessie_tarball_name=$(echo $(basename "${tarball}") | sed s/-stretch-/-jessie-/)
|
||||
cp ${tarball}.gz "$BUILD_ARTIFACT_DIR/gh-ost/${jessie_tarball_name}.gz"
|
@ -1,17 +1,3 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
. script/bootstrap
|
||||
|
||||
echo "Verifying code is formatted via 'gofmt -s -w go/'"
|
||||
gofmt -s -w go/
|
||||
git diff --exit-code --quiet
|
||||
|
||||
echo "Building"
|
||||
script/build
|
||||
|
||||
cd .gopath/src/github.com/github/gh-ost
|
||||
|
||||
echo "Running unit tests"
|
||||
go test ./go/...
|
||||
script/test
|
||||
|
@ -1,37 +1,47 @@
|
||||
#!/bin/sh
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
output_fold() {
|
||||
# Exit early if no label provided
|
||||
if [ -z "$1" ]; then
|
||||
echo "output_fold(): requires a label argument."
|
||||
return
|
||||
fi
|
||||
|
||||
script/cibuild
|
||||
exit_value=0 # exit_value is used to record exit status of the given command
|
||||
label=$1 # human-readable label describing what's being folded up
|
||||
shift 1 # having retrieved the output_fold()-specific arguments, strip them off $@
|
||||
|
||||
# Get a fresh directory and make sure to delete it afterwards
|
||||
build_dir=tmp/build
|
||||
rm -rf $build_dir
|
||||
mkdir -p $build_dir
|
||||
trap "rm -rf $build_dir" EXIT
|
||||
# Only echo the tags when in CI_MODE
|
||||
if [ "$CI_MODE" ]; then
|
||||
echo "%%%FOLD {$label}%%%"
|
||||
fi
|
||||
|
||||
commit_sha=$(git rev-parse HEAD)
|
||||
# run the remaining arguments. If the command exits non-0, the `||` will
|
||||
# prevent the `-e` flag from seeing the failure exit code, and we'll see
|
||||
# the second echo execute
|
||||
"$@" || exit_value=$?
|
||||
|
||||
if [ $(uname -s) = "Darwin" ]; then
|
||||
build_arch="$(uname -sr | tr -d ' ' | tr '[:upper:]' '[:lower:]')-$(uname -m)"
|
||||
else
|
||||
build_arch="$(lsb_release -sc | tr -d ' ' | tr '[:upper:]' '[:lower:]')-$(uname -m)"
|
||||
fi
|
||||
# Only echo the tags when in CI_MODE
|
||||
if [ "$CI_MODE" ]; then
|
||||
echo "%%%END FOLD%%%"
|
||||
fi
|
||||
|
||||
tarball=$build_dir/${commit_sha}-${build_arch}.tar
|
||||
# preserve the exit code from the subcommand.
|
||||
return $exit_value
|
||||
}
|
||||
|
||||
# Create the tarball
|
||||
tar cvf $tarball --mode="ugo=rx" bin/
|
||||
function cleanup() {
|
||||
echo
|
||||
echo "%%%FOLD {Shutting down services...}%%%"
|
||||
docker-compose down
|
||||
echo "%%%END FOLD%%%"
|
||||
}
|
||||
|
||||
# Compress it and copy it to the directory for the CI to upload it
|
||||
gzip $tarball
|
||||
mkdir -p "$BUILD_ARTIFACT_DIR"/gh-ost
|
||||
cp ${tarball}.gz "$BUILD_ARTIFACT_DIR"/gh-ost/
|
||||
trap cleanup EXIT
|
||||
|
||||
### HACK HACK HACK ###
|
||||
# Blame @carlosmn. In the good way.
|
||||
# We don't have any jessie machines for building, but a pure-Go binary depends
|
||||
# on a version of libc and ld which are widely available, so we can copy the
|
||||
# tarball over with jessie in its name so we can deploy it on jessie machines.
|
||||
jessie_tarball_name=$(echo $(basename "${tarball}") | sed s/-precise-/-jessie-/)
|
||||
cp ${tarball}.gz "$BUILD_ARTIFACT_DIR/gh-ost/${jessie_tarball_name}.gz"
|
||||
export CI_MODE=true
|
||||
|
||||
output_fold "Bootstrapping container..." docker-compose build
|
||||
output_fold "Running tests..." docker-compose run --rm app
|
||||
|
||||
docker-compose run -e BUILD_ARTIFACT_DIR=$BUILD_ARTIFACT_DIR -v $BUILD_ARTIFACT_DIR:$BUILD_ARTIFACT_DIR app script/build-deploy-tarball
|
||||
|
@ -50,7 +50,8 @@ test_mysql_version() {
|
||||
|
||||
export PATH="${PWD}/gh-ost-ci-env/bin/:${PATH}"
|
||||
|
||||
gh-ost-test-mysql-master -uroot -e "grant all on *.* to 'gh-ost'@'%' identified by 'gh-ost'"
|
||||
gh-ost-test-mysql-master -uroot -e "create user 'gh-ost'@'%' identified by 'gh-ost'"
|
||||
gh-ost-test-mysql-master -uroot -e "grant all on *.* to 'gh-ost'@'%'"
|
||||
|
||||
echo "### Running gh-ost tests for $mysql_version"
|
||||
./localtests/test.sh -b bin/gh-ost
|
||||
@ -61,6 +62,9 @@ test_mysql_version() {
|
||||
echo "Building..."
|
||||
. script/build
|
||||
# Test all versions:
|
||||
find gh-ost-ci-env/mysql-tarballs/ -name "*.tar.gz" | while read f ; do basename $f ".tar.gz" ; done | sort -r | while read mysql_version ; do
|
||||
echo "found MySQL version: $mysql_version"
|
||||
done
|
||||
find gh-ost-ci-env/mysql-tarballs/ -name "*.tar.gz" | while read f ; do basename $f ".tar.gz" ; done | sort -r | while read mysql_version ; do
|
||||
test_mysql_version "$mysql_version"
|
||||
done
|
||||
|
25
script/dock
Executable file
@ -0,0 +1,25 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Usage:
|
||||
# dock <test|pkg> [arg]
|
||||
# dock test: build gh-ost & run unit and integration tests
|
||||
# dock pkg [target-path]: build gh-ost release packages and copy to target path (default path: /tmp/gh-ost-release)
|
||||
|
||||
command="$1"
|
||||
|
||||
case "$command" in
|
||||
"test")
|
||||
docker_target="gh-ost-test"
|
||||
docker build . -f Dockerfile.test -t "${docker_target}" && docker run --rm -it "${docker_target}:latest"
|
||||
;;
|
||||
"pkg")
|
||||
packages_path="${2:-/tmp/gh-ost-release}"
|
||||
docker_target="gh-ost-packaging"
|
||||
docker build . -f Dockerfile.packaging -t "${docker_target}" && docker run --rm -it -v "${packages_path}:/tmp/pkg" "${docker_target}:latest" bash -c 'find /tmp/gh-ost-release/ -maxdepth 1 -type f | xargs cp -t /tmp/pkg'
|
||||
echo "packages generated on ${packages_path}:"
|
||||
ls -l "${packages_path}"
|
||||
;;
|
||||
*)
|
||||
>&2 echo "Usage: dock dock <test|alpine|packages> [arg]"
|
||||
exit 1
|
||||
esac
|
@ -1,20 +1,20 @@
|
||||
#!/bin/bash
|
||||
|
||||
PREFERRED_GO_VERSION=go1.9.2
|
||||
SUPPORTED_GO_VERSIONS='go1.[89]'
|
||||
PREFERRED_GO_VERSION=go1.12.6
|
||||
SUPPORTED_GO_VERSIONS='go1.1[234]'
|
||||
|
||||
GO_PKG_DARWIN=${PREFERRED_GO_VERSION}.darwin-amd64.pkg
|
||||
GO_PKG_DARWIN_SHA=73fd5840d55f5566d8db6c0ffdd187577e8ebe650c783f68bd27cbf95bde6743
|
||||
GO_PKG_DARWIN_SHA=ea78245e43de2996fa0973033064b33f48820cfe39f4f3c6e953040925cc5815
|
||||
|
||||
GO_PKG_LINUX=${PREFERRED_GO_VERSION}.linux-amd64.tar.gz
|
||||
GO_PKG_LINUX_SHA=de874549d9a8d8d8062be05808509c09a88a248e77ec14eb77453530829ac02b
|
||||
GO_PKG_LINUX_SHA=dbcf71a3c1ea53b8d54ef1b48c85a39a6c9a935d01fc8291ff2b92028e59913c
|
||||
|
||||
export ROOTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
|
||||
cd $ROOTDIR
|
||||
|
||||
# If Go isn't installed globally, setup environment variables for local install.
|
||||
if [ -z "$(which go)" ] || [ -z "$(go version | grep "$SUPPORTED_GO_VERSIONS")" ]; then
|
||||
GODIR="$ROOTDIR/.vendor/go19"
|
||||
GODIR="$ROOTDIR/.vendor/golocal"
|
||||
|
||||
if [ $(uname -s) = "Darwin" ]; then
|
||||
export GOROOT="$GODIR/usr/local/go"
|
||||
@ -32,12 +32,12 @@ if [ -z "$(which go)" ] || [ -z "$(go version | grep "$SUPPORTED_GO_VERSIONS")"
|
||||
cd "$GODIR";
|
||||
|
||||
if [ $(uname -s) = "Darwin" ]; then
|
||||
curl -L -O https://storage.googleapis.com/golang/$GO_PKG_DARWIN
|
||||
curl -L -O https://dl.google.com/go/$GO_PKG_DARWIN
|
||||
shasum -a256 $GO_PKG_DARWIN | grep $GO_PKG_DARWIN_SHA
|
||||
xar -xf $GO_PKG_DARWIN
|
||||
cpio -i < com.googlecode.go.pkg/Payload
|
||||
else
|
||||
curl -L -O https://storage.googleapis.com/golang/$GO_PKG_LINUX
|
||||
curl -L -O https://dl.google.com/go/$GO_PKG_LINUX
|
||||
shasum -a256 $GO_PKG_LINUX | grep $GO_PKG_LINUX_SHA
|
||||
tar xf $GO_PKG_LINUX
|
||||
fi
|
||||
|
17
script/test
Executable file
@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
. script/bootstrap
|
||||
|
||||
echo "Verifying code is formatted via 'gofmt -s -w go/'"
|
||||
gofmt -s -w go/
|
||||
git diff --exit-code --quiet
|
||||
|
||||
echo "Building"
|
||||
script/build
|
||||
|
||||
cd .gopath/src/github.com/github/gh-ost
|
||||
|
||||
echo "Running unit tests"
|
||||
go test ./go/...
|
21
vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
### Issue description
|
||||
Tell us what should happen and what happens instead
|
||||
|
||||
### Example code
|
||||
```go
|
||||
If possible, please enter some example code here to reproduce the issue.
|
||||
```
|
||||
|
||||
### Error log
|
||||
```
|
||||
If you have an error log, please paste it here.
|
||||
```
|
||||
|
||||
### Configuration
|
||||
*Driver version (or git SHA):*
|
||||
|
||||
*Go version:* run `go version` in your console
|
||||
|
||||
*Server version:* E.g. MySQL 5.6, MariaDB 10.0.20
|
||||
|
||||
*Server OS:* E.g. Debian 8.1 (Jessie), Windows 10
|
9
vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
### Description
|
||||
Please explain the changes you made here.
|
||||
|
||||
### Checklist
|
||||
- [ ] Code compiles correctly
|
||||
- [ ] Created tests which fail without the change (if possible)
|
||||
- [ ] All tests passing
|
||||
- [ ] Extended the README / documentation, if necessary
|
||||
- [ ] Added myself / the copyright holder to the AUTHORS file
|
1
vendor/github.com/go-sql-driver/mysql/.gitignore
generated
vendored
@ -6,3 +6,4 @@
|
||||
Icon?
|
||||
ehthumbs.db
|
||||
Thumbs.db
|
||||
.idea
|
||||
|
126
vendor/github.com/go-sql-driver/mysql/.travis.yml
generated
vendored
@ -1,10 +1,128 @@
|
||||
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- 1.2
|
||||
- 1.3
|
||||
- 1.4
|
||||
- tip
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- 1.12.x
|
||||
- master
|
||||
|
||||
before_install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
before_script:
|
||||
- echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB" | sudo tee -a /etc/mysql/my.cnf
|
||||
- sudo service mysql restart
|
||||
- .travis/wait_mysql.sh
|
||||
- mysql -e 'create database gotest;'
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- env: DB=MYSQL8
|
||||
sudo: required
|
||||
dist: trusty
|
||||
go: 1.10.x
|
||||
services:
|
||||
- docker
|
||||
before_install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
- docker pull mysql:8.0
|
||||
- docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
|
||||
mysql:8.0 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
|
||||
- cp .travis/docker.cnf ~/.my.cnf
|
||||
- .travis/wait_mysql.sh
|
||||
before_script:
|
||||
- export MYSQL_TEST_USER=gotest
|
||||
- export MYSQL_TEST_PASS=secret
|
||||
- export MYSQL_TEST_ADDR=127.0.0.1:3307
|
||||
- export MYSQL_TEST_CONCURRENT=1
|
||||
|
||||
- env: DB=MYSQL57
|
||||
sudo: required
|
||||
dist: trusty
|
||||
go: 1.10.x
|
||||
services:
|
||||
- docker
|
||||
before_install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
- docker pull mysql:5.7
|
||||
- docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
|
||||
mysql:5.7 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
|
||||
- cp .travis/docker.cnf ~/.my.cnf
|
||||
- .travis/wait_mysql.sh
|
||||
before_script:
|
||||
- export MYSQL_TEST_USER=gotest
|
||||
- export MYSQL_TEST_PASS=secret
|
||||
- export MYSQL_TEST_ADDR=127.0.0.1:3307
|
||||
- export MYSQL_TEST_CONCURRENT=1
|
||||
|
||||
- env: DB=MARIA55
|
||||
sudo: required
|
||||
dist: trusty
|
||||
go: 1.10.x
|
||||
services:
|
||||
- docker
|
||||
before_install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
- docker pull mariadb:5.5
|
||||
- docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
|
||||
mariadb:5.5 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
|
||||
- cp .travis/docker.cnf ~/.my.cnf
|
||||
- .travis/wait_mysql.sh
|
||||
before_script:
|
||||
- export MYSQL_TEST_USER=gotest
|
||||
- export MYSQL_TEST_PASS=secret
|
||||
- export MYSQL_TEST_ADDR=127.0.0.1:3307
|
||||
- export MYSQL_TEST_CONCURRENT=1
|
||||
|
||||
- env: DB=MARIA10_1
|
||||
sudo: required
|
||||
dist: trusty
|
||||
go: 1.10.x
|
||||
services:
|
||||
- docker
|
||||
before_install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
- docker pull mariadb:10.1
|
||||
- docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
|
||||
mariadb:10.1 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
|
||||
- cp .travis/docker.cnf ~/.my.cnf
|
||||
- .travis/wait_mysql.sh
|
||||
before_script:
|
||||
- export MYSQL_TEST_USER=gotest
|
||||
- export MYSQL_TEST_PASS=secret
|
||||
- export MYSQL_TEST_ADDR=127.0.0.1:3307
|
||||
- export MYSQL_TEST_CONCURRENT=1
|
||||
|
||||
- os: osx
|
||||
osx_image: xcode10.1
|
||||
addons:
|
||||
homebrew:
|
||||
packages:
|
||||
- mysql
|
||||
go: 1.12.x
|
||||
before_install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
before_script:
|
||||
- echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB\nlocal_infile=1" >> /usr/local/etc/my.cnf
|
||||
- mysql.server start
|
||||
- mysql -uroot -e 'CREATE USER gotest IDENTIFIED BY "secret"'
|
||||
- mysql -uroot -e 'GRANT ALL ON *.* TO gotest'
|
||||
- mysql -uroot -e 'create database gotest;'
|
||||
- export MYSQL_TEST_USER=gotest
|
||||
- export MYSQL_TEST_PASS=secret
|
||||
- export MYSQL_TEST_ADDR=127.0.0.1:3306
|
||||
- export MYSQL_TEST_CONCURRENT=1
|
||||
|
||||
script:
|
||||
- go test -v -covermode=count -coverprofile=coverage.out
|
||||
- go vet ./...
|
||||
- .travis/gofmt.sh
|
||||
after_script:
|
||||
- $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci
|
||||
|
5
vendor/github.com/go-sql-driver/mysql/.travis/docker.cnf
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
[client]
|
||||
user = gotest
|
||||
password = secret
|
||||
host = 127.0.0.1
|
||||
port = 3307
|
7
vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh
generated
vendored
Executable file
@ -0,0 +1,7 @@
|
||||
#!/bin/bash
|
||||
set -ev
|
||||
|
||||
# Only check for go1.10+ since the gofmt style changed
|
||||
if [[ $(go version) =~ go1\.([0-9]+) ]] && ((${BASH_REMATCH[1]} >= 10)); then
|
||||
test -z "$(gofmt -d -s . | tee /dev/stderr)"
|
||||
fi
|
8
vendor/github.com/go-sql-driver/mysql/.travis/wait_mysql.sh
generated
vendored
Executable file
@ -0,0 +1,8 @@
|
||||
#!/bin/sh
|
||||
while :
|
||||
do
|
||||
if mysql -e 'select version()' 2>&1 | grep 'version()\|ERROR 2059 (HY000):'; then
|
||||
break
|
||||
fi
|
||||
sleep 3
|
||||
done
|
57
vendor/github.com/go-sql-driver/mysql/AUTHORS
generated
vendored
@ -12,35 +12,90 @@
|
||||
# Individual Persons
|
||||
|
||||
Aaron Hopkins <go-sql-driver at die.net>
|
||||
Achille Roussel <achille.roussel at gmail.com>
|
||||
Alexey Palazhchenko <alexey.palazhchenko at gmail.com>
|
||||
Andrew Reid <andrew.reid at tixtrack.com>
|
||||
Arne Hormann <arnehormann at gmail.com>
|
||||
Asta Xie <xiemengjun at gmail.com>
|
||||
Bulat Gaifullin <gaifullinbf at gmail.com>
|
||||
Carlos Nieto <jose.carlos at menteslibres.net>
|
||||
Chris Moos <chris at tech9computers.com>
|
||||
Craig Wilson <craiggwilson at gmail.com>
|
||||
Daniel Montoya <dsmontoyam at gmail.com>
|
||||
Daniel Nichter <nil at codenode.com>
|
||||
Daniël van Eeden <git at myname.nl>
|
||||
Dave Protasowski <dprotaso at gmail.com>
|
||||
DisposaBoy <disposaboy at dby.me>
|
||||
Egor Smolyakov <egorsmkv at gmail.com>
|
||||
Erwan Martin <hello at erwan.io>
|
||||
Evan Shaw <evan at vendhq.com>
|
||||
Frederick Mayle <frederickmayle at gmail.com>
|
||||
Gustavo Kristic <gkristic at gmail.com>
|
||||
Hajime Nakagami <nakagami at gmail.com>
|
||||
Hanno Braun <mail at hannobraun.com>
|
||||
Henri Yandell <flamefew at gmail.com>
|
||||
Hirotaka Yamamoto <ymmt2005 at gmail.com>
|
||||
Huyiguang <hyg at webterren.com>
|
||||
ICHINOSE Shogo <shogo82148 at gmail.com>
|
||||
Ilia Cimpoes <ichimpoesh at gmail.com>
|
||||
INADA Naoki <songofacandy at gmail.com>
|
||||
Jacek Szwec <szwec.jacek at gmail.com>
|
||||
James Harr <james.harr at gmail.com>
|
||||
Jeff Hodges <jeff at somethingsimilar.com>
|
||||
Jeffrey Charles <jeffreycharles at gmail.com>
|
||||
Jerome Meyer <jxmeyer at gmail.com>
|
||||
Jian Zhen <zhenjl at gmail.com>
|
||||
Joshua Prunier <joshua.prunier at gmail.com>
|
||||
Julien Lefevre <julien.lefevr at gmail.com>
|
||||
Julien Schmidt <go-sql-driver at julienschmidt.com>
|
||||
Justin Li <jli at j-li.net>
|
||||
Justin Nuß <nuss.justin at gmail.com>
|
||||
Kamil Dziedzic <kamil at klecza.pl>
|
||||
Kevin Malachowski <kevin at chowski.com>
|
||||
Kieron Woodhouse <kieron.woodhouse at infosum.com>
|
||||
Lennart Rudolph <lrudolph at hmc.edu>
|
||||
Leonardo YongUk Kim <dalinaum at gmail.com>
|
||||
Linh Tran Tuan <linhduonggnu at gmail.com>
|
||||
Lion Yang <lion at aosc.xyz>
|
||||
Luca Looz <luca.looz92 at gmail.com>
|
||||
Lucas Liu <extrafliu at gmail.com>
|
||||
Luke Scott <luke at webconnex.com>
|
||||
Maciej Zimnoch <maciej.zimnoch at codilime.com>
|
||||
Michael Woolnough <michael.woolnough at gmail.com>
|
||||
Nicola Peduzzi <thenikso at gmail.com>
|
||||
Olivier Mengué <dolmen at cpan.org>
|
||||
oscarzhao <oscarzhaosl at gmail.com>
|
||||
Paul Bonser <misterpib at gmail.com>
|
||||
Peter Schultz <peter.schultz at classmarkets.com>
|
||||
Rebecca Chin <rchin at pivotal.io>
|
||||
Reed Allman <rdallman10 at gmail.com>
|
||||
Richard Wilkes <wilkes at me.com>
|
||||
Robert Russell <robert at rrbrussell.com>
|
||||
Runrioter Wung <runrioter at gmail.com>
|
||||
Shuode Li <elemount at qq.com>
|
||||
Simon J Mudd <sjmudd at pobox.com>
|
||||
Soroush Pour <me at soroushjp.com>
|
||||
Stan Putrya <root.vagner at gmail.com>
|
||||
Stanley Gunawan <gunawan.stanley at gmail.com>
|
||||
Steven Hartland <steven.hartland at multiplay.co.uk>
|
||||
Thomas Wodarek <wodarekwebpage at gmail.com>
|
||||
Tim Ruffles <timruffles at gmail.com>
|
||||
Tom Jenkinson <tom at tjenkinson.me>
|
||||
Xiangyu Hu <xiangyu.hu at outlook.com>
|
||||
Xiaobing Jiang <s7v7nislands at gmail.com>
|
||||
Xiuming Chen <cc at cxm.cc>
|
||||
Julien Lefevre <julien.lefevr at gmail.com>
|
||||
Zhenye Xie <xiezhenye at gmail.com>
|
||||
|
||||
# Organizations
|
||||
|
||||
Barracuda Networks, Inc.
|
||||
Counting Ltd.
|
||||
Facebook Inc.
|
||||
GitHub Inc.
|
||||
Google Inc.
|
||||
InfoSum Ltd.
|
||||
Keybase Inc.
|
||||
Multiplay Ltd.
|
||||
Percona LLC
|
||||
Pivotal Inc.
|
||||
Stripe Inc.
|
||||
|
89
vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
generated
vendored
@ -1,21 +1,96 @@
|
||||
## HEAD
|
||||
## Version 1.4 (2018-06-03)
|
||||
|
||||
Changes:
|
||||
|
||||
- Documentation fixes (#530, #535, #567)
|
||||
- Refactoring (#575, #579, #580, #581, #603, #615, #704)
|
||||
- Cache column names (#444)
|
||||
- Sort the DSN parameters in DSNs generated from a config (#637)
|
||||
- Allow native password authentication by default (#644)
|
||||
- Use the default port if it is missing in the DSN (#668)
|
||||
- Removed the `strict` mode (#676)
|
||||
- Do not query `max_allowed_packet` by default (#680)
|
||||
- Dropped support Go 1.6 and lower (#696)
|
||||
- Updated `ConvertValue()` to match the database/sql/driver implementation (#760)
|
||||
- Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783)
|
||||
- Improved the compatibility of the authentication system (#807)
|
||||
|
||||
New Features:
|
||||
|
||||
- Multi-Results support (#537)
|
||||
- `rejectReadOnly` DSN option (#604)
|
||||
- `context.Context` support (#608, #612, #627, #761)
|
||||
- Transaction isolation level support (#619, #744)
|
||||
- Read-Only transactions support (#618, #634)
|
||||
- `NewConfig` function which initializes a config with default values (#679)
|
||||
- Implemented the `ColumnType` interfaces (#667, #724)
|
||||
- Support for custom string types in `ConvertValue` (#623)
|
||||
- Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710)
|
||||
- `caching_sha2_password` authentication plugin support (#794, #800, #801, #802)
|
||||
- Implemented `driver.SessionResetter` (#779)
|
||||
- `sha256_password` authentication plugin support (#808)
|
||||
|
||||
Bugfixes:
|
||||
|
||||
- Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718)
|
||||
- Fixed LOAD LOCAL DATA INFILE for empty files (#590)
|
||||
- Removed columns definition cache since it sometimes cached invalid data (#592)
|
||||
- Don't mutate registered TLS configs (#600)
|
||||
- Make RegisterTLSConfig concurrency-safe (#613)
|
||||
- Handle missing auth data in the handshake packet correctly (#646)
|
||||
- Do not retry queries when data was written to avoid data corruption (#302, #736)
|
||||
- Cache the connection pointer for error handling before invalidating it (#678)
|
||||
- Fixed imports for appengine/cloudsql (#700)
|
||||
- Fix sending STMT_LONG_DATA for 0 byte data (#734)
|
||||
- Set correct capacity for []bytes read from length-encoded strings (#766)
|
||||
- Make RegisterDial concurrency-safe (#773)
|
||||
|
||||
|
||||
## Version 1.3 (2016-12-01)
|
||||
|
||||
Changes:
|
||||
|
||||
- Go 1.1 is no longer supported
|
||||
- Use decimals field from MySQL to format time types (#249)
|
||||
- Use decimals fields in MySQL to format time types (#249)
|
||||
- Buffer optimizations (#269)
|
||||
- TLS ServerName defaults to the host (#283)
|
||||
- Refactoring (#400, #410, #437)
|
||||
- Adjusted documentation for second generation CloudSQL (#485)
|
||||
- Documented DSN system var quoting rules (#502)
|
||||
- Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
|
||||
|
||||
New Features:
|
||||
|
||||
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
|
||||
- Support for returning table alias on Columns() (#289, #359, #382)
|
||||
- Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490)
|
||||
- Support for uint64 parameters with high bit set (#332, #345)
|
||||
- Cleartext authentication plugin support (#327)
|
||||
- Exported ParseDSN function and the Config struct (#403, #419, #429)
|
||||
- Read / Write timeouts (#401)
|
||||
- Support for JSON field type (#414)
|
||||
- Support for multi-statements and multi-results (#411, #431)
|
||||
- DSN parameter to set the driver-side max_allowed_packet value manually (#489)
|
||||
- Native password authentication plugin support (#494, #524)
|
||||
|
||||
Bugfixes:
|
||||
|
||||
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
|
||||
- Fixed handling of queries without columns and rows (#255)
|
||||
- Fixed a panic when SetKeepAlive() failed (#298)
|
||||
|
||||
New Features:
|
||||
- Support for returning table alias on Columns() (#289)
|
||||
- Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318)
|
||||
- Handle ERR packets while reading rows (#321)
|
||||
- Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
|
||||
- Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
|
||||
- Actually zero out bytes in handshake response (#378)
|
||||
- Fixed race condition in registering LOAD DATA INFILE handler (#383)
|
||||
- Fixed tests with MySQL 5.7.9+ (#380)
|
||||
- QueryUnescape TLS config names (#397)
|
||||
- Fixed "broken pipe" error by writing to closed socket (#390)
|
||||
- Fixed LOAD LOCAL DATA INFILE buffering (#424)
|
||||
- Fixed parsing of floats into float64 when placeholders are used (#434)
|
||||
- Fixed DSN tests with Go 1.7+ (#459)
|
||||
- Handle ERR packets while waiting for EOF (#473)
|
||||
- Invalidate connection on error while discarding additional results (#513)
|
||||
- Allow terminating packets of length 0 (#516)
|
||||
|
||||
|
||||
## Version 1.2 (2014-06-03)
|
||||
|
17
vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
generated
vendored
@ -4,28 +4,11 @@
|
||||
|
||||
Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).
|
||||
|
||||
Please provide the following minimum information:
|
||||
* Your Go-MySQL-Driver version (or git SHA)
|
||||
* Your Go version (run `go version` in your console)
|
||||
* A detailed issue description
|
||||
* Error Log if present
|
||||
* If possible, a short example
|
||||
|
||||
|
||||
## Contributing Code
|
||||
|
||||
By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
|
||||
Don't forget to add yourself to the AUTHORS file.
|
||||
|
||||
### Pull Requests Checklist
|
||||
|
||||
Please check the following points before submitting your pull request:
|
||||
- [x] Code compiles correctly
|
||||
- [x] Created tests, if possible
|
||||
- [x] All tests pass
|
||||
- [x] Extended the README / documentation, if necessary
|
||||
- [x] Added yourself to the AUTHORS file
|
||||
|
||||
### Code Review
|
||||
|
||||
Everyone is invited to review and comment on pull requests.
|
||||
|
197
vendor/github.com/go-sql-driver/mysql/README.md
generated
vendored
@ -1,13 +1,9 @@
|
||||
# Go-MySQL-Driver
|
||||
|
||||
A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package
|
||||
A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
|
||||
|
||||
![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
|
||||
|
||||
**Latest stable Release:** [Version 1.2 (June 03, 2014)](https://github.com/go-sql-driver/mysql/releases)
|
||||
|
||||
[![Build Status](https://travis-ci.org/go-sql-driver/mysql.png?branch=master)](https://travis-ci.org/go-sql-driver/mysql)
|
||||
|
||||
---------------------------------------
|
||||
* [Features](#features)
|
||||
* [Requirements](#requirements)
|
||||
@ -19,6 +15,9 @@ A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) packa
|
||||
* [Address](#address)
|
||||
* [Parameters](#parameters)
|
||||
* [Examples](#examples)
|
||||
* [Connection pool and timeouts](#connection-pool-and-timeouts)
|
||||
* [context.Context Support](#contextcontext-support)
|
||||
* [ColumnType Support](#columntype-support)
|
||||
* [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
|
||||
* [time.Time support](#timetime-support)
|
||||
* [Unicode support](#unicode-support)
|
||||
@ -30,31 +29,31 @@ A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) packa
|
||||
## Features
|
||||
* Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
|
||||
* Native Go implementation. No C-bindings, just pure Go
|
||||
* Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
|
||||
* Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
|
||||
* Automatic handling of broken connections
|
||||
* Automatic Connection Pooling *(by database/sql package)*
|
||||
* Supports queries larger than 16MB
|
||||
* Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support.
|
||||
* Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
|
||||
* Intelligent `LONG DATA` handling in prepared statements
|
||||
* Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
|
||||
* Optional `time.Time` parsing
|
||||
* Optional placeholder interpolation
|
||||
|
||||
## Requirements
|
||||
* Go 1.2 or higher
|
||||
* Go 1.9 or higher. We aim to support the 3 latest versions of Go.
|
||||
* MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
|
||||
|
||||
---------------------------------------
|
||||
|
||||
## Installation
|
||||
Simple install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell:
|
||||
Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
|
||||
```bash
|
||||
$ go get github.com/go-sql-driver/mysql
|
||||
$ go get -u github.com/go-sql-driver/mysql
|
||||
```
|
||||
Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`.
|
||||
Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
|
||||
|
||||
## Usage
|
||||
_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](http://golang.org/pkg/database/sql) API then.
|
||||
_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](https://golang.org/pkg/database/sql/) API then.
|
||||
|
||||
Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
|
||||
```go
|
||||
@ -93,17 +92,20 @@ This has the same effect as an empty DSN string:
|
||||
|
||||
```
|
||||
|
||||
Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
|
||||
|
||||
#### Password
|
||||
Passwords can consist of any character. Escaping is **not** necessary.
|
||||
|
||||
#### Protocol
|
||||
See [net.Dial](http://golang.org/pkg/net/#Dial) for more information which networks are available.
|
||||
See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
|
||||
In general you should use an Unix domain socket if available and TCP otherwise for best performance.
|
||||
|
||||
#### Address
|
||||
For TCP and UDP networks, addresses have the form `host:port`.
|
||||
For TCP and UDP networks, addresses have the form `host[:port]`.
|
||||
If `port` is omitted, the default port will be used.
|
||||
If `host` is a literal IPv6 address, it must be enclosed in square brackets.
|
||||
The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
|
||||
The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
|
||||
|
||||
For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
|
||||
|
||||
@ -133,6 +135,15 @@ Default: false
|
||||
|
||||
`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
|
||||
|
||||
##### `allowNativePasswords`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: true
|
||||
```
|
||||
`allowNativePasswords=false` disallows the usage of MySQL native password method.
|
||||
|
||||
##### `allowOldPasswords`
|
||||
|
||||
```
|
||||
@ -160,13 +171,18 @@ Unless you need the fallback behavior, please use `collation` instead.
|
||||
```
|
||||
Type: string
|
||||
Valid Values: <name>
|
||||
Default: utf8_general_ci
|
||||
Default: utf8mb4_general_ci
|
||||
```
|
||||
|
||||
Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
|
||||
|
||||
A list of valid charsets for a server is retrievable with `SHOW COLLATION`.
|
||||
|
||||
The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You should use an older collation (e.g. `utf8_general_ci`) for older MySQL.
|
||||
|
||||
Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
|
||||
|
||||
|
||||
##### `clientFoundRows`
|
||||
|
||||
```
|
||||
@ -213,12 +229,31 @@ Valid Values: <escaped name>
|
||||
Default: UTC
|
||||
```
|
||||
|
||||
Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details.
|
||||
Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
|
||||
|
||||
Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
|
||||
|
||||
Please keep in mind, that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
|
||||
Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
|
||||
|
||||
##### `maxAllowedPacket`
|
||||
```
|
||||
Type: decimal number
|
||||
Default: 4194304
|
||||
```
|
||||
|
||||
Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
|
||||
|
||||
##### `multiStatements`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
|
||||
|
||||
When `multiStatements` is used, `?` parameters must only be used in the first statement.
|
||||
|
||||
##### `parseTime`
|
||||
|
||||
@ -229,9 +264,19 @@ Default: false
|
||||
```
|
||||
|
||||
`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`
|
||||
The date or datetime like `0000-00-00 00:00:00` is converted into zero value of `time.Time`.
|
||||
|
||||
|
||||
##### `strict`
|
||||
##### `readTimeout`
|
||||
|
||||
```
|
||||
Type: duration
|
||||
Default: 0
|
||||
```
|
||||
|
||||
I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
|
||||
|
||||
##### `rejectReadOnly`
|
||||
|
||||
```
|
||||
Type: bool
|
||||
@ -239,41 +284,89 @@ Valid Values: true, false
|
||||
Default: false
|
||||
```
|
||||
|
||||
`strict=true` enables the strict mode in which MySQL warnings are treated as errors.
|
||||
|
||||
By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. See the [examples](#examples) for an DSN example.
|
||||
`rejectReadOnly=true` causes the driver to reject read-only connections. This
|
||||
is for a possible race condition during an automatic failover, where the mysql
|
||||
client gets connected to a read-only replica after the failover.
|
||||
|
||||
Note that this should be a fairly rare case, as an automatic failover normally
|
||||
happens when the primary is down, and the race condition shouldn't happen
|
||||
unless it comes back up online as soon as the failover is kicked off. On the
|
||||
other hand, when this happens, a MySQL application can get stuck on a
|
||||
read-only connection until restarted. It is however fairly easy to reproduce,
|
||||
for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
|
||||
|
||||
If you are not relying on read-only transactions to reject writes that aren't
|
||||
supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
|
||||
is safer for failovers.
|
||||
|
||||
Note that ERROR 1290 can be returned for a `read-only` server and this option will
|
||||
cause a retry for that error. However the same error number is used for some
|
||||
other cases. You should ensure your application will never cause an ERROR 1290
|
||||
except for `read-only` mode when enabling this option.
|
||||
|
||||
|
||||
##### `serverPubKey`
|
||||
|
||||
```
|
||||
Type: string
|
||||
Valid Values: <name>
|
||||
Default: none
|
||||
```
|
||||
|
||||
Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN.
|
||||
Public keys are used to transmit encrypted data, e.g. for authentication.
|
||||
If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
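For illustration, a sketch of registering a key read from a local PEM file (the file name `mykey.pem` and the registration name `mykey` are placeholders); this mirrors the example in the driver's godoc:

```go
package main

import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"io/ioutil"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Read the server's RSA public key from a local PEM file.
	data, err := ioutil.ReadFile("mykey.pem")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(data)
	if block == nil || block.Type != "PUBLIC KEY" {
		log.Fatal("failed to decode PEM block containing public key")
	}
	pub, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	rsaPubKey, ok := pub.(*rsa.PublicKey)
	if !ok {
		log.Fatal("not an RSA public key")
	}

	// Register the key under a name; the DSN can then reference it with serverPubKey=mykey.
	mysql.RegisterServerPubKey("mykey", rsaPubKey)
}
```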
|
||||
|
||||
|
||||
##### `timeout`
|
||||
|
||||
```
|
||||
Type: decimal number
|
||||
Type: duration
|
||||
Default: OS default
|
||||
```
|
||||
|
||||
*Driver* side connection timeout. The value must be a string of decimal numbers, each with optional fraction and a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
|
||||
Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
|
||||
|
||||
|
||||
##### `tls`
|
||||
|
||||
```
|
||||
Type: bool / string
|
||||
Valid Values: true, false, skip-verify, <name>
|
||||
Valid Values: true, false, skip-verify, preferred, <name>
|
||||
Default: false
|
||||
```
|
||||
|
||||
`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
|
||||
`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side) or use `preferred` to use TLS only when advertised by the server. This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
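As a rough sketch (the CA certificate path and the config name `custom` are placeholders), a custom TLS config can be registered and then selected by name in the DSN:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"io/ioutil"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Build a cert pool from a CA certificate file.
	rootCertPool := x509.NewCertPool()
	caCert, err := ioutil.ReadFile("/path/to/ca-cert.pem")
	if err != nil {
		log.Fatal(err)
	}
	if ok := rootCertPool.AppendCertsFromPEM(caCert); !ok {
		log.Fatal("failed to append CA PEM")
	}

	// Register the TLS config under a custom name and select it via tls=custom in the DSN.
	if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool}); err != nil {
		log.Fatal(err)
	}

	db, err := sql.Open("mysql", "user:password@tcp(localhost:3306)/dbname?tls=custom")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```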
|
||||
|
||||
|
||||
##### `writeTimeout`
|
||||
|
||||
```
|
||||
Type: duration
|
||||
Default: 0
|
||||
```
|
||||
|
||||
I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
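For example, a DSN (credentials and host are placeholders) combining a 5 second dial timeout with 30 second I/O timeouts could look like this:

```
user:password@tcp(localhost:3306)/dbname?timeout=5s&readTimeout=30s&writeTimeout=30s
```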
|
||||
|
||||
|
||||
##### System Variables
|
||||
|
||||
All other parameters are interpreted as system variables:
|
||||
* `autocommit`: `"SET autocommit=<value>"`
|
||||
* [`time_zone`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `"SET time_zone=<value>"`
|
||||
* [`tx_isolation`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `"SET tx_isolation=<value>"`
|
||||
* `param`: `"SET <param>=<value>"`
|
||||
Any other parameters are interpreted as system variables:
|
||||
* `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
|
||||
* `<enum_var>=<value>`: `SET <enum_var>=<value>`
|
||||
* `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
|
||||
|
||||
Rules:
|
||||
* The values for string variables must be quoted with `'`.
|
||||
* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
|
||||
(which implies values of string variables must be wrapped with `%27`).
|
||||
|
||||
Examples:
|
||||
* `autocommit=1`: `SET autocommit=1`
|
||||
* [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
|
||||
* [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
|
||||
|
||||
*The values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!*
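A small sketch (DSN credentials are placeholders) showing how the escaped value can be produced with `url.QueryEscape`:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// The quoted string value is escaped as a whole, including the quotes.
	dsn := "user:password@/dbname?time_zone=" + url.QueryEscape("'Europe/Paris'")
	fmt.Println(dsn) // user:password@/dbname?time_zone=%27Europe%2FParis%27
}
```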
|
||||
|
||||
#### Examples
|
||||
```
|
||||
@ -288,9 +381,9 @@ root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
|
||||
user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
|
||||
```
|
||||
|
||||
Use the [strict mode](#strict) but ignore notes:
|
||||
Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
|
||||
```
|
||||
user:password@/dbname?strict=true&sql_notes=false
|
||||
user:password@/dbname?sql_mode=TRADITIONAL
|
||||
```
|
||||
|
||||
TCP via IPv6:
|
||||
@ -303,11 +396,16 @@ TCP on a remote host, e.g. Amazon RDS:
|
||||
id:password@tcp(your-amazonaws-uri.com:3306)/dbname
|
||||
```
|
||||
|
||||
Google Cloud SQL on App Engine:
|
||||
Google Cloud SQL on App Engine (First Generation MySQL Server):
|
||||
```
|
||||
user@cloudsql(project-id:instance-name)/dbname
|
||||
```
|
||||
|
||||
Google Cloud SQL on App Engine (Second Generation MySQL Server):
|
||||
```
|
||||
user@cloudsql(project-id:regionname:instance-name)/dbname
|
||||
```
|
||||
|
||||
TCP using default port (3306) on localhost:
|
||||
```
|
||||
user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
|
||||
@ -323,6 +421,18 @@ No Database preselected:
|
||||
user:password@/
|
||||
```
|
||||
|
||||
|
||||
### Connection pool and timeouts
|
||||
The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
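A minimal sketch of the usual pool settings (the DSN and the chosen limits are placeholders):

```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.SetMaxOpenConns(25)                 // cap on concurrent connections to the server
	db.SetMaxIdleConns(25)                 // keep idle connections around for reuse
	db.SetConnMaxLifetime(5 * time.Minute) // recycle connections before server-side timeouts kick in
}
```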
|
||||
|
||||
## `ColumnType` Support
|
||||
This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
|
||||
|
||||
## `context.Context` Support
|
||||
Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
|
||||
See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
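For example, a hedged sketch (the DSN is a placeholder) of a query that is cancelled if it does not finish within one second:

```go
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The query is cancelled if it does not complete within one second.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	var now string
	if err := db.QueryRowContext(ctx, "SELECT NOW()").Scan(&now); err != nil {
		log.Fatal(err)
	}
	log.Println(now)
}
```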
|
||||
|
||||
|
||||
### `LOAD DATA LOCAL INFILE` support
|
||||
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
|
||||
```go
|
||||
@ -333,17 +443,17 @@ Files must be whitelisted by registering them with `mysql.RegisterLocalFile(file
|
||||
|
||||
To use an `io.Reader`, a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns an `io.Reader` or `io.ReadCloser`. The Reader can then be used with the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when you don't need a handler anymore.
|
||||
|
||||
See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
|
||||
See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
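A rough sketch (file path, DSN, and table name are placeholders, and it assumes the server permits `LOAD DATA LOCAL`):

```go
package main

import (
	"database/sql"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Whitelist the local file before referencing it in LOAD DATA LOCAL INFILE.
	mysql.RegisterLocalFile("/tmp/data.csv")

	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("LOAD DATA LOCAL INFILE '/tmp/data.csv' INTO TABLE mytable"); err != nil {
		log.Fatal(err)
	}
}
```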
|
||||
|
||||
|
||||
### `time.Time` support
|
||||
The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your programm.
|
||||
The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
|
||||
|
||||
However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
|
||||
However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
|
||||
|
||||
**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks, for example, [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
|
||||
|
||||
Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
|
||||
Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
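A minimal sketch (the DSN is a placeholder) of scanning a `DATETIME` into a `time.Time` with `parseTime=true`:

```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// parseTime=true makes the driver return DATE/DATETIME values as time.Time.
	db, err := sql.Open("mysql", "user:password@/dbname?parseTime=true&loc=Local")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var created time.Time
	if err := db.QueryRow("SELECT NOW()").Scan(&created); err != nil {
		log.Fatal(err)
	}
	log.Println(created.Format(time.RFC3339))
}
```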
|
||||
|
||||
|
||||
### Unicode support
|
||||
@ -355,7 +465,6 @@ Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAM
|
||||
|
||||
See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
|
||||
|
||||
|
||||
## Testing / Development
|
||||
To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
|
||||
|
||||
@ -374,13 +483,13 @@ Mozilla summarizes the license scope as follows:
|
||||
|
||||
|
||||
That means:
|
||||
* You can **use** the **unchanged** source code both in private and commercially
|
||||
* When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0)
|
||||
* You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**
|
||||
* You can **use** the **unchanged** source code both in private and commercially.
|
||||
* When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
|
||||
* You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
|
||||
|
||||
Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license.
|
||||
Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
|
||||
|
||||
You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
|
||||
You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
|
||||
|
||||
![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
|
||||
|
||||
|
10
vendor/github.com/go-sql-driver/mysql/appengine.go
generated
vendored
10
vendor/github.com/go-sql-driver/mysql/appengine.go
generated
vendored
@ -11,9 +11,15 @@
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"appengine/cloudsql"
|
||||
"context"
|
||||
"net"
|
||||
|
||||
"google.golang.org/appengine/cloudsql"
|
||||
)
|
||||
|
||||
func init() {
|
||||
RegisterDial("cloudsql", cloudsql.Dial)
|
||||
RegisterDialContext("cloudsql", func(_ context.Context, instance string) (net.Conn, error) {
|
||||
// XXX: the cloudsql driver still does not export a Context-aware dialer.
|
||||
return cloudsql.Dial(instance)
|
||||
})
|
||||
}
|
||||
|
422
vendor/github.com/go-sql-driver/mysql/auth.go
generated
vendored
Normal file
422
vendor/github.com/go-sql-driver/mysql/auth.go
generated
vendored
Normal file
@ -0,0 +1,422 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// server pub keys registry
|
||||
var (
|
||||
serverPubKeyLock sync.RWMutex
|
||||
serverPubKeyRegistry map[string]*rsa.PublicKey
|
||||
)
|
||||
|
||||
// RegisterServerPubKey registers a server RSA public key which can be used to
|
||||
// send data in a secure manner to the server without receiving the public key
|
||||
// in a potentially insecure way from the server first.
|
||||
// Registered keys can afterwards be used adding serverPubKey=<name> to the DSN.
|
||||
//
|
||||
// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
|
||||
// after registering it and may not be modified.
|
||||
//
|
||||
// data, err := ioutil.ReadFile("mykey.pem")
|
||||
// if err != nil {
|
||||
// log.Fatal(err)
|
||||
// }
|
||||
//
|
||||
// block, _ := pem.Decode(data)
|
||||
// if block == nil || block.Type != "PUBLIC KEY" {
|
||||
// log.Fatal("failed to decode PEM block containing public key")
|
||||
// }
|
||||
//
|
||||
// pub, err := x509.ParsePKIXPublicKey(block.Bytes)
|
||||
// if err != nil {
|
||||
// log.Fatal(err)
|
||||
// }
|
||||
//
|
||||
// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok {
|
||||
// mysql.RegisterServerPubKey("mykey", rsaPubKey)
|
||||
// } else {
|
||||
// log.Fatal("not a RSA public key")
|
||||
// }
|
||||
//
|
||||
func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) {
|
||||
serverPubKeyLock.Lock()
|
||||
if serverPubKeyRegistry == nil {
|
||||
serverPubKeyRegistry = make(map[string]*rsa.PublicKey)
|
||||
}
|
||||
|
||||
serverPubKeyRegistry[name] = pubKey
|
||||
serverPubKeyLock.Unlock()
|
||||
}
|
||||
|
||||
// DeregisterServerPubKey removes the public key registered with the given name.
|
||||
func DeregisterServerPubKey(name string) {
|
||||
serverPubKeyLock.Lock()
|
||||
if serverPubKeyRegistry != nil {
|
||||
delete(serverPubKeyRegistry, name)
|
||||
}
|
||||
serverPubKeyLock.Unlock()
|
||||
}
|
||||
|
||||
func getServerPubKey(name string) (pubKey *rsa.PublicKey) {
|
||||
serverPubKeyLock.RLock()
|
||||
if v, ok := serverPubKeyRegistry[name]; ok {
|
||||
pubKey = v
|
||||
}
|
||||
serverPubKeyLock.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Hash password using pre 4.1 (old password) method
|
||||
// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
|
||||
type myRnd struct {
|
||||
seed1, seed2 uint32
|
||||
}
|
||||
|
||||
const myRndMaxVal = 0x3FFFFFFF
|
||||
|
||||
// Pseudo random number generator
|
||||
func newMyRnd(seed1, seed2 uint32) *myRnd {
|
||||
return &myRnd{
|
||||
seed1: seed1 % myRndMaxVal,
|
||||
seed2: seed2 % myRndMaxVal,
|
||||
}
|
||||
}
|
||||
|
||||
// Tested to be equivalent to MariaDB's floating point variant
|
||||
// http://play.golang.org/p/QHvhd4qved
|
||||
// http://play.golang.org/p/RG0q4ElWDx
|
||||
func (r *myRnd) NextByte() byte {
|
||||
r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
|
||||
r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
|
||||
|
||||
return byte(uint64(r.seed1) * 31 / myRndMaxVal)
|
||||
}
|
||||
|
||||
// Generate binary hash from byte string using insecure pre 4.1 method
|
||||
func pwHash(password []byte) (result [2]uint32) {
|
||||
var add uint32 = 7
|
||||
var tmp uint32
|
||||
|
||||
result[0] = 1345345333
|
||||
result[1] = 0x12345671
|
||||
|
||||
for _, c := range password {
|
||||
// skip spaces and tabs in password
|
||||
if c == ' ' || c == '\t' {
|
||||
continue
|
||||
}
|
||||
|
||||
tmp = uint32(c)
|
||||
result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
|
||||
result[1] += (result[1] << 8) ^ result[0]
|
||||
add += tmp
|
||||
}
|
||||
|
||||
// Remove sign bit (1<<31)-1)
|
||||
result[0] &= 0x7FFFFFFF
|
||||
result[1] &= 0x7FFFFFFF
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Hash password using insecure pre 4.1 method
|
||||
func scrambleOldPassword(scramble []byte, password string) []byte {
|
||||
if len(password) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
scramble = scramble[:8]
|
||||
|
||||
hashPw := pwHash([]byte(password))
|
||||
hashSc := pwHash(scramble)
|
||||
|
||||
r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
|
||||
|
||||
var out [8]byte
|
||||
for i := range out {
|
||||
out[i] = r.NextByte() + 64
|
||||
}
|
||||
|
||||
mask := r.NextByte()
|
||||
for i := range out {
|
||||
out[i] ^= mask
|
||||
}
|
||||
|
||||
return out[:]
|
||||
}
|
||||
|
||||
// Hash password using 4.1+ method (SHA1)
|
||||
func scramblePassword(scramble []byte, password string) []byte {
|
||||
if len(password) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// stage1Hash = SHA1(password)
|
||||
crypt := sha1.New()
|
||||
crypt.Write([]byte(password))
|
||||
stage1 := crypt.Sum(nil)
|
||||
|
||||
// scrambleHash = SHA1(scramble + SHA1(stage1Hash))
|
||||
// inner Hash
|
||||
crypt.Reset()
|
||||
crypt.Write(stage1)
|
||||
hash := crypt.Sum(nil)
|
||||
|
||||
// outer Hash
|
||||
crypt.Reset()
|
||||
crypt.Write(scramble)
|
||||
crypt.Write(hash)
|
||||
scramble = crypt.Sum(nil)
|
||||
|
||||
// token = scrambleHash XOR stage1Hash
|
||||
for i := range scramble {
|
||||
scramble[i] ^= stage1[i]
|
||||
}
|
||||
return scramble
|
||||
}
|
||||
|
||||
// Hash password using MySQL 8+ method (SHA256)
|
||||
func scrambleSHA256Password(scramble []byte, password string) []byte {
|
||||
if len(password) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble))
|
||||
|
||||
crypt := sha256.New()
|
||||
crypt.Write([]byte(password))
|
||||
message1 := crypt.Sum(nil)
|
||||
|
||||
crypt.Reset()
|
||||
crypt.Write(message1)
|
||||
message1Hash := crypt.Sum(nil)
|
||||
|
||||
crypt.Reset()
|
||||
crypt.Write(message1Hash)
|
||||
crypt.Write(scramble)
|
||||
message2 := crypt.Sum(nil)
|
||||
|
||||
for i := range message1 {
|
||||
message1[i] ^= message2[i]
|
||||
}
|
||||
|
||||
return message1
|
||||
}
|
||||
|
||||
func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
|
||||
plain := make([]byte, len(password)+1)
|
||||
copy(plain, password)
|
||||
for i := range plain {
|
||||
j := i % len(seed)
|
||||
plain[i] ^= seed[j]
|
||||
}
|
||||
sha1 := sha1.New()
|
||||
return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
|
||||
enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return mc.writeAuthSwitchPacket(enc)
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
|
||||
switch plugin {
|
||||
case "caching_sha2_password":
|
||||
authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
|
||||
return authResp, nil
|
||||
|
||||
case "mysql_old_password":
|
||||
if !mc.cfg.AllowOldPasswords {
|
||||
return nil, ErrOldPassword
|
||||
}
|
||||
// Note: there are edge cases where this should work but doesn't;
|
||||
// this is currently "wontfix":
|
||||
// https://github.com/go-sql-driver/mysql/issues/184
|
||||
authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0)
|
||||
return authResp, nil
|
||||
|
||||
case "mysql_clear_password":
|
||||
if !mc.cfg.AllowCleartextPasswords {
|
||||
return nil, ErrCleartextPassword
|
||||
}
|
||||
// http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
|
||||
// http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
|
||||
return append([]byte(mc.cfg.Passwd), 0), nil
|
||||
|
||||
case "mysql_native_password":
|
||||
if !mc.cfg.AllowNativePasswords {
|
||||
return nil, ErrNativePassword
|
||||
}
|
||||
// https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
|
||||
// Native password authentication only need and will need 20-byte challenge.
|
||||
authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
|
||||
return authResp, nil
|
||||
|
||||
case "sha256_password":
|
||||
if len(mc.cfg.Passwd) == 0 {
|
||||
return []byte{0}, nil
|
||||
}
|
||||
if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
|
||||
// write cleartext auth packet
|
||||
return append([]byte(mc.cfg.Passwd), 0), nil
|
||||
}
|
||||
|
||||
pubKey := mc.cfg.pubKey
|
||||
if pubKey == nil {
|
||||
// request public key from server
|
||||
return []byte{1}, nil
|
||||
}
|
||||
|
||||
// encrypted password
|
||||
enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
|
||||
return enc, err
|
||||
|
||||
default:
|
||||
errLog.Print("unknown auth plugin:", plugin)
|
||||
return nil, ErrUnknownPlugin
|
||||
}
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
|
||||
// Read Result Packet
|
||||
authData, newPlugin, err := mc.readAuthResult()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// handle auth plugin switch, if requested
|
||||
if newPlugin != "" {
|
||||
// If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
|
||||
// sent and we have to keep using the cipher sent in the init packet.
|
||||
if authData == nil {
|
||||
authData = oldAuthData
|
||||
} else {
|
||||
// copy data from read buffer to owned slice
|
||||
copy(oldAuthData, authData)
|
||||
}
|
||||
|
||||
plugin = newPlugin
|
||||
|
||||
authResp, err := mc.auth(authData, plugin)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = mc.writeAuthSwitchPacket(authResp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Read Result Packet
|
||||
authData, newPlugin, err = mc.readAuthResult()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Do not allow to change the auth plugin more than once
|
||||
if newPlugin != "" {
|
||||
return ErrMalformPkt
|
||||
}
|
||||
}
|
||||
|
||||
switch plugin {
|
||||
|
||||
// https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
|
||||
case "caching_sha2_password":
|
||||
switch len(authData) {
|
||||
case 0:
|
||||
return nil // auth successful
|
||||
case 1:
|
||||
switch authData[0] {
|
||||
case cachingSha2PasswordFastAuthSuccess:
|
||||
if err = mc.readResultOK(); err == nil {
|
||||
return nil // auth successful
|
||||
}
|
||||
|
||||
case cachingSha2PasswordPerformFullAuthentication:
|
||||
if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
|
||||
// write cleartext auth packet
|
||||
err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
pubKey := mc.cfg.pubKey
|
||||
if pubKey == nil {
|
||||
// request public key from server
|
||||
data, err := mc.buf.takeSmallBuffer(4 + 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data[4] = cachingSha2PasswordRequestPublicKey
|
||||
mc.writePacket(data)
|
||||
|
||||
// parse public key
|
||||
if data, err = mc.readPacket(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block, _ := pem.Decode(data[1:])
|
||||
pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pubKey = pkix.(*rsa.PublicKey)
|
||||
}
|
||||
|
||||
// send encrypted password
|
||||
err = mc.sendEncryptedPassword(oldAuthData, pubKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return mc.readResultOK()
|
||||
|
||||
default:
|
||||
return ErrMalformPkt
|
||||
}
|
||||
default:
|
||||
return ErrMalformPkt
|
||||
}
|
||||
|
||||
case "sha256_password":
|
||||
switch len(authData) {
|
||||
case 0:
|
||||
return nil // auth successful
|
||||
default:
|
||||
block, _ := pem.Decode(authData)
|
||||
pub, err := x509.ParsePKIXPublicKey(block.Bytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// send encrypted password
|
||||
err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return mc.readResultOK()
|
||||
}
|
||||
|
||||
default:
|
||||
return nil // auth successful
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
1330
vendor/github.com/go-sql-driver/mysql/auth_test.go
generated
vendored
Normal file
1330
vendor/github.com/go-sql-driver/mysql/auth_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
373
vendor/github.com/go-sql-driver/mysql/benchmark_test.go
generated
vendored
Normal file
373
vendor/github.com/go-sql-driver/mysql/benchmark_test.go
generated
vendored
Normal file
@ -0,0 +1,373 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"math"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TB testing.B
|
||||
|
||||
func (tb *TB) check(err error) {
|
||||
if err != nil {
|
||||
tb.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB {
|
||||
tb.check(err)
|
||||
return db
|
||||
}
|
||||
|
||||
func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows {
|
||||
tb.check(err)
|
||||
return rows
|
||||
}
|
||||
|
||||
func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt {
|
||||
tb.check(err)
|
||||
return stmt
|
||||
}
|
||||
|
||||
func initDB(b *testing.B, queries ...string) *sql.DB {
|
||||
tb := (*TB)(b)
|
||||
db := tb.checkDB(sql.Open("mysql", dsn))
|
||||
for _, query := range queries {
|
||||
if _, err := db.Exec(query); err != nil {
|
||||
b.Fatalf("error on %q: %v", query, err)
|
||||
}
|
||||
}
|
||||
return db
|
||||
}
|
||||
|
||||
const concurrencyLevel = 10
|
||||
|
||||
func BenchmarkQuery(b *testing.B) {
|
||||
tb := (*TB)(b)
|
||||
b.StopTimer()
|
||||
b.ReportAllocs()
|
||||
db := initDB(b,
|
||||
"DROP TABLE IF EXISTS foo",
|
||||
"CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
|
||||
`INSERT INTO foo VALUES (1, "one")`,
|
||||
`INSERT INTO foo VALUES (2, "two")`,
|
||||
)
|
||||
db.SetMaxIdleConns(concurrencyLevel)
|
||||
defer db.Close()
|
||||
|
||||
stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?"))
|
||||
defer stmt.Close()
|
||||
|
||||
remain := int64(b.N)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(concurrencyLevel)
|
||||
defer wg.Wait()
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < concurrencyLevel; i++ {
|
||||
go func() {
|
||||
for {
|
||||
if atomic.AddInt64(&remain, -1) < 0 {
|
||||
wg.Done()
|
||||
return
|
||||
}
|
||||
|
||||
var got string
|
||||
tb.check(stmt.QueryRow(1).Scan(&got))
|
||||
if got != "one" {
|
||||
b.Errorf("query = %q; want one", got)
|
||||
wg.Done()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkExec(b *testing.B) {
|
||||
tb := (*TB)(b)
|
||||
b.StopTimer()
|
||||
b.ReportAllocs()
|
||||
db := tb.checkDB(sql.Open("mysql", dsn))
|
||||
db.SetMaxIdleConns(concurrencyLevel)
|
||||
defer db.Close()
|
||||
|
||||
stmt := tb.checkStmt(db.Prepare("DO 1"))
|
||||
defer stmt.Close()
|
||||
|
||||
remain := int64(b.N)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(concurrencyLevel)
|
||||
defer wg.Wait()
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < concurrencyLevel; i++ {
|
||||
go func() {
|
||||
for {
|
||||
if atomic.AddInt64(&remain, -1) < 0 {
|
||||
wg.Done()
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := stmt.Exec(); err != nil {
|
||||
b.Fatal(err.Error())
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// data, but no db writes
|
||||
var roundtripSample []byte
|
||||
|
||||
func initRoundtripBenchmarks() ([]byte, int, int) {
|
||||
if roundtripSample == nil {
|
||||
roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024))
|
||||
}
|
||||
return roundtripSample, 16, len(roundtripSample)
|
||||
}
|
||||
|
||||
func BenchmarkRoundtripTxt(b *testing.B) {
|
||||
b.StopTimer()
|
||||
sample, min, max := initRoundtripBenchmarks()
|
||||
sampleString := string(sample)
|
||||
b.ReportAllocs()
|
||||
tb := (*TB)(b)
|
||||
db := tb.checkDB(sql.Open("mysql", dsn))
|
||||
defer db.Close()
|
||||
b.StartTimer()
|
||||
var result string
|
||||
for i := 0; i < b.N; i++ {
|
||||
length := min + i
|
||||
if length > max {
|
||||
length = max
|
||||
}
|
||||
test := sampleString[0:length]
|
||||
rows := tb.checkRows(db.Query(`SELECT "` + test + `"`))
|
||||
if !rows.Next() {
|
||||
rows.Close()
|
||||
b.Fatalf("crashed")
|
||||
}
|
||||
err := rows.Scan(&result)
|
||||
if err != nil {
|
||||
rows.Close()
|
||||
b.Fatalf("crashed")
|
||||
}
|
||||
if result != test {
|
||||
rows.Close()
|
||||
b.Errorf("mismatch")
|
||||
}
|
||||
rows.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRoundtripBin(b *testing.B) {
|
||||
b.StopTimer()
|
||||
sample, min, max := initRoundtripBenchmarks()
|
||||
b.ReportAllocs()
|
||||
tb := (*TB)(b)
|
||||
db := tb.checkDB(sql.Open("mysql", dsn))
|
||||
defer db.Close()
|
||||
stmt := tb.checkStmt(db.Prepare("SELECT ?"))
|
||||
defer stmt.Close()
|
||||
b.StartTimer()
|
||||
var result sql.RawBytes
|
||||
for i := 0; i < b.N; i++ {
|
||||
length := min + i
|
||||
if length > max {
|
||||
length = max
|
||||
}
|
||||
test := sample[0:length]
|
||||
rows := tb.checkRows(stmt.Query(test))
|
||||
if !rows.Next() {
|
||||
rows.Close()
|
||||
b.Fatalf("crashed")
|
||||
}
|
||||
err := rows.Scan(&result)
|
||||
if err != nil {
|
||||
rows.Close()
|
||||
b.Fatalf("crashed")
|
||||
}
|
||||
if !bytes.Equal(result, test) {
|
||||
rows.Close()
|
||||
b.Errorf("mismatch")
|
||||
}
|
||||
rows.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkInterpolation(b *testing.B) {
|
||||
mc := &mysqlConn{
|
||||
cfg: &Config{
|
||||
InterpolateParams: true,
|
||||
Loc: time.UTC,
|
||||
},
|
||||
maxAllowedPacket: maxPacketSize,
|
||||
maxWriteSize: maxPacketSize - 1,
|
||||
buf: newBuffer(nil),
|
||||
}
|
||||
|
||||
args := []driver.Value{
|
||||
int64(42424242),
|
||||
float64(math.Pi),
|
||||
false,
|
||||
time.Unix(1423411542, 807015000),
|
||||
[]byte("bytes containing special chars ' \" \a \x00"),
|
||||
"string containing special chars ' \" \a \x00",
|
||||
}
|
||||
q := "SELECT ?, ?, ?, ?, ?, ?"
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := mc.interpolateParams(q, args)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkQueryContext(b *testing.B, db *sql.DB, p int) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
|
||||
|
||||
tb := (*TB)(b)
|
||||
stmt := tb.checkStmt(db.PrepareContext(ctx, "SELECT val FROM foo WHERE id=?"))
|
||||
defer stmt.Close()
|
||||
|
||||
b.SetParallelism(p)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var got string
|
||||
for pb.Next() {
|
||||
tb.check(stmt.QueryRow(1).Scan(&got))
|
||||
if got != "one" {
|
||||
b.Fatalf("query = %q; want one", got)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkQueryContext(b *testing.B) {
|
||||
db := initDB(b,
|
||||
"DROP TABLE IF EXISTS foo",
|
||||
"CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
|
||||
`INSERT INTO foo VALUES (1, "one")`,
|
||||
`INSERT INTO foo VALUES (2, "two")`,
|
||||
)
|
||||
defer db.Close()
|
||||
for _, p := range []int{1, 2, 3, 4} {
|
||||
b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
|
||||
benchmarkQueryContext(b, db, p)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkExecContext(b *testing.B, db *sql.DB, p int) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
db.SetMaxIdleConns(p * runtime.GOMAXPROCS(0))
|
||||
|
||||
tb := (*TB)(b)
|
||||
stmt := tb.checkStmt(db.PrepareContext(ctx, "DO 1"))
|
||||
defer stmt.Close()
|
||||
|
||||
b.SetParallelism(p)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
if _, err := stmt.ExecContext(ctx); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkExecContext(b *testing.B) {
|
||||
db := initDB(b,
|
||||
"DROP TABLE IF EXISTS foo",
|
||||
"CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
|
||||
`INSERT INTO foo VALUES (1, "one")`,
|
||||
`INSERT INTO foo VALUES (2, "two")`,
|
||||
)
|
||||
defer db.Close()
|
||||
for _, p := range []int{1, 2, 3, 4} {
|
||||
b.Run(fmt.Sprintf("%d", p), func(b *testing.B) {
|
||||
benchmarkQueryContext(b, db, p)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkQueryRawBytes benchmarks fetching 100 blobs using sql.RawBytes.
|
||||
// "size=" means size of each blobs.
|
||||
func BenchmarkQueryRawBytes(b *testing.B) {
|
||||
var sizes []int = []int{100, 1000, 2000, 4000, 8000, 12000, 16000, 32000, 64000, 256000}
|
||||
db := initDB(b,
|
||||
"DROP TABLE IF EXISTS bench_rawbytes",
|
||||
"CREATE TABLE bench_rawbytes (id INT PRIMARY KEY, val LONGBLOB)",
|
||||
)
|
||||
defer db.Close()
|
||||
|
||||
blob := make([]byte, sizes[len(sizes)-1])
|
||||
for i := range blob {
|
||||
blob[i] = 42
|
||||
}
|
||||
for i := 0; i < 100; i++ {
|
||||
_, err := db.Exec("INSERT INTO bench_rawbytes VALUES (?, ?)", i, blob)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, s := range sizes {
|
||||
b.Run(fmt.Sprintf("size=%v", s), func(b *testing.B) {
|
||||
db.SetMaxIdleConns(0)
|
||||
db.SetMaxIdleConns(1)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
for j := 0; j < b.N; j++ {
|
||||
rows, err := db.Query("SELECT LEFT(val, ?) as v FROM bench_rawbytes", s)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
nrows := 0
|
||||
for rows.Next() {
|
||||
var buf sql.RawBytes
|
||||
err := rows.Scan(&buf)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if len(buf) != s {
|
||||
b.Fatalf("size mismatch: expected %v, got %v", s, len(buf))
|
||||
}
|
||||
nrows++
|
||||
}
|
||||
rows.Close()
|
||||
if nrows != 100 {
|
||||
b.Fatalf("numbers of rows mismatch: expected %v, got %v", 100, nrows)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
122
vendor/github.com/go-sql-driver/mysql/buffer.go
generated
vendored
122
vendor/github.com/go-sql-driver/mysql/buffer.go
generated
vendored
@ -8,53 +8,86 @@
|
||||
|
||||
package mysql
|
||||
|
||||
import "io"
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
const defaultBufSize = 4096
|
||||
const maxCachedBufSize = 256 * 1024
|
||||
|
||||
// A buffer which is used for both reading and writing.
|
||||
// This is possible since communication on each connection is synchronous.
|
||||
// In other words, we can't write and read simultaneously on the same connection.
|
||||
// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
|
||||
// Also highly optimized for this particular use case.
|
||||
// This buffer is backed by two byte slices in a double-buffering scheme
|
||||
type buffer struct {
|
||||
buf []byte
|
||||
rd io.Reader
|
||||
idx int
|
||||
length int
|
||||
buf []byte // buf is a byte buffer whose length and capacity are equal.
|
||||
nc net.Conn
|
||||
idx int
|
||||
length int
|
||||
timeout time.Duration
|
||||
dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer
|
||||
flipcnt uint // flipcnt is the current buffer counter for double-buffering
|
||||
}
|
||||
|
||||
func newBuffer(rd io.Reader) buffer {
|
||||
var b [defaultBufSize]byte
|
||||
// newBuffer allocates and returns a new buffer.
|
||||
func newBuffer(nc net.Conn) buffer {
|
||||
fg := make([]byte, defaultBufSize)
|
||||
return buffer{
|
||||
buf: b[:],
|
||||
rd: rd,
|
||||
buf: fg,
|
||||
nc: nc,
|
||||
dbuf: [2][]byte{fg, nil},
|
||||
}
|
||||
}
|
||||
|
||||
// flip replaces the active buffer with the background buffer
|
||||
// this is a delayed flip that simply increases the buffer counter;
|
||||
// the actual flip will be performed the next time we call `buffer.fill`
|
||||
func (b *buffer) flip() {
|
||||
b.flipcnt += 1
|
||||
}
|
||||
|
||||
// fill reads into the buffer until at least _need_ bytes are in it
|
||||
func (b *buffer) fill(need int) error {
|
||||
n := b.length
|
||||
// fill data into its double-buffering target: if we've called
|
||||
// flip on this buffer, we'll be copying to the background buffer,
|
||||
// and then filling it with network data; otherwise we'll just move
|
||||
// the contents of the current buffer to the front before filling it
|
||||
dest := b.dbuf[b.flipcnt&1]
|
||||
|
||||
// move existing data to the beginning
|
||||
if n > 0 && b.idx > 0 {
|
||||
copy(b.buf[0:n], b.buf[b.idx:])
|
||||
}
|
||||
|
||||
// grow buffer if necessary
|
||||
// TODO: let the buffer shrink again at some point
|
||||
// Maybe keep the org buf slice and swap back?
|
||||
if need > len(b.buf) {
|
||||
// grow buffer if necessary to fit the whole packet.
|
||||
if need > len(dest) {
|
||||
// Round up to the next multiple of the default size
|
||||
newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
|
||||
copy(newBuf, b.buf)
|
||||
b.buf = newBuf
|
||||
dest = make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
|
||||
|
||||
// if the allocated buffer is not too large, move it to backing storage
|
||||
// to prevent extra allocations on applications that perform large reads
|
||||
if len(dest) <= maxCachedBufSize {
|
||||
b.dbuf[b.flipcnt&1] = dest
|
||||
}
|
||||
}
|
||||
|
||||
// if we're filling the fg buffer, move the existing data to the start of it.
|
||||
// if we're filling the bg buffer, copy over the data
|
||||
if n > 0 {
|
||||
copy(dest[:n], b.buf[b.idx:])
|
||||
}
|
||||
|
||||
b.buf = dest
|
||||
b.idx = 0
|
||||
|
||||
for {
|
||||
nn, err := b.rd.Read(b.buf[n:])
|
||||
if b.timeout > 0 {
|
||||
if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
nn, err := b.nc.Read(b.buf[n:])
|
||||
n += nn
|
||||
|
||||
switch err {
|
||||
@ -94,43 +127,56 @@ func (b *buffer) readNext(need int) ([]byte, error) {
|
||||
return b.buf[offset:b.idx], nil
|
||||
}
|
||||
|
||||
// returns a buffer with the requested size.
|
||||
// takeBuffer returns a buffer with the requested size.
|
||||
// If possible, a slice from the existing buffer is returned.
|
||||
// Otherwise a bigger buffer is made.
|
||||
// Only one buffer (total) can be used at a time.
|
||||
func (b *buffer) takeBuffer(length int) []byte {
|
||||
func (b *buffer) takeBuffer(length int) ([]byte, error) {
|
||||
if b.length > 0 {
|
||||
return nil
|
||||
return nil, ErrBusyBuffer
|
||||
}
|
||||
|
||||
// test (cheap) general case first
|
||||
if length <= defaultBufSize || length <= cap(b.buf) {
|
||||
return b.buf[:length]
|
||||
if length <= cap(b.buf) {
|
||||
return b.buf[:length], nil
|
||||
}
|
||||
|
||||
if length < maxPacketSize {
|
||||
b.buf = make([]byte, length)
|
||||
return b.buf
|
||||
return b.buf, nil
|
||||
}
|
||||
return make([]byte, length)
|
||||
|
||||
// buffer is larger than we want to store.
|
||||
return make([]byte, length), nil
|
||||
}
|
||||
|
||||
// shortcut which can be used if the requested buffer is guaranteed to be
|
||||
// smaller than defaultBufSize
|
||||
// takeSmallBuffer is a shortcut which can be used if length is
|
||||
// known to be smaller than defaultBufSize.
|
||||
// Only one buffer (total) can be used at a time.
|
||||
func (b *buffer) takeSmallBuffer(length int) []byte {
|
||||
if b.length == 0 {
|
||||
return b.buf[:length]
|
||||
func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
|
||||
if b.length > 0 {
|
||||
return nil, ErrBusyBuffer
|
||||
}
|
||||
return nil
|
||||
return b.buf[:length], nil
|
||||
}
|
||||
|
||||
// takeCompleteBuffer returns the complete existing buffer.
|
||||
// This can be used if the necessary buffer size is unknown.
|
||||
// cap and len of the returned buffer will be equal.
|
||||
// Only one buffer (total) can be used at a time.
|
||||
func (b *buffer) takeCompleteBuffer() []byte {
|
||||
if b.length == 0 {
|
||||
return b.buf
|
||||
func (b *buffer) takeCompleteBuffer() ([]byte, error) {
|
||||
if b.length > 0 {
|
||||
return nil, ErrBusyBuffer
|
||||
}
|
||||
return b.buf, nil
|
||||
}
|
||||
|
||||
// store stores buf, an updated buffer, if it's suitable to do so.
|
||||
func (b *buffer) store(buf []byte) error {
|
||||
if b.length > 0 {
|
||||
return ErrBusyBuffer
|
||||
} else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
|
||||
b.buf = buf[:cap(buf)]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
379
vendor/github.com/go-sql-driver/mysql/collations.go
generated
vendored
379
vendor/github.com/go-sql-driver/mysql/collations.go
generated
vendored
@ -8,182 +8,190 @@
|
||||
|
||||
package mysql
|
||||
|
||||
const defaultCollation byte = 33 // utf8_general_ci
|
||||
const defaultCollation = "utf8mb4_general_ci"
|
||||
const binaryCollation = "binary"
|
||||
|
||||
// A list of available collations mapped to the internal ID.
|
||||
// To update this map use the following MySQL query:
|
||||
// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS
|
||||
// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS WHERE ID<256 ORDER BY ID
|
||||
//
|
||||
// The handshake packet has only 1 byte for collation_id, so we can't use collations with ID > 255.
|
||||
//
|
||||
// ucs2, utf16, and utf32 can't be used for connection charset.
|
||||
// https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset
|
||||
// They are commented out to reduce this map.
|
||||
var collations = map[string]byte{
|
||||
"big5_chinese_ci": 1,
|
||||
"latin2_czech_cs": 2,
|
||||
"dec8_swedish_ci": 3,
|
||||
"cp850_general_ci": 4,
|
||||
"latin1_german1_ci": 5,
|
||||
"hp8_english_ci": 6,
|
||||
"koi8r_general_ci": 7,
|
||||
"latin1_swedish_ci": 8,
|
||||
"latin2_general_ci": 9,
|
||||
"swe7_swedish_ci": 10,
|
||||
"ascii_general_ci": 11,
|
||||
"ujis_japanese_ci": 12,
|
||||
"sjis_japanese_ci": 13,
|
||||
"cp1251_bulgarian_ci": 14,
|
||||
"latin1_danish_ci": 15,
|
||||
"hebrew_general_ci": 16,
|
||||
"tis620_thai_ci": 18,
|
||||
"euckr_korean_ci": 19,
|
||||
"latin7_estonian_cs": 20,
|
||||
"latin2_hungarian_ci": 21,
|
||||
"koi8u_general_ci": 22,
|
||||
"cp1251_ukrainian_ci": 23,
|
||||
"gb2312_chinese_ci": 24,
|
||||
"greek_general_ci": 25,
|
||||
"cp1250_general_ci": 26,
|
||||
"latin2_croatian_ci": 27,
|
||||
"gbk_chinese_ci": 28,
|
||||
"cp1257_lithuanian_ci": 29,
|
||||
"latin5_turkish_ci": 30,
|
||||
"latin1_german2_ci": 31,
|
||||
"armscii8_general_ci": 32,
|
||||
"utf8_general_ci": 33,
|
||||
"cp1250_czech_cs": 34,
|
||||
"ucs2_general_ci": 35,
|
||||
"cp866_general_ci": 36,
|
||||
"keybcs2_general_ci": 37,
|
||||
"macce_general_ci": 38,
|
||||
"macroman_general_ci": 39,
|
||||
"cp852_general_ci": 40,
|
||||
"latin7_general_ci": 41,
|
||||
"latin7_general_cs": 42,
|
||||
"macce_bin": 43,
|
||||
"cp1250_croatian_ci": 44,
|
||||
"utf8mb4_general_ci": 45,
|
||||
"utf8mb4_bin": 46,
|
||||
"latin1_bin": 47,
|
||||
"latin1_general_ci": 48,
|
||||
"latin1_general_cs": 49,
|
||||
"cp1251_bin": 50,
|
||||
"cp1251_general_ci": 51,
|
||||
"cp1251_general_cs": 52,
|
||||
"macroman_bin": 53,
|
||||
"utf16_general_ci": 54,
|
||||
"utf16_bin": 55,
|
||||
"utf16le_general_ci": 56,
|
||||
"cp1256_general_ci": 57,
|
||||
"cp1257_bin": 58,
|
||||
"cp1257_general_ci": 59,
|
||||
"utf32_general_ci": 60,
|
||||
"utf32_bin": 61,
|
||||
"utf16le_bin": 62,
|
||||
"binary": 63,
|
||||
"armscii8_bin": 64,
|
||||
"ascii_bin": 65,
|
||||
"cp1250_bin": 66,
|
||||
"cp1256_bin": 67,
|
||||
"cp866_bin": 68,
|
||||
"dec8_bin": 69,
|
||||
"greek_bin": 70,
|
||||
"hebrew_bin": 71,
|
||||
"hp8_bin": 72,
|
||||
"keybcs2_bin": 73,
|
||||
"koi8r_bin": 74,
|
||||
"koi8u_bin": 75,
|
||||
"latin2_bin": 77,
|
||||
"latin5_bin": 78,
|
||||
"latin7_bin": 79,
|
||||
"cp850_bin": 80,
|
||||
"cp852_bin": 81,
|
||||
"swe7_bin": 82,
|
||||
"utf8_bin": 83,
|
||||
"big5_bin": 84,
|
||||
"euckr_bin": 85,
|
||||
"gb2312_bin": 86,
|
||||
"gbk_bin": 87,
|
||||
"sjis_bin": 88,
|
||||
"tis620_bin": 89,
|
||||
"ucs2_bin": 90,
|
||||
"ujis_bin": 91,
|
||||
"geostd8_general_ci": 92,
|
||||
"geostd8_bin": 93,
|
||||
"latin1_spanish_ci": 94,
|
||||
"cp932_japanese_ci": 95,
|
||||
"cp932_bin": 96,
|
||||
"eucjpms_japanese_ci": 97,
|
||||
"eucjpms_bin": 98,
|
||||
"cp1250_polish_ci": 99,
|
||||
"utf16_unicode_ci": 101,
|
||||
"utf16_icelandic_ci": 102,
|
||||
"utf16_latvian_ci": 103,
|
||||
"utf16_romanian_ci": 104,
|
||||
"utf16_slovenian_ci": 105,
|
||||
"utf16_polish_ci": 106,
|
||||
"utf16_estonian_ci": 107,
|
||||
"utf16_spanish_ci": 108,
|
||||
"utf16_swedish_ci": 109,
|
||||
"utf16_turkish_ci": 110,
|
||||
"utf16_czech_ci": 111,
|
||||
"utf16_danish_ci": 112,
|
||||
"utf16_lithuanian_ci": 113,
|
||||
"utf16_slovak_ci": 114,
|
||||
"utf16_spanish2_ci": 115,
|
||||
"utf16_roman_ci": 116,
|
||||
"utf16_persian_ci": 117,
|
||||
"utf16_esperanto_ci": 118,
|
||||
"utf16_hungarian_ci": 119,
|
||||
"utf16_sinhala_ci": 120,
|
||||
"utf16_german2_ci": 121,
|
||||
"utf16_croatian_ci": 122,
|
||||
"utf16_unicode_520_ci": 123,
|
||||
"utf16_vietnamese_ci": 124,
|
||||
"ucs2_unicode_ci": 128,
|
||||
"ucs2_icelandic_ci": 129,
|
||||
"ucs2_latvian_ci": 130,
|
||||
"ucs2_romanian_ci": 131,
|
||||
"ucs2_slovenian_ci": 132,
|
||||
"ucs2_polish_ci": 133,
|
||||
"ucs2_estonian_ci": 134,
|
||||
"ucs2_spanish_ci": 135,
|
||||
"ucs2_swedish_ci": 136,
|
||||
"ucs2_turkish_ci": 137,
|
||||
"ucs2_czech_ci": 138,
|
||||
"ucs2_danish_ci": 139,
|
||||
"ucs2_lithuanian_ci": 140,
|
||||
"ucs2_slovak_ci": 141,
|
||||
"ucs2_spanish2_ci": 142,
|
||||
"ucs2_roman_ci": 143,
|
||||
"ucs2_persian_ci": 144,
|
||||
"ucs2_esperanto_ci": 145,
|
||||
"ucs2_hungarian_ci": 146,
|
||||
"ucs2_sinhala_ci": 147,
|
||||
"ucs2_german2_ci": 148,
|
||||
"ucs2_croatian_ci": 149,
|
||||
"ucs2_unicode_520_ci": 150,
|
||||
"ucs2_vietnamese_ci": 151,
|
||||
"ucs2_general_mysql500_ci": 159,
|
||||
"utf32_unicode_ci": 160,
|
||||
"utf32_icelandic_ci": 161,
|
||||
"utf32_latvian_ci": 162,
|
||||
"utf32_romanian_ci": 163,
|
||||
"utf32_slovenian_ci": 164,
|
||||
"utf32_polish_ci": 165,
|
||||
"utf32_estonian_ci": 166,
|
||||
"utf32_spanish_ci": 167,
|
||||
"utf32_swedish_ci": 168,
|
||||
"utf32_turkish_ci": 169,
|
||||
"utf32_czech_ci": 170,
|
||||
"utf32_danish_ci": 171,
|
||||
"utf32_lithuanian_ci": 172,
|
||||
"utf32_slovak_ci": 173,
|
||||
"utf32_spanish2_ci": 174,
|
||||
"utf32_roman_ci": 175,
|
||||
"utf32_persian_ci": 176,
|
||||
"utf32_esperanto_ci": 177,
|
||||
"utf32_hungarian_ci": 178,
|
||||
"utf32_sinhala_ci": 179,
|
||||
"utf32_german2_ci": 180,
|
||||
"utf32_croatian_ci": 181,
|
||||
"utf32_unicode_520_ci": 182,
|
||||
"utf32_vietnamese_ci": 183,
|
||||
"big5_chinese_ci": 1,
|
||||
"latin2_czech_cs": 2,
|
||||
"dec8_swedish_ci": 3,
|
||||
"cp850_general_ci": 4,
|
||||
"latin1_german1_ci": 5,
|
||||
"hp8_english_ci": 6,
|
||||
"koi8r_general_ci": 7,
|
||||
"latin1_swedish_ci": 8,
|
||||
"latin2_general_ci": 9,
|
||||
"swe7_swedish_ci": 10,
|
||||
"ascii_general_ci": 11,
|
||||
"ujis_japanese_ci": 12,
|
||||
"sjis_japanese_ci": 13,
|
||||
"cp1251_bulgarian_ci": 14,
|
||||
"latin1_danish_ci": 15,
|
||||
"hebrew_general_ci": 16,
|
||||
"tis620_thai_ci": 18,
|
||||
"euckr_korean_ci": 19,
|
||||
"latin7_estonian_cs": 20,
|
||||
"latin2_hungarian_ci": 21,
|
||||
"koi8u_general_ci": 22,
|
||||
"cp1251_ukrainian_ci": 23,
|
||||
"gb2312_chinese_ci": 24,
|
||||
"greek_general_ci": 25,
|
||||
"cp1250_general_ci": 26,
|
||||
"latin2_croatian_ci": 27,
|
||||
"gbk_chinese_ci": 28,
|
||||
"cp1257_lithuanian_ci": 29,
|
||||
"latin5_turkish_ci": 30,
|
||||
"latin1_german2_ci": 31,
|
||||
"armscii8_general_ci": 32,
|
||||
"utf8_general_ci": 33,
|
||||
"cp1250_czech_cs": 34,
|
||||
//"ucs2_general_ci": 35,
|
||||
"cp866_general_ci": 36,
|
||||
"keybcs2_general_ci": 37,
|
||||
"macce_general_ci": 38,
|
||||
"macroman_general_ci": 39,
|
||||
"cp852_general_ci": 40,
|
||||
"latin7_general_ci": 41,
|
||||
"latin7_general_cs": 42,
|
||||
"macce_bin": 43,
|
||||
"cp1250_croatian_ci": 44,
|
||||
"utf8mb4_general_ci": 45,
|
||||
"utf8mb4_bin": 46,
|
||||
"latin1_bin": 47,
|
||||
"latin1_general_ci": 48,
|
||||
"latin1_general_cs": 49,
|
||||
"cp1251_bin": 50,
|
||||
"cp1251_general_ci": 51,
|
||||
"cp1251_general_cs": 52,
|
||||
"macroman_bin": 53,
|
||||
//"utf16_general_ci": 54,
|
||||
//"utf16_bin": 55,
|
||||
//"utf16le_general_ci": 56,
|
||||
"cp1256_general_ci": 57,
|
||||
"cp1257_bin": 58,
|
||||
"cp1257_general_ci": 59,
|
||||
//"utf32_general_ci": 60,
|
||||
//"utf32_bin": 61,
|
||||
//"utf16le_bin": 62,
|
||||
"binary": 63,
|
||||
"armscii8_bin": 64,
|
||||
"ascii_bin": 65,
|
||||
"cp1250_bin": 66,
|
||||
"cp1256_bin": 67,
|
||||
"cp866_bin": 68,
|
||||
"dec8_bin": 69,
|
||||
"greek_bin": 70,
|
||||
"hebrew_bin": 71,
|
||||
"hp8_bin": 72,
|
||||
"keybcs2_bin": 73,
|
||||
"koi8r_bin": 74,
|
||||
"koi8u_bin": 75,
|
||||
"utf8_tolower_ci": 76,
|
||||
"latin2_bin": 77,
|
||||
"latin5_bin": 78,
|
||||
"latin7_bin": 79,
|
||||
"cp850_bin": 80,
|
||||
"cp852_bin": 81,
|
||||
"swe7_bin": 82,
|
||||
"utf8_bin": 83,
|
||||
"big5_bin": 84,
|
||||
"euckr_bin": 85,
|
||||
"gb2312_bin": 86,
|
||||
"gbk_bin": 87,
|
||||
"sjis_bin": 88,
|
||||
"tis620_bin": 89,
|
||||
//"ucs2_bin": 90,
|
||||
"ujis_bin": 91,
|
||||
"geostd8_general_ci": 92,
|
||||
"geostd8_bin": 93,
|
||||
"latin1_spanish_ci": 94,
|
||||
"cp932_japanese_ci": 95,
|
||||
"cp932_bin": 96,
|
||||
"eucjpms_japanese_ci": 97,
|
||||
"eucjpms_bin": 98,
|
||||
"cp1250_polish_ci": 99,
|
||||
//"utf16_unicode_ci": 101,
|
||||
//"utf16_icelandic_ci": 102,
|
||||
//"utf16_latvian_ci": 103,
|
||||
//"utf16_romanian_ci": 104,
|
||||
//"utf16_slovenian_ci": 105,
|
||||
//"utf16_polish_ci": 106,
|
||||
//"utf16_estonian_ci": 107,
|
||||
//"utf16_spanish_ci": 108,
|
||||
//"utf16_swedish_ci": 109,
|
||||
//"utf16_turkish_ci": 110,
|
||||
//"utf16_czech_ci": 111,
|
||||
//"utf16_danish_ci": 112,
|
||||
//"utf16_lithuanian_ci": 113,
|
||||
//"utf16_slovak_ci": 114,
|
||||
//"utf16_spanish2_ci": 115,
|
||||
//"utf16_roman_ci": 116,
|
||||
//"utf16_persian_ci": 117,
|
||||
//"utf16_esperanto_ci": 118,
|
||||
//"utf16_hungarian_ci": 119,
|
||||
//"utf16_sinhala_ci": 120,
|
||||
//"utf16_german2_ci": 121,
|
||||
//"utf16_croatian_ci": 122,
|
||||
//"utf16_unicode_520_ci": 123,
|
||||
//"utf16_vietnamese_ci": 124,
|
||||
//"ucs2_unicode_ci": 128,
|
||||
//"ucs2_icelandic_ci": 129,
|
||||
//"ucs2_latvian_ci": 130,
|
||||
//"ucs2_romanian_ci": 131,
|
||||
//"ucs2_slovenian_ci": 132,
|
||||
//"ucs2_polish_ci": 133,
|
||||
//"ucs2_estonian_ci": 134,
|
||||
//"ucs2_spanish_ci": 135,
|
||||
//"ucs2_swedish_ci": 136,
|
||||
//"ucs2_turkish_ci": 137,
|
||||
//"ucs2_czech_ci": 138,
|
||||
//"ucs2_danish_ci": 139,
|
||||
//"ucs2_lithuanian_ci": 140,
|
||||
//"ucs2_slovak_ci": 141,
|
||||
//"ucs2_spanish2_ci": 142,
|
||||
//"ucs2_roman_ci": 143,
|
||||
//"ucs2_persian_ci": 144,
|
||||
//"ucs2_esperanto_ci": 145,
|
||||
//"ucs2_hungarian_ci": 146,
|
||||
//"ucs2_sinhala_ci": 147,
|
||||
//"ucs2_german2_ci": 148,
|
||||
//"ucs2_croatian_ci": 149,
|
||||
//"ucs2_unicode_520_ci": 150,
|
||||
//"ucs2_vietnamese_ci": 151,
|
||||
//"ucs2_general_mysql500_ci": 159,
|
||||
//"utf32_unicode_ci": 160,
|
||||
//"utf32_icelandic_ci": 161,
|
||||
//"utf32_latvian_ci": 162,
|
||||
//"utf32_romanian_ci": 163,
|
||||
//"utf32_slovenian_ci": 164,
|
||||
//"utf32_polish_ci": 165,
|
||||
//"utf32_estonian_ci": 166,
|
||||
//"utf32_spanish_ci": 167,
|
||||
//"utf32_swedish_ci": 168,
|
||||
//"utf32_turkish_ci": 169,
|
||||
//"utf32_czech_ci": 170,
|
||||
//"utf32_danish_ci": 171,
|
||||
//"utf32_lithuanian_ci": 172,
|
||||
//"utf32_slovak_ci": 173,
|
||||
//"utf32_spanish2_ci": 174,
|
||||
//"utf32_roman_ci": 175,
|
||||
//"utf32_persian_ci": 176,
|
||||
//"utf32_esperanto_ci": 177,
|
||||
//"utf32_hungarian_ci": 178,
|
||||
//"utf32_sinhala_ci": 179,
|
||||
//"utf32_german2_ci": 180,
|
||||
//"utf32_croatian_ci": 181,
|
||||
//"utf32_unicode_520_ci": 182,
|
||||
//"utf32_vietnamese_ci": 183,
|
||||
"utf8_unicode_ci": 192,
|
||||
"utf8_icelandic_ci": 193,
|
||||
"utf8_latvian_ci": 194,
|
||||
@ -233,18 +241,25 @@ var collations = map[string]byte{
|
||||
"utf8mb4_croatian_ci": 245,
|
||||
"utf8mb4_unicode_520_ci": 246,
|
||||
"utf8mb4_vietnamese_ci": 247,
|
||||
"gb18030_chinese_ci": 248,
|
||||
"gb18030_bin": 249,
|
||||
"gb18030_unicode_520_ci": 250,
|
||||
"utf8mb4_0900_ai_ci": 255,
|
||||
}
|
||||
|
||||
// A blacklist of collations for which it is unsafe to interpolate parameters.
|
||||
// These multibyte encodings may contain 0x5c (`\`) in their trailing bytes.
|
||||
var unsafeCollations = map[byte]bool{
|
||||
1: true, // big5_chinese_ci
|
||||
13: true, // sjis_japanese_ci
|
||||
28: true, // gbk_chinese_ci
|
||||
84: true, // big5_bin
|
||||
86: true, // gb2312_bin
|
||||
87: true, // gbk_bin
|
||||
88: true, // sjis_bin
|
||||
95: true, // cp932_japanese_ci
|
||||
96: true, // cp932_bin
|
||||
var unsafeCollations = map[string]bool{
|
||||
"big5_chinese_ci": true,
|
||||
"sjis_japanese_ci": true,
|
||||
"gbk_chinese_ci": true,
|
||||
"big5_bin": true,
|
||||
"gb2312_bin": true,
|
||||
"gbk_bin": true,
|
||||
"sjis_bin": true,
|
||||
"cp932_japanese_ci": true,
|
||||
"cp932_bin": true,
|
||||
"gb18030_chinese_ci": true,
|
||||
"gb18030_bin": true,
|
||||
"gb18030_unicode_520_ci": true,
|
||||
}
|
||||
|
53
vendor/github.com/go-sql-driver/mysql/conncheck.go
generated
vendored
Normal file
53
vendor/github.com/go-sql-driver/mysql/conncheck.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build !windows,!appengine
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var errUnexpectedRead = errors.New("unexpected read from socket")
|
||||
|
||||
func connCheck(c net.Conn) error {
|
||||
var (
|
||||
n int
|
||||
err error
|
||||
buff [1]byte
|
||||
)
|
||||
|
||||
sconn, ok := c.(syscall.Conn)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
rc, err := sconn.SyscallConn()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rerr := rc.Read(func(fd uintptr) bool {
|
||||
n, err = syscall.Read(int(fd), buff[:])
|
||||
return true
|
||||
})
|
||||
switch {
|
||||
case rerr != nil:
|
||||
return rerr
|
||||
case n == 0 && err == nil:
|
||||
return io.EOF
|
||||
case n > 0:
|
||||
return errUnexpectedRead
|
||||
case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK:
|
||||
return nil
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
@ -1,19 +1,17 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build appengine
|
||||
// +build windows appengine
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"appengine/cloudsql"
|
||||
)
|
||||
import "net"
|
||||
|
||||
func init() {
|
||||
RegisterDial("cloudsql", cloudsql.Dial)
|
||||
func connCheck(c net.Conn) error {
|
||||
return nil
|
||||
}
|
38
vendor/github.com/go-sql-driver/mysql/conncheck_test.go
generated
vendored
Normal file
38
vendor/github.com/go-sql-driver/mysql/conncheck_test.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build go1.10,!windows
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestStaleConnectionChecks(t *testing.T) {
|
||||
runTests(t, dsn, func(dbt *DBTest) {
|
||||
dbt.mustExec("SET @@SESSION.wait_timeout = 2")
|
||||
|
||||
if err := dbt.db.Ping(); err != nil {
|
||||
dbt.Fatal(err)
|
||||
}
|
||||
|
||||
// wait for MySQL to close our connection
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
tx, err := dbt.db.Begin()
|
||||
if err != nil {
|
||||
dbt.Fatal(err)
|
||||
}
|
||||
|
||||
if err := tx.Rollback(); err != nil {
|
||||
dbt.Fatal(err)
|
||||
}
|
||||
})
|
||||
}
|
414
vendor/github.com/go-sql-driver/mysql/connection.go
generated
vendored
414
vendor/github.com/go-sql-driver/mysql/connection.go
generated
vendored
@ -9,9 +9,10 @@
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -21,40 +22,31 @@ import (
|
||||
type mysqlConn struct {
|
||||
buf buffer
|
||||
netConn net.Conn
|
||||
rawConn net.Conn // underlying connection when netConn is TLS connection.
|
||||
affectedRows uint64
|
||||
insertId uint64
|
||||
cfg *config
|
||||
maxPacketAllowed int
|
||||
cfg *Config
|
||||
maxAllowedPacket int
|
||||
maxWriteSize int
|
||||
writeTimeout time.Duration
|
||||
flags clientFlag
|
||||
status statusFlag
|
||||
sequence uint8
|
||||
parseTime bool
|
||||
strict bool
|
||||
}
|
||||
reset bool // set when the Go SQL package calls ResetSession
|
||||
|
||||
type config struct {
|
||||
user string
|
||||
passwd string
|
||||
net string
|
||||
addr string
|
||||
dbname string
|
||||
params map[string]string
|
||||
loc *time.Location
|
||||
tls *tls.Config
|
||||
timeout time.Duration
|
||||
collation uint8
|
||||
allowAllFiles bool
|
||||
allowOldPasswords bool
|
||||
allowCleartextPasswords bool
|
||||
clientFoundRows bool
|
||||
columnsWithAlias bool
|
||||
interpolateParams bool
|
||||
// for context support (Go 1.8+)
|
||||
watching bool
|
||||
watcher chan<- context.Context
|
||||
closech chan struct{}
|
||||
finished chan<- struct{}
|
||||
canceled atomicError // set non-nil if conn is canceled
|
||||
closed atomicBool // set when conn is closed, before closech is closed
|
||||
}
|
||||
|
||||
// Handles parameters set in DSN after the connection is established
|
||||
func (mc *mysqlConn) handleParams() (err error) {
|
||||
for param, val := range mc.cfg.params {
|
||||
for param, val := range mc.cfg.Params {
|
||||
switch param {
|
||||
// Charset
|
||||
case "charset":
|
||||
@ -70,27 +62,6 @@ func (mc *mysqlConn) handleParams() (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// time.Time parsing
|
||||
case "parseTime":
|
||||
var isBool bool
|
||||
mc.parseTime, isBool = readBool(val)
|
||||
if !isBool {
|
||||
return errors.New("Invalid Bool value: " + val)
|
||||
}
|
||||
|
||||
// Strict mode
|
||||
case "strict":
|
||||
var isBool bool
|
||||
mc.strict, isBool = readBool(val)
|
||||
if !isBool {
|
||||
return errors.New("Invalid Bool value: " + val)
|
||||
}
|
||||
|
||||
// Compression
|
||||
case "compress":
|
||||
err = errors.New("Compression not implemented yet")
|
||||
return
|
||||
|
||||
// System Vars
|
||||
default:
|
||||
err = mc.exec("SET " + param + "=" + val + "")
|
||||
@ -103,46 +74,87 @@ func (mc *mysqlConn) handleParams() (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) markBadConn(err error) error {
|
||||
if mc == nil {
|
||||
return err
|
||||
}
|
||||
if err != errBadConnNoWrite {
|
||||
return err
|
||||
}
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Begin() (driver.Tx, error) {
|
||||
if mc.netConn == nil {
|
||||
return mc.begin(false)
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
|
||||
if mc.closed.IsSet() {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
err := mc.exec("START TRANSACTION")
|
||||
var q string
|
||||
if readOnly {
|
||||
q = "START TRANSACTION READ ONLY"
|
||||
} else {
|
||||
q = "START TRANSACTION"
|
||||
}
|
||||
err := mc.exec(q)
|
||||
if err == nil {
|
||||
return &mysqlTx{mc}, err
|
||||
}
|
||||
|
||||
return nil, err
|
||||
return nil, mc.markBadConn(err)
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Close() (err error) {
|
||||
// Makes Close idempotent
|
||||
if mc.netConn != nil {
|
||||
if !mc.closed.IsSet() {
|
||||
err = mc.writeCommandPacket(comQuit)
|
||||
if err == nil {
|
||||
err = mc.netConn.Close()
|
||||
} else {
|
||||
mc.netConn.Close()
|
||||
}
|
||||
mc.netConn = nil
|
||||
}
|
||||
|
||||
mc.cfg = nil
|
||||
mc.buf.rd = nil
|
||||
mc.cleanup()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
|
||||
// Closes the network connection and unsets internal variables. Do not call this
|
||||
// function after successfully authentication, call Close instead. This function
|
||||
// is called before auth or on auth failure because MySQL will have already
|
||||
// closed the network connection.
|
||||
func (mc *mysqlConn) cleanup() {
|
||||
if !mc.closed.TrySet(true) {
|
||||
return
|
||||
}
|
||||
|
||||
// Makes cleanup idempotent
|
||||
close(mc.closech)
|
||||
if mc.netConn == nil {
|
||||
return
|
||||
}
|
||||
if err := mc.netConn.Close(); err != nil {
|
||||
errLog.Print(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) error() error {
|
||||
if mc.closed.IsSet() {
|
||||
if err := mc.canceled.Value(); err != nil {
|
||||
return err
|
||||
}
|
||||
return ErrInvalidConn
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
|
||||
if mc.closed.IsSet() {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
// Send command
|
||||
err := mc.writeCommandPacketStr(comStmtPrepare, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, mc.markBadConn(err)
|
||||
}
|
||||
|
||||
stmt := &mysqlStmt{
|
||||
@ -167,11 +179,16 @@ func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
|
||||
buf := mc.buf.takeCompleteBuffer()
|
||||
if buf == nil {
|
||||
// Number of ? should be same to len(args)
|
||||
if strings.Count(query, "?") != len(args) {
|
||||
return "", driver.ErrSkip
|
||||
}
|
||||
|
||||
buf, err := mc.buf.takeCompleteBuffer()
|
||||
if err != nil {
|
||||
// can not take the buffer. Something must be wrong with the connection
|
||||
errLog.Print(ErrBusyBuffer)
|
||||
return "", driver.ErrBadConn
|
||||
errLog.Print(err)
|
||||
return "", ErrInvalidConn
|
||||
}
|
||||
buf = buf[:0]
|
||||
argPos := 0
|
||||
@ -196,6 +213,9 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
|
||||
switch v := arg.(type) {
|
||||
case int64:
|
||||
buf = strconv.AppendInt(buf, v, 10)
|
||||
case uint64:
|
||||
// Handle uint64 explicitly because our custom ConvertValue emits unsigned values
|
||||
buf = strconv.AppendUint(buf, v, 10)
|
||||
case float64:
|
||||
buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
|
||||
case bool:
|
||||
@ -208,7 +228,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
|
||||
if v.IsZero() {
|
||||
buf = append(buf, "'0000-00-00'"...)
|
||||
} else {
|
||||
v := v.In(mc.cfg.loc)
|
||||
v := v.In(mc.cfg.Loc)
|
||||
v = v.Add(time.Nanosecond * 500) // To round under microsecond
|
||||
year := v.Year()
|
||||
year100 := year / 100
|
||||
@ -273,7 +293,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
|
||||
return "", driver.ErrSkip
|
||||
}
|
||||
|
||||
if len(buf)+4 > mc.maxPacketAllowed {
|
||||
if len(buf)+4 > mc.maxAllowedPacket {
|
||||
return "", driver.ErrSkip
|
||||
}
|
||||
}
|
||||
@ -284,12 +304,12 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
|
||||
if mc.netConn == nil {
|
||||
if mc.closed.IsSet() {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
if len(args) != 0 {
|
||||
if !mc.cfg.interpolateParams {
|
||||
if !mc.cfg.InterpolateParams {
|
||||
return nil, driver.ErrSkip
|
||||
}
|
||||
// try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
|
||||
@ -298,7 +318,6 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
|
||||
return nil, err
|
||||
}
|
||||
query = prepared
|
||||
args = nil
|
||||
}
|
||||
mc.affectedRows = 0
|
||||
mc.insertId = 0
|
||||
@ -310,37 +329,48 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
|
||||
insertId: int64(mc.insertId),
|
||||
}, err
|
||||
}
|
||||
return nil, err
|
||||
return nil, mc.markBadConn(err)
|
||||
}
|
||||
|
||||
// Internal function to execute commands
|
||||
func (mc *mysqlConn) exec(query string) error {
|
||||
// Send command
|
||||
err := mc.writeCommandPacketStr(comQuery, query)
|
||||
if err != nil {
|
||||
return err
|
||||
if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
|
||||
return mc.markBadConn(err)
|
||||
}
|
||||
|
||||
// Read Result
|
||||
resLen, err := mc.readResultSetHeaderPacket()
|
||||
if err == nil && resLen > 0 {
|
||||
if err = mc.readUntilEOF(); err != nil {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resLen > 0 {
|
||||
// columns
|
||||
if err := mc.readUntilEOF(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = mc.readUntilEOF()
|
||||
// rows
|
||||
if err := mc.readUntilEOF(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
return mc.discardResults()
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
|
||||
if mc.netConn == nil {
|
||||
return mc.query(query, args)
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
|
||||
if mc.closed.IsSet() {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
if len(args) != 0 {
|
||||
if !mc.cfg.interpolateParams {
|
||||
if !mc.cfg.InterpolateParams {
|
||||
return nil, driver.ErrSkip
|
||||
}
|
||||
// try client-side prepare to reduce roundtrip
|
||||
@ -349,7 +379,6 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
|
||||
return nil, err
|
||||
}
|
||||
query = prepared
|
||||
args = nil
|
||||
}
|
||||
// Send command
|
||||
err := mc.writeCommandPacketStr(comQuery, query)
|
||||
@ -362,15 +391,22 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
|
||||
rows.mc = mc
|
||||
|
||||
if resLen == 0 {
|
||||
// no columns, no more data
|
||||
return emptyRows{}, nil
|
||||
rows.rs.done = true
|
||||
|
||||
switch err := rows.NextResultSet(); err {
|
||||
case nil, io.EOF:
|
||||
return rows, nil
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Columns
|
||||
rows.columns, err = mc.readColumns(resLen)
|
||||
rows.rs.columns, err = mc.readColumns(resLen)
|
||||
return rows, err
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
return nil, mc.markBadConn(err)
|
||||
}
|
||||
|
||||
// Gets the value of the given MySQL System Variable
|
||||
@ -386,6 +422,7 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
|
||||
if err == nil {
|
||||
rows := new(textRows)
|
||||
rows.mc = mc
|
||||
rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
|
||||
|
||||
if resLen > 0 {
|
||||
// Columns
|
||||
@ -401,3 +438,212 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// finish is called when the query has canceled.
|
||||
func (mc *mysqlConn) cancel(err error) {
|
||||
mc.canceled.Set(err)
|
||||
mc.cleanup()
|
||||
}
|
||||
|
||||
// finish is called when the query has succeeded.
|
||||
func (mc *mysqlConn) finish() {
|
||||
if !mc.watching || mc.finished == nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case mc.finished <- struct{}{}:
|
||||
mc.watching = false
|
||||
case <-mc.closech:
|
||||
}
|
||||
}
|
||||
|
||||
// Ping implements driver.Pinger interface
|
||||
func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
|
||||
if mc.closed.IsSet() {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
|
||||
if err = mc.watchCancel(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
defer mc.finish()
|
||||
|
||||
if err = mc.writeCommandPacket(comPing); err != nil {
|
||||
return mc.markBadConn(err)
|
||||
}
|
||||
|
||||
return mc.readResultOK()
|
||||
}
|
||||
|
||||
// BeginTx implements driver.ConnBeginTx interface
|
||||
func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
|
||||
if err := mc.watchCancel(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer mc.finish()
|
||||
|
||||
if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
|
||||
level, err := mapIsolationLevel(opts.Isolation)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return mc.begin(opts.ReadOnly)
|
||||
}
|
||||
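From the caller's side, the new read-only path is reached through database/sql's transaction options; a brief sketch (DSN and credentials are placeholders, error handling reduced to panics):

package main

import (
	"context"
	"database/sql"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// ReadOnly: true makes the driver issue START TRANSACTION READ ONLY,
	// as added in the begin() hunk above.
	tx, err := db.BeginTx(context.Background(), &sql.TxOptions{
		Isolation: sql.LevelRepeatableRead,
		ReadOnly:  true,
	})
	if err != nil {
		panic(err)
	}
	defer tx.Rollback()
}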
|
||||
func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
|
||||
dargs, err := namedValueToValue(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := mc.watchCancel(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rows, err := mc.query(query, dargs)
|
||||
if err != nil {
|
||||
mc.finish()
|
||||
return nil, err
|
||||
}
|
||||
rows.finish = mc.finish
|
||||
return rows, err
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
|
||||
dargs, err := namedValueToValue(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := mc.watchCancel(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer mc.finish()
|
||||
|
||||
return mc.Exec(query, dargs)
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
|
||||
if err := mc.watchCancel(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stmt, err := mc.Prepare(query)
|
||||
mc.finish()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
select {
|
||||
default:
|
||||
case <-ctx.Done():
|
||||
stmt.Close()
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
return stmt, nil
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
|
||||
dargs, err := namedValueToValue(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := stmt.mc.watchCancel(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rows, err := stmt.query(dargs)
|
||||
if err != nil {
|
||||
stmt.mc.finish()
|
||||
return nil, err
|
||||
}
|
||||
rows.finish = stmt.mc.finish
|
||||
return rows, err
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
|
||||
dargs, err := namedValueToValue(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := stmt.mc.watchCancel(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer stmt.mc.finish()
|
||||
|
||||
return stmt.Exec(dargs)
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) watchCancel(ctx context.Context) error {
|
||||
if mc.watching {
|
||||
// Reach here if canceled,
|
||||
// so the connection is already invalid
|
||||
mc.cleanup()
|
||||
return nil
|
||||
}
|
||||
// When ctx is already cancelled, don't watch it.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
// When ctx is not cancellable, don't watch it.
|
||||
if ctx.Done() == nil {
|
||||
return nil
|
||||
}
|
||||
// When watcher is not alive, can't watch it.
|
||||
if mc.watcher == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
mc.watching = true
|
||||
mc.watcher <- ctx
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) startWatcher() {
|
||||
watcher := make(chan context.Context, 1)
|
||||
mc.watcher = watcher
|
||||
finished := make(chan struct{})
|
||||
mc.finished = finished
|
||||
go func() {
|
||||
for {
|
||||
var ctx context.Context
|
||||
select {
|
||||
case ctx = <-watcher:
|
||||
case <-mc.closech:
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
mc.cancel(ctx.Err())
|
||||
case <-finished:
|
||||
case <-mc.closech:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
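The watcher goroutine above is what makes caller-side cancellation effective; a short sketch of what that looks like from application code (DSN and query are placeholders):

package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// If the deadline fires mid-query, the watcher cancels and cleans up the
	// underlying connection rather than leaving it wedged.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	var now string
	if err := db.QueryRowContext(ctx, "SELECT NOW()").Scan(&now); err != nil {
		log.Fatal(err)
	}
	log.Println(now)
}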
|
||||
func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
|
||||
nv.Value, err = converter{}.ConvertValue(nv.Value)
|
||||
return
|
||||
}
|
||||
|
||||
// ResetSession implements driver.SessionResetter.
|
||||
// (From Go 1.10)
|
||||
func (mc *mysqlConn) ResetSession(ctx context.Context) error {
|
||||
if mc.closed.IsSet() {
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
mc.reset = true
|
||||
return nil
|
||||
}
|
||||
|
175
vendor/github.com/go-sql-driver/mysql/connection_test.go
generated
vendored
Normal file
175
vendor/github.com/go-sql-driver/mysql/connection_test.go
generated
vendored
Normal file
@ -0,0 +1,175 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"net"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestInterpolateParams(t *testing.T) {
|
||||
mc := &mysqlConn{
|
||||
buf: newBuffer(nil),
|
||||
maxAllowedPacket: maxPacketSize,
|
||||
cfg: &Config{
|
||||
InterpolateParams: true,
|
||||
},
|
||||
}
|
||||
|
||||
q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42), "gopher"})
|
||||
if err != nil {
|
||||
t.Errorf("Expected err=nil, got %#v", err)
|
||||
return
|
||||
}
|
||||
expected := `SELECT 42+'gopher'`
|
||||
if q != expected {
|
||||
t.Errorf("Expected: %q\nGot: %q", expected, q)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInterpolateParamsTooManyPlaceholders(t *testing.T) {
|
||||
mc := &mysqlConn{
|
||||
buf: newBuffer(nil),
|
||||
maxAllowedPacket: maxPacketSize,
|
||||
cfg: &Config{
|
||||
InterpolateParams: true,
|
||||
},
|
||||
}
|
||||
|
||||
q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42)})
|
||||
if err != driver.ErrSkip {
|
||||
t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
|
||||
}
|
||||
}
|
||||
|
||||
// We don't support placeholder in string literal for now.
|
||||
// https://github.com/go-sql-driver/mysql/pull/490
|
||||
func TestInterpolateParamsPlaceholderInString(t *testing.T) {
|
||||
mc := &mysqlConn{
|
||||
buf: newBuffer(nil),
|
||||
maxAllowedPacket: maxPacketSize,
|
||||
cfg: &Config{
|
||||
InterpolateParams: true,
|
||||
},
|
||||
}
|
||||
|
||||
q, err := mc.interpolateParams("SELECT 'abc?xyz',?", []driver.Value{int64(42)})
|
||||
// When InterpolateParams support string literal, this should return `"SELECT 'abc?xyz', 42`
|
||||
if err != driver.ErrSkip {
|
||||
t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInterpolateParamsUint64(t *testing.T) {
|
||||
mc := &mysqlConn{
|
||||
buf: newBuffer(nil),
|
||||
maxAllowedPacket: maxPacketSize,
|
||||
cfg: &Config{
|
||||
InterpolateParams: true,
|
||||
},
|
||||
}
|
||||
|
||||
q, err := mc.interpolateParams("SELECT ?", []driver.Value{uint64(42)})
|
||||
if err != nil {
|
||||
t.Errorf("Expected err=nil, got err=%#v, q=%#v", err, q)
|
||||
}
|
||||
if q != "SELECT 42" {
|
||||
t.Errorf("Expected uint64 interpolation to work, got q=%#v", q)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckNamedValue(t *testing.T) {
|
||||
value := driver.NamedValue{Value: ^uint64(0)}
|
||||
x := &mysqlConn{}
|
||||
err := x.CheckNamedValue(&value)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal("uint64 high-bit not convertible", err)
|
||||
}
|
||||
|
||||
if value.Value != ^uint64(0) {
|
||||
t.Fatalf("uint64 high-bit converted, got %#v %T", value.Value, value.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCleanCancel tests passed context is cancelled at start.
|
||||
// No packet should be sent. Connection should keep current status.
|
||||
func TestCleanCancel(t *testing.T) {
|
||||
mc := &mysqlConn{
|
||||
closech: make(chan struct{}),
|
||||
}
|
||||
mc.startWatcher()
|
||||
defer mc.cleanup()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
|
||||
for i := 0; i < 3; i++ { // Repeat same behavior
|
||||
err := mc.Ping(ctx)
|
||||
if err != context.Canceled {
|
||||
t.Errorf("expected context.Canceled, got %#v", err)
|
||||
}
|
||||
|
||||
if mc.closed.IsSet() {
|
||||
t.Error("expected mc is not closed, closed actually")
|
||||
}
|
||||
|
||||
if mc.watching {
|
||||
t.Error("expected watching is false, but true")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPingMarkBadConnection(t *testing.T) {
|
||||
nc := badConnection{err: errors.New("boom")}
|
||||
ms := &mysqlConn{
|
||||
netConn: nc,
|
||||
buf: newBuffer(nc),
|
||||
maxAllowedPacket: defaultMaxAllowedPacket,
|
||||
}
|
||||
|
||||
err := ms.Ping(context.Background())
|
||||
|
||||
if err != driver.ErrBadConn {
|
||||
t.Errorf("expected driver.ErrBadConn, got %#v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPingErrInvalidConn(t *testing.T) {
|
||||
nc := badConnection{err: errors.New("failed to write"), n: 10}
|
||||
ms := &mysqlConn{
|
||||
netConn: nc,
|
||||
buf: newBuffer(nc),
|
||||
maxAllowedPacket: defaultMaxAllowedPacket,
|
||||
closech: make(chan struct{}),
|
||||
}
|
||||
|
||||
err := ms.Ping(context.Background())
|
||||
|
||||
if err != ErrInvalidConn {
|
||||
t.Errorf("expected ErrInvalidConn, got %#v", err)
|
||||
}
|
||||
}
|
||||
|
||||
type badConnection struct {
|
||||
n int
|
||||
err error
|
||||
net.Conn
|
||||
}
|
||||
|
||||
func (bc badConnection) Write(b []byte) (n int, err error) {
|
||||
return bc.n, bc.err
|
||||
}
|
||||
|
||||
func (bc badConnection) Close() error {
|
||||
return nil
|
||||
}
|
143
vendor/github.com/go-sql-driver/mysql/connector.go
generated
vendored
Normal file
143
vendor/github.com/go-sql-driver/mysql/connector.go
generated
vendored
Normal file
@ -0,0 +1,143 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"net"
|
||||
)
|
||||
|
||||
type connector struct {
|
||||
cfg *Config // immutable private copy.
|
||||
}
|
||||
|
||||
// Connect implements driver.Connector interface.
|
||||
// Connect returns a connection to the database.
|
||||
func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
|
||||
var err error
|
||||
|
||||
// New mysqlConn
|
||||
mc := &mysqlConn{
|
||||
maxAllowedPacket: maxPacketSize,
|
||||
maxWriteSize: maxPacketSize - 1,
|
||||
closech: make(chan struct{}),
|
||||
cfg: c.cfg,
|
||||
}
|
||||
mc.parseTime = mc.cfg.ParseTime
|
||||
|
||||
// Connect to Server
|
||||
dialsLock.RLock()
|
||||
dial, ok := dials[mc.cfg.Net]
|
||||
dialsLock.RUnlock()
|
||||
if ok {
|
||||
mc.netConn, err = dial(ctx, mc.cfg.Addr)
|
||||
} else {
|
||||
nd := net.Dialer{Timeout: mc.cfg.Timeout}
|
||||
mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
|
||||
errLog.Print("net.Error from Dial()': ", nerr.Error())
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Enable TCP Keepalives on TCP connections
|
||||
if tc, ok := mc.netConn.(*net.TCPConn); ok {
|
||||
if err := tc.SetKeepAlive(true); err != nil {
|
||||
// Don't send COM_QUIT before handshake.
|
||||
mc.netConn.Close()
|
||||
mc.netConn = nil
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Call startWatcher for context support (From Go 1.8)
|
||||
mc.startWatcher()
|
||||
if err := mc.watchCancel(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer mc.finish()
|
||||
|
||||
mc.buf = newBuffer(mc.netConn)
|
||||
|
||||
// Set I/O timeouts
|
||||
mc.buf.timeout = mc.cfg.ReadTimeout
|
||||
mc.writeTimeout = mc.cfg.WriteTimeout
|
||||
|
||||
// Reading Handshake Initialization Packet
|
||||
authData, plugin, err := mc.readHandshakePacket()
|
||||
if err != nil {
|
||||
mc.cleanup()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if plugin == "" {
|
||||
plugin = defaultAuthPlugin
|
||||
}
|
||||
|
||||
// Send Client Authentication Packet
|
||||
authResp, err := mc.auth(authData, plugin)
|
||||
if err != nil {
|
||||
// try the default auth plugin, if using the requested plugin failed
|
||||
errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
|
||||
plugin = defaultAuthPlugin
|
||||
authResp, err = mc.auth(authData, plugin)
|
||||
if err != nil {
|
||||
mc.cleanup()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil {
|
||||
mc.cleanup()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Handle response to auth packet, switch methods if possible
|
||||
if err = mc.handleAuthResult(authData, plugin); err != nil {
|
||||
// Authentication failed and MySQL has already closed the connection
|
||||
// (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
|
||||
// Do not send COM_QUIT, just cleanup and return the error.
|
||||
mc.cleanup()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if mc.cfg.MaxAllowedPacket > 0 {
|
||||
mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
|
||||
} else {
|
||||
// Get max allowed packet size
|
||||
maxap, err := mc.getSystemVar("max_allowed_packet")
|
||||
if err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
mc.maxAllowedPacket = stringToInt(maxap) - 1
|
||||
}
|
||||
if mc.maxAllowedPacket < maxPacketSize {
|
||||
mc.maxWriteSize = mc.maxAllowedPacket
|
||||
}
|
||||
|
||||
// Handle DSN Params
|
||||
err = mc.handleParams()
|
||||
if err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mc, nil
|
||||
}
|
||||
|
||||
// Driver implements driver.Connector interface.
|
||||
// Driver returns &MySQLDriver{}.
|
||||
func (c *connector) Driver() driver.Driver {
|
||||
return &MySQLDriver{}
|
||||
}
|
26
vendor/github.com/go-sql-driver/mysql/const.go
generated
vendored
26
vendor/github.com/go-sql-driver/mysql/const.go
generated
vendored
@ -9,7 +9,9 @@
|
||||
package mysql
|
||||
|
||||
const (
|
||||
minProtocolVersion byte = 10
|
||||
defaultAuthPlugin = "mysql_native_password"
|
||||
defaultMaxAllowedPacket = 4 << 20 // 4 MiB
|
||||
minProtocolVersion = 10
|
||||
maxPacketSize = 1<<24 - 1
|
||||
timeFormat = "2006-01-02 15:04:05.999999"
|
||||
)
|
||||
@ -18,10 +20,11 @@ const (
|
||||
// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
|
||||
|
||||
const (
|
||||
iOK byte = 0x00
|
||||
iLocalInFile byte = 0xfb
|
||||
iEOF byte = 0xfe
|
||||
iERR byte = 0xff
|
||||
iOK byte = 0x00
|
||||
iAuthMoreData byte = 0x01
|
||||
iLocalInFile byte = 0xfb
|
||||
iEOF byte = 0xfe
|
||||
iERR byte = 0xff
|
||||
)
|
||||
|
||||
// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
|
||||
@ -87,8 +90,10 @@ const (
|
||||
)
|
||||
|
||||
// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
|
||||
type fieldType byte
|
||||
|
||||
const (
|
||||
fieldTypeDecimal byte = iota
|
||||
fieldTypeDecimal fieldType = iota
|
||||
fieldTypeTiny
|
||||
fieldTypeShort
|
||||
fieldTypeLong
|
||||
@ -107,7 +112,8 @@ const (
|
||||
fieldTypeBit
|
||||
)
|
||||
const (
|
||||
fieldTypeNewDecimal byte = iota + 0xf6
|
||||
fieldTypeJSON fieldType = iota + 0xf5
|
||||
fieldTypeNewDecimal
|
||||
fieldTypeEnum
|
||||
fieldTypeSet
|
||||
fieldTypeTinyBLOB
|
||||
@ -160,3 +166,9 @@ const (
|
||||
statusInTransReadonly
|
||||
statusSessionStateChanged
|
||||
)
|
||||
|
||||
const (
|
||||
cachingSha2PasswordRequestPublicKey = 2
|
||||
cachingSha2PasswordFastAuthSuccess = 3
|
||||
cachingSha2PasswordPerformFullAuthentication = 4
|
||||
)
|
||||
|
136
vendor/github.com/go-sql-driver/mysql/driver.go
generated
vendored
136
vendor/github.com/go-sql-driver/mysql/driver.go
generated
vendored
@ -4,7 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
// Package mysql provides a MySQL driver for Go's database/sql package.
|
||||
//
|
||||
// The driver should be used via the database/sql package:
|
||||
//
|
||||
@ -17,131 +17,67 @@
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"net"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// This struct is exported to make the driver directly accessible.
|
||||
// MySQLDriver is exported to make the driver directly accessible.
|
||||
// In general the driver is used via the database/sql package.
|
||||
type MySQLDriver struct{}
|
||||
|
||||
// DialFunc is a function which can be used to establish the network connection.
|
||||
// Custom dial functions must be registered with RegisterDial
|
||||
//
|
||||
// Deprecated: users should register a DialContextFunc instead
|
||||
type DialFunc func(addr string) (net.Conn, error)
|
||||
|
||||
var dials map[string]DialFunc
|
||||
// DialContextFunc is a function which can be used to establish the network connection.
|
||||
// Custom dial functions must be registered with RegisterDialContext
|
||||
type DialContextFunc func(ctx context.Context, addr string) (net.Conn, error)
|
||||
|
||||
// RegisterDial registers a custom dial function. It can then be used by the
|
||||
var (
|
||||
dialsLock sync.RWMutex
|
||||
dials map[string]DialContextFunc
|
||||
)
|
||||
|
||||
// RegisterDialContext registers a custom dial function. It can then be used by the
|
||||
// network address mynet(addr), where mynet is the registered new network.
|
||||
// addr is passed as a parameter to the dial function.
|
||||
func RegisterDial(net string, dial DialFunc) {
|
||||
// The current context for the connection and its address is passed to the dial function.
|
||||
func RegisterDialContext(net string, dial DialContextFunc) {
|
||||
dialsLock.Lock()
|
||||
defer dialsLock.Unlock()
|
||||
if dials == nil {
|
||||
dials = make(map[string]DialFunc)
|
||||
dials = make(map[string]DialContextFunc)
|
||||
}
|
||||
dials[net] = dial
|
||||
}
|
||||
|
||||
// RegisterDial registers a custom dial function. It can then be used by the
|
||||
// network address mynet(addr), where mynet is the registered new network.
|
||||
// addr is passed as a parameter to the dial function.
|
||||
//
|
||||
// Deprecated: users should call RegisterDialContext instead
|
||||
func RegisterDial(network string, dial DialFunc) {
|
||||
RegisterDialContext(network, func(_ context.Context, addr string) (net.Conn, error) {
|
||||
return dial(addr)
|
||||
})
|
||||
}
|
||||
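A usage sketch for the replacement API (the network name, DSN, and credentials here are made up for illustration):

package main

import (
	"context"
	"database/sql"
	"net"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Register a custom network named "mydial"; the addr portion of the DSN
	// is handed to the dial function together with the caller's context.
	mysql.RegisterDialContext("mydial", func(ctx context.Context, addr string) (net.Conn, error) {
		var d net.Dialer
		return d.DialContext(ctx, "tcp", addr)
	})

	db, err := sql.Open("mysql", "user:pass@mydial(127.0.0.1:3306)/test")
	if err != nil {
		panic(err)
	}
	defer db.Close()
}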
|
||||
// Open new Connection.
|
||||
// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
|
||||
// the DSN string is formated
|
||||
// the DSN string is formatted
|
||||
func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
|
||||
var err error
|
||||
|
||||
// New mysqlConn
|
||||
mc := &mysqlConn{
|
||||
maxPacketAllowed: maxPacketSize,
|
||||
maxWriteSize: maxPacketSize - 1,
|
||||
}
|
||||
mc.cfg, err = parseDSN(dsn)
|
||||
cfg, err := ParseDSN(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Connect to Server
|
||||
if dial, ok := dials[mc.cfg.net]; ok {
|
||||
mc.netConn, err = dial(mc.cfg.addr)
|
||||
} else {
|
||||
nd := net.Dialer{Timeout: mc.cfg.timeout}
|
||||
mc.netConn, err = nd.Dial(mc.cfg.net, mc.cfg.addr)
|
||||
c := &connector{
|
||||
cfg: cfg,
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Enable TCP Keepalives on TCP connections
|
||||
if tc, ok := mc.netConn.(*net.TCPConn); ok {
|
||||
if err := tc.SetKeepAlive(true); err != nil {
|
||||
// Don't send COM_QUIT before handshake.
|
||||
mc.netConn.Close()
|
||||
mc.netConn = nil
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
mc.buf = newBuffer(mc.netConn)
|
||||
|
||||
// Reading Handshake Initialization Packet
|
||||
cipher, err := mc.readInitPacket()
|
||||
if err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Send Client Authentication Packet
|
||||
if err = mc.writeAuthPacket(cipher); err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read Result Packet
|
||||
err = mc.readResultOK()
|
||||
if err != nil {
|
||||
// Retry with old authentication method, if allowed
|
||||
if mc.cfg != nil && mc.cfg.allowOldPasswords && err == ErrOldPassword {
|
||||
if err = mc.writeOldAuthPacket(cipher); err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
if err = mc.readResultOK(); err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
} else if mc.cfg != nil && mc.cfg.allowCleartextPasswords && err == ErrCleartextPassword {
|
||||
if err = mc.writeClearAuthPacket(); err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
if err = mc.readResultOK(); err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Get max allowed packet size
|
||||
maxap, err := mc.getSystemVar("max_allowed_packet")
|
||||
if err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
mc.maxPacketAllowed = stringToInt(maxap) - 1
|
||||
if mc.maxPacketAllowed < maxPacketSize {
|
||||
mc.maxWriteSize = mc.maxPacketAllowed
|
||||
}
|
||||
|
||||
// Handle DSN Params
|
||||
err = mc.handleParams()
|
||||
if err != nil {
|
||||
mc.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mc, nil
|
||||
return c.Connect(context.Background())
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
37
vendor/github.com/go-sql-driver/mysql/driver_go110.go
generated
vendored
Normal file
37
vendor/github.com/go-sql-driver/mysql/driver_go110.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build go1.10
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
)
|
||||
|
||||
// NewConnector returns new driver.Connector.
func NewConnector(cfg *Config) (driver.Connector, error) {
	cfg = cfg.Clone()
	// normalize the contents of cfg so calls to NewConnector have the same
	// behavior as MySQLDriver.OpenConnector
	if err := cfg.normalize(); err != nil {
		return nil, err
	}
	return &connector{cfg: cfg}, nil
}

// OpenConnector implements driver.DriverContext.
func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) {
	cfg, err := ParseDSN(dsn)
	if err != nil {
		return nil, err
	}
	return &connector{
		cfg: cfg,
	}, nil
}
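Usage roughly mirrors configForTests/TestNewConnector later in this diff; a hedged sketch with placeholder credentials:

package main

import (
	"database/sql"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	cfg := mysql.NewConfig()
	cfg.User = "user"
	cfg.Passwd = "pass"
	cfg.Net = "tcp"
	cfg.Addr = "127.0.0.1:3306"
	cfg.DBName = "test"

	// NewConnector validates/normalizes cfg once; sql.OpenDB (Go 1.10+)
	// then reuses it for every new connection without re-parsing a DSN.
	connector, err := mysql.NewConnector(cfg)
	if err != nil {
		log.Fatal(err)
	}
	db := sql.OpenDB(connector)
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}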
|
137
vendor/github.com/go-sql-driver/mysql/driver_go110_test.go
generated
vendored
Normal file
137
vendor/github.com/go-sql-driver/mysql/driver_go110_test.go
generated
vendored
Normal file
@ -0,0 +1,137 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build go1.10
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var _ driver.DriverContext = &MySQLDriver{}
|
||||
|
||||
type dialCtxKey struct{}
|
||||
|
||||
func TestConnectorObeysDialTimeouts(t *testing.T) {
|
||||
if !available {
|
||||
t.Skipf("MySQL server not running on %s", netAddr)
|
||||
}
|
||||
|
||||
RegisterDialContext("dialctxtest", func(ctx context.Context, addr string) (net.Conn, error) {
|
||||
var d net.Dialer
|
||||
if !ctx.Value(dialCtxKey{}).(bool) {
|
||||
return nil, fmt.Errorf("test error: query context is not propagated to our dialer")
|
||||
}
|
||||
return d.DialContext(ctx, prot, addr)
|
||||
})
|
||||
|
||||
db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@dialctxtest(%s)/%s?timeout=30s", user, pass, addr, dbname))
|
||||
if err != nil {
|
||||
t.Fatalf("error connecting: %s", err.Error())
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
ctx := context.WithValue(context.Background(), dialCtxKey{}, true)
|
||||
|
||||
_, err = db.ExecContext(ctx, "DO 1")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func configForTests(t *testing.T) *Config {
|
||||
if !available {
|
||||
t.Skipf("MySQL server not running on %s", netAddr)
|
||||
}
|
||||
|
||||
mycnf := NewConfig()
|
||||
mycnf.User = user
|
||||
mycnf.Passwd = pass
|
||||
mycnf.Addr = addr
|
||||
mycnf.Net = prot
|
||||
mycnf.DBName = dbname
|
||||
return mycnf
|
||||
}
|
||||
|
||||
func TestNewConnector(t *testing.T) {
|
||||
mycnf := configForTests(t)
|
||||
conn, err := NewConnector(mycnf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
db := sql.OpenDB(conn)
|
||||
defer db.Close()
|
||||
|
||||
if err := db.Ping(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
type slowConnection struct {
|
||||
net.Conn
|
||||
slowdown time.Duration
|
||||
}
|
||||
|
||||
func (sc *slowConnection) Read(b []byte) (int, error) {
|
||||
time.Sleep(sc.slowdown)
|
||||
return sc.Conn.Read(b)
|
||||
}
|
||||
|
||||
type connectorHijack struct {
|
||||
driver.Connector
|
||||
connErr error
|
||||
}
|
||||
|
||||
func (cw *connectorHijack) Connect(ctx context.Context) (driver.Conn, error) {
|
||||
var conn driver.Conn
|
||||
conn, cw.connErr = cw.Connector.Connect(ctx)
|
||||
return conn, cw.connErr
|
||||
}
|
||||
|
||||
func TestConnectorTimeoutsDuringOpen(t *testing.T) {
|
||||
RegisterDialContext("slowconn", func(ctx context.Context, addr string) (net.Conn, error) {
|
||||
var d net.Dialer
|
||||
conn, err := d.DialContext(ctx, prot, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &slowConnection{Conn: conn, slowdown: 100 * time.Millisecond}, nil
|
||||
})
|
||||
|
||||
mycnf := configForTests(t)
|
||||
mycnf.Net = "slowconn"
|
||||
|
||||
conn, err := NewConnector(mycnf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hijack := &connectorHijack{Connector: conn}
|
||||
|
||||
db := sql.OpenDB(hijack)
|
||||
defer db.Close()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
_, err = db.ExecContext(ctx, "DO 1")
|
||||
if err != context.DeadlineExceeded {
|
||||
t.Fatalf("ExecContext should have timed out")
|
||||
}
|
||||
if hijack.connErr != context.DeadlineExceeded {
|
||||
t.Fatalf("(*Connector).Connect should have timed out")
|
||||
}
|
||||
}
|
2996
vendor/github.com/go-sql-driver/mysql/driver_test.go
generated
vendored
Normal file
2996
vendor/github.com/go-sql-driver/mysql/driver_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
@ -10,11 +10,15 @@ package mysql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
@ -26,31 +30,122 @@ var (
|
||||
errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
|
||||
)
|
||||
|
||||
// Config is a configuration parsed from a DSN string
|
||||
// Config is a configuration parsed from a DSN string.
|
||||
// If a new Config is created instead of being parsed from a DSN string,
|
||||
// the NewConfig function should be used, which sets default values.
|
||||
type Config struct {
|
||||
User string // Username
|
||||
Passwd string // Password (requires User)
|
||||
Net string // Network type
|
||||
Addr string // Network address (requires Net)
|
||||
DBName string // Database name
|
||||
Params map[string]string // Connection parameters
|
||||
Collation string // Connection collation
|
||||
Loc *time.Location // Location for time.Time values
|
||||
TLSConfig string // TLS configuration name
|
||||
tls *tls.Config // TLS configuration
|
||||
Timeout time.Duration // Dial timeout
|
||||
ReadTimeout time.Duration // I/O read timeout
|
||||
WriteTimeout time.Duration // I/O write timeout
|
||||
User string // Username
|
||||
Passwd string // Password (requires User)
|
||||
Net string // Network type
|
||||
Addr string // Network address (requires Net)
|
||||
DBName string // Database name
|
||||
Params map[string]string // Connection parameters
|
||||
Collation string // Connection collation
|
||||
Loc *time.Location // Location for time.Time values
|
||||
MaxAllowedPacket int // Max packet size allowed
|
||||
ServerPubKey string // Server public key name
|
||||
pubKey *rsa.PublicKey // Server public key
|
||||
TLSConfig string // TLS configuration name
|
||||
tls *tls.Config // TLS configuration
|
||||
Timeout time.Duration // Dial timeout
|
||||
ReadTimeout time.Duration // I/O read timeout
|
||||
WriteTimeout time.Duration // I/O write timeout
|
||||
|
||||
AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
|
||||
AllowCleartextPasswords bool // Allows the cleartext client side plugin
|
||||
AllowNativePasswords bool // Allows the native password authentication method
|
||||
AllowOldPasswords bool // Allows the old insecure password method
|
||||
ClientFoundRows bool // Return number of matching rows instead of rows changed
|
||||
ColumnsWithAlias bool // Prepend table alias to column names
|
||||
InterpolateParams bool // Interpolate placeholders into query string
|
||||
MultiStatements bool // Allow multiple statements in one query
|
||||
ParseTime bool // Parse time values to time.Time
|
||||
Strict bool // Return warnings as errors
|
||||
RejectReadOnly bool // Reject read-only connections
|
||||
}
|
||||
|
||||
// NewConfig creates a new Config and sets default values.
func NewConfig() *Config {
	return &Config{
		Collation:            defaultCollation,
		Loc:                  time.UTC,
		MaxAllowedPacket:     defaultMaxAllowedPacket,
		AllowNativePasswords: true,
	}
}
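The comment on the Config struct above warns against building a bare &Config{} by hand; a tiny sketch of the difference (printed values are an assumption based on the defaults shown in this hunk and in const.go):

package main

import (
	"fmt"

	"github.com/go-sql-driver/mysql"
)

func main() {
	bare := &mysql.Config{} // zero values: empty Collation, nil Loc, MaxAllowedPacket 0
	good := mysql.NewConfig()

	fmt.Println(bare.Collation, bare.Loc)                        // "" <nil>
	fmt.Println(good.Collation, good.Loc, good.MaxAllowedPacket) // utf8mb4_general_ci UTC 4194304
}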
|
||||
|
||||
func (cfg *Config) Clone() *Config {
|
||||
cp := *cfg
|
||||
if cp.tls != nil {
|
||||
cp.tls = cfg.tls.Clone()
|
||||
}
|
||||
if len(cp.Params) > 0 {
|
||||
cp.Params = make(map[string]string, len(cfg.Params))
|
||||
for k, v := range cfg.Params {
|
||||
cp.Params[k] = v
|
||||
}
|
||||
}
|
||||
if cfg.pubKey != nil {
|
||||
cp.pubKey = &rsa.PublicKey{
|
||||
N: new(big.Int).Set(cfg.pubKey.N),
|
||||
E: cfg.pubKey.E,
|
||||
}
|
||||
}
|
||||
return &cp
|
||||
}
|
||||
|
||||
func (cfg *Config) normalize() error {
|
||||
if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
|
||||
return errInvalidDSNUnsafeCollation
|
||||
}
|
||||
|
||||
// Set default network if empty
|
||||
if cfg.Net == "" {
|
||||
cfg.Net = "tcp"
|
||||
}
|
||||
|
||||
// Set default address if empty
|
||||
if cfg.Addr == "" {
|
||||
switch cfg.Net {
|
||||
case "tcp":
|
||||
cfg.Addr = "127.0.0.1:3306"
|
||||
case "unix":
|
||||
cfg.Addr = "/tmp/mysql.sock"
|
||||
default:
|
||||
return errors.New("default addr for network '" + cfg.Net + "' unknown")
|
||||
}
|
||||
} else if cfg.Net == "tcp" {
|
||||
cfg.Addr = ensureHavePort(cfg.Addr)
|
||||
}
|
||||
|
||||
switch cfg.TLSConfig {
|
||||
case "false", "":
|
||||
// don't set anything
|
||||
case "true":
|
||||
cfg.tls = &tls.Config{}
|
||||
case "skip-verify", "preferred":
|
||||
cfg.tls = &tls.Config{InsecureSkipVerify: true}
|
||||
default:
|
||||
cfg.tls = getTLSConfigClone(cfg.TLSConfig)
|
||||
if cfg.tls == nil {
|
||||
return errors.New("invalid value / unknown config name: " + cfg.TLSConfig)
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.tls != nil && cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify {
|
||||
host, _, err := net.SplitHostPort(cfg.Addr)
|
||||
if err == nil {
|
||||
cfg.tls.ServerName = host
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.ServerPubKey != "" {
|
||||
cfg.pubKey = getServerPubKey(cfg.ServerPubKey)
|
||||
if cfg.pubKey == nil {
|
||||
return errors.New("invalid value / unknown server pub key name: " + cfg.ServerPubKey)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FormatDSN formats the given Config into a DSN string which can be passed to
|
||||
@ -99,6 +194,15 @@ func (cfg *Config) FormatDSN() string {
|
||||
}
|
||||
}
|
||||
|
||||
if !cfg.AllowNativePasswords {
|
||||
if hasParam {
|
||||
buf.WriteString("&allowNativePasswords=false")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?allowNativePasswords=false")
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.AllowOldPasswords {
|
||||
if hasParam {
|
||||
buf.WriteString("&allowOldPasswords=true")
|
||||
@ -183,15 +287,25 @@ func (cfg *Config) FormatDSN() string {
|
||||
buf.WriteString(cfg.ReadTimeout.String())
|
||||
}
|
||||
|
||||
if cfg.Strict {
|
||||
if cfg.RejectReadOnly {
|
||||
if hasParam {
|
||||
buf.WriteString("&strict=true")
|
||||
buf.WriteString("&rejectReadOnly=true")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?strict=true")
|
||||
buf.WriteString("?rejectReadOnly=true")
|
||||
}
|
||||
}
|
||||
|
||||
if len(cfg.ServerPubKey) > 0 {
|
||||
if hasParam {
|
||||
buf.WriteString("&serverPubKey=")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?serverPubKey=")
|
||||
}
|
||||
buf.WriteString(url.QueryEscape(cfg.ServerPubKey))
|
||||
}
|
||||
|
||||
if cfg.Timeout > 0 {
|
||||
if hasParam {
|
||||
buf.WriteString("&timeout=")
|
||||
@ -222,9 +336,25 @@ func (cfg *Config) FormatDSN() string {
|
||||
buf.WriteString(cfg.WriteTimeout.String())
|
||||
}
|
||||
|
||||
if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
|
||||
if hasParam {
|
||||
buf.WriteString("&maxAllowedPacket=")
|
||||
} else {
|
||||
hasParam = true
|
||||
buf.WriteString("?maxAllowedPacket=")
|
||||
}
|
||||
buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket))
|
||||
|
||||
}
|
||||
|
||||
// other params
|
||||
if cfg.Params != nil {
|
||||
for param, value := range cfg.Params {
|
||||
var params []string
|
||||
for param := range cfg.Params {
|
||||
params = append(params, param)
|
||||
}
|
||||
sort.Strings(params)
|
||||
for _, param := range params {
|
||||
if hasParam {
|
||||
buf.WriteByte('&')
|
||||
} else {
|
||||
@ -234,7 +364,7 @@ func (cfg *Config) FormatDSN() string {
|
||||
|
||||
buf.WriteString(param)
|
||||
buf.WriteByte('=')
|
||||
buf.WriteString(url.QueryEscape(value))
|
||||
buf.WriteString(url.QueryEscape(cfg.Params[param]))
|
||||
}
|
||||
}
|
||||
|
||||
@ -244,10 +374,7 @@ func (cfg *Config) FormatDSN() string {
|
||||
// ParseDSN parses the DSN string to a Config
|
||||
func ParseDSN(dsn string) (cfg *Config, err error) {
|
||||
// New config with some default values
|
||||
cfg = &Config{
|
||||
Loc: time.UTC,
|
||||
Collation: defaultCollation,
|
||||
}
|
||||
cfg = NewConfig()
|
||||
|
||||
// [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
|
||||
// Find the last '/' (since the password or the net addr might contain a '/')
|
||||
@ -315,28 +442,9 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
|
||||
return nil, errInvalidDSNNoSlash
|
||||
}
|
||||
|
||||
if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
|
||||
return nil, errInvalidDSNUnsafeCollation
|
||||
if err = cfg.normalize(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set default network if empty
|
||||
if cfg.Net == "" {
|
||||
cfg.Net = "tcp"
|
||||
}
|
||||
|
||||
// Set default address if empty
|
||||
if cfg.Addr == "" {
|
||||
switch cfg.Net {
|
||||
case "tcp":
|
||||
cfg.Addr = "127.0.0.1:3306"
|
||||
case "unix":
|
||||
cfg.Addr = "/tmp/mysql.sock"
|
||||
default:
|
||||
return nil, errors.New("default addr for network '" + cfg.Net + "' unknown")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@ -351,7 +459,6 @@ func parseDSNParams(cfg *Config, params string) (err error) {
|
||||
|
||||
// cfg params
|
||||
switch value := param[1]; param[0] {
|
||||
|
||||
// Disable INFILE whitelist / enable all files
|
||||
case "allowAllFiles":
|
||||
var isBool bool
|
||||
@ -368,6 +475,14 @@ func parseDSNParams(cfg *Config, params string) (err error) {
|
||||
return errors.New("invalid bool value: " + value)
|
||||
}
|
||||
|
||||
// Use native password authentication
|
||||
case "allowNativePasswords":
|
||||
var isBool bool
|
||||
cfg.AllowNativePasswords, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return errors.New("invalid bool value: " + value)
|
||||
}
|
||||
|
||||
// Use old authentication mode (pre MySQL 4.1)
|
||||
case "allowOldPasswords":
|
||||
var isBool bool
|
||||
@ -441,14 +556,26 @@ func parseDSNParams(cfg *Config, params string) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// Strict mode
|
||||
case "strict":
|
||||
// Reject read-only connections
|
||||
case "rejectReadOnly":
|
||||
var isBool bool
|
||||
cfg.Strict, isBool = readBool(value)
|
||||
cfg.RejectReadOnly, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return errors.New("invalid bool value: " + value)
|
||||
}
|
||||
|
||||
// Server public key
|
||||
case "serverPubKey":
|
||||
name, err := url.QueryUnescape(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid value for server pub key name: %v", err)
|
||||
}
|
||||
cfg.ServerPubKey = name
|
||||
|
||||
// Strict mode
|
||||
case "strict":
|
||||
panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
|
||||
|
||||
// Dial Timeout
|
||||
case "timeout":
|
||||
cfg.Timeout, err = time.ParseDuration(value)
|
||||
@ -462,32 +589,17 @@ func parseDSNParams(cfg *Config, params string) (err error) {
|
||||
if isBool {
|
||||
if boolValue {
|
||||
cfg.TLSConfig = "true"
|
||||
cfg.tls = &tls.Config{}
|
||||
} else {
|
||||
cfg.TLSConfig = "false"
|
||||
}
|
||||
} else if vl := strings.ToLower(value); vl == "skip-verify" {
|
||||
} else if vl := strings.ToLower(value); vl == "skip-verify" || vl == "preferred" {
|
||||
cfg.TLSConfig = vl
|
||||
cfg.tls = &tls.Config{InsecureSkipVerify: true}
|
||||
} else {
|
||||
name, err := url.QueryUnescape(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid value for TLS config name: %v", err)
|
||||
}
|
||||
|
||||
if tlsConfig, ok := tlsConfigRegister[name]; ok {
|
||||
if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
|
||||
host, _, err := net.SplitHostPort(cfg.Addr)
|
||||
if err == nil {
|
||||
tlsConfig.ServerName = host
|
||||
}
|
||||
}
|
||||
|
||||
cfg.TLSConfig = name
|
||||
cfg.tls = tlsConfig
|
||||
} else {
|
||||
return errors.New("invalid value / unknown config name: " + name)
|
||||
}
|
||||
cfg.TLSConfig = name
|
||||
}
|
||||
|
||||
// I/O write Timeout
|
||||
@ -496,7 +608,11 @@ func parseDSNParams(cfg *Config, params string) (err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
case "maxAllowedPacket":
|
||||
cfg.MaxAllowedPacket, err = strconv.Atoi(value)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
default:
|
||||
// lazy init
|
||||
if cfg.Params == nil {
|
||||
@ -511,3 +627,10 @@ func parseDSNParams(cfg *Config, params string) (err error) {
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func ensureHavePort(addr string) string {
|
||||
if _, _, err := net.SplitHostPort(addr); err != nil {
|
||||
return net.JoinHostPort(addr, "3306")
|
||||
}
|
||||
return addr
|
||||
}
|
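Pulling the new DSN options together, a hedged example (host, credentials, and database are placeholders) that exercises parameters introduced or changed in this hunk:

package main

import (
	"fmt"

	"github.com/go-sql-driver/mysql"
)

func main() {
	dsn := "user:pass@tcp(127.0.0.1:3306)/test" +
		"?parseTime=true&rejectReadOnly=true&maxAllowedPacket=16777216&allowNativePasswords=true"

	cfg, err := mysql.ParseDSN(dsn)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.RejectReadOnly, cfg.MaxAllowedPacket) // true 16777216

	// FormatDSN round-trips the parsed configuration back into a DSN string.
	fmt.Println(cfg.FormatDSN())
}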
415
vendor/github.com/go-sql-driver/mysql/dsn_test.go
generated
vendored
Normal file
415
vendor/github.com/go-sql-driver/mysql/dsn_test.go
generated
vendored
Normal file
@ -0,0 +1,415 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var testDSNs = []struct {
|
||||
in string
|
||||
out *Config
|
||||
}{{
|
||||
"username:password@protocol(address)/dbname?param=value",
|
||||
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
|
||||
}, {
|
||||
"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true",
|
||||
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, ColumnsWithAlias: true},
|
||||
}, {
|
||||
"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true",
|
||||
&Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, ColumnsWithAlias: true, MultiStatements: true},
|
||||
}, {
|
||||
"user@unix(/path/to/socket)/dbname?charset=utf8",
|
||||
&Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
|
||||
}, {
|
||||
"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true",
|
||||
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, TLSConfig: "true"},
|
||||
}, {
|
||||
"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify",
|
||||
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true, TLSConfig: "skip-verify"},
|
||||
}, {
|
||||
"user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci&maxAllowedPacket=16777216&tls=false&allowCleartextPasswords=true&parseTime=true&rejectReadOnly=true",
|
||||
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, TLSConfig: "false", AllowCleartextPasswords: true, AllowNativePasswords: true, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, ClientFoundRows: true, MaxAllowedPacket: 16777216, ParseTime: true, RejectReadOnly: true},
|
||||
}, {
|
||||
"user:password@/dbname?allowNativePasswords=false&maxAllowedPacket=0",
|
||||
&Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: 0, AllowNativePasswords: false},
|
||||
}, {
|
||||
"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local",
|
||||
&Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.Local, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
|
||||
}, {
|
||||
"/dbname",
|
||||
&Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
|
||||
}, {
|
||||
"@/",
|
||||
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
|
||||
}, {
|
||||
"/",
|
||||
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
|
||||
}, {
|
||||
"",
|
||||
&Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
|
||||
}, {
|
||||
"user:p@/ssword@/",
|
||||
&Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
|
||||
}, {
|
||||
"unix/?arg=%2Fsome%2Fpath.ext",
|
||||
&Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
|
||||
}, {
|
||||
"tcp(127.0.0.1)/dbname",
|
||||
&Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
|
||||
}, {
|
||||
"tcp(de:ad:be:ef::ca:fe)/dbname",
|
||||
&Config{Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:3306", DBName: "dbname", Collation: "utf8mb4_general_ci", Loc: time.UTC, MaxAllowedPacket: defaultMaxAllowedPacket, AllowNativePasswords: true},
|
||||
},
|
||||
}
|
||||
|
||||
func TestDSNParser(t *testing.T) {
|
||||
for i, tst := range testDSNs {
|
||||
cfg, err := ParseDSN(tst.in)
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
}
|
||||
|
||||
// pointer not static
|
||||
cfg.tls = nil
|
||||
|
||||
if !reflect.DeepEqual(cfg, tst.out) {
|
||||
t.Errorf("%d. ParseDSN(%q) mismatch:\ngot %+v\nwant %+v", i, tst.in, cfg, tst.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDSNParserInvalid(t *testing.T) {
|
||||
var invalidDSNs = []string{
|
||||
"@net(addr/", // no closing brace
|
||||
"@tcp(/", // no closing brace
|
||||
"tcp(/", // no closing brace
|
||||
"(/", // no closing brace
|
||||
"net(addr)//", // unescaped
|
||||
"User:pass@tcp(1.2.3.4:3306)", // no trailing slash
|
||||
"net()/", // unknown default addr
|
||||
//"/dbname?arg=/some/unescaped/path",
|
||||
}
|
||||
|
||||
for i, tst := range invalidDSNs {
|
||||
if _, err := ParseDSN(tst); err == nil {
|
||||
t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDSNReformat(t *testing.T) {
|
||||
for i, tst := range testDSNs {
|
||||
dsn1 := tst.in
|
||||
cfg1, err := ParseDSN(dsn1)
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
continue
|
||||
}
|
||||
cfg1.tls = nil // pointer not static
|
||||
res1 := fmt.Sprintf("%+v", cfg1)
|
||||
|
||||
dsn2 := cfg1.FormatDSN()
|
||||
cfg2, err := ParseDSN(dsn2)
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
continue
|
||||
}
|
||||
cfg2.tls = nil // pointer not static
|
||||
res2 := fmt.Sprintf("%+v", cfg2)
|
||||
|
||||
if res1 != res2 {
|
||||
t.Errorf("%d. %q does not match %q", i, res2, res1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDSNServerPubKey(t *testing.T) {
|
||||
baseDSN := "User:password@tcp(localhost:5555)/dbname?serverPubKey="
|
||||
|
||||
RegisterServerPubKey("testKey", testPubKeyRSA)
|
||||
defer DeregisterServerPubKey("testKey")
|
||||
|
||||
tst := baseDSN + "testKey"
|
||||
cfg, err := ParseDSN(tst)
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
}
|
||||
|
||||
if cfg.ServerPubKey != "testKey" {
|
||||
t.Errorf("unexpected cfg.ServerPubKey value: %v", cfg.ServerPubKey)
|
||||
}
|
||||
if cfg.pubKey != testPubKeyRSA {
|
||||
t.Error("pub key pointer doesn't match")
|
||||
}
|
||||
|
||||
// Key is missing
|
||||
tst = baseDSN + "invalid_name"
|
||||
cfg, err = ParseDSN(tst)
|
||||
if err == nil {
|
||||
t.Errorf("invalid name in DSN (%s) but did not error. Got config: %#v", tst, cfg)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDSNServerPubKeyQueryEscape(t *testing.T) {
|
||||
const name = "&%!:"
|
||||
dsn := "User:password@tcp(localhost:5555)/dbname?serverPubKey=" + url.QueryEscape(name)
|
||||
|
||||
RegisterServerPubKey(name, testPubKeyRSA)
|
||||
defer DeregisterServerPubKey(name)
|
||||
|
||||
cfg, err := ParseDSN(dsn)
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
}
|
||||
|
||||
if cfg.pubKey != testPubKeyRSA {
|
||||
t.Error("pub key pointer doesn't match")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDSNWithCustomTLS(t *testing.T) {
|
||||
baseDSN := "User:password@tcp(localhost:5555)/dbname?tls="
|
||||
tlsCfg := tls.Config{}
|
||||
|
||||
RegisterTLSConfig("utils_test", &tlsCfg)
|
||||
defer DeregisterTLSConfig("utils_test")
|
||||
|
||||
// Custom TLS is missing
|
||||
tst := baseDSN + "invalid_tls"
|
||||
cfg, err := ParseDSN(tst)
|
||||
if err == nil {
|
||||
t.Errorf("invalid custom TLS in DSN (%s) but did not error. Got config: %#v", tst, cfg)
|
||||
}
|
||||
|
||||
tst = baseDSN + "utils_test"
|
||||
|
||||
// Custom TLS with a server name
|
||||
name := "foohost"
|
||||
tlsCfg.ServerName = name
|
||||
cfg, err = ParseDSN(tst)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
} else if cfg.tls.ServerName != name {
|
||||
t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst)
|
||||
}
|
||||
|
||||
// Custom TLS without a server name
|
||||
name = "localhost"
|
||||
tlsCfg.ServerName = ""
|
||||
cfg, err = ParseDSN(tst)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
} else if cfg.tls.ServerName != name {
|
||||
t.Errorf("did not get the correct ServerName (%s) parsing DSN (%s).", name, tst)
|
||||
} else if tlsCfg.ServerName != "" {
|
||||
t.Errorf("tlsCfg was mutated ServerName (%s) should be empty parsing DSN (%s).", name, tst)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDSNTLSConfig(t *testing.T) {
|
||||
expectedServerName := "example.com"
|
||||
dsn := "tcp(example.com:1234)/?tls=true"
|
||||
|
||||
cfg, err := ParseDSN(dsn)
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
}
|
||||
if cfg.tls == nil {
|
||||
t.Error("cfg.tls should not be nil")
|
||||
}
|
||||
if cfg.tls.ServerName != expectedServerName {
|
||||
t.Errorf("cfg.tls.ServerName should be %q, got %q (host with port)", expectedServerName, cfg.tls.ServerName)
|
||||
}
|
||||
|
||||
dsn = "tcp(example.com)/?tls=true"
|
||||
cfg, err = ParseDSN(dsn)
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
}
|
||||
if cfg.tls == nil {
|
||||
t.Error("cfg.tls should not be nil")
|
||||
}
|
||||
if cfg.tls.ServerName != expectedServerName {
|
||||
t.Errorf("cfg.tls.ServerName should be %q, got %q (host without port)", expectedServerName, cfg.tls.ServerName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDSNWithCustomTLSQueryEscape(t *testing.T) {
|
||||
const configKey = "&%!:"
|
||||
dsn := "User:password@tcp(localhost:5555)/dbname?tls=" + url.QueryEscape(configKey)
|
||||
name := "foohost"
|
||||
tlsCfg := tls.Config{ServerName: name}
|
||||
|
||||
RegisterTLSConfig(configKey, &tlsCfg)
|
||||
defer DeregisterTLSConfig(configKey)
|
||||
|
||||
cfg, err := ParseDSN(dsn)
|
||||
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
} else if cfg.tls.ServerName != name {
|
||||
t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, dsn)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDSNUnsafeCollation(t *testing.T) {
|
||||
_, err := ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true")
|
||||
if err != errInvalidDSNUnsafeCollation {
|
||||
t.Errorf("expected %v, got %v", errInvalidDSNUnsafeCollation, err)
|
||||
}
|
||||
|
||||
_, err = ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false")
|
||||
if err != nil {
|
||||
t.Errorf("expected %v, got %v", nil, err)
|
||||
}
|
||||
|
||||
_, err = ParseDSN("/dbname?collation=gbk_chinese_ci")
|
||||
if err != nil {
|
||||
t.Errorf("expected %v, got %v", nil, err)
|
||||
}
|
||||
|
||||
_, err = ParseDSN("/dbname?collation=ascii_bin&interpolateParams=true")
|
||||
if err != nil {
|
||||
t.Errorf("expected %v, got %v", nil, err)
|
||||
}
|
||||
|
||||
_, err = ParseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true")
|
||||
if err != nil {
|
||||
t.Errorf("expected %v, got %v", nil, err)
|
||||
}
|
||||
|
||||
_, err = ParseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true")
|
||||
if err != nil {
|
||||
t.Errorf("expected %v, got %v", nil, err)
|
||||
}
|
||||
|
||||
_, err = ParseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true")
|
||||
if err != nil {
|
||||
t.Errorf("expected %v, got %v", nil, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParamsAreSorted(t *testing.T) {
|
||||
expected := "/dbname?interpolateParams=true&foobar=baz&quux=loo"
|
||||
cfg := NewConfig()
|
||||
cfg.DBName = "dbname"
|
||||
cfg.InterpolateParams = true
|
||||
cfg.Params = map[string]string{
|
||||
"quux": "loo",
|
||||
"foobar": "baz",
|
||||
}
|
||||
actual := cfg.FormatDSN()
|
||||
if actual != expected {
|
||||
t.Errorf("generic Config.Params were not sorted: want %#v, got %#v", expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloneConfig(t *testing.T) {
|
||||
RegisterServerPubKey("testKey", testPubKeyRSA)
|
||||
defer DeregisterServerPubKey("testKey")
|
||||
|
||||
expectedServerName := "example.com"
|
||||
dsn := "tcp(example.com:1234)/?tls=true&foobar=baz&serverPubKey=testKey"
|
||||
cfg, err := ParseDSN(dsn)
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
|
||||
cfg2 := cfg.Clone()
|
||||
if cfg == cfg2 {
|
||||
t.Errorf("Config.Clone did not create a separate config struct")
|
||||
}
|
||||
|
||||
if cfg2.tls.ServerName != expectedServerName {
|
||||
t.Errorf("cfg.tls.ServerName should be %q, got %q (host with port)", expectedServerName, cfg.tls.ServerName)
|
||||
}
|
||||
|
||||
cfg2.tls.ServerName = "example2.com"
|
||||
if cfg.tls.ServerName == cfg2.tls.ServerName {
|
||||
t.Errorf("changed cfg.tls.Server name should not propagate to original Config")
|
||||
}
|
||||
|
||||
if _, ok := cfg2.Params["foobar"]; !ok {
|
||||
t.Errorf("cloned Config is missing custom params")
|
||||
}
|
||||
|
||||
delete(cfg2.Params, "foobar")
|
||||
|
||||
if _, ok := cfg.Params["foobar"]; !ok {
|
||||
t.Errorf("custom params in cloned Config should not propagate to original Config")
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(cfg.pubKey, cfg2.pubKey) {
|
||||
t.Errorf("public key in Config should be identical")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeTLSConfig(t *testing.T) {
|
||||
tt := []struct {
|
||||
tlsConfig string
|
||||
want *tls.Config
|
||||
}{
|
||||
{"", nil},
|
||||
{"false", nil},
|
||||
{"true", &tls.Config{ServerName: "myserver"}},
|
||||
{"skip-verify", &tls.Config{InsecureSkipVerify: true}},
|
||||
{"preferred", &tls.Config{InsecureSkipVerify: true}},
|
||||
{"test_tls_config", &tls.Config{ServerName: "myServerName"}},
|
||||
}
|
||||
|
||||
RegisterTLSConfig("test_tls_config", &tls.Config{ServerName: "myServerName"})
|
||||
defer func() { DeregisterTLSConfig("test_tls_config") }()
|
||||
|
||||
for _, tc := range tt {
|
||||
t.Run(tc.tlsConfig, func(t *testing.T) {
|
||||
cfg := &Config{
|
||||
Addr: "myserver:3306",
|
||||
TLSConfig: tc.tlsConfig,
|
||||
}
|
||||
|
||||
cfg.normalize()
|
||||
|
||||
if cfg.tls == nil {
|
||||
if tc.want != nil {
|
||||
t.Fatal("wanted a tls config but got nil instead")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if cfg.tls.ServerName != tc.want.ServerName {
|
||||
t.Errorf("tls.ServerName doesn't match (want: '%s', got: '%s')",
|
||||
tc.want.ServerName, cfg.tls.ServerName)
|
||||
}
|
||||
if cfg.tls.InsecureSkipVerify != tc.want.InsecureSkipVerify {
|
||||
t.Errorf("tls.InsecureSkipVerify doesn't match (want: %T, got :%T)",
|
||||
tc.want.InsecureSkipVerify, cfg.tls.InsecureSkipVerify)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkParseDSN(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
for _, tst := range testDSNs {
|
||||
if _, err := ParseDSN(tst.in); err != nil {
|
||||
b.Error(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
104
vendor/github.com/go-sql-driver/mysql/errors.go
generated
vendored
@ -9,30 +9,35 @@
package mysql

import (
    "database/sql/driver"
    "errors"
    "fmt"
    "io"
    "log"
    "os"
)

// Various errors the driver might return. Can change between driver versions.
var (
    ErrInvalidConn       = errors.New("Invalid Connection")
    ErrMalformPkt        = errors.New("Malformed Packet")
    ErrNoTLS             = errors.New("TLS encryption requested but server does not support TLS")
    ErrOldPassword       = errors.New("This user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
    ErrCleartextPassword = errors.New("This user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN.")
    ErrUnknownPlugin     = errors.New("The authentication plugin is not supported.")
    ErrOldProtocol       = errors.New("MySQL-Server does not support required Protocol 41+")
    ErrPktSync           = errors.New("Commands out of sync. You can't run this command now")
    ErrPktSyncMul        = errors.New("Commands out of sync. Did you run multiple statements at once?")
    ErrPktTooLarge       = errors.New("Packet for query is too large. You can change this value on the server by adjusting the 'max_allowed_packet' variable.")
    ErrBusyBuffer        = errors.New("Busy buffer")
    ErrInvalidConn       = errors.New("invalid connection")
    ErrMalformPkt        = errors.New("malformed packet")
    ErrNoTLS             = errors.New("TLS requested but server does not support TLS")
    ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
    ErrNativePassword    = errors.New("this user requires mysql native password authentication.")
    ErrOldPassword       = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
    ErrUnknownPlugin     = errors.New("this authentication plugin is not supported")
    ErrOldProtocol       = errors.New("MySQL server does not support required protocol 41+")
    ErrPktSync           = errors.New("commands out of sync. You can't run this command now")
    ErrPktSyncMul        = errors.New("commands out of sync. Did you run multiple statements at once?")
    ErrPktTooLarge       = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
    ErrBusyBuffer        = errors.New("busy buffer")

    // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
    // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
    // to trigger a resend.
    // See https://github.com/go-sql-driver/mysql/pull/302
    errBadConnNoWrite = errors.New("bad connection")
)

var errLog Logger = log.New(os.Stderr, "[MySQL] ", log.Ldate|log.Ltime|log.Lshortfile)
var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))

// Logger is used to log critical error messages.
type Logger interface {
@ -58,74 +63,3 @@ type MySQLError struct {
func (me *MySQLError) Error() string {
    return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
}

// MySQLWarnings is an error type which represents a group of one or more MySQL
// warnings
type MySQLWarnings []MySQLWarning

func (mws MySQLWarnings) Error() string {
    var msg string
    for i, warning := range mws {
        if i > 0 {
            msg += "\r\n"
        }
        msg += fmt.Sprintf(
            "%s %s: %s",
            warning.Level,
            warning.Code,
            warning.Message,
        )
    }
    return msg
}

// MySQLWarning is an error type which represents a single MySQL warning.
// Warnings are returned in groups only. See MySQLWarnings
type MySQLWarning struct {
    Level   string
    Code    string
    Message string
}

func (mc *mysqlConn) getWarnings() (err error) {
    rows, err := mc.Query("SHOW WARNINGS", nil)
    if err != nil {
        return
    }

    var warnings = MySQLWarnings{}
    var values = make([]driver.Value, 3)

    for {
        err = rows.Next(values)
        switch err {
        case nil:
            warning := MySQLWarning{}

            if raw, ok := values[0].([]byte); ok {
                warning.Level = string(raw)
            } else {
                warning.Level = fmt.Sprintf("%s", values[0])
            }
            if raw, ok := values[1].([]byte); ok {
                warning.Code = string(raw)
            } else {
                warning.Code = fmt.Sprintf("%s", values[1])
            }
            if raw, ok := values[2].([]byte); ok {
                warning.Message = string(raw)
            } else {
                warning.Message = fmt.Sprintf("%s", values[0])
            }

            warnings = append(warnings, warning)

        case io.EOF:
            return warnings

        default:
            rows.Close()
            return
        }
    }
}
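Errors raised by the server itself reach callers as *MySQLError, carrying the server error number and message. A minimal sketch of checking the error code in application code; the DSN, table, and the duplicate-key check (error 1062) are placeholders for illustration:

package main

import (
    "database/sql"
    "log"

    "github.com/go-sql-driver/mysql"
)

func main() {
    // DSN and table are placeholders for illustration only.
    db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    if _, err := db.Exec("INSERT INTO t (id) VALUES (1)"); err != nil {
        // *MySQLError exposes the server error number and message.
        if myErr, ok := err.(*mysql.MySQLError); ok && myErr.Number == 1062 { // ER_DUP_ENTRY
            log.Printf("duplicate key: %s", myErr.Message)
            return
        }
        log.Fatal(err)
    }
}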
42
vendor/github.com/go-sql-driver/mysql/errors_test.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package mysql

import (
    "bytes"
    "log"
    "testing"
)

func TestErrorsSetLogger(t *testing.T) {
    previous := errLog
    defer func() {
        errLog = previous
    }()

    // set up logger
    const expected = "prefix: test\n"
    buffer := bytes.NewBuffer(make([]byte, 0, 64))
    logger := log.New(buffer, "prefix: ", 0)

    // print
    SetLogger(logger)
    errLog.Print("test")

    // check result
    if actual := buffer.String(); actual != expected {
        t.Errorf("expected %q, got %q", expected, actual)
    }
}

func TestErrorsStrictIgnoreNotes(t *testing.T) {
    runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) {
        dbt.mustExec("DROP TABLE IF EXISTS does_not_exist")
    })
}
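TestErrorsSetLogger above exercises the exported SetLogger hook. A minimal sketch of routing the driver's critical error messages through a custom logger; the prefix and flags are arbitrary:

package main

import (
    "log"
    "os"

    "github.com/go-sql-driver/mysql"
)

func main() {
    // Replace the driver's internal errLog with our own logger.
    logger := log.New(os.Stderr, "[mysql-driver] ", log.Ldate|log.Ltime)
    if err := mysql.SetLogger(logger); err != nil {
        log.Fatal(err)
    }
}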
194
vendor/github.com/go-sql-driver/mysql/fields.go
generated
vendored
Normal file
@ -0,0 +1,194 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
func (mf *mysqlField) typeDatabaseName() string {
|
||||
switch mf.fieldType {
|
||||
case fieldTypeBit:
|
||||
return "BIT"
|
||||
case fieldTypeBLOB:
|
||||
if mf.charSet != collations[binaryCollation] {
|
||||
return "TEXT"
|
||||
}
|
||||
return "BLOB"
|
||||
case fieldTypeDate:
|
||||
return "DATE"
|
||||
case fieldTypeDateTime:
|
||||
return "DATETIME"
|
||||
case fieldTypeDecimal:
|
||||
return "DECIMAL"
|
||||
case fieldTypeDouble:
|
||||
return "DOUBLE"
|
||||
case fieldTypeEnum:
|
||||
return "ENUM"
|
||||
case fieldTypeFloat:
|
||||
return "FLOAT"
|
||||
case fieldTypeGeometry:
|
||||
return "GEOMETRY"
|
||||
case fieldTypeInt24:
|
||||
return "MEDIUMINT"
|
||||
case fieldTypeJSON:
|
||||
return "JSON"
|
||||
case fieldTypeLong:
|
||||
return "INT"
|
||||
case fieldTypeLongBLOB:
|
||||
if mf.charSet != collations[binaryCollation] {
|
||||
return "LONGTEXT"
|
||||
}
|
||||
return "LONGBLOB"
|
||||
case fieldTypeLongLong:
|
||||
return "BIGINT"
|
||||
case fieldTypeMediumBLOB:
|
||||
if mf.charSet != collations[binaryCollation] {
|
||||
return "MEDIUMTEXT"
|
||||
}
|
||||
return "MEDIUMBLOB"
|
||||
case fieldTypeNewDate:
|
||||
return "DATE"
|
||||
case fieldTypeNewDecimal:
|
||||
return "DECIMAL"
|
||||
case fieldTypeNULL:
|
||||
return "NULL"
|
||||
case fieldTypeSet:
|
||||
return "SET"
|
||||
case fieldTypeShort:
|
||||
return "SMALLINT"
|
||||
case fieldTypeString:
|
||||
if mf.charSet == collations[binaryCollation] {
|
||||
return "BINARY"
|
||||
}
|
||||
return "CHAR"
|
||||
case fieldTypeTime:
|
||||
return "TIME"
|
||||
case fieldTypeTimestamp:
|
||||
return "TIMESTAMP"
|
||||
case fieldTypeTiny:
|
||||
return "TINYINT"
|
||||
case fieldTypeTinyBLOB:
|
||||
if mf.charSet != collations[binaryCollation] {
|
||||
return "TINYTEXT"
|
||||
}
|
||||
return "TINYBLOB"
|
||||
case fieldTypeVarChar:
|
||||
if mf.charSet == collations[binaryCollation] {
|
||||
return "VARBINARY"
|
||||
}
|
||||
return "VARCHAR"
|
||||
case fieldTypeVarString:
|
||||
if mf.charSet == collations[binaryCollation] {
|
||||
return "VARBINARY"
|
||||
}
|
||||
return "VARCHAR"
|
||||
case fieldTypeYear:
|
||||
return "YEAR"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
scanTypeFloat32 = reflect.TypeOf(float32(0))
|
||||
scanTypeFloat64 = reflect.TypeOf(float64(0))
|
||||
scanTypeInt8 = reflect.TypeOf(int8(0))
|
||||
scanTypeInt16 = reflect.TypeOf(int16(0))
|
||||
scanTypeInt32 = reflect.TypeOf(int32(0))
|
||||
scanTypeInt64 = reflect.TypeOf(int64(0))
|
||||
scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
|
||||
scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
|
||||
scanTypeNullTime = reflect.TypeOf(NullTime{})
|
||||
scanTypeUint8 = reflect.TypeOf(uint8(0))
|
||||
scanTypeUint16 = reflect.TypeOf(uint16(0))
|
||||
scanTypeUint32 = reflect.TypeOf(uint32(0))
|
||||
scanTypeUint64 = reflect.TypeOf(uint64(0))
|
||||
scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
|
||||
scanTypeUnknown = reflect.TypeOf(new(interface{}))
|
||||
)
|
||||
|
||||
type mysqlField struct {
|
||||
tableName string
|
||||
name string
|
||||
length uint32
|
||||
flags fieldFlag
|
||||
fieldType fieldType
|
||||
decimals byte
|
||||
charSet uint8
|
||||
}
|
||||
|
||||
func (mf *mysqlField) scanType() reflect.Type {
|
||||
switch mf.fieldType {
|
||||
case fieldTypeTiny:
|
||||
if mf.flags&flagNotNULL != 0 {
|
||||
if mf.flags&flagUnsigned != 0 {
|
||||
return scanTypeUint8
|
||||
}
|
||||
return scanTypeInt8
|
||||
}
|
||||
return scanTypeNullInt
|
||||
|
||||
case fieldTypeShort, fieldTypeYear:
|
||||
if mf.flags&flagNotNULL != 0 {
|
||||
if mf.flags&flagUnsigned != 0 {
|
||||
return scanTypeUint16
|
||||
}
|
||||
return scanTypeInt16
|
||||
}
|
||||
return scanTypeNullInt
|
||||
|
||||
case fieldTypeInt24, fieldTypeLong:
|
||||
if mf.flags&flagNotNULL != 0 {
|
||||
if mf.flags&flagUnsigned != 0 {
|
||||
return scanTypeUint32
|
||||
}
|
||||
return scanTypeInt32
|
||||
}
|
||||
return scanTypeNullInt
|
||||
|
||||
case fieldTypeLongLong:
|
||||
if mf.flags&flagNotNULL != 0 {
|
||||
if mf.flags&flagUnsigned != 0 {
|
||||
return scanTypeUint64
|
||||
}
|
||||
return scanTypeInt64
|
||||
}
|
||||
return scanTypeNullInt
|
||||
|
||||
case fieldTypeFloat:
|
||||
if mf.flags&flagNotNULL != 0 {
|
||||
return scanTypeFloat32
|
||||
}
|
||||
return scanTypeNullFloat
|
||||
|
||||
case fieldTypeDouble:
|
||||
if mf.flags&flagNotNULL != 0 {
|
||||
return scanTypeFloat64
|
||||
}
|
||||
return scanTypeNullFloat
|
||||
|
||||
case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
|
||||
fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
|
||||
fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
|
||||
fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
|
||||
fieldTypeTime:
|
||||
return scanTypeRawBytes
|
||||
|
||||
case fieldTypeDate, fieldTypeNewDate,
|
||||
fieldTypeTimestamp, fieldTypeDateTime:
|
||||
// NullTime is always returned for more consistent behavior as it can
|
||||
// handle both cases of parseTime regardless if the field is nullable.
|
||||
return scanTypeNullTime
|
||||
|
||||
default:
|
||||
return scanTypeUnknown
|
||||
}
|
||||
}
|
26
vendor/github.com/go-sql-driver/mysql/infile.go
generated
vendored
@ -96,6 +96,10 @@ func deferredClose(err *error, closer io.Closer) {
func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
    var rdr io.Reader
    var data []byte
    packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
    if mc.maxWriteSize < packetSize {
        packetSize = mc.maxWriteSize
    }

    if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
        // The server might return an an absolute path. See issue #355.
@ -108,8 +112,6 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
        if inMap {
            rdr = handler()
            if rdr != nil {
                data = make([]byte, 4+mc.maxWriteSize)

                if cl, ok := rdr.(io.Closer); ok {
                    defer deferredClose(&err, cl)
                }
@ -124,7 +126,7 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
        fileRegisterLock.RLock()
        fr := fileRegister[name]
        fileRegisterLock.RUnlock()
        if mc.cfg.allowAllFiles || fr {
        if mc.cfg.AllowAllFiles || fr {
            var file *os.File
            var fi os.FileInfo

@ -134,22 +136,20 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
                // get file size
                if fi, err = file.Stat(); err == nil {
                    rdr = file
                    if fileSize := int(fi.Size()); fileSize <= mc.maxWriteSize {
                        data = make([]byte, 4+fileSize)
                    } else if fileSize <= mc.maxPacketAllowed {
                        data = make([]byte, 4+mc.maxWriteSize)
                    } else {
                        err = fmt.Errorf("Local File '%s' too large: Size: %d, Max: %d", name, fileSize, mc.maxPacketAllowed)
                    if fileSize := int(fi.Size()); fileSize < packetSize {
                        packetSize = fileSize
                    }
                }
            }
        } else {
            err = fmt.Errorf("Local File '%s' is not registered. Use the DSN parameter 'allowAllFiles=true' to allow all files", name)
            err = fmt.Errorf("local file '%s' is not registered", name)
        }
    }

    // send content packets
    if err == nil {
    // if packetSize == 0, the Reader contains no data
    if err == nil && packetSize > 0 {
        data := make([]byte, 4+packetSize)
        var n int
        for err == nil {
            n, err = rdr.Read(data[4:])
@ -175,8 +175,8 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
    // read OK packet
    if err == nil {
        return mc.readResultOK()
    } else {
        mc.readPacket()
    }

    mc.readPacket()
    return err
}
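handleInFileRequest above serves LOAD DATA LOCAL INFILE either from registered local files or from an io.Reader registered under a "Reader::" name. A minimal sketch using RegisterReaderHandler; the DSN, table, and data are placeholders, and the server must still permit LOCAL INFILE:

package main

import (
    "database/sql"
    "io"
    "log"
    "strings"

    "github.com/go-sql-driver/mysql"
)

func main() {
    // Register an io.Reader under a name; the "Reader::" prefix in the
    // statement below is the form handleInFileRequest matches on.
    mysql.RegisterReaderHandler("csvdata", func() io.Reader {
        return strings.NewReader("1,alice\n2,bob\n")
    })

    db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    if _, err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::csvdata' INTO TABLE t FIELDS TERMINATED BY ','"); err != nil {
        log.Fatal(err)
    }
}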
614
vendor/github.com/go-sql-driver/mysql/packets.go
generated
vendored
File diff suppressed because it is too large
336
vendor/github.com/go-sql-driver/mysql/packets_test.go
generated
vendored
Normal file
@ -0,0 +1,336 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
errConnClosed = errors.New("connection is closed")
|
||||
errConnTooManyReads = errors.New("too many reads")
|
||||
errConnTooManyWrites = errors.New("too many writes")
|
||||
)
|
||||
|
||||
// struct to mock a net.Conn for testing purposes
|
||||
type mockConn struct {
|
||||
laddr net.Addr
|
||||
raddr net.Addr
|
||||
data []byte
|
||||
written []byte
|
||||
queuedReplies [][]byte
|
||||
closed bool
|
||||
read int
|
||||
reads int
|
||||
writes int
|
||||
maxReads int
|
||||
maxWrites int
|
||||
}
|
||||
|
||||
func (m *mockConn) Read(b []byte) (n int, err error) {
|
||||
if m.closed {
|
||||
return 0, errConnClosed
|
||||
}
|
||||
|
||||
m.reads++
|
||||
if m.maxReads > 0 && m.reads > m.maxReads {
|
||||
return 0, errConnTooManyReads
|
||||
}
|
||||
|
||||
n = copy(b, m.data)
|
||||
m.read += n
|
||||
m.data = m.data[n:]
|
||||
return
|
||||
}
|
||||
func (m *mockConn) Write(b []byte) (n int, err error) {
|
||||
if m.closed {
|
||||
return 0, errConnClosed
|
||||
}
|
||||
|
||||
m.writes++
|
||||
if m.maxWrites > 0 && m.writes > m.maxWrites {
|
||||
return 0, errConnTooManyWrites
|
||||
}
|
||||
|
||||
n = len(b)
|
||||
m.written = append(m.written, b...)
|
||||
|
||||
if n > 0 && len(m.queuedReplies) > 0 {
|
||||
m.data = m.queuedReplies[0]
|
||||
m.queuedReplies = m.queuedReplies[1:]
|
||||
}
|
||||
return
|
||||
}
|
||||
func (m *mockConn) Close() error {
|
||||
m.closed = true
|
||||
return nil
|
||||
}
|
||||
func (m *mockConn) LocalAddr() net.Addr {
|
||||
return m.laddr
|
||||
}
|
||||
func (m *mockConn) RemoteAddr() net.Addr {
|
||||
return m.raddr
|
||||
}
|
||||
func (m *mockConn) SetDeadline(t time.Time) error {
|
||||
return nil
|
||||
}
|
||||
func (m *mockConn) SetReadDeadline(t time.Time) error {
|
||||
return nil
|
||||
}
|
||||
func (m *mockConn) SetWriteDeadline(t time.Time) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// make sure mockConn implements the net.Conn interface
|
||||
var _ net.Conn = new(mockConn)
|
||||
|
||||
func newRWMockConn(sequence uint8) (*mockConn, *mysqlConn) {
|
||||
conn := new(mockConn)
|
||||
mc := &mysqlConn{
|
||||
buf: newBuffer(conn),
|
||||
cfg: NewConfig(),
|
||||
netConn: conn,
|
||||
closech: make(chan struct{}),
|
||||
maxAllowedPacket: defaultMaxAllowedPacket,
|
||||
sequence: sequence,
|
||||
}
|
||||
return conn, mc
|
||||
}
|
||||
|
||||
func TestReadPacketSingleByte(t *testing.T) {
|
||||
conn := new(mockConn)
|
||||
mc := &mysqlConn{
|
||||
buf: newBuffer(conn),
|
||||
}
|
||||
|
||||
conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
|
||||
conn.maxReads = 1
|
||||
packet, err := mc.readPacket()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(packet) != 1 {
|
||||
t.Fatalf("unexpected packet length: expected %d, got %d", 1, len(packet))
|
||||
}
|
||||
if packet[0] != 0xff {
|
||||
t.Fatalf("unexpected packet content: expected %x, got %x", 0xff, packet[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadPacketWrongSequenceID(t *testing.T) {
|
||||
conn := new(mockConn)
|
||||
mc := &mysqlConn{
|
||||
buf: newBuffer(conn),
|
||||
}
|
||||
|
||||
// too low sequence id
|
||||
conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff}
|
||||
conn.maxReads = 1
|
||||
mc.sequence = 1
|
||||
_, err := mc.readPacket()
|
||||
if err != ErrPktSync {
|
||||
t.Errorf("expected ErrPktSync, got %v", err)
|
||||
}
|
||||
|
||||
// reset
|
||||
conn.reads = 0
|
||||
mc.sequence = 0
|
||||
mc.buf = newBuffer(conn)
|
||||
|
||||
// too high sequence id
|
||||
conn.data = []byte{0x01, 0x00, 0x00, 0x42, 0xff}
|
||||
_, err = mc.readPacket()
|
||||
if err != ErrPktSyncMul {
|
||||
t.Errorf("expected ErrPktSyncMul, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadPacketSplit(t *testing.T) {
|
||||
conn := new(mockConn)
|
||||
mc := &mysqlConn{
|
||||
buf: newBuffer(conn),
|
||||
}
|
||||
|
||||
data := make([]byte, maxPacketSize*2+4*3)
|
||||
const pkt2ofs = maxPacketSize + 4
|
||||
const pkt3ofs = 2 * (maxPacketSize + 4)
|
||||
|
||||
// case 1: payload has length maxPacketSize
|
||||
data = data[:pkt2ofs+4]
|
||||
|
||||
// 1st packet has maxPacketSize length and sequence id 0
|
||||
// ff ff ff 00 ...
|
||||
data[0] = 0xff
|
||||
data[1] = 0xff
|
||||
data[2] = 0xff
|
||||
|
||||
// mark the payload start and end of 1st packet so that we can check if the
|
||||
// content was correctly appended
|
||||
data[4] = 0x11
|
||||
data[maxPacketSize+3] = 0x22
|
||||
|
||||
// 2nd packet has payload length 0 and squence id 1
|
||||
// 00 00 00 01
|
||||
data[pkt2ofs+3] = 0x01
|
||||
|
||||
conn.data = data
|
||||
conn.maxReads = 3
|
||||
packet, err := mc.readPacket()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(packet) != maxPacketSize {
|
||||
t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize, len(packet))
|
||||
}
|
||||
if packet[0] != 0x11 {
|
||||
t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
|
||||
}
|
||||
if packet[maxPacketSize-1] != 0x22 {
|
||||
t.Fatalf("unexpected payload end: expected %x, got %x", 0x22, packet[maxPacketSize-1])
|
||||
}
|
||||
|
||||
// case 2: payload has length which is a multiple of maxPacketSize
|
||||
data = data[:cap(data)]
|
||||
|
||||
// 2nd packet now has maxPacketSize length
|
||||
data[pkt2ofs] = 0xff
|
||||
data[pkt2ofs+1] = 0xff
|
||||
data[pkt2ofs+2] = 0xff
|
||||
|
||||
// mark the payload start and end of the 2nd packet
|
||||
data[pkt2ofs+4] = 0x33
|
||||
data[pkt2ofs+maxPacketSize+3] = 0x44
|
||||
|
||||
// 3rd packet has payload length 0 and squence id 2
|
||||
// 00 00 00 02
|
||||
data[pkt3ofs+3] = 0x02
|
||||
|
||||
conn.data = data
|
||||
conn.reads = 0
|
||||
conn.maxReads = 5
|
||||
mc.sequence = 0
|
||||
packet, err = mc.readPacket()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(packet) != 2*maxPacketSize {
|
||||
t.Fatalf("unexpected packet length: expected %d, got %d", 2*maxPacketSize, len(packet))
|
||||
}
|
||||
if packet[0] != 0x11 {
|
||||
t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
|
||||
}
|
||||
if packet[2*maxPacketSize-1] != 0x44 {
|
||||
t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[2*maxPacketSize-1])
|
||||
}
|
||||
|
||||
// case 3: payload has a length larger maxPacketSize, which is not an exact
|
||||
// multiple of it
|
||||
data = data[:pkt2ofs+4+42]
|
||||
data[pkt2ofs] = 0x2a
|
||||
data[pkt2ofs+1] = 0x00
|
||||
data[pkt2ofs+2] = 0x00
|
||||
data[pkt2ofs+4+41] = 0x44
|
||||
|
||||
conn.data = data
|
||||
conn.reads = 0
|
||||
conn.maxReads = 4
|
||||
mc.sequence = 0
|
||||
packet, err = mc.readPacket()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(packet) != maxPacketSize+42 {
|
||||
t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize+42, len(packet))
|
||||
}
|
||||
if packet[0] != 0x11 {
|
||||
t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0])
|
||||
}
|
||||
if packet[maxPacketSize+41] != 0x44 {
|
||||
t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[maxPacketSize+41])
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadPacketFail(t *testing.T) {
|
||||
conn := new(mockConn)
|
||||
mc := &mysqlConn{
|
||||
buf: newBuffer(conn),
|
||||
closech: make(chan struct{}),
|
||||
}
|
||||
|
||||
// illegal empty (stand-alone) packet
|
||||
conn.data = []byte{0x00, 0x00, 0x00, 0x00}
|
||||
conn.maxReads = 1
|
||||
_, err := mc.readPacket()
|
||||
if err != ErrInvalidConn {
|
||||
t.Errorf("expected ErrInvalidConn, got %v", err)
|
||||
}
|
||||
|
||||
// reset
|
||||
conn.reads = 0
|
||||
mc.sequence = 0
|
||||
mc.buf = newBuffer(conn)
|
||||
|
||||
// fail to read header
|
||||
conn.closed = true
|
||||
_, err = mc.readPacket()
|
||||
if err != ErrInvalidConn {
|
||||
t.Errorf("expected ErrInvalidConn, got %v", err)
|
||||
}
|
||||
|
||||
// reset
|
||||
conn.closed = false
|
||||
conn.reads = 0
|
||||
mc.sequence = 0
|
||||
mc.buf = newBuffer(conn)
|
||||
|
||||
// fail to read body
|
||||
conn.maxReads = 1
|
||||
_, err = mc.readPacket()
|
||||
if err != ErrInvalidConn {
|
||||
t.Errorf("expected ErrInvalidConn, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// https://github.com/go-sql-driver/mysql/pull/801
|
||||
// not-NUL terminated plugin_name in init packet
|
||||
func TestRegression801(t *testing.T) {
|
||||
conn := new(mockConn)
|
||||
mc := &mysqlConn{
|
||||
buf: newBuffer(conn),
|
||||
cfg: new(Config),
|
||||
sequence: 42,
|
||||
closech: make(chan struct{}),
|
||||
}
|
||||
|
||||
conn.data = []byte{72, 0, 0, 42, 10, 53, 46, 53, 46, 56, 0, 165, 0, 0, 0,
|
||||
60, 70, 63, 58, 68, 104, 34, 97, 0, 223, 247, 33, 2, 0, 15, 128, 21, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 98, 120, 114, 47, 85, 75, 109, 99, 51, 77,
|
||||
50, 64, 0, 109, 121, 115, 113, 108, 95, 110, 97, 116, 105, 118, 101, 95,
|
||||
112, 97, 115, 115, 119, 111, 114, 100}
|
||||
conn.maxReads = 1
|
||||
|
||||
authData, pluginName, err := mc.readHandshakePacket()
|
||||
if err != nil {
|
||||
t.Fatalf("got error: %v", err)
|
||||
}
|
||||
|
||||
if pluginName != "mysql_native_password" {
|
||||
t.Errorf("expected plugin name 'mysql_native_password', got '%s'", pluginName)
|
||||
}
|
||||
|
||||
expectedAuthData := []byte{60, 70, 63, 58, 68, 104, 34, 97, 98, 120, 114,
|
||||
47, 85, 75, 109, 99, 51, 77, 50, 64}
|
||||
if !bytes.Equal(authData, expectedAuthData) {
|
||||
t.Errorf("expected authData '%v', got '%v'", expectedAuthData, authData)
|
||||
}
|
||||
}
|
191
vendor/github.com/go-sql-driver/mysql/rows.go
generated
vendored
@ -11,19 +11,20 @@ package mysql
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type mysqlField struct {
|
||||
tableName string
|
||||
name string
|
||||
flags fieldFlag
|
||||
fieldType byte
|
||||
decimals byte
|
||||
type resultSet struct {
|
||||
columns []mysqlField
|
||||
columnNames []string
|
||||
done bool
|
||||
}
|
||||
|
||||
type mysqlRows struct {
|
||||
mc *mysqlConn
|
||||
columns []mysqlField
|
||||
mc *mysqlConn
|
||||
rs resultSet
|
||||
finish func()
|
||||
}
|
||||
|
||||
type binaryRows struct {
|
||||
@ -34,45 +35,163 @@ type textRows struct {
|
||||
mysqlRows
|
||||
}
|
||||
|
||||
type emptyRows struct{}
|
||||
|
||||
func (rows *mysqlRows) Columns() []string {
|
||||
columns := make([]string, len(rows.columns))
|
||||
if rows.mc.cfg.columnsWithAlias {
|
||||
if rows.rs.columnNames != nil {
|
||||
return rows.rs.columnNames
|
||||
}
|
||||
|
||||
columns := make([]string, len(rows.rs.columns))
|
||||
if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
|
||||
for i := range columns {
|
||||
if tableName := rows.columns[i].tableName; len(tableName) > 0 {
|
||||
columns[i] = tableName + "." + rows.columns[i].name
|
||||
if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
|
||||
columns[i] = tableName + "." + rows.rs.columns[i].name
|
||||
} else {
|
||||
columns[i] = rows.columns[i].name
|
||||
columns[i] = rows.rs.columns[i].name
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for i := range columns {
|
||||
columns[i] = rows.columns[i].name
|
||||
columns[i] = rows.rs.columns[i].name
|
||||
}
|
||||
}
|
||||
|
||||
rows.rs.columnNames = columns
|
||||
return columns
|
||||
}
|
||||
|
||||
func (rows *mysqlRows) Close() error {
|
||||
func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
|
||||
return rows.rs.columns[i].typeDatabaseName()
|
||||
}
|
||||
|
||||
// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
|
||||
// return int64(rows.rs.columns[i].length), true
|
||||
// }
|
||||
|
||||
func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
|
||||
return rows.rs.columns[i].flags&flagNotNULL == 0, true
|
||||
}
|
||||
|
||||
func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
|
||||
column := rows.rs.columns[i]
|
||||
decimals := int64(column.decimals)
|
||||
|
||||
switch column.fieldType {
|
||||
case fieldTypeDecimal, fieldTypeNewDecimal:
|
||||
if decimals > 0 {
|
||||
return int64(column.length) - 2, decimals, true
|
||||
}
|
||||
return int64(column.length) - 1, decimals, true
|
||||
case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
|
||||
return decimals, decimals, true
|
||||
case fieldTypeFloat, fieldTypeDouble:
|
||||
if decimals == 0x1f {
|
||||
return math.MaxInt64, math.MaxInt64, true
|
||||
}
|
||||
return math.MaxInt64, decimals, true
|
||||
}
|
||||
|
||||
return 0, 0, false
|
||||
}
|
||||
|
||||
func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
|
||||
return rows.rs.columns[i].scanType()
|
||||
}
|
||||
|
||||
func (rows *mysqlRows) Close() (err error) {
|
||||
if f := rows.finish; f != nil {
|
||||
f()
|
||||
rows.finish = nil
|
||||
}
|
||||
|
||||
mc := rows.mc
|
||||
if mc == nil {
|
||||
return nil
|
||||
}
|
||||
if mc.netConn == nil {
|
||||
return ErrInvalidConn
|
||||
if err := mc.error(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// flip the buffer for this connection if we need to drain it.
|
||||
// note that for a successful query (i.e. one where rows.next()
|
||||
// has been called until it returns false), `rows.mc` will be nil
|
||||
// by the time the user calls `(*Rows).Close`, so we won't reach this
|
||||
// see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47
|
||||
mc.buf.flip()
|
||||
|
||||
// Remove unread packets from stream
|
||||
if !rows.rs.done {
|
||||
err = mc.readUntilEOF()
|
||||
}
|
||||
if err == nil {
|
||||
if err = mc.discardResults(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
rows.mc = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func (rows *mysqlRows) HasNextResultSet() (b bool) {
|
||||
if rows.mc == nil {
|
||||
return false
|
||||
}
|
||||
return rows.mc.status&statusMoreResultsExists != 0
|
||||
}
|
||||
|
||||
func (rows *mysqlRows) nextResultSet() (int, error) {
|
||||
if rows.mc == nil {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if err := rows.mc.error(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Remove unread packets from stream
|
||||
err := mc.readUntilEOF()
|
||||
rows.mc = nil
|
||||
if !rows.rs.done {
|
||||
if err := rows.mc.readUntilEOF(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
rows.rs.done = true
|
||||
}
|
||||
|
||||
if !rows.HasNextResultSet() {
|
||||
rows.mc = nil
|
||||
return 0, io.EOF
|
||||
}
|
||||
rows.rs = resultSet{}
|
||||
return rows.mc.readResultSetHeaderPacket()
|
||||
}
|
||||
|
||||
func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
|
||||
for {
|
||||
resLen, err := rows.nextResultSet()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if resLen > 0 {
|
||||
return resLen, nil
|
||||
}
|
||||
|
||||
rows.rs.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (rows *binaryRows) NextResultSet() error {
|
||||
resLen, err := rows.nextNotEmptyResultSet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rows.rs.columns, err = rows.mc.readColumns(resLen)
|
||||
return err
|
||||
}
|
||||
|
||||
func (rows *binaryRows) Next(dest []driver.Value) error {
|
||||
if mc := rows.mc; mc != nil {
|
||||
if mc.netConn == nil {
|
||||
return ErrInvalidConn
|
||||
if err := mc.error(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Fetch next row from stream
|
||||
@ -81,10 +200,20 @@ func (rows *binaryRows) Next(dest []driver.Value) error {
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
func (rows *textRows) NextResultSet() (err error) {
|
||||
resLen, err := rows.nextNotEmptyResultSet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rows.rs.columns, err = rows.mc.readColumns(resLen)
|
||||
return err
|
||||
}
|
||||
|
||||
func (rows *textRows) Next(dest []driver.Value) error {
|
||||
if mc := rows.mc; mc != nil {
|
||||
if mc.netConn == nil {
|
||||
return ErrInvalidConn
|
||||
if err := mc.error(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Fetch next row from stream
|
||||
@ -92,15 +221,3 @@ func (rows *textRows) Next(dest []driver.Value) error {
|
||||
}
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
func (rows emptyRows) Columns() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rows emptyRows) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rows emptyRows) Next(dest []driver.Value) error {
|
||||
return io.EOF
|
||||
}
|
||||
|
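The resultSet bookkeeping and NextResultSet methods above let database/sql iterate over several result sets returned by one round trip. A minimal sketch, assuming a placeholder DSN and multiStatements=true in the DSN:

package main

import (
    "database/sql"
    "fmt"
    "log"

    _ "github.com/go-sql-driver/mysql"
)

func main() {
    // multiStatements=true is required to send several statements at once.
    db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test?multiStatements=true")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    rows, err := db.Query("SELECT 1; SELECT 2")
    if err != nil {
        log.Fatal(err)
    }
    defer rows.Close()

    // Drain each result set, then advance with NextResultSet, which maps onto
    // the driver's nextResultSet logic above.
    for {
        for rows.Next() {
            var n int
            if err := rows.Scan(&n); err != nil {
                log.Fatal(err)
            }
            fmt.Println(n)
        }
        if !rows.NextResultSet() {
            break
        }
    }
}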
136
vendor/github.com/go-sql-driver/mysql/statement.go
generated
vendored
@ -11,20 +11,22 @@ package mysql
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type mysqlStmt struct {
|
||||
mc *mysqlConn
|
||||
id uint32
|
||||
paramCount int
|
||||
columns []mysqlField // cached from the first query
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) Close() error {
|
||||
if stmt.mc == nil || stmt.mc.netConn == nil {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
if stmt.mc == nil || stmt.mc.closed.IsSet() {
|
||||
// driver.Stmt.Close can be called more than once, thus this function
|
||||
// has to be idempotent.
|
||||
// See also Issue #450 and golang/go#16019.
|
||||
//errLog.Print(ErrInvalidConn)
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
|
||||
@ -42,14 +44,14 @@ func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
|
||||
if stmt.mc.netConn == nil {
|
||||
if stmt.mc.closed.IsSet() {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
// Send command
|
||||
err := stmt.writeExecutePacket(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, stmt.mc.markBadConn(err)
|
||||
}
|
||||
|
||||
mc := stmt.mc
|
||||
@ -59,37 +61,45 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
|
||||
|
||||
// Read Result
|
||||
resLen, err := mc.readResultSetHeaderPacket()
|
||||
if err == nil {
|
||||
if resLen > 0 {
|
||||
// Columns
|
||||
err = mc.readUntilEOF()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Rows
|
||||
err = mc.readUntilEOF()
|
||||
if resLen > 0 {
|
||||
// Columns
|
||||
if err = mc.readUntilEOF(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err == nil {
|
||||
return &mysqlResult{
|
||||
affectedRows: int64(mc.affectedRows),
|
||||
insertId: int64(mc.insertId),
|
||||
}, nil
|
||||
|
||||
// Rows
|
||||
if err := mc.readUntilEOF(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return nil, err
|
||||
if err := mc.discardResults(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &mysqlResult{
|
||||
affectedRows: int64(mc.affectedRows),
|
||||
insertId: int64(mc.insertId),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
|
||||
if stmt.mc.netConn == nil {
|
||||
return stmt.query(args)
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
|
||||
if stmt.mc.closed.IsSet() {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
// Send command
|
||||
err := stmt.writeExecutePacket(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, stmt.mc.markBadConn(err)
|
||||
}
|
||||
|
||||
mc := stmt.mc
|
||||
@ -101,17 +111,18 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
|
||||
}
|
||||
|
||||
rows := new(binaryRows)
|
||||
rows.mc = mc
|
||||
|
||||
if resLen > 0 {
|
||||
// Columns
|
||||
// If not cached, read them and cache them
|
||||
if stmt.columns == nil {
|
||||
rows.columns, err = mc.readColumns(resLen)
|
||||
stmt.columns = rows.columns
|
||||
} else {
|
||||
rows.columns = stmt.columns
|
||||
err = mc.readUntilEOF()
|
||||
rows.mc = mc
|
||||
rows.rs.columns, err = mc.readColumns(resLen)
|
||||
} else {
|
||||
rows.rs.done = true
|
||||
|
||||
switch err := rows.NextResultSet(); err {
|
||||
case nil, io.EOF:
|
||||
return rows, nil
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
@ -120,31 +131,74 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
|
||||
|
||||
type converter struct{}
|
||||
|
||||
// ConvertValue mirrors the reference/default converter in database/sql/driver
|
||||
// with _one_ exception. We support uint64 with their high bit and the default
|
||||
// implementation does not. This function should be kept in sync with
|
||||
// database/sql/driver defaultConverter.ConvertValue() except for that
|
||||
// deliberate difference.
|
||||
func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
|
||||
if driver.IsValue(v) {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
if vr, ok := v.(driver.Valuer); ok {
|
||||
sv, err := callValuerValue(vr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !driver.IsValue(sv) {
|
||||
return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
|
||||
}
|
||||
return sv, nil
|
||||
}
|
||||
|
||||
rv := reflect.ValueOf(v)
|
||||
switch rv.Kind() {
|
||||
case reflect.Ptr:
|
||||
// indirect pointers
|
||||
if rv.IsNil() {
|
||||
return nil, nil
|
||||
} else {
|
||||
return c.ConvertValue(rv.Elem().Interface())
|
||||
}
|
||||
return c.ConvertValue(rv.Elem().Interface())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return rv.Int(), nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
|
||||
return int64(rv.Uint()), nil
|
||||
case reflect.Uint64:
|
||||
u64 := rv.Uint()
|
||||
if u64 >= 1<<63 {
|
||||
return strconv.FormatUint(u64, 10), nil
|
||||
}
|
||||
return int64(u64), nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return rv.Uint(), nil
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return rv.Float(), nil
|
||||
case reflect.Bool:
|
||||
return rv.Bool(), nil
|
||||
case reflect.Slice:
|
||||
ek := rv.Type().Elem().Kind()
|
||||
if ek == reflect.Uint8 {
|
||||
return rv.Bytes(), nil
|
||||
}
|
||||
return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek)
|
||||
case reflect.String:
|
||||
return rv.String(), nil
|
||||
}
|
||||
return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
|
||||
}
|
||||
|
||||
var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
|
||||
|
||||
// callValuerValue returns vr.Value(), with one exception:
|
||||
// If vr.Value is an auto-generated method on a pointer type and the
|
||||
// pointer is nil, it would panic at runtime in the panicwrap
|
||||
// method. Treat it like nil instead.
|
||||
//
|
||||
// This is so people can implement driver.Value on value types and
|
||||
// still use nil pointers to those types to mean nil/NULL, just like
|
||||
// string/*string.
|
||||
//
|
||||
// This is an exact copy of the same-named unexported function from the
|
||||
// database/sql package.
|
||||
func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
|
||||
if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
|
||||
rv.IsNil() &&
|
||||
rv.Type().Elem().Implements(valuerReflectType) {
|
||||
return nil, nil
|
||||
}
|
||||
return vr.Value()
|
||||
}
|
||||
|
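The converter above deliberately accepts uint64 arguments with the high bit set, which database/sql's default converter rejects. A minimal sketch of relying on that; the DSN and table are placeholders for illustration:

package main

import (
    "database/sql"
    "log"
    "math"

    _ "github.com/go-sql-driver/mysql"
)

func main() {
    // DSN and table are placeholders for illustration only.
    db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // A value above math.MaxInt64 is passed through the driver's converter
    // rather than being rejected by the default database/sql conversion.
    var big uint64 = math.MaxUint64
    if _, err := db.Exec("INSERT INTO counters (value) VALUES (?)", big); err != nil {
        log.Fatal(err)
    }
}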
126
vendor/github.com/go-sql-driver/mysql/statement_test.go
generated
vendored
Normal file
@ -0,0 +1,126 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestConvertDerivedString(t *testing.T) {
|
||||
type derived string
|
||||
|
||||
output, err := converter{}.ConvertValue(derived("value"))
|
||||
if err != nil {
|
||||
t.Fatal("Derived string type not convertible", err)
|
||||
}
|
||||
|
||||
if output != "value" {
|
||||
t.Fatalf("Derived string type not converted, got %#v %T", output, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertDerivedByteSlice(t *testing.T) {
|
||||
type derived []uint8
|
||||
|
||||
output, err := converter{}.ConvertValue(derived("value"))
|
||||
if err != nil {
|
||||
t.Fatal("Byte slice not convertible", err)
|
||||
}
|
||||
|
||||
if bytes.Compare(output.([]byte), []byte("value")) != 0 {
|
||||
t.Fatalf("Byte slice not converted, got %#v %T", output, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertDerivedUnsupportedSlice(t *testing.T) {
|
||||
type derived []int
|
||||
|
||||
_, err := converter{}.ConvertValue(derived{1})
|
||||
if err == nil || err.Error() != "unsupported type mysql.derived, a slice of int" {
|
||||
t.Fatal("Unexpected error", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertDerivedBool(t *testing.T) {
|
||||
type derived bool
|
||||
|
||||
output, err := converter{}.ConvertValue(derived(true))
|
||||
if err != nil {
|
||||
t.Fatal("Derived bool type not convertible", err)
|
||||
}
|
||||
|
||||
if output != true {
|
||||
t.Fatalf("Derived bool type not converted, got %#v %T", output, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertPointer(t *testing.T) {
|
||||
str := "value"
|
||||
|
||||
output, err := converter{}.ConvertValue(&str)
|
||||
if err != nil {
|
||||
t.Fatal("Pointer type not convertible", err)
|
||||
}
|
||||
|
||||
if output != "value" {
|
||||
t.Fatalf("Pointer type not converted, got %#v %T", output, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertSignedIntegers(t *testing.T) {
|
||||
values := []interface{}{
|
||||
int8(-42),
|
||||
int16(-42),
|
||||
int32(-42),
|
||||
int64(-42),
|
||||
int(-42),
|
||||
}
|
||||
|
||||
for _, value := range values {
|
||||
output, err := converter{}.ConvertValue(value)
|
||||
if err != nil {
|
||||
t.Fatalf("%T type not convertible %s", value, err)
|
||||
}
|
||||
|
||||
if output != int64(-42) {
|
||||
t.Fatalf("%T type not converted, got %#v %T", value, output, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertUnsignedIntegers(t *testing.T) {
|
||||
values := []interface{}{
|
||||
uint8(42),
|
||||
uint16(42),
|
||||
uint32(42),
|
||||
uint64(42),
|
||||
uint(42),
|
||||
}
|
||||
|
||||
for _, value := range values {
|
||||
output, err := converter{}.ConvertValue(value)
|
||||
if err != nil {
|
||||
t.Fatalf("%T type not convertible %s", value, err)
|
||||
}
|
||||
|
||||
if output != uint64(42) {
|
||||
t.Fatalf("%T type not converted, got %#v %T", value, output, output)
|
||||
}
|
||||
}
|
||||
|
||||
output, err := converter{}.ConvertValue(^uint64(0))
|
||||
if err != nil {
|
||||
t.Fatal("uint64 high-bit not convertible", err)
|
||||
}
|
||||
|
||||
if output != ^uint64(0) {
|
||||
t.Fatalf("uint64 high-bit converted, got %#v %T", output, output)
|
||||
}
|
||||
}
|
4
vendor/github.com/go-sql-driver/mysql/transaction.go
generated
vendored
@ -13,7 +13,7 @@ type mysqlTx struct {
|
||||
}
|
||||
|
||||
func (tx *mysqlTx) Commit() (err error) {
|
||||
if tx.mc == nil || tx.mc.netConn == nil {
|
||||
if tx.mc == nil || tx.mc.closed.IsSet() {
|
||||
return ErrInvalidConn
|
||||
}
|
||||
err = tx.mc.exec("COMMIT")
|
||||
@ -22,7 +22,7 @@ func (tx *mysqlTx) Commit() (err error) {
|
||||
}
|
||||
|
||||
func (tx *mysqlTx) Rollback() (err error) {
|
||||
if tx.mc == nil || tx.mc.netConn == nil {
|
||||
if tx.mc == nil || tx.mc.closed.IsSet() {
|
||||
return ErrInvalidConn
|
||||
}
|
||||
err = tx.mc.exec("ROLLBACK")
|
||||
|
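The Commit/Rollback guards above now consult the connection's closed flag rather than checking netConn directly. A hedged sketch of the calling side through database/sql, where either method reports ErrInvalidConn once the underlying connection is gone; the DSN and table are illustrative assumptions:

```
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Placeholder DSN.
	db, err := sql.Open("mysql", "root:@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	tx, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}
	if _, err := tx.Exec("UPDATE counters SET n = n + 1 WHERE id = ?", 1); err != nil {
		// Rollback, like Commit, returns ErrInvalidConn if the
		// connection was already marked closed.
		_ = tx.Rollback()
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
}
```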
730
vendor/github.com/go-sql-driver/mysql/utils.go
generated
vendored
@ -9,35 +9,32 @@
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"crypto/tls"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Registry for custom tls.Configs
|
||||
var (
|
||||
tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs
|
||||
|
||||
errInvalidDSNUnescaped = errors.New("Invalid DSN: Did you forget to escape a param value?")
|
||||
errInvalidDSNAddr = errors.New("Invalid DSN: Network Address not terminated (missing closing brace)")
|
||||
errInvalidDSNNoSlash = errors.New("Invalid DSN: Missing the slash separating the database name")
|
||||
errInvalidDSNUnsafeCollation = errors.New("Invalid DSN: interpolateParams can be used with ascii, latin1, utf8 and utf8mb4 charset")
|
||||
tlsConfigLock sync.RWMutex
|
||||
tlsConfigRegistry map[string]*tls.Config
|
||||
)
|
||||
|
||||
func init() {
|
||||
tlsConfigRegister = make(map[string]*tls.Config)
|
||||
}
|
||||
|
||||
// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
|
||||
// Use the key as a value in the DSN where tls=value.
|
||||
//
|
||||
// Note: The provided tls.Config is exclusively owned by the driver after
|
||||
// registering it.
|
||||
//
|
||||
// rootCertPool := x509.NewCertPool()
|
||||
// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
|
||||
// if err != nil {
|
||||
@ -59,243 +56,35 @@ func init() {
|
||||
// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
|
||||
//
|
||||
func RegisterTLSConfig(key string, config *tls.Config) error {
|
||||
if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" {
|
||||
return fmt.Errorf("Key '%s' is reserved", key)
|
||||
if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" || strings.ToLower(key) == "preferred" {
|
||||
return fmt.Errorf("key '%s' is reserved", key)
|
||||
}
|
||||
|
||||
tlsConfigRegister[key] = config
|
||||
tlsConfigLock.Lock()
|
||||
if tlsConfigRegistry == nil {
|
||||
tlsConfigRegistry = make(map[string]*tls.Config)
|
||||
}
|
||||
|
||||
tlsConfigRegistry[key] = config
|
||||
tlsConfigLock.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeregisterTLSConfig removes the tls.Config associated with key.
|
||||
func DeregisterTLSConfig(key string) {
|
||||
delete(tlsConfigRegister, key)
|
||||
tlsConfigLock.Lock()
|
||||
if tlsConfigRegistry != nil {
|
||||
delete(tlsConfigRegistry, key)
|
||||
}
|
||||
tlsConfigLock.Unlock()
|
||||
}
|
||||
|
||||
// parseDSN parses the DSN string to a config
|
||||
func parseDSN(dsn string) (cfg *config, err error) {
|
||||
// New config with some default values
|
||||
cfg = &config{
|
||||
loc: time.UTC,
|
||||
collation: defaultCollation,
|
||||
func getTLSConfigClone(key string) (config *tls.Config) {
|
||||
tlsConfigLock.RLock()
|
||||
if v, ok := tlsConfigRegistry[key]; ok {
|
||||
config = v.Clone()
|
||||
}
|
||||
|
||||
// [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
|
||||
// Find the last '/' (since the password or the net addr might contain a '/')
|
||||
foundSlash := false
|
||||
for i := len(dsn) - 1; i >= 0; i-- {
|
||||
if dsn[i] == '/' {
|
||||
foundSlash = true
|
||||
var j, k int
|
||||
|
||||
// left part is empty if i <= 0
|
||||
if i > 0 {
|
||||
// [username[:password]@][protocol[(address)]]
|
||||
// Find the last '@' in dsn[:i]
|
||||
for j = i; j >= 0; j-- {
|
||||
if dsn[j] == '@' {
|
||||
// username[:password]
|
||||
// Find the first ':' in dsn[:j]
|
||||
for k = 0; k < j; k++ {
|
||||
if dsn[k] == ':' {
|
||||
cfg.passwd = dsn[k+1 : j]
|
||||
break
|
||||
}
|
||||
}
|
||||
cfg.user = dsn[:k]
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// [protocol[(address)]]
|
||||
// Find the first '(' in dsn[j+1:i]
|
||||
for k = j + 1; k < i; k++ {
|
||||
if dsn[k] == '(' {
|
||||
// dsn[i-1] must be == ')' if an address is specified
|
||||
if dsn[i-1] != ')' {
|
||||
if strings.ContainsRune(dsn[k+1:i], ')') {
|
||||
return nil, errInvalidDSNUnescaped
|
||||
}
|
||||
return nil, errInvalidDSNAddr
|
||||
}
|
||||
cfg.addr = dsn[k+1 : i-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
cfg.net = dsn[j+1 : k]
|
||||
}
|
||||
|
||||
// dbname[?param1=value1&...¶mN=valueN]
|
||||
// Find the first '?' in dsn[i+1:]
|
||||
for j = i + 1; j < len(dsn); j++ {
|
||||
if dsn[j] == '?' {
|
||||
if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
|
||||
return
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
cfg.dbname = dsn[i+1 : j]
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !foundSlash && len(dsn) > 0 {
|
||||
return nil, errInvalidDSNNoSlash
|
||||
}
|
||||
|
||||
if cfg.interpolateParams && unsafeCollations[cfg.collation] {
|
||||
return nil, errInvalidDSNUnsafeCollation
|
||||
}
|
||||
|
||||
// Set default network if empty
|
||||
if cfg.net == "" {
|
||||
cfg.net = "tcp"
|
||||
}
|
||||
|
||||
// Set default address if empty
|
||||
if cfg.addr == "" {
|
||||
switch cfg.net {
|
||||
case "tcp":
|
||||
cfg.addr = "127.0.0.1:3306"
|
||||
case "unix":
|
||||
cfg.addr = "/tmp/mysql.sock"
|
||||
default:
|
||||
return nil, errors.New("Default addr for network '" + cfg.net + "' unknown")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// parseDSNParams parses the DSN "query string"
|
||||
// Values must be url.QueryEscape'ed
|
||||
func parseDSNParams(cfg *config, params string) (err error) {
|
||||
for _, v := range strings.Split(params, "&") {
|
||||
param := strings.SplitN(v, "=", 2)
|
||||
if len(param) != 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
// cfg params
|
||||
switch value := param[1]; param[0] {
|
||||
|
||||
// Enable client side placeholder substitution
|
||||
case "interpolateParams":
|
||||
var isBool bool
|
||||
cfg.interpolateParams, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return fmt.Errorf("Invalid Bool value: %s", value)
|
||||
}
|
||||
|
||||
// Disable INFILE whitelist / enable all files
|
||||
case "allowAllFiles":
|
||||
var isBool bool
|
||||
cfg.allowAllFiles, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return fmt.Errorf("Invalid Bool value: %s", value)
|
||||
}
|
||||
|
||||
// Use cleartext authentication mode (MySQL 5.5.10+)
|
||||
case "allowCleartextPasswords":
|
||||
var isBool bool
|
||||
cfg.allowCleartextPasswords, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return fmt.Errorf("Invalid Bool value: %s", value)
|
||||
}
|
||||
|
||||
// Use old authentication mode (pre MySQL 4.1)
|
||||
case "allowOldPasswords":
|
||||
var isBool bool
|
||||
cfg.allowOldPasswords, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return fmt.Errorf("Invalid Bool value: %s", value)
|
||||
}
|
||||
|
||||
// Switch "rowsAffected" mode
|
||||
case "clientFoundRows":
|
||||
var isBool bool
|
||||
cfg.clientFoundRows, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return fmt.Errorf("Invalid Bool value: %s", value)
|
||||
}
|
||||
|
||||
// Collation
|
||||
case "collation":
|
||||
collation, ok := collations[value]
|
||||
if !ok {
|
||||
// Note possibility for false negatives:
|
||||
// could be triggered although the collation is valid if the
|
||||
// collations map does not contain entries the server supports.
|
||||
err = errors.New("unknown collation")
|
||||
return
|
||||
}
|
||||
cfg.collation = collation
|
||||
break
|
||||
|
||||
case "columnsWithAlias":
|
||||
var isBool bool
|
||||
cfg.columnsWithAlias, isBool = readBool(value)
|
||||
if !isBool {
|
||||
return fmt.Errorf("Invalid Bool value: %s", value)
|
||||
}
|
||||
|
||||
// Time Location
|
||||
case "loc":
|
||||
if value, err = url.QueryUnescape(value); err != nil {
|
||||
return
|
||||
}
|
||||
cfg.loc, err = time.LoadLocation(value)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Dial Timeout
|
||||
case "timeout":
|
||||
cfg.timeout, err = time.ParseDuration(value)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// TLS-Encryption
|
||||
case "tls":
|
||||
boolValue, isBool := readBool(value)
|
||||
if isBool {
|
||||
if boolValue {
|
||||
cfg.tls = &tls.Config{}
|
||||
}
|
||||
} else {
|
||||
if strings.ToLower(value) == "skip-verify" {
|
||||
cfg.tls = &tls.Config{InsecureSkipVerify: true}
|
||||
} else if tlsConfig, ok := tlsConfigRegister[value]; ok {
|
||||
if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
|
||||
host, _, err := net.SplitHostPort(cfg.addr)
|
||||
if err == nil {
|
||||
tlsConfig.ServerName = host
|
||||
}
|
||||
}
|
||||
|
||||
cfg.tls = tlsConfig
|
||||
} else {
|
||||
return fmt.Errorf("Invalid value / unknown config name: %s", value)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
// lazy init
|
||||
if cfg.params == nil {
|
||||
cfg.params = make(map[string]string)
|
||||
}
|
||||
|
||||
if cfg.params[param[0]], err = url.QueryUnescape(value); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tlsConfigLock.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
@ -313,119 +102,6 @@ func readBool(input string) (value bool, valid bool) {
|
||||
return
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* Authentication *
|
||||
******************************************************************************/
|
||||
|
||||
// Encrypt password using 4.1+ method
|
||||
func scramblePassword(scramble, password []byte) []byte {
|
||||
if len(password) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// stage1Hash = SHA1(password)
|
||||
crypt := sha1.New()
|
||||
crypt.Write(password)
|
||||
stage1 := crypt.Sum(nil)
|
||||
|
||||
// scrambleHash = SHA1(scramble + SHA1(stage1Hash))
|
||||
// inner Hash
|
||||
crypt.Reset()
|
||||
crypt.Write(stage1)
|
||||
hash := crypt.Sum(nil)
|
||||
|
||||
// outer Hash
|
||||
crypt.Reset()
|
||||
crypt.Write(scramble)
|
||||
crypt.Write(hash)
|
||||
scramble = crypt.Sum(nil)
|
||||
|
||||
// token = scrambleHash XOR stage1Hash
|
||||
for i := range scramble {
|
||||
scramble[i] ^= stage1[i]
|
||||
}
|
||||
return scramble
|
||||
}
|
||||
|
||||
// Encrypt password using pre 4.1 (old password) method
|
||||
// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
|
||||
type myRnd struct {
|
||||
seed1, seed2 uint32
|
||||
}
|
||||
|
||||
const myRndMaxVal = 0x3FFFFFFF
|
||||
|
||||
// Pseudo random number generator
|
||||
func newMyRnd(seed1, seed2 uint32) *myRnd {
|
||||
return &myRnd{
|
||||
seed1: seed1 % myRndMaxVal,
|
||||
seed2: seed2 % myRndMaxVal,
|
||||
}
|
||||
}
|
||||
|
||||
// Tested to be equivalent to MariaDB's floating point variant
|
||||
// http://play.golang.org/p/QHvhd4qved
|
||||
// http://play.golang.org/p/RG0q4ElWDx
|
||||
func (r *myRnd) NextByte() byte {
|
||||
r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
|
||||
r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
|
||||
|
||||
return byte(uint64(r.seed1) * 31 / myRndMaxVal)
|
||||
}
|
||||
|
||||
// Generate binary hash from byte string using insecure pre 4.1 method
|
||||
func pwHash(password []byte) (result [2]uint32) {
|
||||
var add uint32 = 7
|
||||
var tmp uint32
|
||||
|
||||
result[0] = 1345345333
|
||||
result[1] = 0x12345671
|
||||
|
||||
for _, c := range password {
|
||||
// skip spaces and tabs in password
|
||||
if c == ' ' || c == '\t' {
|
||||
continue
|
||||
}
|
||||
|
||||
tmp = uint32(c)
|
||||
result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
|
||||
result[1] += (result[1] << 8) ^ result[0]
|
||||
add += tmp
|
||||
}
|
||||
|
||||
// Remove sign bit (1<<31)-1)
|
||||
result[0] &= 0x7FFFFFFF
|
||||
result[1] &= 0x7FFFFFFF
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Encrypt password using insecure pre 4.1 method
|
||||
func scrambleOldPassword(scramble, password []byte) []byte {
|
||||
if len(password) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
scramble = scramble[:8]
|
||||
|
||||
hashPw := pwHash(password)
|
||||
hashSc := pwHash(scramble)
|
||||
|
||||
r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
|
||||
|
||||
var out [8]byte
|
||||
for i := range out {
|
||||
out[i] = r.NextByte() + 64
|
||||
}
|
||||
|
||||
mask := r.NextByte()
|
||||
for i := range out {
|
||||
out[i] ^= mask
|
||||
}
|
||||
|
||||
return out[:]
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* Time related utils *
|
||||
******************************************************************************/
|
||||
@ -493,7 +169,7 @@ func parseDateTime(str string, loc *time.Location) (t time.Time, err error) {
|
||||
}
|
||||
t, err = time.Parse(timeFormat[:len(str)], str)
|
||||
default:
|
||||
err = fmt.Errorf("Invalid Time-String: %s", str)
|
||||
err = fmt.Errorf("invalid time string: %s", str)
|
||||
return
|
||||
}
|
||||
|
||||
@ -542,7 +218,7 @@ func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Va
|
||||
loc,
|
||||
), nil
|
||||
}
|
||||
return nil, fmt.Errorf("Invalid DATETIME-packet length %d", num)
|
||||
return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
|
||||
}
|
||||
|
||||
// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
|
||||
@ -554,87 +230,104 @@ var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
|
||||
const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
|
||||
const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
|
||||
|
||||
func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) {
|
||||
func appendMicrosecs(dst, src []byte, decimals int) []byte {
|
||||
if decimals <= 0 {
|
||||
return dst
|
||||
}
|
||||
if len(src) == 0 {
|
||||
return append(dst, ".000000"[:decimals+1]...)
|
||||
}
|
||||
|
||||
microsecs := binary.LittleEndian.Uint32(src[:4])
|
||||
p1 := byte(microsecs / 10000)
|
||||
microsecs -= 10000 * uint32(p1)
|
||||
p2 := byte(microsecs / 100)
|
||||
microsecs -= 100 * uint32(p2)
|
||||
p3 := byte(microsecs)
|
||||
|
||||
switch decimals {
|
||||
default:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2], digits01[p2],
|
||||
digits10[p3], digits01[p3],
|
||||
)
|
||||
case 1:
|
||||
return append(dst, '.',
|
||||
digits10[p1],
|
||||
)
|
||||
case 2:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
)
|
||||
case 3:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2],
|
||||
)
|
||||
case 4:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2], digits01[p2],
|
||||
)
|
||||
case 5:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2], digits01[p2],
|
||||
digits10[p3],
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) {
|
||||
// length expects the deterministic length of the zero value,
|
||||
// negative time and 100+ hours are automatically added if needed
|
||||
if len(src) == 0 {
|
||||
if justTime {
|
||||
return zeroDateTime[11 : 11+length], nil
|
||||
}
|
||||
return zeroDateTime[:length], nil
|
||||
}
|
||||
var dst []byte // return value
|
||||
var pt, p1, p2, p3 byte // current digit pair
|
||||
var zOffs byte // offset of value in zeroDateTime
|
||||
if justTime {
|
||||
switch length {
|
||||
case
|
||||
8, // time (can be up to 10 when negative and 100+ hours)
|
||||
10, 11, 12, 13, 14, 15: // time with fractional seconds
|
||||
default:
|
||||
return nil, fmt.Errorf("illegal TIME length %d", length)
|
||||
var dst []byte // return value
|
||||
var p1, p2, p3 byte // current digit pair
|
||||
|
||||
switch length {
|
||||
case 10, 19, 21, 22, 23, 24, 25, 26:
|
||||
default:
|
||||
t := "DATE"
|
||||
if length > 10 {
|
||||
t += "TIME"
|
||||
}
|
||||
switch len(src) {
|
||||
case 8, 12:
|
||||
default:
|
||||
return nil, fmt.Errorf("Invalid TIME-packet length %d", len(src))
|
||||
}
|
||||
// +2 to enable negative time and 100+ hours
|
||||
dst = make([]byte, 0, length+2)
|
||||
if src[0] == 1 {
|
||||
dst = append(dst, '-')
|
||||
}
|
||||
if src[1] != 0 {
|
||||
hour := uint16(src[1])*24 + uint16(src[5])
|
||||
pt = byte(hour / 100)
|
||||
p1 = byte(hour - 100*uint16(pt))
|
||||
dst = append(dst, digits01[pt])
|
||||
} else {
|
||||
p1 = src[5]
|
||||
}
|
||||
zOffs = 11
|
||||
src = src[6:]
|
||||
} else {
|
||||
switch length {
|
||||
case 10, 19, 21, 22, 23, 24, 25, 26:
|
||||
default:
|
||||
t := "DATE"
|
||||
if length > 10 {
|
||||
t += "TIME"
|
||||
}
|
||||
return nil, fmt.Errorf("illegal %s length %d", t, length)
|
||||
}
|
||||
switch len(src) {
|
||||
case 4, 7, 11:
|
||||
default:
|
||||
t := "DATE"
|
||||
if length > 10 {
|
||||
t += "TIME"
|
||||
}
|
||||
return nil, fmt.Errorf("illegal %s-packet length %d", t, len(src))
|
||||
}
|
||||
dst = make([]byte, 0, length)
|
||||
// start with the date
|
||||
year := binary.LittleEndian.Uint16(src[:2])
|
||||
pt = byte(year / 100)
|
||||
p1 = byte(year - 100*uint16(pt))
|
||||
p2, p3 = src[2], src[3]
|
||||
dst = append(dst,
|
||||
digits10[pt], digits01[pt],
|
||||
digits10[p1], digits01[p1], '-',
|
||||
digits10[p2], digits01[p2], '-',
|
||||
digits10[p3], digits01[p3],
|
||||
)
|
||||
if length == 10 {
|
||||
return dst, nil
|
||||
}
|
||||
if len(src) == 4 {
|
||||
return append(dst, zeroDateTime[10:length]...), nil
|
||||
}
|
||||
dst = append(dst, ' ')
|
||||
p1 = src[4] // hour
|
||||
src = src[5:]
|
||||
return nil, fmt.Errorf("illegal %s length %d", t, length)
|
||||
}
|
||||
switch len(src) {
|
||||
case 4, 7, 11:
|
||||
default:
|
||||
t := "DATE"
|
||||
if length > 10 {
|
||||
t += "TIME"
|
||||
}
|
||||
return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
|
||||
}
|
||||
dst = make([]byte, 0, length)
|
||||
// start with the date
|
||||
year := binary.LittleEndian.Uint16(src[:2])
|
||||
pt := year / 100
|
||||
p1 = byte(year - 100*uint16(pt))
|
||||
p2, p3 = src[2], src[3]
|
||||
dst = append(dst,
|
||||
digits10[pt], digits01[pt],
|
||||
digits10[p1], digits01[p1], '-',
|
||||
digits10[p2], digits01[p2], '-',
|
||||
digits10[p3], digits01[p3],
|
||||
)
|
||||
if length == 10 {
|
||||
return dst, nil
|
||||
}
|
||||
if len(src) == 4 {
|
||||
return append(dst, zeroDateTime[10:length]...), nil
|
||||
}
|
||||
dst = append(dst, ' ')
|
||||
p1 = src[4] // hour
|
||||
src = src[5:]
|
||||
|
||||
// p1 is 2-digit hour, src is after hour
|
||||
p2, p3 = src[0], src[1]
|
||||
dst = append(dst,
|
||||
@ -642,51 +335,49 @@ func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value
|
||||
digits10[p2], digits01[p2], ':',
|
||||
digits10[p3], digits01[p3],
|
||||
)
|
||||
if length <= byte(len(dst)) {
|
||||
return dst, nil
|
||||
}
|
||||
src = src[2:]
|
||||
return appendMicrosecs(dst, src[2:], int(length)-20), nil
|
||||
}
|
||||
|
||||
func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
|
||||
// length expects the deterministic length of the zero value,
|
||||
// negative time and 100+ hours are automatically added if needed
|
||||
if len(src) == 0 {
|
||||
return append(dst, zeroDateTime[19:zOffs+length]...), nil
|
||||
return zeroDateTime[11 : 11+length], nil
|
||||
}
|
||||
microsecs := binary.LittleEndian.Uint32(src[:4])
|
||||
p1 = byte(microsecs / 10000)
|
||||
microsecs -= 10000 * uint32(p1)
|
||||
p2 = byte(microsecs / 100)
|
||||
microsecs -= 100 * uint32(p2)
|
||||
p3 = byte(microsecs)
|
||||
switch decimals := zOffs + length - 20; decimals {
|
||||
var dst []byte // return value
|
||||
|
||||
switch length {
|
||||
case
|
||||
8, // time (can be up to 10 when negative and 100+ hours)
|
||||
10, 11, 12, 13, 14, 15: // time with fractional seconds
|
||||
default:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2], digits01[p2],
|
||||
digits10[p3], digits01[p3],
|
||||
), nil
|
||||
case 1:
|
||||
return append(dst, '.',
|
||||
digits10[p1],
|
||||
), nil
|
||||
case 2:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
), nil
|
||||
case 3:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2],
|
||||
), nil
|
||||
case 4:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2], digits01[p2],
|
||||
), nil
|
||||
case 5:
|
||||
return append(dst, '.',
|
||||
digits10[p1], digits01[p1],
|
||||
digits10[p2], digits01[p2],
|
||||
digits10[p3],
|
||||
), nil
|
||||
return nil, fmt.Errorf("illegal TIME length %d", length)
|
||||
}
|
||||
switch len(src) {
|
||||
case 8, 12:
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
|
||||
}
|
||||
// +2 to enable negative time and 100+ hours
|
||||
dst = make([]byte, 0, length+2)
|
||||
if src[0] == 1 {
|
||||
dst = append(dst, '-')
|
||||
}
|
||||
days := binary.LittleEndian.Uint32(src[1:5])
|
||||
hours := int64(days)*24 + int64(src[5])
|
||||
|
||||
if hours >= 100 {
|
||||
dst = strconv.AppendInt(dst, hours, 10)
|
||||
} else {
|
||||
dst = append(dst, digits10[hours], digits01[hours])
|
||||
}
|
||||
|
||||
min, sec := src[6], src[7]
|
||||
dst = append(dst, ':',
|
||||
digits10[min], digits01[min], ':',
|
||||
digits10[sec], digits01[sec],
|
||||
)
|
||||
return appendMicrosecs(dst, src[8:], int(length)-9), nil
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
@ -752,7 +443,7 @@ func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
|
||||
|
||||
// Check data length
|
||||
if len(b) >= n {
|
||||
return b[n-int(num) : n], false, n, nil
|
||||
return b[n-int(num) : n : n], false, n, nil
|
||||
}
|
||||
return nil, false, n, io.EOF
|
||||
}
|
||||
@ -781,8 +472,8 @@ func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
|
||||
if len(b) == 0 {
|
||||
return 0, true, 1
|
||||
}
|
||||
switch b[0] {
|
||||
|
||||
switch b[0] {
|
||||
// 251: NULL
|
||||
case 0xfb:
|
||||
return 0, true, 1
|
||||
@ -877,7 +568,7 @@ func escapeBytesBackslash(buf, v []byte) []byte {
|
||||
pos += 2
|
||||
default:
|
||||
buf[pos] = c
|
||||
pos += 1
|
||||
pos++
|
||||
}
|
||||
}
|
||||
|
||||
@ -922,7 +613,7 @@ func escapeStringBackslash(buf []byte, v string) []byte {
|
||||
pos += 2
|
||||
default:
|
||||
buf[pos] = c
|
||||
pos += 1
|
||||
pos++
|
||||
}
|
||||
}
|
||||
|
||||
@ -971,3 +662,94 @@ func escapeStringQuotes(buf []byte, v string) []byte {
|
||||
|
||||
return buf[:pos]
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* Sync utils *
|
||||
******************************************************************************/
|
||||
|
||||
// noCopy may be embedded into structs which must not be copied
|
||||
// after the first use.
|
||||
//
|
||||
// See https://github.com/golang/go/issues/8005#issuecomment-190753527
|
||||
// for details.
|
||||
type noCopy struct{}
|
||||
|
||||
// Lock is a no-op used by -copylocks checker from `go vet`.
|
||||
func (*noCopy) Lock() {}
|
||||
|
||||
// atomicBool is a wrapper around uint32 for usage as a boolean value with
|
||||
// atomic access.
|
||||
type atomicBool struct {
|
||||
_noCopy noCopy
|
||||
value uint32
|
||||
}
|
||||
|
||||
// IsSet returns whether the current boolean value is true
|
||||
func (ab *atomicBool) IsSet() bool {
|
||||
return atomic.LoadUint32(&ab.value) > 0
|
||||
}
|
||||
|
||||
// Set sets the value of the bool regardless of the previous value
|
||||
func (ab *atomicBool) Set(value bool) {
|
||||
if value {
|
||||
atomic.StoreUint32(&ab.value, 1)
|
||||
} else {
|
||||
atomic.StoreUint32(&ab.value, 0)
|
||||
}
|
||||
}
|
||||
|
||||
// TrySet sets the value of the bool and returns whether the value changed
|
||||
func (ab *atomicBool) TrySet(value bool) bool {
|
||||
if value {
|
||||
return atomic.SwapUint32(&ab.value, 1) == 0
|
||||
}
|
||||
return atomic.SwapUint32(&ab.value, 0) > 0
|
||||
}
|
||||
|
||||
// atomicError is a wrapper for atomically accessed error values
|
||||
type atomicError struct {
|
||||
_noCopy noCopy
|
||||
value atomic.Value
|
||||
}
|
||||
|
||||
// Set sets the error value regardless of the previous value.
|
||||
// The value must not be nil
|
||||
func (ae *atomicError) Set(value error) {
|
||||
ae.value.Store(value)
|
||||
}
|
||||
|
||||
// Value returns the current error value
|
||||
func (ae *atomicError) Value() error {
|
||||
if v := ae.value.Load(); v != nil {
|
||||
// this will panic if the value doesn't implement the error interface
|
||||
return v.(error)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
|
||||
dargs := make([]driver.Value, len(named))
|
||||
for n, param := range named {
|
||||
if len(param.Name) > 0 {
|
||||
// TODO: support the use of Named Parameters #561
|
||||
return nil, errors.New("mysql: driver does not support the use of Named Parameters")
|
||||
}
|
||||
dargs[n] = param.Value
|
||||
}
|
||||
return dargs, nil
|
||||
}
|
||||
|
||||
func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
|
||||
switch sql.IsolationLevel(level) {
|
||||
case sql.LevelRepeatableRead:
|
||||
return "REPEATABLE READ", nil
|
||||
case sql.LevelReadCommitted:
|
||||
return "READ COMMITTED", nil
|
||||
case sql.LevelReadUncommitted:
|
||||
return "READ UNCOMMITTED", nil
|
||||
case sql.LevelSerializable:
|
||||
return "SERIALIZABLE", nil
|
||||
default:
|
||||
return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
|
||||
}
|
||||
}
|
||||
|
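Among the utils.go additions above are the noCopy/atomicBool/atomicError sync helpers that the rest of the driver (for example the Commit/Rollback guards earlier in this diff) relies on. A self-contained sketch of the same pattern; the type and variable names here are illustrative, not the driver's:

```
package main

import (
	"fmt"
	"sync/atomic"
)

// flag mirrors the atomicBool idea: a uint32 accessed only atomically.
type flag struct{ v uint32 }

func (f *flag) IsSet() bool { return atomic.LoadUint32(&f.v) > 0 }

func (f *flag) Set(val bool) {
	if val {
		atomic.StoreUint32(&f.v, 1)
	} else {
		atomic.StoreUint32(&f.v, 0)
	}
}

// TrySet reports whether the stored value actually changed.
func (f *flag) TrySet(val bool) bool {
	if val {
		return atomic.SwapUint32(&f.v, 1) == 0
	}
	return atomic.SwapUint32(&f.v, 0) > 0
}

func main() {
	var closed flag
	fmt.Println(closed.TrySet(true)) // true: value went from 0 to 1
	fmt.Println(closed.TrySet(true)) // false: already 1
	fmt.Println(closed.IsSet())      // true
}
```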
334
vendor/github.com/go-sql-driver/mysql/utils_test.go
generated
vendored
Normal file
@ -0,0 +1,334 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestScanNullTime(t *testing.T) {
|
||||
var scanTests = []struct {
|
||||
in interface{}
|
||||
error bool
|
||||
valid bool
|
||||
time time.Time
|
||||
}{
|
||||
{tDate, false, true, tDate},
|
||||
{sDate, false, true, tDate},
|
||||
{[]byte(sDate), false, true, tDate},
|
||||
{tDateTime, false, true, tDateTime},
|
||||
{sDateTime, false, true, tDateTime},
|
||||
{[]byte(sDateTime), false, true, tDateTime},
|
||||
{tDate0, false, true, tDate0},
|
||||
{sDate0, false, true, tDate0},
|
||||
{[]byte(sDate0), false, true, tDate0},
|
||||
{sDateTime0, false, true, tDate0},
|
||||
{[]byte(sDateTime0), false, true, tDate0},
|
||||
{"", true, false, tDate0},
|
||||
{"1234", true, false, tDate0},
|
||||
{0, true, false, tDate0},
|
||||
}
|
||||
|
||||
var nt = NullTime{}
|
||||
var err error
|
||||
|
||||
for _, tst := range scanTests {
|
||||
err = nt.Scan(tst.in)
|
||||
if (err != nil) != tst.error {
|
||||
t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil))
|
||||
}
|
||||
if nt.Valid != tst.valid {
|
||||
t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid)
|
||||
}
|
||||
if nt.Time != tst.time {
|
||||
t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLengthEncodedInteger(t *testing.T) {
|
||||
var integerTests = []struct {
|
||||
num uint64
|
||||
encoded []byte
|
||||
}{
|
||||
{0x0000000000000000, []byte{0x00}},
|
||||
{0x0000000000000012, []byte{0x12}},
|
||||
{0x00000000000000fa, []byte{0xfa}},
|
||||
{0x0000000000000100, []byte{0xfc, 0x00, 0x01}},
|
||||
{0x0000000000001234, []byte{0xfc, 0x34, 0x12}},
|
||||
{0x000000000000ffff, []byte{0xfc, 0xff, 0xff}},
|
||||
{0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}},
|
||||
{0x0000000000123456, []byte{0xfd, 0x56, 0x34, 0x12}},
|
||||
{0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}},
|
||||
{0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}},
|
||||
{0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}},
|
||||
{0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
|
||||
}
|
||||
|
||||
for _, tst := range integerTests {
|
||||
num, isNull, numLen := readLengthEncodedInteger(tst.encoded)
|
||||
if isNull {
|
||||
t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num)
|
||||
}
|
||||
if num != tst.num {
|
||||
t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num)
|
||||
}
|
||||
if numLen != len(tst.encoded) {
|
||||
t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen)
|
||||
}
|
||||
encoded := appendLengthEncodedInteger(nil, num)
|
||||
if !bytes.Equal(encoded, tst.encoded) {
|
||||
t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatBinaryDateTime(t *testing.T) {
|
||||
rawDate := [11]byte{}
|
||||
binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years
|
||||
rawDate[2] = 12 // months
|
||||
rawDate[3] = 30 // days
|
||||
rawDate[4] = 15 // hours
|
||||
rawDate[5] = 46 // minutes
|
||||
rawDate[6] = 23 // seconds
|
||||
binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds
|
||||
expect := func(expected string, inlen, outlen uint8) {
|
||||
actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen)
|
||||
bytes, ok := actual.([]byte)
|
||||
if !ok {
|
||||
t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
|
||||
}
|
||||
if string(bytes) != expected {
|
||||
t.Errorf(
|
||||
"expected %q, got %q for length in %d, out %d",
|
||||
expected, actual, inlen, outlen,
|
||||
)
|
||||
}
|
||||
}
|
||||
expect("0000-00-00", 0, 10)
|
||||
expect("0000-00-00 00:00:00", 0, 19)
|
||||
expect("1978-12-30", 4, 10)
|
||||
expect("1978-12-30 15:46:23", 7, 19)
|
||||
expect("1978-12-30 15:46:23.987654", 11, 26)
|
||||
}
|
||||
|
||||
func TestFormatBinaryTime(t *testing.T) {
|
||||
expect := func(expected string, src []byte, outlen uint8) {
|
||||
actual, _ := formatBinaryTime(src, outlen)
|
||||
bytes, ok := actual.([]byte)
|
||||
if !ok {
|
||||
t.Errorf("formatBinaryDateTime must return []byte, was %T", actual)
|
||||
}
|
||||
if string(bytes) != expected {
|
||||
t.Errorf(
|
||||
"expected %q, got %q for src=%q and outlen=%d",
|
||||
expected, actual, src, outlen)
|
||||
}
|
||||
}
|
||||
|
||||
// binary format:
|
||||
// sign (0: positive, 1: negative), days(4), hours, minutes, seconds, micro(4)
|
||||
|
||||
// Zeros
|
||||
expect("00:00:00", []byte{}, 8)
|
||||
expect("00:00:00.0", []byte{}, 10)
|
||||
expect("00:00:00.000000", []byte{}, 15)
|
||||
|
||||
// Without micro(4)
|
||||
expect("12:34:56", []byte{0, 0, 0, 0, 0, 12, 34, 56}, 8)
|
||||
expect("-12:34:56", []byte{1, 0, 0, 0, 0, 12, 34, 56}, 8)
|
||||
expect("12:34:56.00", []byte{0, 0, 0, 0, 0, 12, 34, 56}, 11)
|
||||
expect("24:34:56", []byte{0, 1, 0, 0, 0, 0, 34, 56}, 8)
|
||||
expect("-99:34:56", []byte{1, 4, 0, 0, 0, 3, 34, 56}, 8)
|
||||
expect("103079215103:34:56", []byte{0, 255, 255, 255, 255, 23, 34, 56}, 8)
|
||||
|
||||
// With micro(4)
|
||||
expect("12:34:56.00", []byte{0, 0, 0, 0, 0, 12, 34, 56, 99, 0, 0, 0}, 11)
|
||||
expect("12:34:56.000099", []byte{0, 0, 0, 0, 0, 12, 34, 56, 99, 0, 0, 0}, 15)
|
||||
}
|
||||
|
||||
func TestEscapeBackslash(t *testing.T) {
|
||||
expect := func(expected, value string) {
|
||||
actual := string(escapeBytesBackslash([]byte{}, []byte(value)))
|
||||
if actual != expected {
|
||||
t.Errorf(
|
||||
"expected %s, got %s",
|
||||
expected, actual,
|
||||
)
|
||||
}
|
||||
|
||||
actual = string(escapeStringBackslash([]byte{}, value))
|
||||
if actual != expected {
|
||||
t.Errorf(
|
||||
"expected %s, got %s",
|
||||
expected, actual,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
expect("foo\\0bar", "foo\x00bar")
|
||||
expect("foo\\nbar", "foo\nbar")
|
||||
expect("foo\\rbar", "foo\rbar")
|
||||
expect("foo\\Zbar", "foo\x1abar")
|
||||
expect("foo\\\"bar", "foo\"bar")
|
||||
expect("foo\\\\bar", "foo\\bar")
|
||||
expect("foo\\'bar", "foo'bar")
|
||||
}
|
||||
|
||||
func TestEscapeQuotes(t *testing.T) {
|
||||
expect := func(expected, value string) {
|
||||
actual := string(escapeBytesQuotes([]byte{}, []byte(value)))
|
||||
if actual != expected {
|
||||
t.Errorf(
|
||||
"expected %s, got %s",
|
||||
expected, actual,
|
||||
)
|
||||
}
|
||||
|
||||
actual = string(escapeStringQuotes([]byte{}, value))
|
||||
if actual != expected {
|
||||
t.Errorf(
|
||||
"expected %s, got %s",
|
||||
expected, actual,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
expect("foo\x00bar", "foo\x00bar") // not affected
|
||||
expect("foo\nbar", "foo\nbar") // not affected
|
||||
expect("foo\rbar", "foo\rbar") // not affected
|
||||
expect("foo\x1abar", "foo\x1abar") // not affected
|
||||
expect("foo''bar", "foo'bar") // affected
|
||||
expect("foo\"bar", "foo\"bar") // not affected
|
||||
}
|
||||
|
||||
func TestAtomicBool(t *testing.T) {
|
||||
var ab atomicBool
|
||||
if ab.IsSet() {
|
||||
t.Fatal("Expected value to be false")
|
||||
}
|
||||
|
||||
ab.Set(true)
|
||||
if ab.value != 1 {
|
||||
t.Fatal("Set(true) did not set value to 1")
|
||||
}
|
||||
if !ab.IsSet() {
|
||||
t.Fatal("Expected value to be true")
|
||||
}
|
||||
|
||||
ab.Set(true)
|
||||
if !ab.IsSet() {
|
||||
t.Fatal("Expected value to be true")
|
||||
}
|
||||
|
||||
ab.Set(false)
|
||||
if ab.value != 0 {
|
||||
t.Fatal("Set(false) did not set value to 0")
|
||||
}
|
||||
if ab.IsSet() {
|
||||
t.Fatal("Expected value to be false")
|
||||
}
|
||||
|
||||
ab.Set(false)
|
||||
if ab.IsSet() {
|
||||
t.Fatal("Expected value to be false")
|
||||
}
|
||||
if ab.TrySet(false) {
|
||||
t.Fatal("Expected TrySet(false) to fail")
|
||||
}
|
||||
if !ab.TrySet(true) {
|
||||
t.Fatal("Expected TrySet(true) to succeed")
|
||||
}
|
||||
if !ab.IsSet() {
|
||||
t.Fatal("Expected value to be true")
|
||||
}
|
||||
|
||||
ab.Set(true)
|
||||
if !ab.IsSet() {
|
||||
t.Fatal("Expected value to be true")
|
||||
}
|
||||
if ab.TrySet(true) {
|
||||
t.Fatal("Expected TrySet(true) to fail")
|
||||
}
|
||||
if !ab.TrySet(false) {
|
||||
t.Fatal("Expected TrySet(false) to succeed")
|
||||
}
|
||||
if ab.IsSet() {
|
||||
t.Fatal("Expected value to be false")
|
||||
}
|
||||
|
||||
ab._noCopy.Lock() // we've "tested" it ¯\_(ツ)_/¯
|
||||
}
|
||||
|
||||
func TestAtomicError(t *testing.T) {
|
||||
var ae atomicError
|
||||
if ae.Value() != nil {
|
||||
t.Fatal("Expected value to be nil")
|
||||
}
|
||||
|
||||
ae.Set(ErrMalformPkt)
|
||||
if v := ae.Value(); v != ErrMalformPkt {
|
||||
if v == nil {
|
||||
t.Fatal("Value is still nil")
|
||||
}
|
||||
t.Fatal("Error did not match")
|
||||
}
|
||||
ae.Set(ErrPktSync)
|
||||
if ae.Value() == ErrMalformPkt {
|
||||
t.Fatal("Error still matches old error")
|
||||
}
|
||||
if v := ae.Value(); v != ErrPktSync {
|
||||
t.Fatal("Error did not match")
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsolationLevelMapping(t *testing.T) {
|
||||
data := []struct {
|
||||
level driver.IsolationLevel
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
level: driver.IsolationLevel(sql.LevelReadCommitted),
|
||||
expected: "READ COMMITTED",
|
||||
},
|
||||
{
|
||||
level: driver.IsolationLevel(sql.LevelRepeatableRead),
|
||||
expected: "REPEATABLE READ",
|
||||
},
|
||||
{
|
||||
level: driver.IsolationLevel(sql.LevelReadUncommitted),
|
||||
expected: "READ UNCOMMITTED",
|
||||
},
|
||||
{
|
||||
level: driver.IsolationLevel(sql.LevelSerializable),
|
||||
expected: "SERIALIZABLE",
|
||||
},
|
||||
}
|
||||
|
||||
for i, td := range data {
|
||||
if actual, err := mapIsolationLevel(td.level); actual != td.expected || err != nil {
|
||||
t.Fatal(i, td.expected, actual, err)
|
||||
}
|
||||
}
|
||||
|
||||
// check unsupported mapping
|
||||
expectedErr := "mysql: unsupported isolation level: 7"
|
||||
actual, err := mapIsolationLevel(driver.IsolationLevel(sql.LevelLinearizable))
|
||||
if actual != "" || err == nil {
|
||||
t.Fatal("Expected error on unsupported isolation level")
|
||||
}
|
||||
if err.Error() != expectedErr {
|
||||
t.Fatalf("Expected error to be %q, got %q", expectedErr, err)
|
||||
}
|
||||
}
|
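The utils tests above also cover mapIsolationLevel, which turns database/sql isolation levels into the corresponding SQL keywords. A brief sketch of where that mapping comes into play from the application side; the DSN is a placeholder assumption:

```
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Placeholder DSN.
	db, err := sql.Open("mysql", "root:@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// sql.LevelReadCommitted is mapped to the string "READ COMMITTED",
	// which the driver applies when starting the transaction.
	tx, err := db.BeginTx(context.Background(), &sql.TxOptions{
		Isolation: sql.LevelReadCommitted,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback()

	// ... run statements under READ COMMITTED ...
}
```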
26
vendor/github.com/siddontang/go-mysql/.travis.yml
generated
vendored
@ -1,32 +1,34 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.6
|
||||
- 1.7
|
||||
- "1.9"
|
||||
- "1.10"
|
||||
|
||||
dist: trusty
|
||||
sudo: required
|
||||
addons:
|
||||
apt:
|
||||
sources:
|
||||
- mysql-5.7-trusty
|
||||
packages:
|
||||
- mysql-server-5.6
|
||||
- mysql-client-core-5.6
|
||||
- mysql-client-5.6
|
||||
- mysql-server
|
||||
- mysql-client
|
||||
|
||||
before_install:
|
||||
- sudo mysql -e "use mysql; update user set authentication_string=PASSWORD('') where User='root'; update user set plugin='mysql_native_password';FLUSH PRIVILEGES;"
|
||||
- sudo mysql_upgrade
|
||||
|
||||
before_script:
|
||||
# stop mysql and use row-based format binlog
|
||||
- "sudo /etc/init.d/mysql stop || true"
|
||||
- "sudo service mysql stop || true"
|
||||
- "echo '[mysqld]' | sudo tee /etc/mysql/conf.d/replication.cnf"
|
||||
- "echo 'server-id=1' | sudo tee -a /etc/mysql/conf.d/replication.cnf"
|
||||
- "echo 'log-bin=mysql' | sudo tee -a /etc/mysql/conf.d/replication.cnf"
|
||||
- "echo 'log-bin=mysql' | sudo tee -a /etc/mysql/conf.d/replication.cnf"
|
||||
- "echo 'binlog-format = row' | sudo tee -a /etc/mysql/conf.d/replication.cnf"
|
||||
|
||||
# Start mysql (avoid errors to have logs)
|
||||
- "sudo /etc/init.d/mysql start || true"
|
||||
- "sudo service mysql start || true"
|
||||
- "sudo tail -1000 /var/log/syslog"
|
||||
|
||||
- mysql -e "CREATE DATABASE IF NOT EXISTS test;" -uroot
|
||||
|
||||
|
||||
script:
|
||||
- make test
|
||||
- make test
|
||||
|
78
vendor/github.com/siddontang/go-mysql/Gopkg.lock
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/BurntSushi/toml"
|
||||
packages = ["."]
|
||||
revision = "b26d9c308763d68093482582cea63d69be07a0f0"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-sql-driver/mysql"
|
||||
packages = ["."]
|
||||
revision = "99ff426eb706cffe92ff3d058e168b278cabf7c7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/jmoiron/sqlx"
|
||||
packages = [
|
||||
".",
|
||||
"reflectx"
|
||||
]
|
||||
revision = "2aeb6a910c2b94f2d5eb53d9895d80e27264ec41"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/juju/errors"
|
||||
packages = ["."]
|
||||
revision = "c7d06af17c68cd34c835053720b21f6549d9b0ee"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/pingcap/check"
|
||||
packages = ["."]
|
||||
revision = "1c287c953996ab3a0bf535dba9d53d809d3dc0b6"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/satori/go.uuid"
|
||||
packages = ["."]
|
||||
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/shopspring/decimal"
|
||||
packages = ["."]
|
||||
revision = "cd690d0c9e2447b1ef2a129a6b7b49077da89b8e"
|
||||
version = "1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/siddontang/go"
|
||||
packages = [
|
||||
"hack",
|
||||
"sync2"
|
||||
]
|
||||
revision = "2b7082d296ba89ae7ead0f977816bddefb65df9d"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/siddontang/go-log"
|
||||
packages = [
|
||||
"log",
|
||||
"loggers"
|
||||
]
|
||||
revision = "a4d157e46fa3e08b7e7ff329af341fa3ff86c02c"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/appengine"
|
||||
packages = ["cloudsql"]
|
||||
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
|
||||
version = "v1.1.0"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "a1f9939938a58551bbb3f19411c9d1386995d36296de6f6fb5d858f5923db85e"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
56
vendor/github.com/siddontang/go-mysql/Gopkg.toml
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
#
|
||||
# [prune]
|
||||
# non-go = false
|
||||
# go-tests = true
|
||||
# unused-packages = true
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/BurntSushi/toml"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/go-sql-driver/mysql"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/juju/errors"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/satori/go.uuid"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/shopspring/decimal"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/siddontang/go"
|
||||
|
||||
[prune]
|
||||
go-tests = true
|
||||
unused-packages = true
|
||||
non-go = true
|
||||
|
23
vendor/github.com/siddontang/go-mysql/Makefile
generated
vendored
@ -1,33 +1,14 @@
|
||||
all: build
|
||||
|
||||
build:
|
||||
rm -rf vendor && ln -s _vendor/vendor vendor
|
||||
go build -o bin/go-mysqlbinlog cmd/go-mysqlbinlog/main.go
|
||||
go build -o bin/go-mysqldump cmd/go-mysqldump/main.go
|
||||
go build -o bin/go-canal cmd/go-canal/main.go
|
||||
go build -o bin/go-binlogparser cmd/go-binlogparser/main.go
|
||||
rm -rf vendor
|
||||
|
||||
|
||||
test:
|
||||
rm -rf vendor && ln -s _vendor/vendor vendor
|
||||
go test --race -timeout 2m ./...
|
||||
rm -rf vendor
|
||||
|
||||
clean:
|
||||
go clean -i ./...
|
||||
@rm -rf ./bin
|
||||
|
||||
update_vendor:
|
||||
which glide >/dev/null || curl https://glide.sh/get | sh
|
||||
which glide-vc || go get -v -u github.com/sgotti/glide-vc
|
||||
rm -r vendor && mv _vendor/vendor vendor || true
|
||||
rm -rf _vendor
|
||||
ifdef PKG
|
||||
glide get --strip-vendor --skip-test ${PKG}
|
||||
else
|
||||
glide update --strip-vendor --skip-test
|
||||
endif
|
||||
@echo "removing test files"
|
||||
glide vc --only-code --no-tests
|
||||
mkdir -p _vendor
|
||||
mv vendor _vendor/vendor
|
||||
@rm -rf ./bin
|
90
vendor/github.com/siddontang/go-mysql/README.md
generated
vendored
@ -15,7 +15,7 @@ import (
|
||||
"github.com/siddontang/go-mysql/replication"
|
||||
"os"
|
||||
)
|
||||
// Create a binlog syncer with a unique server id, the server id must be different from other MySQL's.
|
||||
// Create a binlog syncer with a unique server id, the server id must be different from other MySQL's.
|
||||
// flavor is mysql or mariadb
|
||||
cfg := replication.BinlogSyncerConfig {
|
||||
ServerID: 100,
|
||||
@ -27,7 +27,7 @@ cfg := replication.BinlogSyncerConfig {
|
||||
}
|
||||
syncer := replication.NewBinlogSyncer(cfg)
|
||||
|
||||
// Start sync with sepcified binlog file and position
|
||||
// Start sync with specified binlog file and position
|
||||
streamer, _ := syncer.StartSync(mysql.Position{binlogFile, binlogPos})
|
||||
|
||||
// or you can start a gtid replication like
|
||||
@ -44,7 +44,7 @@ for {
|
||||
// or we can use a timeout context
|
||||
for {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
e, _ := s.GetEvent(ctx)
|
||||
ev, err := s.GetEvent(ctx)
|
||||
cancel()
|
||||
|
||||
if err == context.DeadlineExceeded {
|
||||
@ -85,13 +85,13 @@ Schema: test
|
||||
Query: DROP TABLE IF EXISTS `test_replication` /* generated by server */
|
||||
```
|
||||
|
||||
## Canal
|
||||
## Canal
|
||||
|
||||
Canal is a package that can sync your MySQL into everywhere, like Redis, Elasticsearch.
|
||||
Canal is a package that can sync your MySQL into everywhere, like Redis, Elasticsearch.
|
||||
|
||||
First, canal will dump your MySQL data then sync changed data using binlog incrementally.
|
||||
First, canal will dump your MySQL data then sync changed data using binlog incrementally.
|
||||
|
||||
You must use ROW format for binlog, full binlog row image is preferred, because we may meet some errors when primary key changed in update for minimal or noblob row image.
|
||||
You must use ROW format for binlog, full binlog row image is preferred, because we may meet some errors when primary key changed in update for minimal or noblob row image.
|
||||
|
||||
A simple example:
|
||||
|
||||
@ -105,30 +105,31 @@ cfg.Dump.Tables = []string{"canal_test"}
|
||||
|
||||
c, err := NewCanal(cfg)
|
||||
|
||||
type myRowsEventHandler struct {
|
||||
type MyEventHandler struct {
|
||||
DummyEventHandler
|
||||
}
|
||||
|
||||
func (h *myRowsEventHandler) Do(e *RowsEvent) error {
|
||||
func (h *MyEventHandler) OnRow(e *RowsEvent) error {
|
||||
log.Infof("%s %v\n", e.Action, e.Rows)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *myRowsEventHandler) String() string {
|
||||
return "myRowsEventHandler"
|
||||
func (h *MyEventHandler) String() string {
|
||||
return "MyEventHandler"
|
||||
}
|
||||
|
||||
// Register a handler to handle RowsEvent
|
||||
c.RegRowsEventHandler(&MyRowsEventHandler{})
|
||||
c.SetEventHandler(&MyEventHandler{})
|
||||
|
||||
// Start canal
|
||||
c.Start()
|
||||
```
|
||||
|
||||
You can see [go-mysql-elasticsearch](https://github.com/siddontang/go-mysql-elasticsearch) for how to sync MySQL data into Elasticsearch.
|
||||
You can see [go-mysql-elasticsearch](https://github.com/siddontang/go-mysql-elasticsearch) for how to sync MySQL data into Elasticsearch.
|
||||
|
||||
## Client
|
||||
|
||||
Client package supports a simple MySQL connection driver which you can use it to communicate with MySQL server.
|
||||
Client package supports a simple MySQL connection driver which you can use it to communicate with MySQL server.
|
||||
|
||||
### Example
|
||||
|
||||
@ -137,9 +138,16 @@ import (
|
||||
"github.com/siddontang/go-mysql/client"
|
||||
)
|
||||
|
||||
// Connect MySQL at 127.0.0.1:3306, with user root, an empty passowrd and database test
|
||||
// Connect MySQL at 127.0.0.1:3306, with user root, an empty password and database test
|
||||
conn, _ := client.Connect("127.0.0.1:3306", "root", "", "test")
|
||||
|
||||
// Or to use SSL/TLS connection if MySQL server supports TLS
|
||||
//conn, _ := client.Connect("127.0.0.1:3306", "root", "", "test", func(c *Conn) {c.UseSSL(true)})
|
||||
|
||||
// or to set your own client-side certificates for identity verification for security
|
||||
//tlsConfig := NewClientTLSConfig(caPem, certPem, keyPem, false, "your-server-name")
|
||||
//conn, _ := client.Connect("127.0.0.1:3306", "root", "", "test", func(c *Conn) {c.SetTLSConfig(tlsConfig)})
|
||||
|
||||
conn.Ping()
|
||||
|
||||
// Insert
|
||||
@ -153,13 +161,20 @@ r, _ := conn.Execute(`select id, name from table where id = 1`)
|
||||
|
||||
// Handle resultset
|
||||
v, _ := r.GetInt(0, 0)
|
||||
v, _ = r.GetIntByName(0, "id")
|
||||
v, _ = r.GetIntByName(0, "id")
|
||||
```
|
||||
|
||||
Tested MySQL versions for the client include:
|
||||
- 5.5.x
|
||||
- 5.6.x
|
||||
- 5.7.x
|
||||
- 8.0.x
|
||||
|
||||
## Server
|
||||
|
||||
Server package supplies a framework to implement a simple MySQL server which can handle the packets from the MySQL client.
|
||||
You can use it to build your own MySQL proxy.
|
||||
Server package supplies a framework to implement a simple MySQL server which can handle the packets from the MySQL client.
|
||||
You can use it to build your own MySQL proxy. The server connection is compatible with MySQL 5.5, 5.6, 5.7, and 8.0 versions,
|
||||
so that most MySQL clients should be able to connect to the Server without modifications.
|
||||
|
||||
### Example
|
||||
|
||||
@ -173,42 +188,53 @@ l, _ := net.Listen("tcp", "127.0.0.1:4000")
|
||||
|
||||
c, _ := l.Accept()
|
||||
|
||||
// Create a connection with user root and an empty passowrd
|
||||
// We only an empty handler to handle command too
|
||||
// Create a connection with user root and an empty password.
|
||||
// You can use your own handler to handle command here.
|
||||
conn, _ := server.NewConn(c, "root", "", server.EmptyHandler{})
|
||||
|
||||
for {
|
||||
conn.HandleCommand()
|
||||
}
|
||||
```
|
||||
```
|
||||
|
||||
Another shell
|
||||
|
||||
```
|
||||
mysql -h127.0.0.1 -P4000 -uroot -p
|
||||
//Becuase empty handler does nothing, so here the MySQL client can only connect the proxy server. :-)
|
||||
mysql -h127.0.0.1 -P4000 -uroot -p
|
||||
//Becuase empty handler does nothing, so here the MySQL client can only connect the proxy server. :-)
|
||||
```
|
||||
|
||||
> ```NewConn()``` will use default server configurations:
|
||||
> 1. automatically generate default server certificates and enable TLS/SSL support.
|
||||
> 2. support three mainstream authentication methods **'mysql_native_password'**, **'caching_sha2_password'**, and **'sha256_password'**
|
||||
> and use **'mysql_native_password'** as default.
|
||||
> 3. use an in-memory user credential provider to store user and password.
|
||||
>
|
||||
> To customize server configurations, use ```NewServer()``` and create connection via ```NewCustomizedConn()```.
|
||||
|
||||
|
||||
## Failover
|
||||
|
||||
Failover supports to promote a new master and let other slaves replicate from it automatically when the old master was down.
|
||||
|
||||
Failover supports MySQL >= 5.6.9 with GTID mode, if you use lower version, e.g, MySQL 5.0 - 5.5, please use [MHA](http://code.google.com/p/mysql-master-ha/) or [orchestrator](https://github.com/outbrain/orchestrator).
|
||||
|
||||
At the same time, Failover supports MariaDB >= 10.0.9 with GTID mode too.
|
||||
At the same time, Failover supports MariaDB >= 10.0.9 with GTID mode too.
|
||||
|
||||
Why only GTID? Supporting failover with no GTID mode is very hard, because slave can not find the proper binlog filename and position with the new master.
|
||||
Although there are many companies use MySQL 5.0 - 5.5, I think upgrade MySQL to 5.6 or higher is easy.
|
||||
Why only GTID? Supporting failover with no GTID mode is very hard, because slave can not find the proper binlog filename and position with the new master.
|
||||
Although there are many companies use MySQL 5.0 - 5.5, I think upgrade MySQL to 5.6 or higher is easy.
|
||||
|
||||
## Driver
|
||||
|
||||
Driver is the package that you can use go-mysql with go database/sql like other drivers. A simple example:
|
||||
|
||||
```
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
- "github.com/siddontang/go-mysql/driver"
|
||||
_ "github.com/siddontang/go-mysql/driver"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@ -221,9 +247,17 @@ func main() {
|
||||
|
||||
We pass all tests in https://github.com/bradfitz/go-sql-test using go-mysql driver. :-)
|
||||
|
||||
## Donate
|
||||
|
||||
If you like the project and want to buy me a cola, you can through:
|
||||
|
||||
|PayPal|微信|
|
||||
|------|---|
|
||||
|[![](https://www.paypalobjects.com/webstatic/paypalme/images/pp_logo_small.png)](https://paypal.me/siddontang)|[![](https://github.com/siddontang/blog/blob/master/donate/weixin.png)|
|
||||
|
||||
## Feedback
|
||||
|
||||
go-mysql is still in development, your feedback is very welcome.
|
||||
go-mysql is still in development, your feedback is very welcome.
|
||||
|
||||
|
||||
Gmail: siddontang@gmail.com
|
||||
|
@ -1,147 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package mysql

import (
	"io"
	"net"
	"time"
)

const defaultBufSize = 4096

// A buffer which is used for both reading and writing.
// This is possible since communication on each connection is synchronous.
// In other words, we can't write and read simultaneously on the same connection.
// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
// Also highly optimized for this particular use case.
type buffer struct {
	buf     []byte
	nc      net.Conn
	idx     int
	length  int
	timeout time.Duration
}

func newBuffer(nc net.Conn) buffer {
	var b [defaultBufSize]byte
	return buffer{
		buf: b[:],
		nc:  nc,
	}
}

// fill reads into the buffer until at least _need_ bytes are in it
func (b *buffer) fill(need int) error {
	n := b.length

	// move existing data to the beginning
	if n > 0 && b.idx > 0 {
		copy(b.buf[0:n], b.buf[b.idx:])
	}

	// grow buffer if necessary
	// TODO: let the buffer shrink again at some point
	//       Maybe keep the org buf slice and swap back?
	if need > len(b.buf) {
		// Round up to the next multiple of the default size
		newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
		copy(newBuf, b.buf)
		b.buf = newBuf
	}

	b.idx = 0

	for {
		if b.timeout > 0 {
			if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
				return err
			}
		}

		nn, err := b.nc.Read(b.buf[n:])
		n += nn

		switch err {
		case nil:
			if n < need {
				continue
			}
			b.length = n
			return nil

		case io.EOF:
			if n >= need {
				b.length = n
				return nil
			}
			return io.ErrUnexpectedEOF

		default:
			return err
		}
	}
}

// returns next N bytes from buffer.
// The returned slice is only guaranteed to be valid until the next read
func (b *buffer) readNext(need int) ([]byte, error) {
	if b.length < need {
		// refill
		if err := b.fill(need); err != nil {
			return nil, err
		}
	}

	offset := b.idx
	b.idx += need
	b.length -= need
	return b.buf[offset:b.idx], nil
}

// returns a buffer with the requested size.
// If possible, a slice from the existing buffer is returned.
// Otherwise a bigger buffer is made.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeBuffer(length int) []byte {
	if b.length > 0 {
		return nil
	}

	// test (cheap) general case first
	if length <= defaultBufSize || length <= cap(b.buf) {
		return b.buf[:length]
	}

	if length < maxPacketSize {
		b.buf = make([]byte, length)
		return b.buf
	}
	return make([]byte, length)
}

// shortcut which can be used if the requested buffer is guaranteed to be
// smaller than defaultBufSize
// Only one buffer (total) can be used at a time.
func (b *buffer) takeSmallBuffer(length int) []byte {
	if b.length == 0 {
		return b.buf[:length]
	}
	return nil
}

// takeCompleteBuffer returns the complete existing buffer.
// This can be used if the necessary buffer size is unknown.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeCompleteBuffer() []byte {
	if b.length == 0 {
		return b.buf
	}
	return nil
}
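For orientation only (this is not part of the diff above): a minimal, self-contained sketch of how a caller such as the driver's packet reader typically consumes `readNext` — first the fixed 4-byte MySQL packet header (3-byte little-endian payload length plus a sequence id), then exactly that many payload bytes. The `readPacket` helper and the stubbed `readNext` closure below are illustrative assumptions, not code from the driver.

```go
package main

import "fmt"

// readPacket shows the usual consumption pattern for a readNext-style reader:
// pull the 4-byte header, decode the 3-byte little-endian payload length and
// the sequence id, then pull exactly `length` payload bytes.
func readPacket(readNext func(need int) ([]byte, error)) (seq byte, payload []byte, err error) {
	header, err := readNext(4)
	if err != nil {
		return 0, nil, err
	}
	length := int(uint32(header[0]) | uint32(header[1])<<8 | uint32(header[2])<<16)
	seq = header[3]
	payload, err = readNext(length)
	return seq, payload, err
}

func main() {
	// Stand-in for the connection-backed buffer: serve bytes from a fixed slice.
	data := []byte{3, 0, 0, 0, 'a', 'b', 'c'} // header: 3 payload bytes, sequence 0
	pos := 0
	readNext := func(need int) ([]byte, error) {
		if pos+need > len(data) {
			return nil, fmt.Errorf("need %d bytes, only %d left", need, len(data)-pos)
		}
		b := data[pos : pos+need]
		pos += need
		return b, nil
	}

	seq, payload, err := readPacket(readNext)
	fmt.Printf("seq=%d payload=%q err=%v\n", seq, payload, err)
}
```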
@ -1,250 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package mysql

const defaultCollation = "utf8_general_ci"

// A list of available collations mapped to the internal ID.
// To update this map use the following MySQL query:
//     SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS
var collations = map[string]byte{
	"big5_chinese_ci": 1,
	"latin2_czech_cs": 2,
	"dec8_swedish_ci": 3,
	"cp850_general_ci": 4,
	"latin1_german1_ci": 5,
	"hp8_english_ci": 6,
	"koi8r_general_ci": 7,
	"latin1_swedish_ci": 8,
	"latin2_general_ci": 9,
	"swe7_swedish_ci": 10,
	"ascii_general_ci": 11,
	"ujis_japanese_ci": 12,
	"sjis_japanese_ci": 13,
	"cp1251_bulgarian_ci": 14,
	"latin1_danish_ci": 15,
	"hebrew_general_ci": 16,
	"tis620_thai_ci": 18,
	"euckr_korean_ci": 19,
	"latin7_estonian_cs": 20,
	"latin2_hungarian_ci": 21,
	"koi8u_general_ci": 22,
	"cp1251_ukrainian_ci": 23,
	"gb2312_chinese_ci": 24,
	"greek_general_ci": 25,
	"cp1250_general_ci": 26,
	"latin2_croatian_ci": 27,
	"gbk_chinese_ci": 28,
	"cp1257_lithuanian_ci": 29,
	"latin5_turkish_ci": 30,
	"latin1_german2_ci": 31,
	"armscii8_general_ci": 32,
	"utf8_general_ci": 33,
	"cp1250_czech_cs": 34,
	"ucs2_general_ci": 35,
	"cp866_general_ci": 36,
	"keybcs2_general_ci": 37,
	"macce_general_ci": 38,
	"macroman_general_ci": 39,
	"cp852_general_ci": 40,
	"latin7_general_ci": 41,
	"latin7_general_cs": 42,
	"macce_bin": 43,
	"cp1250_croatian_ci": 44,
	"utf8mb4_general_ci": 45,
	"utf8mb4_bin": 46,
	"latin1_bin": 47,
	"latin1_general_ci": 48,
	"latin1_general_cs": 49,
	"cp1251_bin": 50,
	"cp1251_general_ci": 51,
	"cp1251_general_cs": 52,
	"macroman_bin": 53,
	"utf16_general_ci": 54,
	"utf16_bin": 55,
	"utf16le_general_ci": 56,
	"cp1256_general_ci": 57,
	"cp1257_bin": 58,
	"cp1257_general_ci": 59,
	"utf32_general_ci": 60,
	"utf32_bin": 61,
	"utf16le_bin": 62,
	"binary": 63,
	"armscii8_bin": 64,
	"ascii_bin": 65,
	"cp1250_bin": 66,
	"cp1256_bin": 67,
	"cp866_bin": 68,
	"dec8_bin": 69,
	"greek_bin": 70,
	"hebrew_bin": 71,
	"hp8_bin": 72,
	"keybcs2_bin": 73,
	"koi8r_bin": 74,
	"koi8u_bin": 75,
	"latin2_bin": 77,
	"latin5_bin": 78,
	"latin7_bin": 79,
	"cp850_bin": 80,
	"cp852_bin": 81,
	"swe7_bin": 82,
	"utf8_bin": 83,
	"big5_bin": 84,
	"euckr_bin": 85,
	"gb2312_bin": 86,
	"gbk_bin": 87,
	"sjis_bin": 88,
	"tis620_bin": 89,
	"ucs2_bin": 90,
	"ujis_bin": 91,
	"geostd8_general_ci": 92,
	"geostd8_bin": 93,
	"latin1_spanish_ci": 94,
	"cp932_japanese_ci": 95,
	"cp932_bin": 96,
	"eucjpms_japanese_ci": 97,
	"eucjpms_bin": 98,
	"cp1250_polish_ci": 99,
	"utf16_unicode_ci": 101,
	"utf16_icelandic_ci": 102,
	"utf16_latvian_ci": 103,
	"utf16_romanian_ci": 104,
	"utf16_slovenian_ci": 105,
	"utf16_polish_ci": 106,
	"utf16_estonian_ci": 107,
	"utf16_spanish_ci": 108,
	"utf16_swedish_ci": 109,
	"utf16_turkish_ci": 110,
	"utf16_czech_ci": 111,
	"utf16_danish_ci": 112,
	"utf16_lithuanian_ci": 113,
	"utf16_slovak_ci": 114,
	"utf16_spanish2_ci": 115,
	"utf16_roman_ci": 116,
	"utf16_persian_ci": 117,
	"utf16_esperanto_ci": 118,
	"utf16_hungarian_ci": 119,
	"utf16_sinhala_ci": 120,
	"utf16_german2_ci": 121,
	"utf16_croatian_ci": 122,
	"utf16_unicode_520_ci": 123,
	"utf16_vietnamese_ci": 124,
	"ucs2_unicode_ci": 128,
	"ucs2_icelandic_ci": 129,
	"ucs2_latvian_ci": 130,
	"ucs2_romanian_ci": 131,
	"ucs2_slovenian_ci": 132,
	"ucs2_polish_ci": 133,
	"ucs2_estonian_ci": 134,
	"ucs2_spanish_ci": 135,
	"ucs2_swedish_ci": 136,
	"ucs2_turkish_ci": 137,
	"ucs2_czech_ci": 138,
	"ucs2_danish_ci": 139,
	"ucs2_lithuanian_ci": 140,
	"ucs2_slovak_ci": 141,
	"ucs2_spanish2_ci": 142,
	"ucs2_roman_ci": 143,
	"ucs2_persian_ci": 144,
	"ucs2_esperanto_ci": 145,
	"ucs2_hungarian_ci": 146,
	"ucs2_sinhala_ci": 147,
	"ucs2_german2_ci": 148,
	"ucs2_croatian_ci": 149,
	"ucs2_unicode_520_ci": 150,
	"ucs2_vietnamese_ci": 151,
	"ucs2_general_mysql500_ci": 159,
	"utf32_unicode_ci": 160,
	"utf32_icelandic_ci": 161,
	"utf32_latvian_ci": 162,
	"utf32_romanian_ci": 163,
	"utf32_slovenian_ci": 164,
	"utf32_polish_ci": 165,
	"utf32_estonian_ci": 166,
	"utf32_spanish_ci": 167,
	"utf32_swedish_ci": 168,
	"utf32_turkish_ci": 169,
	"utf32_czech_ci": 170,
	"utf32_danish_ci": 171,
	"utf32_lithuanian_ci": 172,
	"utf32_slovak_ci": 173,
	"utf32_spanish2_ci": 174,
	"utf32_roman_ci": 175,
	"utf32_persian_ci": 176,
	"utf32_esperanto_ci": 177,
	"utf32_hungarian_ci": 178,
	"utf32_sinhala_ci": 179,
	"utf32_german2_ci": 180,
	"utf32_croatian_ci": 181,
	"utf32_unicode_520_ci": 182,
	"utf32_vietnamese_ci": 183,
	"utf8_unicode_ci": 192,
	"utf8_icelandic_ci": 193,
	"utf8_latvian_ci": 194,
	"utf8_romanian_ci": 195,
	"utf8_slovenian_ci": 196,
	"utf8_polish_ci": 197,
	"utf8_estonian_ci": 198,
	"utf8_spanish_ci": 199,
	"utf8_swedish_ci": 200,
	"utf8_turkish_ci": 201,
	"utf8_czech_ci": 202,
	"utf8_danish_ci": 203,
	"utf8_lithuanian_ci": 204,
	"utf8_slovak_ci": 205,
	"utf8_spanish2_ci": 206,
	"utf8_roman_ci": 207,
	"utf8_persian_ci": 208,
	"utf8_esperanto_ci": 209,
	"utf8_hungarian_ci": 210,
	"utf8_sinhala_ci": 211,
	"utf8_german2_ci": 212,
	"utf8_croatian_ci": 213,
	"utf8_unicode_520_ci": 214,
	"utf8_vietnamese_ci": 215,
	"utf8_general_mysql500_ci": 223,
	"utf8mb4_unicode_ci": 224,
	"utf8mb4_icelandic_ci": 225,
	"utf8mb4_latvian_ci": 226,
	"utf8mb4_romanian_ci": 227,
	"utf8mb4_slovenian_ci": 228,
	"utf8mb4_polish_ci": 229,
	"utf8mb4_estonian_ci": 230,
	"utf8mb4_spanish_ci": 231,
	"utf8mb4_swedish_ci": 232,
	"utf8mb4_turkish_ci": 233,
	"utf8mb4_czech_ci": 234,
	"utf8mb4_danish_ci": 235,
	"utf8mb4_lithuanian_ci": 236,
	"utf8mb4_slovak_ci": 237,
	"utf8mb4_spanish2_ci": 238,
	"utf8mb4_roman_ci": 239,
	"utf8mb4_persian_ci": 240,
	"utf8mb4_esperanto_ci": 241,
	"utf8mb4_hungarian_ci": 242,
	"utf8mb4_sinhala_ci": 243,
	"utf8mb4_german2_ci": 244,
	"utf8mb4_croatian_ci": 245,
	"utf8mb4_unicode_520_ci": 246,
	"utf8mb4_vietnamese_ci": 247,
}

// A blacklist of collations which is unsafe to interpolate parameters.
// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes.
var unsafeCollations = map[string]bool{
	"big5_chinese_ci": true,
	"sjis_japanese_ci": true,
	"gbk_chinese_ci": true,
	"big5_bin": true,
	"gb2312_bin": true,
	"gbk_bin": true,
	"sjis_bin": true,
	"cp932_japanese_ci": true,
	"cp932_bin": true,
}
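For orientation only (again, not part of the diff): a minimal sketch of how these two tables are typically consumed — the configured collation name is resolved to its one-byte ID for the handshake packet, and client-side parameter interpolation is refused for collations whose trailing bytes may contain 0x5c (`\`). The maps below are trimmed copies of the full tables above; `collationID` and `interpolationSafe` are illustrative names, not driver API.

```go
package main

import "fmt"

// Trimmed copies of the tables above, just enough for the example.
var collations = map[string]byte{
	"utf8_general_ci":    33,
	"utf8mb4_general_ci": 45,
	"sjis_japanese_ci":   13,
}

var unsafeCollations = map[string]bool{
	"sjis_japanese_ci": true,
}

// collationID resolves a collation name to the one-byte ID sent in the handshake.
func collationID(name string) (byte, error) {
	id, ok := collations[name]
	if !ok {
		return 0, fmt.Errorf("unknown collation: %q", name)
	}
	return id, nil
}

// interpolationSafe reports whether client-side parameter interpolation may be
// used; it must be refused when the encoding can hide 0x5c in trailing bytes.
func interpolationSafe(name string) bool {
	return !unsafeCollations[name]
}

func main() {
	// The last name is deliberately unknown to show the error path.
	for _, name := range []string{"utf8mb4_general_ci", "sjis_japanese_ci", "latin9_bogus_ci"} {
		id, err := collationID(name)
		fmt.Printf("%-20s id=%-3d err=%v interpolation safe: %v\n",
			name, id, err, interpolationSafe(name))
	}
}
```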