Merge branch 'master' into limitation-encrypted-binlogs

Tim Vaillancourt 2020-08-19 22:46:40 +02:00 committed by GitHub
commit 81a9538d6f
334 changed files with 32879 additions and 6852 deletions


@ -11,7 +11,7 @@ Related issue: https://github.com/github/gh-ost/issues/0123456789
### Description
This PR [briefly explain what is does]
This PR [briefly explain what it does]
> In case this PR introduced Go code changes:

.github/workflows/ci.yml vendored Normal file

@ -0,0 +1,25 @@
name: CI
on: [pull_request]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Go 1.14
uses: actions/setup-go@v1
with:
go-version: 1.14
- name: Build
run: script/cibuild
- name: Upload gh-ost binary artifact
uses: actions/upload-artifact@v1
with:
name: gh-ost
path: bin/gh-ost

.github/workflows/replica-tests.yml vendored Normal file

@ -0,0 +1,19 @@
name: migration tests
on: [pull_request]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Go 1.14
uses: actions/setup-go@v1
with:
go-version: 1.14
- name: migration tests
run: script/cibuild-gh-ost-replica-tests

.gitignore vendored

@ -2,3 +2,4 @@
/bin/
/libexec/
/.vendor/
.idea/


@ -1,29 +0,0 @@
# http://docs.travis-ci.com/user/languages/go/
language: go
go: 1.9
os:
- linux
env:
- MYSQL_USER=root
- CURRENT_CI_ENV=travis
addons:
apt:
packages:
- git
- numactl
- libaio1
before_install:
- mysql -e 'CREATE DATABASE IF NOT EXISTS test;'
install: true
script:
- script/cibuild
notifications:
email: false

Dockerfile.packaging Normal file

@ -0,0 +1,22 @@
#
FROM golang:1.14.7
RUN apt-get update
RUN apt-get install -y ruby ruby-dev rubygems build-essential
RUN gem install --no-ri --no-rdoc fpm
ENV GOPATH=/tmp/go
RUN apt-get install -y curl
RUN apt-get install -y rsync
RUN apt-get install -y gcc
RUN apt-get install -y g++
RUN apt-get install -y bash
RUN apt-get install -y git
RUN apt-get install -y tar
RUN apt-get install -y rpm
RUN mkdir -p $GOPATH/src/github.com/github/gh-ost
WORKDIR $GOPATH/src/github.com/github/gh-ost
COPY . .
RUN bash build.sh

Dockerfile.test Normal file

@ -0,0 +1,11 @@
FROM golang:1.14.7
LABEL maintainer="github@github.com"
RUN apt-get update
RUN apt-get install -y lsb-release
RUN rm -rf /var/lib/apt/lists/*
COPY . /go/src/github.com/github/gh-ost
WORKDIR /go/src/github.com/github/gh-ost
CMD ["script/test"]


@ -94,7 +94,7 @@ Please see [Coding gh-ost](doc/coding-ghost.md) for a guide to getting started d
[Download latest release here](https://github.com/github/gh-ost/releases/latest)
`gh-ost` is a Go project; it is built with Go `1.8` (though `1.7` should work as well). To build on your own, use either:
`gh-ost` is a Go project; it is built with Go `1.14` and above. To build on your own, use either:
- [script/build](https://github.com/github/gh-ost/blob/master/script/build) - this is the same build script used by CI, and hence authoritative; the artifact is the `./bin/gh-ost` binary.
- [build.sh](https://github.com/github/gh-ost/blob/master/build.sh) for building `tar.gz` artifacts in `/tmp/gh-ost`
@ -109,3 +109,4 @@ Generally speaking, `master` branch is stable, but only [releases](https://githu
- [@shlomi-noach](https://github.com/shlomi-noach)
- [@jessbreckenridge](https://github.com/jessbreckenridge)
- [@gtowey](https://github.com/gtowey)
- [@timvaillancourt](https://github.com/timvaillancourt)


@ -1 +1 @@
1.0.44
1.0.49


@ -2,40 +2,70 @@
#
#
RELEASE_VERSION=$(cat RELEASE_VERSION)
RELEASE_VERSION=
buildpath=
function build {
osname=$1
osshort=$2
GOOS=$3
GOARCH=$4
if [[ $(go version | egrep "go1[.][012345678]") ]]; then
echo "go version is too low. Must use 1.9 or above"
exit 1
fi
echo "Building ${osname} binary"
export GOOS
export GOARCH
go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
if [ $? -ne 0 ]; then
echo "Build failed for ${osname}"
exit 1
fi
(cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${timestamp}.tar.gz $target)
function setuptree() {
b=$( mktemp -d $buildpath/gh-ostXXXXXX ) || return 1
mkdir -p $b/gh-ost
mkdir -p $b/gh-ost/usr/bin
echo $b
}
buildpath=/tmp/gh-ost
target=gh-ost
timestamp=$(date "+%Y%m%d%H%M%S")
ldflags="-X main.AppVersion=${RELEASE_VERSION}"
function build {
osname=$1
osshort=$2
GOOS=$3
GOARCH=$4
mkdir -p ${buildpath}
build macOS osx darwin amd64
build GNU/Linux linux linux amd64
if ! go version | egrep -q 'go(1\.1[456])' ; then
echo "go version must be 1.14 or above"
exit 1
fi
echo "Binaries found in:"
ls -1 $buildpath/gh-ost-binary*${timestamp}.tar.gz
echo "Building ${osname} binary"
export GOOS
export GOARCH
go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
if [ $? -ne 0 ]; then
echo "Build failed for ${osname}"
exit 1
fi
(cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${timestamp}.tar.gz $target)
if [ "$GOOS" == "linux" ] ; then
echo "Creating Distro full packages"
builddir=$(setuptree)
cp $buildpath/$target $builddir/gh-ost/usr/bin
cd $buildpath
fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm .
fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
fi
}
main() {
if [ -z "${RELEASE_VERSION}" ] ; then
RELEASE_VERSION=$(git describe --abbrev=0 --tags | tr -d 'v')
fi
if [ -z "${RELEASE_VERSION}" ] ; then
RELEASE_VERSION=$(cat RELEASE_VERSION)
fi
buildpath=/tmp/gh-ost-release
target=gh-ost
timestamp=$(date "+%Y%m%d%H%M%S")
ldflags="-X main.AppVersion=${RELEASE_VERSION}"
mkdir -p ${buildpath}
rm -rf ${buildpath:?}/*
build GNU/Linux linux linux amd64
# build macOS osx darwin amd64
echo "Binaries found in:"
find $buildpath/gh-ost* -type f -maxdepth 1
}
main "$@"


@ -2,6 +2,10 @@
A more in-depth discussion of various `gh-ost` command line flags: implementation, implication, use cases.
### aliyun-rds
Add this flag when executing on Aliyun RDS.
### allow-master-master
See [`--assume-master-host`](#assume-master-host).
@ -14,7 +18,7 @@ If, for some reason, you do not wish `gh-ost` to connect to a replica, you may c
### approve-renamed-columns
When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try an associate the old column name with new column name. Otherwise the new structure may also look like some column was dropped and another was added.
When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise the new structure may also look like some column was dropped and another was added.
`gh-ost` will print out what it thinks the _rename_ implied, but will not issue the migration unless you provide with `--approve-renamed-columns`.
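A minimal sketch of such a migration, assuming hypothetical host, schema, table and column names:

```bash
# gh-ost infers the full_name -> display_name mapping from the statement and
# requires explicit approval of that mapping before executing.
gh-ost \
  --host=replica.example.com \
  --database=mydb \
  --table=users \
  --alter="change column full_name display_name varchar(255) not null" \
  --approve-renamed-columns \
  --execute
```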
@ -65,6 +69,10 @@ This is somewhat similar to a Nagios `n`-times test, where `n` in our case is al
Optional. Default is `safe`. See more discussion in [`cut-over`](cut-over.md)
### cut-over-lock-timeout-seconds
Default `3`. Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout).
### discard-foreign-keys
**Danger**: this flag will _silently_ discard any foreign keys existing on your table.
@ -103,6 +111,22 @@ While the ongoing estimated number of rows is still heuristic, it's almost exact
Without this parameter, migration is a _noop_: testing table creation and validity of migration, but not touching data.
### force-named-cut-over
If given, a `cut-over` command must name the migrated table, or else it is ignored.
### force-named-panic
If given, a `panic` command must name the migrated table, or else it is ignored.
### force-table-names
Table name prefix to be used on the temporary tables.
### gcp
Add this flag when executing on a 1st generation Google Cloud Platform (GCP).
### heartbeat-interval-millis
Default 100. See [`subsecond-lag`](subsecond-lag.md) for details.
@ -117,6 +141,10 @@ We think `gh-ost` should not take chances or make assumptions about the user's t
See [`initially-drop-ghost-table`](#initially-drop-ghost-table)
### initially-drop-socket-file
Default False. Should `gh-ost` forcibly delete an existing socket file. Be careful: this might drop the socket file of a running migration!
### max-lag-millis
On a replication topology, this is perhaps the most important migration throttling factor: the maximum lag allowed for migration to work. If lag exceeds this value, migration throttles.
@ -133,7 +161,7 @@ List of metrics and threshold values; topping the threshold of any will cause th
### migrate-on-replica
Typically `gh-ost` is used to migrate tables on a master. If you wish to only perform the migration in full on a replica, connect `gh-ost` to said replica and pass `--migrate-on-replica`. `gh-ost` will briefly connect to the master but other issue no changes on the master. Migration will be fully executed on the replica, while making sure to maintain a small replication lag.
Typically `gh-ost` is used to migrate tables on a master. If you wish to only perform the migration in full on a replica, connect `gh-ost` to said replica and pass `--migrate-on-replica`. `gh-ost` will briefly connect to the master but otherwise will make no changes on the master. Migration will be fully executed on the replica, while making sure to maintain a small replication lag. For example:
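A minimal sketch, with hypothetical host and table names:

```bash
# Connect gh-ost to the replica and run the full migration there; the
# master's tables are left untouched.
gh-ost \
  --host=replica.example.com \
  --database=mydb \
  --table=users \
  --alter="add column ts timestamp" \
  --migrate-on-replica \
  --execute
```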
### postpone-cut-over-flag-file
@ -153,14 +181,42 @@ See also: [`concurrent-migrations`](cheatsheet.md#concurrent-migrations) on the
By default `gh-ost` verifies no foreign keys exist on the migrated table. On servers with a large number of tables this check can take a long time. If you're absolutely certain no foreign keys exist (the table neither references nor is referenced by other tables) and wish to save the check time, provide `--skip-foreign-key-checks`.
### skip-strict-mode
By default `gh-ost` enforces STRICT_ALL_TABLES sql_mode as a safety measure. In some cases this changes the behaviour of other modes (namely ERROR_FOR_DIVISION_BY_ZERO, NO_ZERO_DATE, and NO_ZERO_IN_DATE) which may lead to errors during migration. Use `--skip-strict-mode` to explicitly tell `gh-ost` not to enforce this. **Danger** This may have some unexpected disastrous side effects.
### skip-renamed-columns
See [`approve-renamed-columns`](#approve-renamed-columns)
### ssl
By default `gh-ost` does not use ssl/tls connections to the database servers when performing migrations. This flag instructs `gh-ost` to use encrypted connections. If enabled, `gh-ost` will use the system's ca certificate pool for server certificate verification. If a different certificate is needed for server verification, see `--ssl-ca`. If you wish to skip server verification, but still use encrypted connections, use with `--ssl-allow-insecure`.
### ssl-allow-insecure
Allows `gh-ost` to connect to the MySQL servers using encrypted connections, but without verifying the validity of the certificate provided by the server during the connection. Requires `--ssl`.
### ssl-ca
`--ssl-ca=/path/to/ca-cert.pem`: ca certificate file (in PEM format) to use for server certificate verification. If specified, the default system ca cert pool will not be used for verification, only the ca cert provided here. Requires `--ssl`.
### ssl-cert
`--ssl-cert=/path/to/ssl-cert.crt`: SSL public key certificate file (in PEM format).
### ssl-key
`--ssl-key=/path/to/ssl-key.key`: SSL private key file (in PEM format).
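A minimal sketch combining these flags, assuming hypothetical host names and certificate paths:

```bash
# Encrypted connections, verifying the server against a custom CA bundle.
# To keep encryption but skip server verification, replace the --ssl-ca,
# --ssl-cert and --ssl-key flags with --ssl-allow-insecure.
gh-ost \
  --host=replica.example.com \
  --database=mydb \
  --table=users \
  --alter="add column ts timestamp" \
  --ssl \
  --ssl-ca=/path/to/ca-cert.pem \
  --ssl-cert=/path/to/client-cert.pem \
  --ssl-key=/path/to/client-key.pem \
  --execute
```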
### test-on-replica
Issue the migration on a replica; do not modify data on master. Useful for validating, testing and benchmarking. See [`testing-on-replica`](testing-on-replica.md)
### test-on-replica-skip-replica-stop
Default `False`. When `--test-on-replica` is enabled, do not issue commands to stop replication (requires `--test-on-replica`).
### throttle-control-replicas
Provide a comma-delimited list of replicas; `gh-ost` will throttle when any of the given replicas lag beyond [`--max-lag-millis`](#max-lag-millis). The list can be queried and updated dynamically via [interactive commands](interactive-commands.md).
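A minimal sketch, assuming hypothetical replica host names; the socket path follows the default `/tmp/gh-ost.<database>.<table>.sock` pattern, and piping through `nc` is one possible way to reach the interactive interface:

```bash
# Throttle whenever either listed replica lags beyond 1500ms.
gh-ost \
  --host=replica1.example.com \
  --database=mydb \
  --table=users \
  --alter="add column ts timestamp" \
  --max-lag-millis=1500 \
  --throttle-control-replicas="replica1.example.com:3306,replica2.example.com:3306" \
  --execute

# Update the list mid-migration through the interactive socket.
echo "throttle-control-replicas=replica1.example.com:3306,replica3.example.com:3306" \
  | nc -U /tmp/gh-ost.mydb.users.sock
```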


@ -65,10 +65,15 @@ The following variables are available on all hooks:
- `GH_OST_ELAPSED_COPY_SECONDS` - row-copy time (excluding startup, row-count and postpone time)
- `GH_OST_ESTIMATED_ROWS` - estimated total rows in table
- `GH_OST_COPIED_ROWS` - number of rows copied by `gh-ost`
- `GH_OST_INSPECTED_LAG` - lag in seconds (floating point) of inspected server
- `GH_OST_PROGRESS` - progress pct ([0..100], floating point) of migration
- `GH_OST_MIGRATED_HOST`
- `GH_OST_INSPECTED_HOST`
- `GH_OST_EXECUTING_HOST`
- `GH_OST_HOOKS_HINT` - copy of `--hooks-hint` value
- `GH_OST_HOOKS_HINT_OWNER` - copy of `--hooks-hint-owner` value
- `GH_OST_HOOKS_HINT_TOKEN` - copy of `--hooks-hint-token` value
- `GH_OST_DRY_RUN` - whether or not the `gh-ost` run is a dry run
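As a hedged illustration, a hypothetical status hook might look like the sketch below: an executable file placed in the directory given via `--hooks-path` (the `gh-ost-on-status` file name and log path are assumptions), reading only variables from the list above:

```bash
#!/bin/bash
# Hypothetical gh-ost-on-status hook: append a one-line progress summary to a log file.
echo "$(date) ${GH_OST_MIGRATED_HOST}: progress=${GH_OST_PROGRESS}%" \
     "copied=${GH_OST_COPIED_ROWS}/${GH_OST_ESTIMATED_ROWS}" \
     "lag=${GH_OST_INSPECTED_LAG}s dry_run=${GH_OST_DRY_RUN}" >> /tmp/gh-ost-status.log
```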
The following variables are available on particular hooks:


@ -28,3 +28,9 @@ It is therefore unlikely that `gh-ost` will support this behavior.
Yes. TL;DR if running all on same replica/master, make sure to provide `--replica-server-id`. [Read more](cheatsheet.md#concurrent-migrations)
# Why
### Why Is the "Connect to Replica" mode preferred?
- To avoid placing extra load on the master. `gh-ost` connects as a replication client. Each additional replica adds some load to the master.
- To monitor replication lag from a replica. This makes the replication lag throttle, `--max-lag-millis`, more representative of the lag experienced by other replicas following the master (perhaps N levels deep in a tree of replicas).


@ -1,4 +1,4 @@
`gh-ost` has been updated to work with Amazon RDS however due to GitHub not relying using AWS for databases, this documentation is community driven so if you find a bug please [open an issue][new_issue]!
`gh-ost` has been updated to work with Amazon RDS however due to GitHub not using AWS for databases, this documentation is community driven so if you find a bug please [open an issue][new_issue]!
# Amazon RDS
@ -26,6 +26,14 @@ If you use `pt-table-checksum` as a part of your data integrity checks, you migh
This tool requires binlog_format=STATEMENT, but the current binlog_format is set to ROW and an error occurred while attempting to change it. If running MySQL 5.1.29 or newer, setting binlog_format requires the SUPER privilege. You will need to manually set binlog_format to 'STATEMENT' before running this tool.
```
#### Binlog filtering
In Aurora, the [binlog filtering feature][aws_replication_docs_bin_log_filtering] is enabled by default. This becomes an issue when gh-ost tries to do the cut-over, because gh-ost waits for an entry in the binlog to proceed but this entry will never end up in the binlog because it gets filtered out by the binlog filtering feature.
You need to turn this feature off during the migration process.
Set the `aurora_enable_repl_bin_log_filtering` parameter to 0 in the Parameter Group for your cluster.
When the migration is done, set it back to 1 (default).
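One possible way to toggle the parameter, assuming the AWS CLI and a custom DB cluster parameter group (the group name is hypothetical, and the exact CLI syntax should be verified against the AWS documentation):

```bash
# Disable Aurora binlog filtering for the duration of the migration...
aws rds modify-db-cluster-parameter-group \
  --db-cluster-parameter-group-name my-aurora-cluster-params \
  --parameters "ParameterName=aurora_enable_repl_bin_log_filtering,ParameterValue=0,ApplyMethod=immediate"

# ...and restore the default afterwards.
aws rds modify-db-cluster-parameter-group \
  --db-cluster-parameter-group-name my-aurora-cluster-params \
  --parameters "ParameterName=aurora_enable_repl_bin_log_filtering,ParameterValue=1,ApplyMethod=immediate"
```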
#### Preflight checklist
Before trying to run any `gh-ost` migrations you will want to confirm the following:
@ -35,6 +43,7 @@ Before trying to run any `gh-ost` migrations you will want to confirm the follow
- [ ] Executing `SHOW SLAVE STATUS\G` on your replica cluster displays the correct master host, binlog position, etc.
- [ ] Database backup retention is greater than 1 day to enable binlogs
- [ ] You have set up [`hooks`][ghost_hooks] to issue RDS procedures for stopping and starting replication. (see [github/gh-ost#163][ghost_rds_issue_tracking] for examples)
- [ ] The parameter `aurora_enable_repl_bin_log_filtering` is set to 0
[new_issue]: https://github.com/github/gh-ost/issues/new
[assume_rbr_docs]: https://github.com/github/gh-ost/blob/master/doc/command-line-flags.md#assume-rbr
@ -43,3 +52,4 @@ Before trying to run any `gh-ost` migrations you will want to confirm the follow
[percona_toolkit_patch]: https://github.com/jacobbednarz/percona-toolkit/commit/0271ba6a094da446a5e5bb8d99b5c26f1777f2b9
[ghost_hooks]: https://github.com/github/gh-ost/blob/master/doc/hooks.md
[ghost_rds_issue_tracking]: https://github.com/github/gh-ost/issues/163
[aws_replication_docs_bin_log_filtering]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Replication.html#AuroraMySQL.Replication.Performance


@ -22,14 +22,10 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
### Limitations
- Foreign keys not supported. They may be supported in the future, to some extent.
- Foreign key constraints are not supported. They may be supported in the future, to some extent.
- Triggers are not supported. They may be supported in the future.
- MySQL 5.7 generated columns are not supported. They may be supported in the future.
- MySQL 5.7 `POINT` column type is not supported.
- MySQL 5.7 `JSON` columns are supported but not as part of `PRIMARY KEY`
- The two _before_ & _after_ tables must share a `PRIMARY KEY` or other `UNIQUE KEY`. This key will be used by `gh-ost` to iterate through the table rows when copying. [Read more](shared-key.md)
@ -42,15 +38,17 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
- It is not allowed to migrate a table where another table exists with same name and different upper/lower case.
- For example, you may not migrate `MyTable` if another table called `MYtable` exists in the same schema.
- Amazon RDS works, but has it's own [limitations](rds.md).
- Google Cloud SQL is currently not supported
- Amazon RDS works, but has its own [limitations](rds.md).
- Google Cloud SQL works, `--gcp` flag required.
- Aliyun RDS works, `--aliyun-rds` flag required.
- Multisource is not supported when migrating via replica. It _should_ work (but never tested) when connecting directly to master (`--allow-on-master`)
- Master-master setup is only supported in active-passive setup. Active-active (where table is being written to on both masters concurrently) is unsupported. It may be supported in the future.
- If you have en `enum` field as part of your migration key (typically the `PRIMARY KEY`), migration performance will be degraded and potentially bad. [Read more](https://github.com/github/gh-ost/pull/277#issuecomment-254811520)
- If you have an `enum` field as part of your migration key (typically the `PRIMARY KEY`), migration performance will be degraded and potentially bad. [Read more](https://github.com/github/gh-ost/pull/277#issuecomment-254811520)
- Migrating a `FEDERATED` table is unsupported and is irrelevant to the problem `gh-ost` tackles.
- [Encrypted binary logs](https://www.percona.com/blog/2018/03/08/binlog-encryption-percona-server-mysql/) are not supported.
- `ALTER TABLE ... RENAME TO some_other_name` is not supported (and you shouldn't use `gh-ost` for such a trivial operation).


@ -1,12 +1,12 @@
# Shared key
A requirement for a migration to run is that the two _before_ and _after_ tables have a shared unique key. This is to elaborate and illustrate on the matter.
gh-ost requires for every migration that both the _before_ and _after_ versions of the table share the same unique not-null key columns. This page illustrates this rule.
### Introduction
Consider a classic, simple migration. The table is any normal:
Consider a simple migration, with a normal table,
```
```sql
CREATE TABLE tbl (
id bigint unsigned not null auto_increment,
data varchar(255),
@ -15,54 +15,72 @@ CREATE TABLE tbl (
)
```
And the migration is a simple `add column ts timestamp`.
In such migration there is no change in indexes, and in particular no change to any unique key, and specifically no change to the `PRIMARY KEY`. To run this migration, `gh-ost` would iterate the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` by order of `id`, and then apply binlog events onto `_tbl_gho`.
Applying the binlog events assumes the existence of a shared unique key. For example, an `UPDATE` statement in the binary log translate to a `REPLACE` statement which `gh-ost` applies to the _ghost_ table. Such statement expects to add or replace an existing row based on given row data. In particular, it would _replace_ an existing row if a unique key violation is met.
So `gh-ost` correlates `tbl` and `_tbl_gho` rows using a unique key. In the above example that would be the `PRIMARY KEY`.
### Rules
There must be a shared set of not-null columns for which there is a unique constraint in both the original table and the migration (_ghost_) table.
### Interpreting the rules
The same columns must be covered by a unique key in both tables. This doesn't have to be the `PRIMARY KEY`. This doesn't have to be a key of the same name.
Upon migration, `gh-ost` inspects both the original and _ghost_ table and attempts to find at least one such unique key (or rather, a set of columns) that is shared between the two. Typically this would just be the `PRIMARY KEY`, but sometimes you may change the `PRIMARY KEY` itself, in which case `gh-ost` will look for other options.
`gh-ost` expects unique keys where no `NULL` values are found, i.e. all columns covered by the unique key are defined as `NOT NULL`. This is implicitly true for `PRIMARY KEY`s. If no such key can be found, `gh-ost` bails out. In the event there is no such key, but you happen to _know_ your columns have no `NULL` values even though they're `NULL`-able, you may take responsibility and pass the `--allow-nullable-unique-key`. The migration will run well as long as no `NULL` values are found in the unique key's columns. Any actual `NULL`s may corrupt the migration.
### Examples: allowed and not allowed
and the migration `add column ts timestamp`. The _after_ table version would be:
```sql
CREATE TABLE tbl (
id bigint unsigned not null auto_increment,
data varchar(255),
more_data int,
ts timestamp,
PRIMARY KEY(id)
)
```
(This is also the definition of the _ghost_ table, except that the _ghost_ table would be called `_tbl_gho`).
In this migration, the _before_ and _after_ versions contain the same unique not-null key (the PRIMARY KEY). To run this migration, `gh-ost` would iterate through the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` in primary key order, while also applying the binlog event writes from `tbl` onto `_tbl_gho`.
The applying of the binlog events is what requires the shared unique key. For example, an `UPDATE` statement to `tbl` translates to a `REPLACE` statement which `gh-ost` applies to `_tbl_gho`. A `REPLACE` statement expects to insert or replace an existing row based on the row's values and the table's unique key constraints. In particular, if inserting that row would result in a unique key violation (e.g., a row with that primary key already exists), it would _replace_ that existing row with the new values.
So `gh-ost` correlates `tbl` and `_tbl_gho` rows one to one using a unique key. In the above example that would be the `PRIMARY KEY`.
### Interpreting the rule
The _before_ and _after_ versions of the table share the same unique not-null key, but:
- the key doesn't have to be the PRIMARY KEY
- the key can have a different name between the _before_ and _after_ versions (e.g., renamed via DROP INDEX and ADD INDEX) so long as it contains the exact same column(s)
At the start of the migration, `gh-ost` inspects both the original and _ghost_ table it created, and attempts to find at least one such unique key (or rather, a set of columns) that is shared between the two. Typically this would just be the `PRIMARY KEY`, but some tables don't have primary keys, or sometimes it is the primary key that is being modified by the migration. In these cases `gh-ost` will look for other options.
`gh-ost` expects unique keys where no `NULL` values are found, i.e. all columns contained in the unique key are defined as `NOT NULL`. This is implicitly true for primary keys. If no such key can be found, `gh-ost` bails out.
If the table contains a unique key with nullable columns, but you know your columns contain no `NULL` values, use the `--allow-nullable-unique-key` option. The migration will run well as long as no `NULL` values are found in the unique key's columns. **Any actual `NULL`s may corrupt the migration.**
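A minimal sketch of opting in, with hypothetical host and table names:

```bash
# Only for a table whose sole shared unique key contains NULLable columns that
# are known to hold no NULLs; any actual NULL may corrupt the migration.
gh-ost \
  --host=replica.example.com \
  --database=mydb \
  --table=users \
  --alter="add column ts timestamp" \
  --allow-nullable-unique-key \
  --execute
```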
### Examples: Allowed and Not Allowed
```sql
create table some_table (
id int auto_increment,
id int not null auto_increment,
ts timestamp,
name varchar(128) not null,
owner_id int not null,
loc_id int,
loc_id int not null,
primary key(id),
unique key name_uidx(name)
)
```
Following are examples of migrations that are _good to run_:
Note the two unique, not-null indexes: the primary key and `name_uidx`.
Allowed migrations:
- `add column i int`
- `add key owner_idx(owner_id)`
- `add unique key owner_name_idx(owner_id, name)` - though you need to make sure to not write conflicting rows while this migration runs
- `add key owner_idx (owner_id)`
- `add unique key owner_name_idx (owner_id, name)` - **be careful not to write conflicting rows while this migration runs**
- `drop key name_uidx` - `primary key` is shared between the tables
- `drop primary key, add primary key(owner_id, loc_id)` - `name_uidx` is shared between the tables and is used for migration
- `change id bigint unsigned` - the `'primary key` is used. The change of type still makes the `primary key` workable.
- `drop primary key, drop key name_uidx, create primary key(name), create unique key id_uidx(id)` - swapping the two keys. `gh-ost` is still happy because `id` is still unique in both tables. So is `name`.
- `drop primary key, add primary key(owner_id, loc_id)` - `name_uidx` is shared between the tables
- `change id bigint unsigned not null auto_increment` - the `primary key` changes datatype but not value, and can be used
- `drop primary key, drop key name_uidx, add primary key(name), add unique key id_uidx(id)` - swapping the two keys. Either `id` or `name` could be used
Not allowed:
- `drop primary key, drop key name_uidx` - the _ghost_ table has no unique key
- `drop primary key, drop key name_uidx, create primary key(name, owner_id)` - no shared columns to the unique keys on both tables. Even though `name` exists in the _ghost_ table's `primary key`, it is only part of the key and in itself does not guarantee uniqueness in the _ghost_ table.
Following are examples of migrations that _cannot run_:
### Workarounds
- `drop primary key, drop key name_uidx` - no unique key to _ghost_ table, so clearly cannot run
- `drop primary key, drop key name_uidx, create primary key(name, owner_id)` - no shared columns to both tables. Even though `name` exists in the _ghost_ table's `primary key`, it is only part of the key and in itself does not guarantee uniqueness in the _ghost_ table.
Also, you cannot run a migration on a table that doesn't have some form of `unique key` in the first place, such as `some_table (id int, ts timestamp)`
If you need to change your primary key or only not-null unique index to use different columns, you will want to do it as two separate migrations (see the sketch after this list):
1. `ADD UNIQUE KEY temp_pk (temp_pk_column,...)`
1. `DROP PRIMARY KEY, DROP KEY temp_pk, ADD PRIMARY KEY (temp_pk_column,...)`
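Sketched as two separate `gh-ost` runs (the database, table and column names are hypothetical; the host defaults to `127.0.0.1`):

```bash
# Step 1: add the new unique key alongside the existing primary key.
gh-ost --database=mydb --table=users \
  --alter="add unique key temp_pk (temp_pk_column)" --execute

# Step 2: once step 1 has completed, swap the primary key over.
gh-ost --database=mydb --table=users \
  --alter="drop primary key, drop key temp_pk, add primary key (temp_pk_column)" --execute
```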


@ -46,6 +46,14 @@ Note that you may dynamically change both `--max-lag-millis` and the `throttle-c
An example query could be: `--throttle-query="select hour(now()) between 8 and 17"` which implies throttling auto-starts at `8:00am` and migration auto-resumes at `18:00`.
#### HTTP Throttle
The `--throttle-http` flag allows for throttling via HTTP. Every 100ms `gh-ost` issues a `HEAD` request to the provided URL. If the response status code is not `200` throttling will kick in until a `200` response status code is returned.
If no URL is provided or the URL provided doesn't contain the scheme then the HTTP check will be disabled. For example `--throttle-http="http://1.2.3.4:6789/throttle"` will enable the HTTP check/throttling, but `--throttle-http="1.2.3.4:6789/throttle"` will not.
The URL can be queried and updated dynamically via [interactive interface](interactive-commands.md).
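A minimal sketch of the check and of wiring it into a migration; the endpoint URL is the hypothetical one from the example above:

```bash
# Roughly what gh-ost does every 100ms: a HEAD request where HTTP 200 means
# "keep going" and any other status means "throttle".
curl -s -o /dev/null -w "%{http_code}\n" -I "http://1.2.3.4:6789/throttle"

# Hypothetical invocation wiring the same endpoint into gh-ost.
gh-ost \
  --database=mydb \
  --table=users \
  --alter="add column ts timestamp" \
  --throttle-http="http://1.2.3.4:6789/throttle" \
  --execute
```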
#### Manual control
In addition to the above, you are able to take control and throttle the operation any time you like.

docker-compose.yml Normal file

@ -0,0 +1,7 @@
version: "3.5"
services:
app:
image: app
build:
context: .
dockerfile: Dockerfile.test


@ -7,6 +7,7 @@ package base
import (
"fmt"
"math"
"os"
"regexp"
"strings"
@ -18,6 +19,7 @@ import (
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
"gopkg.in/gcfg.v1"
gcfgscanner "gopkg.in/gcfg.v1/scanner"
@ -28,23 +30,23 @@ type RowsEstimateMethod string
const (
TableStatusRowsEstimate RowsEstimateMethod = "TableStatusRowsEstimate"
ExplainRowsEstimate = "ExplainRowsEstimate"
CountRowsEstimate = "CountRowsEstimate"
ExplainRowsEstimate RowsEstimateMethod = "ExplainRowsEstimate"
CountRowsEstimate RowsEstimateMethod = "CountRowsEstimate"
)
type CutOver int
const (
CutOverAtomic CutOver = iota
CutOverTwoStep = iota
CutOverAtomic CutOver = iota
CutOverTwoStep
)
type ThrottleReasonHint string
const (
NoThrottleReasonHint ThrottleReasonHint = "NoThrottleReasonHint"
UserCommandThrottleReasonHint = "UserCommandThrottleReasonHint"
LeavingHibernationThrottleReasonHint = "LeavingHibernationThrottleReasonHint"
UserCommandThrottleReasonHint ThrottleReasonHint = "UserCommandThrottleReasonHint"
LeavingHibernationThrottleReasonHint ThrottleReasonHint = "LeavingHibernationThrottleReasonHint"
)
const (
@ -75,9 +77,10 @@ func NewThrottleCheckResult(throttle bool, reason string, reasonHint ThrottleRea
type MigrationContext struct {
Uuid string
DatabaseName string
OriginalTableName string
AlterStatement string
DatabaseName string
OriginalTableName string
AlterStatement string
AlterStatementOptions string // anything following the 'ALTER TABLE [schema.]table' from AlterStatement
CountTableRows bool
ConcurrentCountTableRows bool
@ -86,17 +89,25 @@ type MigrationContext struct {
SwitchToRowBinlogFormat bool
AssumeRBR bool
SkipForeignKeyChecks bool
SkipStrictMode bool
NullableUniqueKeyAllowed bool
ApproveRenamedColumns bool
SkipRenamedColumns bool
IsTungsten bool
DiscardForeignKeys bool
AliyunRDS bool
GoogleCloudPlatform bool
config ContextConfig
configMutex *sync.Mutex
ConfigFile string
CliUser string
CliPassword string
UseTLS bool
TLSAllowInsecure bool
TLSCACertificate string
TLSCertificate string
TLSKey string
CliMasterUser string
CliMasterPassword string
@ -110,6 +121,7 @@ type MigrationContext struct {
ThrottleAdditionalFlagFile string
throttleQuery string
throttleHTTP string
IgnoreHTTPErrors bool
ThrottleCommandedByUser int64
HibernateUntil int64
maxLoad LoadMap
@ -118,10 +130,15 @@ type MigrationContext struct {
CriticalLoadHibernateSeconds int64
PostponeCutOverFlagFile string
CutOverLockTimeoutSeconds int64
CutOverExponentialBackoff bool
ExponentialBackoffMaxInterval int64
ForceNamedCutOverCommand bool
ForceNamedPanicCommand bool
PanicFlagFile string
HooksPath string
HooksHintMessage string
HooksHintOwner string
HooksHintToken string
DropServeSocket bool
ServeSocketFile string
@ -161,6 +178,7 @@ type MigrationContext struct {
pointOfInterestTime time.Time
pointOfInterestTimeMutex *sync.Mutex
CurrentLag int64
currentProgress uint64
ThrottleHTTPStatusCode int64
controlReplicasLagResult mysql.ReplicationLagResult
TotalRowsCopied int64
@ -183,8 +201,10 @@ type MigrationContext struct {
OriginalTableColumnsOnApplier *sql.ColumnList
OriginalTableColumns *sql.ColumnList
OriginalTableVirtualColumns *sql.ColumnList
OriginalTableUniqueKeys [](*sql.UniqueKey)
GhostTableColumns *sql.ColumnList
GhostTableVirtualColumns *sql.ColumnList
GhostTableUniqueKeys [](*sql.UniqueKey)
UniqueKey *sql.UniqueKey
SharedColumns *sql.ColumnList
@ -199,6 +219,25 @@ type MigrationContext struct {
ForceTmpTableName string
recentBinlogCoordinates mysql.BinlogCoordinates
Log Logger
}
type Logger interface {
Debug(args ...interface{})
Debugf(format string, args ...interface{})
Info(args ...interface{})
Infof(format string, args ...interface{})
Warning(args ...interface{}) error
Warningf(format string, args ...interface{}) error
Error(args ...interface{}) error
Errorf(format string, args ...interface{}) error
Errore(err error) error
Fatal(args ...interface{}) error
Fatalf(format string, args ...interface{}) error
Fatale(err error) error
SetLevel(level log.LogLevel)
SetPrintStackTrace(printStackTraceFlag bool)
}
type ContextConfig struct {
@ -233,6 +272,7 @@ func NewMigrationContext() *MigrationContext {
pointOfInterestTimeMutex: &sync.Mutex{},
ColumnRenameMap: make(map[string]string),
PanicAbort: make(chan error),
Log: NewDefaultLogger(),
}
}
@ -341,6 +381,14 @@ func (this *MigrationContext) SetCutOverLockTimeoutSeconds(timeoutSeconds int64)
return nil
}
func (this *MigrationContext) SetExponentialBackoffMaxInterval(intervalSeconds int64) error {
if intervalSeconds < 2 {
return fmt.Errorf("Minimal maximum interval is 2sec. Timeout remains at %d", this.ExponentialBackoffMaxInterval)
}
this.ExponentialBackoffMaxInterval = intervalSeconds
return nil
}
func (this *MigrationContext) SetDefaultNumRetries(retries int64) {
this.throttleMutex.Lock()
defer this.throttleMutex.Unlock()
@ -405,6 +453,20 @@ func (this *MigrationContext) MarkRowCopyEndTime() {
this.RowCopyEndTime = time.Now()
}
func (this *MigrationContext) GetCurrentLagDuration() time.Duration {
return time.Duration(atomic.LoadInt64(&this.CurrentLag))
}
func (this *MigrationContext) GetProgressPct() float64 {
return math.Float64frombits(atomic.LoadUint64(&this.currentProgress))
}
func (this *MigrationContext) SetProgressPct(progressPct float64) {
atomic.StoreUint64(&this.currentProgress, math.Float64bits(progressPct))
}
// math.Float64bits([f=0..100])
// GetTotalRowsCopied returns the accurate number of rows being copied (affected)
// This is not exactly the same as the rows being iterated via chunks, but potentially close enough
func (this *MigrationContext) GetTotalRowsCopied() int64 {
@ -535,6 +597,13 @@ func (this *MigrationContext) SetThrottleHTTP(throttleHTTP string) {
this.throttleHTTP = throttleHTTP
}
func (this *MigrationContext) SetIgnoreHTTPErrors(ignoreHTTPErrors bool) {
this.throttleHTTPMutex.Lock()
defer this.throttleHTTPMutex.Unlock()
this.IgnoreHTTPErrors = ignoreHTTPErrors
}
func (this *MigrationContext) GetMaxLoad() LoadMap {
this.throttleMutex.Lock()
defer this.throttleMutex.Unlock()
@ -681,6 +750,13 @@ func (this *MigrationContext) ApplyCredentials() {
}
}
func (this *MigrationContext) SetupTLS() error {
if this.UseTLS {
return this.InspectorConnectionConfig.UseTLS(this.TLSCACertificate, this.TLSCertificate, this.TLSKey, this.TLSAllowInsecure)
}
return nil
}
// ReadConfigFile attempts to read the config file, if it exists
func (this *MigrationContext) ReadConfigFile() error {
this.configMutex.Lock()

go/base/default_logger.go Normal file

@ -0,0 +1,73 @@
package base
import (
"github.com/outbrain/golib/log"
)
type simpleLogger struct{}
func NewDefaultLogger() *simpleLogger {
return &simpleLogger{}
}
func (*simpleLogger) Debug(args ...interface{}) {
log.Debug(args[0].(string), args[1:])
return
}
func (*simpleLogger) Debugf(format string, args ...interface{}) {
log.Debugf(format, args...)
return
}
func (*simpleLogger) Info(args ...interface{}) {
log.Info(args[0].(string), args[1:])
return
}
func (*simpleLogger) Infof(format string, args ...interface{}) {
log.Infof(format, args...)
return
}
func (*simpleLogger) Warning(args ...interface{}) error {
return log.Warning(args[0].(string), args[1:])
}
func (*simpleLogger) Warningf(format string, args ...interface{}) error {
return log.Warningf(format, args...)
}
func (*simpleLogger) Error(args ...interface{}) error {
return log.Error(args[0].(string), args[1:])
}
func (*simpleLogger) Errorf(format string, args ...interface{}) error {
return log.Errorf(format, args...)
}
func (*simpleLogger) Errore(err error) error {
return log.Errore(err)
}
func (*simpleLogger) Fatal(args ...interface{}) error {
return log.Fatal(args[0].(string), args[1:])
}
func (*simpleLogger) Fatalf(format string, args ...interface{}) error {
return log.Fatalf(format, args...)
}
func (*simpleLogger) Fatale(err error) error {
return log.Fatale(err)
}
func (*simpleLogger) SetLevel(level log.LogLevel) {
log.SetLevel(level)
return
}
func (*simpleLogger) SetPrintStackTrace(printStackTraceFlag bool) {
log.SetPrintStackTrace(printStackTraceFlag)
return
}


@ -14,7 +14,6 @@ import (
gosql "database/sql"
"github.com/github/gh-ost/go/mysql"
"github.com/outbrain/golib/log"
)
var (
@ -40,10 +39,9 @@ func FileExists(fileName string) bool {
func TouchFile(fileName string) error {
f, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE, 0755)
if err != nil {
return (err)
return err
}
defer f.Close()
return nil
return f.Close()
}
// StringContainsAll returns true if `s` contains all non empty given `substrings`
@ -64,20 +62,30 @@ func StringContainsAll(s string, substrings ...string) bool {
return nonEmptyStringsFound
}
func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig) (string, error) {
query := `select @@global.port, @@global.version`
func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext *MigrationContext) (string, error) {
versionQuery := `select @@global.version`
var port, extraPort int
var version string
if err := db.QueryRow(query).Scan(&port, &version); err != nil {
if err := db.QueryRow(versionQuery).Scan(&version); err != nil {
return "", err
}
extraPortQuery := `select @@global.extra_port`
if err := db.QueryRow(extraPortQuery).Scan(&extraPort); err != nil {
// swallow this error. not all servers support extra_port
}
// AliyunRDS set users port to "NULL", replace it by gh-ost param
// GCP set users port to "NULL", replace it by gh-ost param
if migrationContext.AliyunRDS || migrationContext.GoogleCloudPlatform {
port = connectionConfig.Key.Port
} else {
portQuery := `select @@global.port`
if err := db.QueryRow(portQuery).Scan(&port); err != nil {
return "", err
}
}
if connectionConfig.Key.Port == port || (extraPort > 0 && connectionConfig.Key.Port == extraPort) {
log.Infof("connection validated on %+v", connectionConfig.Key)
migrationContext.Log.Infof("connection validated on %+v", connectionConfig.Key)
return version, nil
} else if extraPort == 0 {
return "", fmt.Errorf("Unexpected database port reported: %+v", port)


@ -7,17 +7,18 @@ package binlog
import (
"fmt"
"github.com/github/gh-ost/go/sql"
"strings"
"github.com/github/gh-ost/go/sql"
)
type EventDML string
const (
NotDML EventDML = "NoDML"
InsertDML = "Insert"
UpdateDML = "Update"
DeleteDML = "Delete"
InsertDML EventDML = "Insert"
UpdateDML EventDML = "Update"
DeleteDML EventDML = "Delete"
)
func ToEventDML(description string) EventDML {


@ -26,7 +26,7 @@ func NewBinlogEntry(logFile string, logPos uint64) *BinlogEntry {
return binlogEntry
}
// NewBinlogEntry creates an empty, ready to go BinlogEntry object
// NewBinlogEntryAt creates an empty, ready to go BinlogEntry object
func NewBinlogEntryAt(coordinates mysql.BinlogCoordinates) *BinlogEntry {
binlogEntry := &BinlogEntry{
Coordinates: coordinates,
@ -41,7 +41,7 @@ func (this *BinlogEntry) Duplicate() *BinlogEntry {
return binlogEntry
}
// Duplicate creates and returns a new binlog entry, with some of the attributes pre-assigned
// String() returns a string representation of this binlog entry
func (this *BinlogEntry) String() string {
return fmt.Sprintf("[BinlogEntry at %+v; dml:%+v]", this.Coordinates, this.DmlEvent)
}


@ -13,13 +13,13 @@ import (
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
gomysql "github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go-mysql/replication"
"golang.org/x/net/context"
)
type GoMySQLReader struct {
migrationContext *base.MigrationContext
connectionConfig *mysql.ConnectionConfig
binlogSyncer *replication.BinlogSyncer
binlogStreamer *replication.BinlogStreamer
@ -30,6 +30,7 @@ type GoMySQLReader struct {
func NewGoMySQLReader(migrationContext *base.MigrationContext) (binlogReader *GoMySQLReader, err error) {
binlogReader = &GoMySQLReader{
migrationContext: migrationContext,
connectionConfig: migrationContext.InspectorConnectionConfig,
currentCoordinates: mysql.BinlogCoordinates{},
currentCoordinatesMutex: &sync.Mutex{},
@ -40,12 +41,14 @@ func NewGoMySQLReader(migrationContext *base.MigrationContext) (binlogReader *Go
serverId := uint32(migrationContext.ReplicaServerId)
binlogSyncerConfig := replication.BinlogSyncerConfig{
ServerID: serverId,
Flavor: "mysql",
Host: binlogReader.connectionConfig.Key.Hostname,
Port: uint16(binlogReader.connectionConfig.Key.Port),
User: binlogReader.connectionConfig.User,
Password: binlogReader.connectionConfig.Password,
ServerID: serverId,
Flavor: "mysql",
Host: binlogReader.connectionConfig.Key.Hostname,
Port: uint16(binlogReader.connectionConfig.Key.Port),
User: binlogReader.connectionConfig.User,
Password: binlogReader.connectionConfig.Password,
TLSConfig: binlogReader.connectionConfig.TLSConfig(),
UseDecimal: true,
}
binlogReader.binlogSyncer = replication.NewBinlogSyncer(binlogSyncerConfig)
@ -55,11 +58,11 @@ func NewGoMySQLReader(migrationContext *base.MigrationContext) (binlogReader *Go
// ConnectBinlogStreamer
func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordinates) (err error) {
if coordinates.IsEmpty() {
return log.Errorf("Empty coordinates at ConnectBinlogStreamer()")
return this.migrationContext.Log.Errorf("Empty coordinates at ConnectBinlogStreamer()")
}
this.currentCoordinates = coordinates
log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates)
this.migrationContext.Log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates)
// Start sync with specified binlog file and position
this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{this.currentCoordinates.LogFile, uint32(this.currentCoordinates.LogPos)})
@ -76,7 +79,7 @@ func (this *GoMySQLReader) GetCurrentBinlogCoordinates() *mysql.BinlogCoordinate
// StreamEvents
func (this *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEvent *replication.RowsEvent, entriesChannel chan<- *BinlogEntry) error {
if this.currentCoordinates.SmallerThanOrEquals(&this.LastAppliedRowsEventHint) {
log.Debugf("Skipping handled query at %+v", this.currentCoordinates)
this.migrationContext.Log.Debugf("Skipping handled query at %+v", this.currentCoordinates)
return nil
}
@ -111,7 +114,7 @@ func (this *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEven
binlogEntry.DmlEvent.WhereColumnValues = sql.ToColumnValues(row)
}
}
// The channel will do the throttling. Whoever is reding from the channel
// The channel will do the throttling. Whoever is reading from the channel
// decides whether action is taken synchronously (meaning we wait before
// next iteration) or asynchronously (we keep pushing more events)
// In reality, reads will be synchronous
@ -145,14 +148,14 @@ func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesCha
defer this.currentCoordinatesMutex.Unlock()
this.currentCoordinates.LogFile = string(rotateEvent.NextLogName)
}()
log.Infof("rotate to next log name: %s", rotateEvent.NextLogName)
this.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), rotateEvent.NextLogName)
} else if rowsEvent, ok := ev.Event.(*replication.RowsEvent); ok {
if err := this.handleRowsEvent(ev, rowsEvent, entriesChannel); err != nil {
return err
}
}
}
log.Debugf("done streaming events")
this.migrationContext.Log.Debugf("done streaming events")
return nil
}


@ -14,6 +14,8 @@ import (
"github.com/github/gh-ost/go/base"
"github.com/github/gh-ost/go/logic"
"github.com/github/gh-ost/go/sql"
_ "github.com/go-sql-driver/mysql"
"github.com/outbrain/golib/log"
"golang.org/x/crypto/ssh/terminal"
@ -30,7 +32,7 @@ func acceptSignals(migrationContext *base.MigrationContext) {
for sig := range c {
switch sig {
case syscall.SIGHUP:
log.Infof("Received SIGHUP. Reloading configuration")
migrationContext.Log.Infof("Received SIGHUP. Reloading configuration")
if err := migrationContext.ReadConfigFile(); err != nil {
log.Errore(err)
} else {
@ -47,6 +49,7 @@ func main() {
flag.StringVar(&migrationContext.InspectorConnectionConfig.Key.Hostname, "host", "127.0.0.1", "MySQL hostname (preferably a replica, not the master)")
flag.StringVar(&migrationContext.AssumeMasterHostname, "assume-master-host", "", "(optional) explicitly tell gh-ost the identity of the master. Format: some.host.com[:port] This is useful in master-master setups where you wish to pick an explicit master, or in a tungsten-replicator where gh-ost is unable to determine the master")
flag.IntVar(&migrationContext.InspectorConnectionConfig.Key.Port, "port", 3306, "MySQL port (preferably a replica, not the master)")
flag.Float64Var(&migrationContext.InspectorConnectionConfig.Timeout, "mysql-timeout", 0.0, "Connect, read and write timeout for MySQL")
flag.StringVar(&migrationContext.CliUser, "user", "", "MySQL user")
flag.StringVar(&migrationContext.CliPassword, "password", "", "MySQL password")
flag.StringVar(&migrationContext.CliMasterUser, "master-user", "", "MySQL user on master, if different from that on replica. Requires --assume-master-host")
@ -54,6 +57,12 @@ func main() {
flag.StringVar(&migrationContext.ConfigFile, "conf", "", "Config file")
askPass := flag.Bool("ask-pass", false, "prompt for MySQL password")
flag.BoolVar(&migrationContext.UseTLS, "ssl", false, "Enable SSL encrypted connections to MySQL hosts")
flag.StringVar(&migrationContext.TLSCACertificate, "ssl-ca", "", "CA certificate in PEM format for TLS connections to MySQL hosts. Requires --ssl")
flag.StringVar(&migrationContext.TLSCertificate, "ssl-cert", "", "Certificate in PEM format for TLS connections to MySQL hosts. Requires --ssl")
flag.StringVar(&migrationContext.TLSKey, "ssl-key", "", "Key in PEM format for TLS connections to MySQL hosts. Requires --ssl")
flag.BoolVar(&migrationContext.TLSAllowInsecure, "ssl-allow-insecure", false, "Skips verification of MySQL hosts' certificate chain and host name. Requires --ssl")
flag.StringVar(&migrationContext.DatabaseName, "database", "", "database name (mandatory)")
flag.StringVar(&migrationContext.OriginalTableName, "table", "", "table name (mandatory)")
flag.StringVar(&migrationContext.AlterStatement, "alter", "", "alter statement (mandatory)")
@ -67,6 +76,9 @@ func main() {
flag.BoolVar(&migrationContext.IsTungsten, "tungsten", false, "explicitly let gh-ost know that you are running on a tungsten-replication based topology (you are likely to also provide --assume-master-host)")
flag.BoolVar(&migrationContext.DiscardForeignKeys, "discard-foreign-keys", false, "DANGER! This flag will migrate a table that has foreign keys and will NOT create foreign keys on the ghost table, thus your altered table will have NO foreign keys. This is useful for intentional dropping of foreign keys")
flag.BoolVar(&migrationContext.SkipForeignKeyChecks, "skip-foreign-key-checks", false, "set to 'true' when you know for certain there are no foreign keys on your table, and wish to skip the time it takes for gh-ost to verify that")
flag.BoolVar(&migrationContext.SkipStrictMode, "skip-strict-mode", false, "explicitly tell gh-ost binlog applier not to enforce strict sql mode")
flag.BoolVar(&migrationContext.AliyunRDS, "aliyun-rds", false, "set to 'true' when you execute on Aliyun RDS.")
flag.BoolVar(&migrationContext.GoogleCloudPlatform, "gcp", false, "set to 'true' when you execute on a 1st generation Google Cloud Platform (GCP).")
executeFlag := flag.Bool("execute", false, "actually execute the alter & migrate the table. Default is noop: do some tests and exit")
flag.BoolVar(&migrationContext.TestOnReplica, "test-on-replica", false, "Have the migration run on a replica, not on the master. At the end of migration replication is stopped, and tables are swapped and immediately swap-revert. Replication remains stopped and you can compare the two tables for building trust")
@ -79,9 +91,12 @@ func main() {
flag.BoolVar(&migrationContext.TimestampOldTable, "timestamp-old-table", false, "Use a timestamp in old table name. This makes old table names unique and non conflicting cross migrations")
cutOver := flag.String("cut-over", "atomic", "choose cut-over type (default|atomic, two-step)")
flag.BoolVar(&migrationContext.ForceNamedCutOverCommand, "force-named-cut-over", false, "When true, the 'unpostpone|cut-over' interactive command must name the migrated table")
flag.BoolVar(&migrationContext.ForceNamedPanicCommand, "force-named-panic", false, "When true, the 'panic' interactive command must name the migrated table")
flag.BoolVar(&migrationContext.SwitchToRowBinlogFormat, "switch-to-rbr", false, "let this tool automatically switch binary log format to 'ROW' on the replica, if needed. The format will NOT be switched back. I'm too scared to do that, and wish to protect you if you happen to execute another migration while this one is running")
flag.BoolVar(&migrationContext.AssumeRBR, "assume-rbr", false, "set to 'true' when you know for certain your server uses 'ROW' binlog_format. gh-ost is unable to tell, event after reading binlog_format, whether the replication process does indeed use 'ROW', and restarts replication to be certain RBR setting is applied. Such operation requires SUPER privileges which you might not have. Setting this flag avoids restarting replication and you can proceed to use gh-ost without SUPER privileges")
flag.BoolVar(&migrationContext.CutOverExponentialBackoff, "cut-over-exponential-backoff", false, "Wait exponentially longer intervals between failed cut-over attempts. Wait intervals obey a maximum configurable with 'exponential-backoff-max-interval').")
exponentialBackoffMaxInterval := flag.Int64("exponential-backoff-max-interval", 64, "Maximum number of seconds to wait between attempts when performing various operations with exponential backoff.")
chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 100-100,000)")
dmlBatchSize := flag.Int64("dml-batch-size", 10, "batch size for DML events to apply in a single transaction (range 1-100)")
defaultRetries := flag.Int64("default-retries", 60, "Default number of retries for various operations before panicking")
@ -93,6 +108,7 @@ func main() {
throttleControlReplicas := flag.String("throttle-control-replicas", "", "List of replicas on which to check for lag; comma delimited. Example: myhost1.com:3306,myhost2.com,myhost3.com:3307")
throttleQuery := flag.String("throttle-query", "", "when given, issued (every second) to check if operation should throttle. Expecting to return zero for no-throttle, >0 for throttle. Query is issued on the migrated server. Make sure this query is lightweight")
throttleHTTP := flag.String("throttle-http", "", "when given, gh-ost checks given URL via HEAD request; any response code other than 200 (OK) causes throttling; make sure it has low latency response")
ignoreHTTPErrors := flag.Bool("ignore-http-errors", false, "ignore HTTP connection errors during throttle check")
heartbeatIntervalMillis := flag.Int64("heartbeat-interval-millis", 100, "how frequently would gh-ost inject a heartbeat value")
flag.StringVar(&migrationContext.ThrottleFlagFile, "throttle-flag-file", "", "operation pauses when this file exists; hint: use a file that is specific to the table being altered")
flag.StringVar(&migrationContext.ThrottleAdditionalFlagFile, "throttle-additional-flag-file", "/tmp/gh-ost.throttle", "operation pauses when this file exists; hint: keep default, use for throttling multiple gh-ost operations")
@ -105,6 +121,8 @@ func main() {
flag.StringVar(&migrationContext.HooksPath, "hooks-path", "", "directory where hook files are found (default: empty, ie. hooks disabled). Hook files found on this path, and conforming to hook naming conventions will be executed")
flag.StringVar(&migrationContext.HooksHintMessage, "hooks-hint", "", "arbitrary message to be injected to hooks via GH_OST_HOOKS_HINT, for your convenience")
flag.StringVar(&migrationContext.HooksHintOwner, "hooks-hint-owner", "", "arbitrary name of owner to be injected to hooks via GH_OST_HOOKS_HINT_OWNER, for your convenience")
flag.StringVar(&migrationContext.HooksHintToken, "hooks-hint-token", "", "arbitrary token to be injected to hooks via GH_OST_HOOKS_HINT_TOKEN, for your convenience")
flag.UintVar(&migrationContext.ReplicaServerId, "replica-server-id", 99999, "server id used by gh-ost process. Default: 99999")
@ -141,57 +159,80 @@ func main() {
return
}
log.SetLevel(log.ERROR)
migrationContext.Log.SetLevel(log.ERROR)
if *verbose {
log.SetLevel(log.INFO)
migrationContext.Log.SetLevel(log.INFO)
}
if *debug {
log.SetLevel(log.DEBUG)
migrationContext.Log.SetLevel(log.DEBUG)
}
if *stack {
log.SetPrintStackTrace(*stack)
migrationContext.Log.SetPrintStackTrace(*stack)
}
if *quiet {
// Override!!
log.SetLevel(log.ERROR)
migrationContext.Log.SetLevel(log.ERROR)
}
if migrationContext.DatabaseName == "" {
log.Fatalf("--database must be provided and database name must not be empty")
}
if migrationContext.OriginalTableName == "" {
log.Fatalf("--table must be provided and table name must not be empty")
}
if migrationContext.AlterStatement == "" {
log.Fatalf("--alter must be provided and statement must not be empty")
}
parser := sql.NewParserFromAlterStatement(migrationContext.AlterStatement)
migrationContext.AlterStatementOptions = parser.GetAlterStatementOptions()
if migrationContext.DatabaseName == "" {
if parser.HasExplicitSchema() {
migrationContext.DatabaseName = parser.GetExplicitSchema()
} else {
log.Fatalf("--database must be provided and database name must not be empty, or --alter must specify database name")
}
}
if migrationContext.OriginalTableName == "" {
if parser.HasExplicitTable() {
migrationContext.OriginalTableName = parser.GetExplicitTable()
} else {
log.Fatalf("--table must be provided and table name must not be empty, or --alter must specify table name")
}
}
migrationContext.Noop = !(*executeFlag)
if migrationContext.AllowedRunningOnMaster && migrationContext.TestOnReplica {
log.Fatalf("--allow-on-master and --test-on-replica are mutually exclusive")
migrationContext.Log.Fatalf("--allow-on-master and --test-on-replica are mutually exclusive")
}
if migrationContext.AllowedRunningOnMaster && migrationContext.MigrateOnReplica {
log.Fatalf("--allow-on-master and --migrate-on-replica are mutually exclusive")
migrationContext.Log.Fatalf("--allow-on-master and --migrate-on-replica are mutually exclusive")
}
if migrationContext.MigrateOnReplica && migrationContext.TestOnReplica {
log.Fatalf("--migrate-on-replica and --test-on-replica are mutually exclusive")
migrationContext.Log.Fatalf("--migrate-on-replica and --test-on-replica are mutually exclusive")
}
if migrationContext.SwitchToRowBinlogFormat && migrationContext.AssumeRBR {
log.Fatalf("--switch-to-rbr and --assume-rbr are mutually exclusive")
migrationContext.Log.Fatalf("--switch-to-rbr and --assume-rbr are mutually exclusive")
}
if migrationContext.TestOnReplicaSkipReplicaStop {
if !migrationContext.TestOnReplica {
log.Fatalf("--test-on-replica-skip-replica-stop requires --test-on-replica to be enabled")
migrationContext.Log.Fatalf("--test-on-replica-skip-replica-stop requires --test-on-replica to be enabled")
}
log.Warning("--test-on-replica-skip-replica-stop enabled. We will not stop replication before cut-over. Ensure you have a plugin that does this.")
migrationContext.Log.Warning("--test-on-replica-skip-replica-stop enabled. We will not stop replication before cut-over. Ensure you have a plugin that does this.")
}
if migrationContext.CliMasterUser != "" && migrationContext.AssumeMasterHostname == "" {
log.Fatalf("--master-user requires --assume-master-host")
migrationContext.Log.Fatalf("--master-user requires --assume-master-host")
}
if migrationContext.CliMasterPassword != "" && migrationContext.AssumeMasterHostname == "" {
log.Fatalf("--master-password requires --assume-master-host")
migrationContext.Log.Fatalf("--master-password requires --assume-master-host")
}
if migrationContext.TLSCACertificate != "" && !migrationContext.UseTLS {
migrationContext.Log.Fatalf("--ssl-ca requires --ssl")
}
if migrationContext.TLSCertificate != "" && !migrationContext.UseTLS {
migrationContext.Log.Fatalf("--ssl-cert requires --ssl")
}
if migrationContext.TLSKey != "" && !migrationContext.UseTLS {
migrationContext.Log.Fatalf("--ssl-key requires --ssl")
}
if migrationContext.TLSAllowInsecure && !migrationContext.UseTLS {
migrationContext.Log.Fatalf("--ssl-allow-insecure requires --ssl")
}
if *replicationLagQuery != "" {
log.Warningf("--replication-lag-query is deprecated")
migrationContext.Log.Warningf("--replication-lag-query is deprecated")
}
switch *cutOver {
@ -200,19 +241,19 @@ func main() {
case "two-step":
migrationContext.CutOverType = base.CutOverTwoStep
default:
log.Fatalf("Unknown cut-over: %s", *cutOver)
migrationContext.Log.Fatalf("Unknown cut-over: %s", *cutOver)
}
if err := migrationContext.ReadConfigFile(); err != nil {
log.Fatale(err)
migrationContext.Log.Fatale(err)
}
if err := migrationContext.ReadThrottleControlReplicaKeys(*throttleControlReplicas); err != nil {
log.Fatale(err)
migrationContext.Log.Fatale(err)
}
if err := migrationContext.ReadMaxLoad(*maxLoad); err != nil {
log.Fatale(err)
migrationContext.Log.Fatale(err)
}
if err := migrationContext.ReadCriticalLoad(*criticalLoad); err != nil {
log.Fatale(err)
migrationContext.Log.Fatale(err)
}
if migrationContext.ServeSocketFile == "" {
migrationContext.ServeSocketFile = fmt.Sprintf("/tmp/gh-ost.%s.%s.sock", migrationContext.DatabaseName, migrationContext.OriginalTableName)
@ -221,7 +262,7 @@ func main() {
fmt.Println("Password:")
bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
if err != nil {
log.Fatale(err)
migrationContext.Log.Fatale(err)
}
migrationContext.CliPassword = string(bytePassword)
}
@ -232,10 +273,17 @@ func main() {
migrationContext.SetMaxLagMillisecondsThrottleThreshold(*maxLagMillis)
migrationContext.SetThrottleQuery(*throttleQuery)
migrationContext.SetThrottleHTTP(*throttleHTTP)
migrationContext.SetIgnoreHTTPErrors(*ignoreHTTPErrors)
migrationContext.SetDefaultNumRetries(*defaultRetries)
migrationContext.ApplyCredentials()
if err := migrationContext.SetupTLS(); err != nil {
migrationContext.Log.Fatale(err)
}
if err := migrationContext.SetCutOverLockTimeoutSeconds(*cutOverLockTimeoutSeconds); err != nil {
log.Errore(err)
migrationContext.Log.Errore(err)
}
if err := migrationContext.SetExponentialBackoffMaxInterval(*exponentialBackoffMaxInterval); err != nil {
migrationContext.Log.Errore(err)
}
log.Infof("starting gh-ost %+v", AppVersion)
@ -245,7 +293,7 @@ func main() {
err := migrator.Migrate()
if err != nil {
migrator.ExecOnFailureHook()
log.Fatale(err)
migrationContext.Log.Fatale(err)
}
fmt.Fprintf(os.Stdout, "# Done\n")
}

View File

@ -16,7 +16,6 @@ import (
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
"github.com/outbrain/golib/sqlutils"
)
@ -73,31 +72,33 @@ func (this *Applier) InitDBConnections() (err error) {
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, applierUri); err != nil {
return err
}
singletonApplierUri := fmt.Sprintf("%s?timeout=0", applierUri)
singletonApplierUri := fmt.Sprintf("%s&timeout=0", applierUri)
if this.singletonDB, _, err = mysql.GetDB(this.migrationContext.Uuid, singletonApplierUri); err != nil {
return err
}
this.singletonDB.SetMaxOpenConns(1)
version, err := base.ValidateConnection(this.db, this.connectionConfig)
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
if err != nil {
return err
}
if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig); err != nil {
if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig, this.migrationContext); err != nil {
return err
}
this.migrationContext.ApplierMySQLVersion = version
if err := this.validateAndReadTimeZone(); err != nil {
return err
}
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
this.connectionConfig.ImpliedKey = impliedKey
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
this.connectionConfig.ImpliedKey = impliedKey
}
}
if err := this.readTableColumns(); err != nil {
return err
}
log.Infof("Applier initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.ApplierMySQLVersion)
this.migrationContext.Log.Infof("Applier initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.ApplierMySQLVersion)
return nil
}
@ -108,14 +109,14 @@ func (this *Applier) validateAndReadTimeZone() error {
return err
}
log.Infof("will use time_zone='%s' on applier", this.migrationContext.ApplierTimeZone)
this.migrationContext.Log.Infof("will use time_zone='%s' on applier", this.migrationContext.ApplierTimeZone)
return nil
}
// readTableColumns reads table columns on applier
func (this *Applier) readTableColumns() (err error) {
log.Infof("Examining table structure on applier")
this.migrationContext.OriginalTableColumnsOnApplier, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName)
this.migrationContext.Log.Infof("Examining table structure on applier")
this.migrationContext.OriginalTableColumnsOnApplier, _, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName)
if err != nil {
return err
}
@ -124,7 +125,6 @@ func (this *Applier) readTableColumns() (err error) {
// showTableStatus returns the output of `show table status like '...'` command
func (this *Applier) showTableStatus(tableName string) (rowMap sqlutils.RowMap) {
rowMap = nil
query := fmt.Sprintf(`show /* gh-ost */ table status from %s like '%s'`, sql.EscapeName(this.migrationContext.DatabaseName), tableName)
sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
rowMap = m
@ -156,7 +156,7 @@ func (this *Applier) ValidateOrDropExistingTables() error {
}
}
if len(this.migrationContext.GetOldTableName()) > mysql.MaxTableNameLength {
log.Fatalf("--timestamp-old-table defined, but resulting table name (%s) is too long (only %d characters allowed)", this.migrationContext.GetOldTableName(), mysql.MaxTableNameLength)
this.migrationContext.Log.Fatalf("--timestamp-old-table defined, but resulting table name (%s) is too long (only %d characters allowed)", this.migrationContext.GetOldTableName(), mysql.MaxTableNameLength)
}
if this.tableExists(this.migrationContext.GetOldTableName()) {
@ -174,14 +174,14 @@ func (this *Applier) CreateGhostTable() error {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
)
log.Infof("Creating ghost table %s.%s",
this.migrationContext.Log.Infof("Creating ghost table %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("Ghost table created")
this.migrationContext.Log.Infof("Ghost table created")
return nil
}
@ -190,17 +190,17 @@ func (this *Applier) AlterGhost() error {
query := fmt.Sprintf(`alter /* gh-ost */ table %s.%s %s`,
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
this.migrationContext.AlterStatement,
this.migrationContext.AlterStatementOptions,
)
log.Infof("Altering ghost table %s.%s",
this.migrationContext.Log.Infof("Altering ghost table %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
)
log.Debugf("ALTER statement: %s", query)
this.migrationContext.Log.Debugf("ALTER statement: %s", query)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("Ghost table altered")
this.migrationContext.Log.Infof("Ghost table altered")
return nil
}
@ -221,14 +221,14 @@ func (this *Applier) CreateChangelogTable() error {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetChangelogTableName()),
)
log.Infof("Creating changelog table %s.%s",
this.migrationContext.Log.Infof("Creating changelog table %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetChangelogTableName()),
)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("Changelog table created")
this.migrationContext.Log.Infof("Changelog table created")
return nil
}
@ -238,14 +238,14 @@ func (this *Applier) dropTable(tableName string) error {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(tableName),
)
log.Infof("Dropping table %s.%s",
this.migrationContext.Log.Infof("Dropping table %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(tableName),
)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("Table dropped")
this.migrationContext.Log.Infof("Table dropped")
return nil
}
@ -288,7 +288,7 @@ func (this *Applier) WriteChangelog(hint, value string) (string, error) {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetChangelogTableName()),
)
_, err := sqlutils.Exec(this.db, query, explicitId, hint, value)
_, err := sqlutils.ExecNoPrepare(this.db, query, explicitId, hint, value)
return hint, err
}
@ -312,7 +312,7 @@ func (this *Applier) InitiateHeartbeat() {
if _, err := this.WriteChangelog("heartbeat", time.Now().Format(time.RFC3339Nano)); err != nil {
numSuccessiveFailures++
if numSuccessiveFailures > this.migrationContext.MaxRetries() {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
} else {
numSuccessiveFailures = 0
@ -347,14 +347,14 @@ func (this *Applier) ExecuteThrottleQuery() (int64, error) {
}
var result int64
if err := this.db.QueryRow(throttleQuery).Scan(&result); err != nil {
return 0, log.Errore(err)
return 0, this.migrationContext.Log.Errore(err)
}
return result, nil
}
// ReadMigrationMinValues returns the minimum values to be iterated on rowcopy
func (this *Applier) ReadMigrationMinValues(uniqueKey *sql.UniqueKey) error {
log.Debugf("Reading migration range according to key: %s", uniqueKey.Name)
this.migrationContext.Log.Debugf("Reading migration range according to key: %s", uniqueKey.Name)
query, err := sql.BuildUniqueKeyMinValuesPreparedQuery(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &uniqueKey.Columns)
if err != nil {
return err
@ -369,13 +369,13 @@ func (this *Applier) ReadMigrationMinValues(uniqueKey *sql.UniqueKey) error {
return err
}
}
log.Infof("Migration min values: [%s]", this.migrationContext.MigrationRangeMinValues)
this.migrationContext.Log.Infof("Migration min values: [%s]", this.migrationContext.MigrationRangeMinValues)
return err
}
// ReadMigrationMaxValues returns the maximum values to be iterated on rowcopy
func (this *Applier) ReadMigrationMaxValues(uniqueKey *sql.UniqueKey) error {
log.Debugf("Reading migration range according to key: %s", uniqueKey.Name)
this.migrationContext.Log.Debugf("Reading migration range according to key: %s", uniqueKey.Name)
query, err := sql.BuildUniqueKeyMaxValuesPreparedQuery(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &uniqueKey.Columns)
if err != nil {
return err
@ -390,7 +390,7 @@ func (this *Applier) ReadMigrationMaxValues(uniqueKey *sql.UniqueKey) error {
return err
}
}
log.Infof("Migration max values: [%s]", this.migrationContext.MigrationRangeMaxValues)
this.migrationContext.Log.Infof("Migration max values: [%s]", this.migrationContext.MigrationRangeMaxValues)
return err
}
@ -448,7 +448,7 @@ func (this *Applier) CalculateNextIterationRangeEndValues() (hasFurtherRange boo
return hasFurtherRange, nil
}
}
log.Debugf("Iteration complete: no further range to iterate")
this.migrationContext.Log.Debugf("Iteration complete: no further range to iterate")
return hasFurtherRange, nil
}
@ -480,10 +480,14 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected
if err != nil {
return nil, err
}
sessionQuery := fmt.Sprintf(`SET
SESSION time_zone = '%s',
sql_mode = CONCAT(@@session.sql_mode, ',STRICT_ALL_TABLES')
`, this.migrationContext.ApplierTimeZone)
defer tx.Rollback()
sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
if !this.migrationContext.SkipStrictMode {
sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
}
sessionQuery = fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)
if _, err := tx.Exec(sessionQuery); err != nil {
return nil, err
}
@ -502,7 +506,7 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected
}
rowsAffected, _ = sqlResult.RowsAffected()
duration = time.Since(startTime)
log.Debugf(
this.migrationContext.Log.Debugf(
"Issued INSERT on range: [%s]..[%s]; iteration: %d; chunk-size: %d",
this.migrationContext.MigrationIterationRangeMinValues,
this.migrationContext.MigrationIterationRangeMaxValues,
@ -517,7 +521,7 @@ func (this *Applier) LockOriginalTable() error {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
)
log.Infof("Locking %s.%s",
this.migrationContext.Log.Infof("Locking %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
)
@ -525,18 +529,18 @@ func (this *Applier) LockOriginalTable() error {
if _, err := sqlutils.ExecNoPrepare(this.singletonDB, query); err != nil {
return err
}
log.Infof("Table locked")
this.migrationContext.Log.Infof("Table locked")
return nil
}
// UnlockTables makes tea. No wait, it unlocks tables.
func (this *Applier) UnlockTables() error {
query := `unlock /* gh-ost */ tables`
log.Infof("Unlocking tables")
this.migrationContext.Log.Infof("Unlocking tables")
if _, err := sqlutils.ExecNoPrepare(this.singletonDB, query); err != nil {
return err
}
log.Infof("Tables unlocked")
this.migrationContext.Log.Infof("Tables unlocked")
return nil
}
@ -550,7 +554,7 @@ func (this *Applier) SwapTablesQuickAndBumpy() error {
sql.EscapeName(this.migrationContext.OriginalTableName),
sql.EscapeName(this.migrationContext.GetOldTableName()),
)
log.Infof("Renaming original table")
this.migrationContext.Log.Infof("Renaming original table")
this.migrationContext.RenameTablesStartTime = time.Now()
if _, err := sqlutils.ExecNoPrepare(this.singletonDB, query); err != nil {
return err
@ -560,13 +564,13 @@ func (this *Applier) SwapTablesQuickAndBumpy() error {
sql.EscapeName(this.migrationContext.GetGhostTableName()),
sql.EscapeName(this.migrationContext.OriginalTableName),
)
log.Infof("Renaming ghost table")
this.migrationContext.Log.Infof("Renaming ghost table")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
this.migrationContext.RenameTablesEndTime = time.Now()
log.Infof("Tables renamed")
this.migrationContext.Log.Infof("Tables renamed")
return nil
}
@ -585,7 +589,7 @@ func (this *Applier) RenameTablesRollback() (renameError error) {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
)
log.Infof("Renaming back both tables")
this.migrationContext.Log.Infof("Renaming back both tables")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err == nil {
return nil
}
@ -596,7 +600,7 @@ func (this *Applier) RenameTablesRollback() (renameError error) {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
)
log.Infof("Renaming back to ghost table")
this.migrationContext.Log.Infof("Renaming back to ghost table")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
renameError = err
}
@ -606,11 +610,11 @@ func (this *Applier) RenameTablesRollback() (renameError error) {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
)
log.Infof("Renaming back to original table")
this.migrationContext.Log.Infof("Renaming back to original table")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
renameError = err
}
return log.Errore(renameError)
return this.migrationContext.Log.Errore(renameError)
}
// StopSlaveIOThread is applicable with --test-on-replica; it stops the IO thread, duh.
@ -618,44 +622,44 @@ func (this *Applier) RenameTablesRollback() (renameError error) {
// and have them written to the binary log, so that we can then read them via streamer.
func (this *Applier) StopSlaveIOThread() error {
query := `stop /* gh-ost */ slave io_thread`
log.Infof("Stopping replication IO thread")
this.migrationContext.Log.Infof("Stopping replication IO thread")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("Replication IO thread stopped")
this.migrationContext.Log.Infof("Replication IO thread stopped")
return nil
}
// StartSlaveIOThread is applicable with --test-on-replica
func (this *Applier) StartSlaveIOThread() error {
query := `start /* gh-ost */ slave io_thread`
log.Infof("Starting replication IO thread")
this.migrationContext.Log.Infof("Starting replication IO thread")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("Replication IO thread started")
this.migrationContext.Log.Infof("Replication IO thread started")
return nil
}
// StopSlaveSQLThread is applicable with --test-on-replica
func (this *Applier) StopSlaveSQLThread() error {
query := `stop /* gh-ost */ slave sql_thread`
log.Infof("Verifying SQL thread is stopped")
this.migrationContext.Log.Infof("Verifying SQL thread is stopped")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("SQL thread stopped")
this.migrationContext.Log.Infof("SQL thread stopped")
return nil
}
// StartSlaveSQLThread is applicable with --test-on-replica
func (this *Applier) StartSlaveSQLThread() error {
query := `start /* gh-ost */ slave sql_thread`
log.Infof("Verifying SQL thread is running")
this.migrationContext.Log.Infof("Verifying SQL thread is running")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("SQL thread started")
this.migrationContext.Log.Infof("SQL thread started")
return nil
}
@ -672,7 +676,7 @@ func (this *Applier) StopReplication() error {
if err != nil {
return err
}
log.Infof("Replication IO thread at %+v. SQL thread is at %+v", *readBinlogCoordinates, *executeBinlogCoordinates)
this.migrationContext.Log.Infof("Replication IO thread at %+v. SQL thread is at %+v", *readBinlogCoordinates, *executeBinlogCoordinates)
return nil
}
@ -684,7 +688,7 @@ func (this *Applier) StartReplication() error {
if err := this.StartSlaveSQLThread(); err != nil {
return err
}
log.Infof("Replication started")
this.migrationContext.Log.Infof("Replication started")
return nil
}
@ -698,7 +702,7 @@ func (this *Applier) ExpectUsedLock(sessionId int64) error {
var result int64
query := `select is_used_lock(?)`
lockName := this.GetSessionLockName(sessionId)
log.Infof("Checking session lock: %s", lockName)
this.migrationContext.Log.Infof("Checking session lock: %s", lockName)
if err := this.db.QueryRow(query, lockName).Scan(&result); err != nil || result != sessionId {
return fmt.Errorf("Session lock %s expected to be found but wasn't", lockName)
}
@ -733,7 +737,7 @@ func (this *Applier) ExpectProcess(sessionId int64, stateHint, infoHint string)
// DropAtomicCutOverSentryTableIfExists checks if the "old" table name
// happens to be a cut-over magic table; if so, it drops it.
func (this *Applier) DropAtomicCutOverSentryTableIfExists() error {
log.Infof("Looking for magic cut-over table")
this.migrationContext.Log.Infof("Looking for magic cut-over table")
tableName := this.migrationContext.GetOldTableName()
rowMap := this.showTableStatus(tableName)
if rowMap == nil {
@ -743,7 +747,7 @@ func (this *Applier) DropAtomicCutOverSentryTableIfExists() error {
if rowMap["Comment"].String != atomicCutOverMagicHint {
return fmt.Errorf("Expected magic comment on %s, did not find it", tableName)
}
log.Infof("Dropping magic cut-over table")
this.migrationContext.Log.Infof("Dropping magic cut-over table")
return this.dropTable(tableName)
}
@ -763,14 +767,14 @@ func (this *Applier) CreateAtomicCutOverSentryTable() error {
this.migrationContext.TableEngine,
atomicCutOverMagicHint,
)
log.Infof("Creating magic cut-over table %s.%s",
this.migrationContext.Log.Infof("Creating magic cut-over table %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(tableName),
)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("Magic cut-over table created")
this.migrationContext.Log.Infof("Magic cut-over table created")
return nil
}
@ -799,7 +803,7 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
lockResult := 0
query := `select get_lock(?, 0)`
lockName := this.GetSessionLockName(sessionId)
log.Infof("Grabbing voluntary lock: %s", lockName)
this.migrationContext.Log.Infof("Grabbing voluntary lock: %s", lockName)
if err := tx.QueryRow(query, lockName).Scan(&lockResult); err != nil || lockResult != 1 {
err := fmt.Errorf("Unable to acquire lock %s", lockName)
tableLocked <- err
@ -807,7 +811,7 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
}
tableLockTimeoutSeconds := this.migrationContext.CutOverLockTimeoutSeconds * 2
log.Infof("Setting LOCK timeout as %d seconds", tableLockTimeoutSeconds)
this.migrationContext.Log.Infof("Setting LOCK timeout as %d seconds", tableLockTimeoutSeconds)
query = fmt.Sprintf(`set session lock_wait_timeout:=%d`, tableLockTimeoutSeconds)
if _, err := tx.Exec(query); err != nil {
tableLocked <- err
@ -825,7 +829,7 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetOldTableName()),
)
log.Infof("Locking %s.%s, %s.%s",
this.migrationContext.Log.Infof("Locking %s.%s, %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
sql.EscapeName(this.migrationContext.DatabaseName),
@ -836,7 +840,7 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
tableLocked <- err
return err
}
log.Infof("Tables locked")
this.migrationContext.Log.Infof("Tables locked")
tableLocked <- nil // No error.
// From this point on, we are committed to UNLOCK TABLES. No matter what happens,
@ -845,22 +849,22 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
// The cut-over phase will proceed to apply remaining backlog onto ghost table,
// and issue RENAME. We wait here until told to proceed.
<-okToUnlockTable
log.Infof("Will now proceed to drop magic table and unlock tables")
this.migrationContext.Log.Infof("Will now proceed to drop magic table and unlock tables")
// The magic table is here because we locked it. And we are the only ones allowed to drop it.
// And in fact, we will:
log.Infof("Dropping magic cut-over table")
this.migrationContext.Log.Infof("Dropping magic cut-over table")
query = fmt.Sprintf(`drop /* gh-ost */ table if exists %s.%s`,
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetOldTableName()),
)
if _, err := tx.Exec(query); err != nil {
log.Errore(err)
this.migrationContext.Log.Errore(err)
// We DO NOT return here because we must `UNLOCK TABLES`!
}
// Tables still locked
log.Infof("Releasing lock from %s.%s, %s.%s",
this.migrationContext.Log.Infof("Releasing lock from %s.%s, %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
sql.EscapeName(this.migrationContext.DatabaseName),
@ -869,9 +873,9 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
query = `unlock tables`
if _, err := tx.Exec(query); err != nil {
tableUnlocked <- err
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
log.Infof("Tables unlocked")
this.migrationContext.Log.Infof("Tables unlocked")
tableUnlocked <- nil
return nil
}
@ -893,7 +897,7 @@ func (this *Applier) AtomicCutoverRename(sessionIdChan chan int64, tablesRenamed
}
sessionIdChan <- sessionId
log.Infof("Setting RENAME timeout as %d seconds", this.migrationContext.CutOverLockTimeoutSeconds)
this.migrationContext.Log.Infof("Setting RENAME timeout as %d seconds", this.migrationContext.CutOverLockTimeoutSeconds)
query := fmt.Sprintf(`set session lock_wait_timeout:=%d`, this.migrationContext.CutOverLockTimeoutSeconds)
if _, err := tx.Exec(query); err != nil {
return err
@ -909,13 +913,13 @@ func (this *Applier) AtomicCutoverRename(sessionIdChan chan int64, tablesRenamed
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
)
log.Infof("Issuing and expecting this to block: %s", query)
this.migrationContext.Log.Infof("Issuing and expecting this to block: %s", query)
if _, err := tx.Exec(query); err != nil {
tablesRenamed <- err
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
tablesRenamed <- nil
log.Infof("Tables renamed")
this.migrationContext.Log.Infof("Tables renamed")
return nil
}
@ -975,59 +979,6 @@ func (this *Applier) buildDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) (result
return append(results, newDmlBuildResultError(fmt.Errorf("Unknown dml event type: %+v", dmlEvent.DML)))
}
// ApplyDMLEventQuery writes an entry to the ghost table, in response to an intercepted
// original-table binlog event
func (this *Applier) ApplyDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) error {
for _, buildResult := range this.buildDMLEventQuery(dmlEvent) {
if buildResult.err != nil {
return buildResult.err
}
// TODO The below is in preparation for transactional writes on the ghost tables.
// Such writes would be, for example:
// - prepended with sql_mode setup
// - prepended with time zone setup
// - prepended with SET SQL_LOG_BIN=0
// - prepended with SET FK_CHECKS=0
// etc.
//
// a known problem: https://github.com/golang/go/issues/9373 -- bigint unsigned values, not supported in database/sql
// is solved by silently converting unsigned bigints to string values.
//
err := func() error {
tx, err := this.db.Begin()
if err != nil {
return err
}
sessionQuery := `SET
SESSION time_zone = '+00:00',
sql_mode = CONCAT(@@session.sql_mode, ',STRICT_ALL_TABLES')
`
if _, err := tx.Exec(sessionQuery); err != nil {
return err
}
if _, err := tx.Exec(buildResult.query, buildResult.args...); err != nil {
return err
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}()
if err != nil {
err = fmt.Errorf("%s; query=%s; args=%+v", err.Error(), buildResult.query, buildResult.args)
return log.Errore(err)
}
// no error
atomic.AddInt64(&this.migrationContext.TotalDMLEventsApplied, 1)
if this.migrationContext.CountTableRows {
atomic.AddInt64(&this.migrationContext.RowsDeltaEstimate, buildResult.rowsDelta)
}
}
return nil
}
// ApplyDMLEventQueries applies multiple DML queries onto the _ghost_ table
func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) error {
@ -1044,10 +995,14 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
return err
}
sessionQuery := `SET
SESSION time_zone = '+00:00',
sql_mode = CONCAT(@@session.sql_mode, ',STRICT_ALL_TABLES')
`
sessionQuery := "SET SESSION time_zone = '+00:00'"
sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
if !this.migrationContext.SkipStrictMode {
sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
}
sessionQuery = fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)
if _, err := tx.Exec(sessionQuery); err != nil {
return rollback(err)
}
@ -1070,19 +1025,19 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
}()
if err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
// no error
atomic.AddInt64(&this.migrationContext.TotalDMLEventsApplied, int64(len(dmlEvents)))
if this.migrationContext.CountTableRows {
atomic.AddInt64(&this.migrationContext.RowsDeltaEstimate, totalDelta)
}
log.Debugf("ApplyDMLEventQueries() applied %d events in one transaction", len(dmlEvents))
this.migrationContext.Log.Debugf("ApplyDMLEventQueries() applied %d events in one transaction", len(dmlEvents))
return nil
}
func (this *Applier) Teardown() {
log.Debugf("Tearing down...")
this.migrationContext.Log.Debugf("Tearing down...")
this.db.Close()
this.singletonDB.Close()
atomic.StoreInt64(&this.finishedMigrating, 1)

View File

@ -63,7 +63,12 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [
env = append(env, fmt.Sprintf("GH_OST_MIGRATED_HOST=%s", this.migrationContext.GetApplierHostname()))
env = append(env, fmt.Sprintf("GH_OST_INSPECTED_HOST=%s", this.migrationContext.GetInspectorHostname()))
env = append(env, fmt.Sprintf("GH_OST_EXECUTING_HOST=%s", this.migrationContext.Hostname))
env = append(env, fmt.Sprintf("GH_OST_INSPECTED_LAG=%f", this.migrationContext.GetCurrentLagDuration().Seconds()))
env = append(env, fmt.Sprintf("GH_OST_PROGRESS=%f", this.migrationContext.GetProgressPct()))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT=%s", this.migrationContext.HooksHintMessage))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_OWNER=%s", this.migrationContext.HooksHintOwner))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_TOKEN=%s", this.migrationContext.HooksHintToken))
env = append(env, fmt.Sprintf("GH_OST_DRY_RUN=%t", this.migrationContext.Noop))
for _, variable := range extraVariables {
env = append(env, variable)
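
As an illustration only (not part of this commit), a hook executable can pick these variables up from its environment. The hook name and output below are hypothetical; only the variable names shown in the diff above are taken from the change.

package main

import (
	"fmt"
	"os"
	"strconv"
)

// Hypothetical hook (e.g. a gh-ost-on-status hook) reading the environment
// variables exported to hook processes, including the ones added here.
func main() {
	progress, _ := strconv.ParseFloat(os.Getenv("GH_OST_PROGRESS"), 64) // percent complete
	lag, _ := strconv.ParseFloat(os.Getenv("GH_OST_INSPECTED_LAG"), 64) // seconds
	dryRun := os.Getenv("GH_OST_DRY_RUN") == "true"                     // %t of Noop

	fmt.Printf("progress=%.2f%% inspected-lag=%.2fs dry-run=%v hint-owner=%q\n",
		progress, lag, dryRun, os.Getenv("GH_OST_HOOKS_HINT_OWNER"))
}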

View File

@ -17,7 +17,6 @@ import (
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
"github.com/outbrain/golib/sqlutils"
)
@ -53,10 +52,12 @@ func (this *Inspector) InitDBConnections() (err error) {
if err := this.validateConnection(); err != nil {
return err
}
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
this.connectionConfig.ImpliedKey = impliedKey
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
this.connectionConfig.ImpliedKey = impliedKey
}
}
if err := this.validateGrants(); err != nil {
return err
@ -67,7 +68,7 @@ func (this *Inspector) InitDBConnections() (err error) {
if err := this.applyBinlogFormat(); err != nil {
return err
}
log.Infof("Inspector initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.InspectorMySQLVersion)
this.migrationContext.Log.Infof("Inspector initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.InspectorMySQLVersion)
return nil
}
@ -87,24 +88,24 @@ func (this *Inspector) ValidateOriginalTable() (err error) {
return nil
}
func (this *Inspector) InspectTableColumnsAndUniqueKeys(tableName string) (columns *sql.ColumnList, uniqueKeys [](*sql.UniqueKey), err error) {
func (this *Inspector) InspectTableColumnsAndUniqueKeys(tableName string) (columns *sql.ColumnList, virtualColumns *sql.ColumnList, uniqueKeys [](*sql.UniqueKey), err error) {
uniqueKeys, err = this.getCandidateUniqueKeys(tableName)
if err != nil {
return columns, uniqueKeys, err
return columns, virtualColumns, uniqueKeys, err
}
if len(uniqueKeys) == 0 {
return columns, uniqueKeys, fmt.Errorf("No PRIMARY nor UNIQUE key found in table! Bailing out")
return columns, virtualColumns, uniqueKeys, fmt.Errorf("No PRIMARY nor UNIQUE key found in table! Bailing out")
}
columns, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, tableName)
columns, virtualColumns, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, tableName)
if err != nil {
return columns, uniqueKeys, err
return columns, virtualColumns, uniqueKeys, err
}
return columns, uniqueKeys, nil
return columns, virtualColumns, uniqueKeys, nil
}
func (this *Inspector) InspectOriginalTable() (err error) {
this.migrationContext.OriginalTableColumns, this.migrationContext.OriginalTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.OriginalTableName)
this.migrationContext.OriginalTableColumns, this.migrationContext.OriginalTableVirtualColumns, this.migrationContext.OriginalTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.OriginalTableName)
if err != nil {
return err
}
@ -120,7 +121,7 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
return fmt.Errorf("It seems like table structure is not identical between master and replica. This scenario is not supported.")
}
this.migrationContext.GhostTableColumns, this.migrationContext.GhostTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.GetGhostTableName())
this.migrationContext.GhostTableColumns, this.migrationContext.GhostTableVirtualColumns, this.migrationContext.GhostTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.GetGhostTableName())
if err != nil {
return err
}
@ -135,14 +136,14 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
switch column.Type {
case sql.FloatColumnType:
{
log.Warning("Will not use %+v as shared key due to FLOAT data type", sharedUniqueKey.Name)
this.migrationContext.Log.Warning("Will not use %+v as shared key due to FLOAT data type", sharedUniqueKey.Name)
uniqueKeyIsValid = false
}
case sql.JSONColumnType:
{
// Noteworthy that at this time MySQL does not allow JSON indexing anyhow, but this code
// will remain in place to potentially handle the future case where JSON is supported in indexes.
log.Warning("Will not use %+v as shared key due to JSON data type", sharedUniqueKey.Name)
this.migrationContext.Log.Warning("Will not use %+v as shared key due to JSON data type", sharedUniqueKey.Name)
uniqueKeyIsValid = false
}
}
@ -155,29 +156,23 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
if this.migrationContext.UniqueKey == nil {
return fmt.Errorf("No shared unique key can be found after ALTER! Bailing out")
}
log.Infof("Chosen shared unique key is %s", this.migrationContext.UniqueKey.Name)
this.migrationContext.Log.Infof("Chosen shared unique key is %s", this.migrationContext.UniqueKey.Name)
if this.migrationContext.UniqueKey.HasNullable {
if this.migrationContext.NullableUniqueKeyAllowed {
log.Warningf("Chosen key (%s) has nullable columns. You have supplied with --allow-nullable-unique-key and so this migration proceeds. As long as there aren't NULL values in this key's column, migration should be fine. NULL values will corrupt migration's data", this.migrationContext.UniqueKey)
this.migrationContext.Log.Warningf("Chosen key (%s) has nullable columns. You have supplied with --allow-nullable-unique-key and so this migration proceeds. As long as there aren't NULL values in this key's column, migration should be fine. NULL values will corrupt migration's data", this.migrationContext.UniqueKey)
} else {
return fmt.Errorf("Chosen key (%s) has nullable columns. Bailing out. To force this operation to continue, supply --allow-nullable-unique-key flag. Only do so if you are certain there are no actual NULL values in this key. As long as there aren't, migration should be fine. NULL values in columns of this key will corrupt migration's data", this.migrationContext.UniqueKey)
}
}
if !this.migrationContext.UniqueKey.IsPrimary() {
if this.migrationContext.OriginalBinlogRowImage != "FULL" {
return fmt.Errorf("binlog_row_image is '%s' and chosen key is %s, which is not the primary key. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.migrationContext.OriginalBinlogRowImage, this.migrationContext.UniqueKey)
}
}
this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns = this.getSharedColumns(this.migrationContext.OriginalTableColumns, this.migrationContext.GhostTableColumns, this.migrationContext.ColumnRenameMap)
log.Infof("Shared columns are %s", this.migrationContext.SharedColumns)
this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns = this.getSharedColumns(this.migrationContext.OriginalTableColumns, this.migrationContext.GhostTableColumns, this.migrationContext.OriginalTableVirtualColumns, this.migrationContext.GhostTableVirtualColumns, this.migrationContext.ColumnRenameMap)
this.migrationContext.Log.Infof("Shared columns are %s", this.migrationContext.SharedColumns)
// By fact that a non-empty unique key exists we also know the shared columns are non-empty
// This additional step looks at which columns are unsigned. We could have merged this within
// the `getTableColumns()` function, but it's a later patch and introduces some complexity; I feel
// comfortable in doing this as a separate step.
this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns)
this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &this.migrationContext.UniqueKey.Columns)
this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns, &this.migrationContext.UniqueKey.Columns)
this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.GetGhostTableName(), this.migrationContext.GhostTableColumns, this.migrationContext.MappedSharedColumns)
for i := range this.migrationContext.SharedColumns.Columns() {
@ -203,7 +198,7 @@ func (this *Inspector) validateConnection() error {
return fmt.Errorf("MySQL replication length limited to 32 characters. See https://dev.mysql.com/doc/refman/5.7/en/assigning-passwords.html")
}
version, err := base.ValidateConnection(this.db, this.connectionConfig)
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
this.migrationContext.InspectorMySQLVersion = version
return err
}
@ -236,6 +231,9 @@ func (this *Inspector) validateGrants() error {
if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", this.migrationContext.DatabaseName)) {
foundDBAll = true
}
if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", strings.Replace(this.migrationContext.DatabaseName, "_", "\\_", -1))) {
foundDBAll = true
}
if base.StringContainsAll(grant, `ALTER`, `CREATE`, `DELETE`, `DROP`, `INDEX`, `INSERT`, `LOCK TABLES`, `SELECT`, `TRIGGER`, `UPDATE`, ` ON *.*`) {
foundDBAll = true
}
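
A brief sketch (not part of this commit) of why the added check matters: when the database name contains an underscore, MySQL may report the grant with the underscore escaped as a pattern character, so the literal comparison alone would miss it. The database name and grant string below are made up.

package main

import (
	"fmt"
	"strings"
)

func main() {
	databaseName := "my_db" // hypothetical database name containing an underscore
	grant := "GRANT ALL PRIVILEGES ON `my\\_db`.* TO 'gh-ost'@'%'"

	literal := fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", databaseName)
	escaped := fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", strings.Replace(databaseName, "_", "\\_", -1))

	fmt.Println(strings.Contains(grant, literal)) // false: the reported grant escapes the underscore
	fmt.Println(strings.Contains(grant, escaped)) // true: the newly added check matches it
}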
@ -251,19 +249,19 @@ func (this *Inspector) validateGrants() error {
this.migrationContext.HasSuperPrivilege = foundSuper
if foundAll {
log.Infof("User has ALL privileges")
this.migrationContext.Log.Infof("User has ALL privileges")
return nil
}
if foundSuper && foundReplicationSlave && foundDBAll {
log.Infof("User has SUPER, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
this.migrationContext.Log.Infof("User has SUPER, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
return nil
}
if foundReplicationClient && foundReplicationSlave && foundDBAll {
log.Infof("User has REPLICATION CLIENT, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
this.migrationContext.Log.Infof("User has REPLICATION CLIENT, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
return nil
}
log.Debugf("Privileges: Super: %t, REPLICATION CLIENT: %t, REPLICATION SLAVE: %t, ALL on *.*: %t, ALL on %s.*: %t", foundSuper, foundReplicationClient, foundReplicationSlave, foundAll, sql.EscapeName(this.migrationContext.DatabaseName), foundDBAll)
return log.Errorf("User has insufficient privileges for migration. Needed: SUPER|REPLICATION CLIENT, REPLICATION SLAVE and ALL on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
this.migrationContext.Log.Debugf("Privileges: Super: %t, REPLICATION CLIENT: %t, REPLICATION SLAVE: %t, ALL on *.*: %t, ALL on %s.*: %t", foundSuper, foundReplicationClient, foundReplicationSlave, foundAll, sql.EscapeName(this.migrationContext.DatabaseName), foundDBAll)
return this.migrationContext.Log.Errorf("User has insufficient privileges for migration. Needed: SUPER|REPLICATION CLIENT, REPLICATION SLAVE and ALL on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
}
// restartReplication is required so that we are _certain_ the binlog format and
@ -271,7 +269,7 @@ func (this *Inspector) validateGrants() error {
// It is entirely possible, for example, that the replication is using 'STATEMENT'
// binlog format even as the variable says 'ROW'
func (this *Inspector) restartReplication() error {
log.Infof("Restarting replication on %s:%d to make sure binlog settings apply to replication thread", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Infof("Restarting replication on %s:%d to make sure binlog settings apply to replication thread", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
masterKey, _ := mysql.GetMasterKeyFromSlaveStatus(this.connectionConfig)
if masterKey == nil {
@ -290,7 +288,7 @@ func (this *Inspector) restartReplication() error {
}
time.Sleep(startSlavePostWaitMilliseconds)
log.Debugf("Replication restarted")
this.migrationContext.Log.Debugf("Replication restarted")
return nil
}
@ -310,7 +308,7 @@ func (this *Inspector) applyBinlogFormat() error {
if err := this.restartReplication(); err != nil {
return err
}
log.Debugf("'ROW' binlog format applied")
this.migrationContext.Log.Debugf("'ROW' binlog format applied")
return nil
}
// We already have RBR, no explicit switch
@ -348,7 +346,7 @@ func (this *Inspector) validateBinlogs() error {
if countReplicas > 0 {
return fmt.Errorf("%s:%d has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat)
}
log.Infof("%s:%d has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat)
this.migrationContext.Log.Infof("%s:%d has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat)
}
query = `select @@global.binlog_row_image`
if err := this.db.QueryRow(query).Scan(&this.migrationContext.OriginalBinlogRowImage); err != nil {
@ -356,8 +354,11 @@ func (this *Inspector) validateBinlogs() error {
this.migrationContext.OriginalBinlogRowImage = "FULL"
}
this.migrationContext.OriginalBinlogRowImage = strings.ToUpper(this.migrationContext.OriginalBinlogRowImage)
if this.migrationContext.OriginalBinlogRowImage != "FULL" {
return fmt.Errorf("%s:%d has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogRowImage)
}
log.Infof("binary logs validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Infof("binary logs validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
return nil
}
@ -370,12 +371,12 @@ func (this *Inspector) validateLogSlaveUpdates() error {
}
if logSlaveUpdates {
log.Infof("log_slave_updates validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Infof("log_slave_updates validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
return nil
}
if this.migrationContext.IsTungsten {
log.Warningf("log_slave_updates not found on %s:%d, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Warningf("log_slave_updates not found on %s:%d, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
return nil
}
@ -384,7 +385,7 @@ func (this *Inspector) validateLogSlaveUpdates() error {
}
if this.migrationContext.InspectorIsAlsoApplier() {
log.Warningf("log_slave_updates not found on %s:%d, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Warningf("log_slave_updates not found on %s:%d, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
return nil
}
@ -411,17 +412,17 @@ func (this *Inspector) validateTable() error {
return err
}
if !tableFound {
return log.Errorf("Cannot find table %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
return this.migrationContext.Log.Errorf("Cannot find table %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
}
log.Infof("Table found. Engine=%s", this.migrationContext.TableEngine)
log.Debugf("Estimated number of rows via STATUS: %d", this.migrationContext.RowsEstimate)
this.migrationContext.Log.Infof("Table found. Engine=%s", this.migrationContext.TableEngine)
this.migrationContext.Log.Debugf("Estimated number of rows via STATUS: %d", this.migrationContext.RowsEstimate)
return nil
}
// validateTableForeignKeys makes sure no foreign keys exist on the migrated table
func (this *Inspector) validateTableForeignKeys(allowChildForeignKeys bool) error {
if this.migrationContext.SkipForeignKeyChecks {
log.Warning("--skip-foreign-key-checks provided: will not check for foreign keys")
this.migrationContext.Log.Warning("--skip-foreign-key-checks provided: will not check for foreign keys")
return nil
}
query := `
@ -455,16 +456,16 @@ func (this *Inspector) validateTableForeignKeys(allowChildForeignKeys bool) erro
return err
}
if numParentForeignKeys > 0 {
return log.Errorf("Found %d parent-side foreign keys on %s.%s. Parent-side foreign keys are not supported. Bailing out", numParentForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
return this.migrationContext.Log.Errorf("Found %d parent-side foreign keys on %s.%s. Parent-side foreign keys are not supported. Bailing out", numParentForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
}
if numChildForeignKeys > 0 {
if allowChildForeignKeys {
log.Debugf("Foreign keys found and will be dropped, as per given --discard-foreign-keys flag")
this.migrationContext.Log.Debugf("Foreign keys found and will be dropped, as per given --discard-foreign-keys flag")
return nil
}
return log.Errorf("Found %d child-side foreign keys on %s.%s. Child-side foreign keys are not supported. Bailing out", numChildForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
return this.migrationContext.Log.Errorf("Found %d child-side foreign keys on %s.%s. Child-side foreign keys are not supported. Bailing out", numChildForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
}
log.Debugf("Validated no foreign keys exist on table")
this.migrationContext.Log.Debugf("Validated no foreign keys exist on table")
return nil
}
@ -490,9 +491,9 @@ func (this *Inspector) validateTableTriggers() error {
return err
}
if numTriggers > 0 {
return log.Errorf("Found triggers on %s.%s. Triggers are not supported at this time. Bailing out", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
return this.migrationContext.Log.Errorf("Found triggers on %s.%s. Triggers are not supported at this time. Bailing out", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
}
log.Debugf("Validated no triggers exist on table")
this.migrationContext.Log.Debugf("Validated no triggers exist on table")
return nil
}
@ -512,9 +513,9 @@ func (this *Inspector) estimateTableRowsViaExplain() error {
return err
}
if !outputFound {
return log.Errorf("Cannot run EXPLAIN on %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
return this.migrationContext.Log.Errorf("Cannot run EXPLAIN on %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
}
log.Infof("Estimated number of rows via EXPLAIN: %d", this.migrationContext.RowsEstimate)
this.migrationContext.Log.Infof("Estimated number of rows via EXPLAIN: %d", this.migrationContext.RowsEstimate)
return nil
}
@ -523,7 +524,7 @@ func (this *Inspector) CountTableRows() error {
atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 1)
defer atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 0)
log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while")
this.migrationContext.Log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while")
query := fmt.Sprintf(`select /* gh-ost */ count(*) as rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
var rowsEstimate int64
@ -533,7 +534,7 @@ func (this *Inspector) CountTableRows() error {
atomic.StoreInt64(&this.migrationContext.RowsEstimate, rowsEstimate)
this.migrationContext.UsedRowsEstimateMethod = base.CountRowsEstimate
log.Infof("Exact number of rows via COUNT: %d", rowsEstimate)
this.migrationContext.Log.Infof("Exact number of rows via COUNT: %d", rowsEstimate)
return nil
}
@ -552,44 +553,35 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
columnName := m.GetString("COLUMN_NAME")
columnType := m.GetString("COLUMN_TYPE")
if strings.Contains(columnType, "unsigned") {
for _, columnsList := range columnsLists {
columnsList.SetUnsigned(columnName)
for _, columnsList := range columnsLists {
column := columnsList.GetColumn(columnName)
if column == nil {
continue
}
}
if strings.Contains(columnType, "mediumint") {
for _, columnsList := range columnsLists {
columnsList.GetColumn(columnName).Type = sql.MediumIntColumnType
if strings.Contains(columnType, "unsigned") {
column.IsUnsigned = true
}
}
if strings.Contains(columnType, "timestamp") {
for _, columnsList := range columnsLists {
columnsList.GetColumn(columnName).Type = sql.TimestampColumnType
if strings.Contains(columnType, "mediumint") {
column.Type = sql.MediumIntColumnType
}
}
if strings.Contains(columnType, "datetime") {
for _, columnsList := range columnsLists {
columnsList.GetColumn(columnName).Type = sql.DateTimeColumnType
if strings.Contains(columnType, "timestamp") {
column.Type = sql.TimestampColumnType
}
}
if strings.Contains(columnType, "json") {
for _, columnsList := range columnsLists {
columnsList.GetColumn(columnName).Type = sql.JSONColumnType
if strings.Contains(columnType, "datetime") {
column.Type = sql.DateTimeColumnType
}
}
if strings.Contains(columnType, "float") {
for _, columnsList := range columnsLists {
columnsList.GetColumn(columnName).Type = sql.FloatColumnType
if strings.Contains(columnType, "json") {
column.Type = sql.JSONColumnType
}
}
if strings.HasPrefix(columnType, "enum") {
for _, columnsList := range columnsLists {
columnsList.GetColumn(columnName).Type = sql.EnumColumnType
if strings.Contains(columnType, "float") {
column.Type = sql.FloatColumnType
}
}
if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
for _, columnsList := range columnsLists {
columnsList.SetCharset(columnName, charset)
if strings.HasPrefix(columnType, "enum") {
column.Type = sql.EnumColumnType
}
if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
column.Charset = charset
}
}
return nil
@ -629,8 +621,6 @@ func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*
GROUP BY TABLE_SCHEMA, TABLE_NAME, INDEX_NAME
) AS UNIQUES
ON (
COLUMNS.TABLE_SCHEMA = UNIQUES.TABLE_SCHEMA AND
COLUMNS.TABLE_NAME = UNIQUES.TABLE_NAME AND
COLUMNS.COLUMN_NAME = UNIQUES.FIRST_COLUMN_NAME
)
WHERE
@ -672,7 +662,7 @@ func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*
if err != nil {
return uniqueKeys, err
}
log.Debugf("Potential unique keys in %+v: %+v", tableName, uniqueKeys)
this.migrationContext.Log.Debugf("Potential unique keys in %+v: %+v", tableName, uniqueKeys)
return uniqueKeys, nil
}
@ -692,21 +682,34 @@ func (this *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys [
}
// getSharedColumns returns the intersection of two lists of columns in same order as the first list
func (this *Inspector) getSharedColumns(originalColumns, ghostColumns *sql.ColumnList, columnRenameMap map[string]string) (*sql.ColumnList, *sql.ColumnList) {
func (this *Inspector) getSharedColumns(originalColumns, ghostColumns *sql.ColumnList, originalVirtualColumns, ghostVirtualColumns *sql.ColumnList, columnRenameMap map[string]string) (*sql.ColumnList, *sql.ColumnList) {
sharedColumnNames := []string{}
for _, originalColumn := range originalColumns.Names() {
isSharedColumn := false
for _, ghostColumn := range ghostColumns.Names() {
if strings.EqualFold(originalColumn, ghostColumn) {
isSharedColumn = true
break
}
if strings.EqualFold(columnRenameMap[originalColumn], ghostColumn) {
isSharedColumn = true
break
}
}
for droppedColumn := range this.migrationContext.DroppedColumnsMap {
if strings.EqualFold(originalColumn, droppedColumn) {
isSharedColumn = false
break
}
}
for _, virtualColumn := range originalVirtualColumns.Names() {
if strings.EqualFold(originalColumn, virtualColumn) {
isSharedColumn = false
}
}
for _, virtualColumn := range ghostVirtualColumns.Names() {
if strings.EqualFold(originalColumn, virtualColumn) {
isSharedColumn = false
}
}
if isSharedColumn {
@ -749,15 +752,14 @@ func (this *Inspector) readChangelogState(hint string) (string, error) {
}
func (this *Inspector) getMasterConnectionConfig() (applierConfig *mysql.ConnectionConfig, err error) {
log.Infof("Recursively searching for replication master")
this.migrationContext.Log.Infof("Recursively searching for replication master")
visitedKeys := mysql.NewInstanceKeyMap()
return mysql.GetMasterConnectionConfigSafe(this.connectionConfig, visitedKeys, this.migrationContext.AllowedMasterMaster)
}
func (this *Inspector) getReplicationLag() (replicationLag time.Duration, err error) {
replicationLag, err = mysql.GetReplicationLag(
replicationLag, err = mysql.GetReplicationLagFromSlaveStatus(
this.informationSchemaDb,
this.migrationContext.InspectorConnectionConfig,
)
return replicationLag, err
}

View File

@ -18,8 +18,6 @@ import (
"github.com/github/gh-ost/go/binlog"
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
)
type ChangelogState string
@ -62,7 +60,7 @@ const (
// Migrator is the main schema migration flow manager.
type Migrator struct {
parser *sql.Parser
parser *sql.AlterTableParser
inspector *Inspector
applier *Applier
eventsStreamer *EventsStreamer
@ -78,7 +76,7 @@ type Migrator struct {
rowCopyCompleteFlag int64
// copyRowsQueue should not be buffered; if buffered some non-damaging but
// excessive work happens at the end of the iteration as new copy-jobs arrive befroe realizing the copy is complete
// excessive work happens at the end of the iteration as new copy-jobs arrive before realizing the copy is complete
copyRowsQueue chan tableWriteFunc
applyEventsQueue chan *applyEventStruct
@ -90,7 +88,7 @@ type Migrator struct {
func NewMigrator(context *base.MigrationContext) *Migrator {
migrator := &Migrator{
migrationContext: context,
parser: sql.NewParser(),
parser: sql.NewAlterTableParser(),
ghostTableMigrated: make(chan bool),
firstThrottlingCollected: make(chan bool, 3),
rowCopyComplete: make(chan error),
@ -149,6 +147,34 @@ func (this *Migrator) retryOperation(operation func() error, notFatalHint ...boo
return err
}
// `retryOperationWithExponentialBackoff` attempts running given function, waiting 2^(n-1)
// seconds between each attempt, where `n` is the running number of attempts. Exits
// as soon as the function returns with non-error, or as soon as `MaxRetries`
// attempts are reached. Wait intervals between attempts obey a maximum of
// `ExponentialBackoffMaxInterval`.
func (this *Migrator) retryOperationWithExponentialBackoff(operation func() error, notFatalHint ...bool) (err error) {
var interval int64
maxRetries := int(this.migrationContext.MaxRetries())
maxInterval := this.migrationContext.ExponentialBackoffMaxInterval
for i := 0; i < maxRetries; i++ {
newInterval := int64(math.Exp2(float64(i - 1)))
if newInterval <= maxInterval {
interval = newInterval
}
if i != 0 {
time.Sleep(time.Duration(interval) * time.Second)
}
err = operation()
if err == nil {
return nil
}
}
if len(notFatalHint) == 0 {
this.migrationContext.PanicAbort <- err
}
return err
}
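// A minimal sketch, assuming maxRetries=8 and ExponentialBackoffMaxInterval=10, of how the
// wait intervals computed above progress (self-contained aside from "fmt" and "math"):
//
//	maxInterval, interval := int64(10), int64(0)
//	for i := 0; i < 8; i++ {
//		if newInterval := int64(math.Exp2(float64(i - 1))); newInterval <= maxInterval {
//			interval = newInterval
//		}
//		// the real loop sleeps `interval` seconds here for every attempt after the first
//		fmt.Println(i, interval) // prints 0 0, 1 1, 2 2, 3 4, 4 8, then stays at 8 once 2^(i-1) exceeds the cap
//	}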
// executeAndThrottleOnError executes a given function. If it errors, it
// throttles.
func (this *Migrator) executeAndThrottleOnError(operation func() error) (err error) {
@ -188,7 +214,7 @@ func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (er
}
changelogStateString := dmlEvent.NewColumnValues.StringColumn(3)
changelogState := ReadChangelogState(changelogStateString)
log.Infof("Intercepted changelog state %s", changelogState)
this.migrationContext.Log.Infof("Intercepted changelog state %s", changelogState)
switch changelogState {
case GhostTableMigrated:
{
@ -214,26 +240,30 @@ func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (er
return fmt.Errorf("Unknown changelog state: %+v", changelogState)
}
}
log.Infof("Handled changelog state %s", changelogState)
this.migrationContext.Log.Infof("Handled changelog state %s", changelogState)
return nil
}
// listenOnPanicAbort aborts on abort request
func (this *Migrator) listenOnPanicAbort() {
err := <-this.migrationContext.PanicAbort
log.Fatale(err)
this.migrationContext.Log.Fatale(err)
}
// validateStatement validates the `alter` statement meets criteria.
// At this time this means:
// - column renames are approved
// - no table rename allowed
func (this *Migrator) validateStatement() (err error) {
if this.parser.IsRenameTable() {
return fmt.Errorf("ALTER statement seems to RENAME the table. This is not supported, and you should run your RENAME outside gh-ost.")
}
if this.parser.HasNonTrivialRenames() && !this.migrationContext.SkipRenamedColumns {
this.migrationContext.ColumnRenameMap = this.parser.GetNonTrivialRenames()
if !this.migrationContext.ApproveRenamedColumns {
return fmt.Errorf("gh-ost believes the ALTER statement renames columns, as follows: %v; as precaution, you are asked to confirm gh-ost is correct, and provide with `--approve-renamed-columns`, and we're all happy. Or you can skip renamed columns via `--skip-renamed-columns`, in which case column data may be lost", this.parser.GetNonTrivialRenames())
}
log.Infof("Alter statement has column(s) renamed. gh-ost finds the following renames: %v; --approve-renamed-columns is given and so migration proceeds.", this.parser.GetNonTrivialRenames())
this.migrationContext.Log.Infof("Alter statement has column(s) renamed. gh-ost finds the following renames: %v; --approve-renamed-columns is given and so migration proceeds.", this.parser.GetNonTrivialRenames())
}
this.migrationContext.DroppedColumnsMap = this.parser.DroppedColumnsMap()
return nil
@ -245,7 +275,7 @@ func (this *Migrator) countTableRows() (err error) {
return nil
}
if this.migrationContext.Noop {
log.Debugf("Noop operation; not really counting table rows")
this.migrationContext.Log.Debugf("Noop operation; not really counting table rows")
return nil
}
@ -260,7 +290,7 @@ func (this *Migrator) countTableRows() (err error) {
}
if this.migrationContext.ConcurrentCountTableRows {
log.Infof("As instructed, counting rows in the background; meanwhile I will use an estimated count, and will update it later on")
this.migrationContext.Log.Infof("As instructed, counting rows in the background; meanwhile I will use an estimated count, and will update it later on")
go countRowsFunc()
// and we ignore errors, because this turns out to be a background job
return nil
@ -272,9 +302,9 @@ func (this *Migrator) createFlagFiles() (err error) {
if this.migrationContext.PostponeCutOverFlagFile != "" {
if !base.FileExists(this.migrationContext.PostponeCutOverFlagFile) {
if err := base.TouchFile(this.migrationContext.PostponeCutOverFlagFile); err != nil {
return log.Errorf("--postpone-cut-over-flag-file indicated by gh-ost is unable to create said file: %s", err.Error())
return this.migrationContext.Log.Errorf("--postpone-cut-over-flag-file indicated by gh-ost is unable to create said file: %s", err.Error())
}
log.Infof("Created postpone-cut-over-flag-file: %s", this.migrationContext.PostponeCutOverFlagFile)
this.migrationContext.Log.Infof("Created postpone-cut-over-flag-file: %s", this.migrationContext.PostponeCutOverFlagFile)
}
}
return nil
@ -282,7 +312,7 @@ func (this *Migrator) createFlagFiles() (err error) {
// Migrate executes the complete migration logic. This is *the* major gh-ost function.
func (this *Migrator) Migrate() (err error) {
log.Infof("Migrating %s.%s", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
this.migrationContext.Log.Infof("Migrating %s.%s", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
this.migrationContext.StartTime = time.Now()
if this.migrationContext.Hostname, err = os.Hostname(); err != nil {
return err
@ -321,9 +351,9 @@ func (this *Migrator) Migrate() (err error) {
}
initialLag, _ := this.inspector.getReplicationLag()
log.Infof("Waiting for ghost table to be migrated. Current lag is %+v", initialLag)
this.migrationContext.Log.Infof("Waiting for ghost table to be migrated. Current lag is %+v", initialLag)
<-this.ghostTableMigrated
log.Debugf("ghost table migrated")
this.migrationContext.Log.Debugf("ghost table migrated")
// Yay! We now know the Ghost and Changelog tables are good to examine!
// When running on replica, this means the replica has those tables. When running
// on master this is always true, of course, and yet it also implies this knowledge
@ -361,9 +391,9 @@ func (this *Migrator) Migrate() (err error) {
this.migrationContext.MarkRowCopyStartTime()
go this.initiateStatus()
log.Debugf("Operating until row copy is complete")
this.migrationContext.Log.Debugf("Operating until row copy is complete")
this.consumeRowCopyComplete()
log.Infof("Row copy complete")
this.migrationContext.Log.Infof("Row copy complete")
if err := this.hooksExecutor.onRowCopyComplete(); err != nil {
return err
}
@ -372,7 +402,13 @@ func (this *Migrator) Migrate() (err error) {
if err := this.hooksExecutor.onBeforeCutOver(); err != nil {
return err
}
if err := this.retryOperation(this.cutOver); err != nil {
var retrier func(func() error, ...bool) error
if this.migrationContext.CutOverExponentialBackoff {
retrier = this.retryOperationWithExponentialBackoff
} else {
retrier = this.retryOperation
}
if err := retrier(this.cutOver); err != nil {
return err
}
atomic.StoreInt64(&this.migrationContext.CutOverCompleteFlag, 1)
@ -383,7 +419,7 @@ func (this *Migrator) Migrate() (err error) {
if err := this.hooksExecutor.onSuccess(); err != nil {
return err
}
log.Infof("Done migrating %s.%s", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
this.migrationContext.Log.Infof("Done migrating %s.%s", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
return nil
}
@ -409,14 +445,14 @@ func (this *Migrator) handleCutOverResult(cutOverError error) (err error) {
// and swap the tables.
// The difference is that we will later swap the tables back.
if err := this.hooksExecutor.onStartReplication(); err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
if this.migrationContext.TestOnReplicaSkipReplicaStop {
log.Warningf("--test-on-replica-skip-replica-stop enabled, we are not starting replication.")
this.migrationContext.Log.Warningf("--test-on-replica-skip-replica-stop enabled, we are not starting replication.")
} else {
log.Debugf("testing on replica. Starting replication IO thread after cut-over failure")
this.migrationContext.Log.Debugf("testing on replica. Starting replication IO thread after cut-over failure")
if err := this.retryOperation(this.applier.StartReplication); err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
}
}
@ -427,16 +463,16 @@ func (this *Migrator) handleCutOverResult(cutOverError error) (err error) {
// type (on replica? atomic? safe?)
func (this *Migrator) cutOver() (err error) {
if this.migrationContext.Noop {
log.Debugf("Noop operation; not really swapping tables")
this.migrationContext.Log.Debugf("Noop operation; not really swapping tables")
return nil
}
this.migrationContext.MarkPointOfInterest()
this.throttler.throttle(func() {
log.Debugf("throttling before swapping tables")
this.migrationContext.Log.Debugf("throttling before swapping tables")
})
this.migrationContext.MarkPointOfInterest()
log.Debugf("checking for cut-over postpone")
this.migrationContext.Log.Debugf("checking for cut-over postpone")
this.sleepWhileTrue(
func() (bool, error) {
if this.migrationContext.PostponeCutOverFlagFile == "" {
@ -461,7 +497,7 @@ func (this *Migrator) cutOver() (err error) {
)
atomic.StoreInt64(&this.migrationContext.IsPostponingCutOver, 0)
this.migrationContext.MarkPointOfInterest()
log.Debugf("checking for cut-over postpone: complete")
this.migrationContext.Log.Debugf("checking for cut-over postpone: complete")
if this.migrationContext.TestOnReplica {
// With `--test-on-replica` we stop replication thread, and then proceed to use
@ -472,9 +508,9 @@ func (this *Migrator) cutOver() (err error) {
return err
}
if this.migrationContext.TestOnReplicaSkipReplicaStop {
log.Warningf("--test-on-replica-skip-replica-stop enabled, we are not stopping replication.")
this.migrationContext.Log.Warningf("--test-on-replica-skip-replica-stop enabled, we are not stopping replication.")
} else {
log.Debugf("testing on replica. Stopping replication IO thread")
this.migrationContext.Log.Debugf("testing on replica. Stopping replication IO thread")
if err := this.retryOperation(this.applier.StopReplication); err != nil {
return err
}
@ -492,7 +528,7 @@ func (this *Migrator) cutOver() (err error) {
this.handleCutOverResult(err)
return err
}
return log.Fatalf("Unknown cut-over type: %d; should never get here!", this.migrationContext.CutOverType)
return this.migrationContext.Log.Fatalf("Unknown cut-over type: %d; should never get here!", this.migrationContext.CutOverType)
}
// Inject the "AllEventsUpToLockProcessed" state hint, wait for it to appear in the binary logs,
@ -504,32 +540,32 @@ func (this *Migrator) waitForEventsUpToLock() (err error) {
waitForEventsUpToLockStartTime := time.Now()
allEventsUpToLockProcessedChallenge := fmt.Sprintf("%s:%d", string(AllEventsUpToLockProcessed), waitForEventsUpToLockStartTime.UnixNano())
log.Infof("Writing changelog state: %+v", allEventsUpToLockProcessedChallenge)
this.migrationContext.Log.Infof("Writing changelog state: %+v", allEventsUpToLockProcessedChallenge)
if _, err := this.applier.WriteChangelogState(allEventsUpToLockProcessedChallenge); err != nil {
return err
}
log.Infof("Waiting for events up to lock")
this.migrationContext.Log.Infof("Waiting for events up to lock")
atomic.StoreInt64(&this.migrationContext.AllEventsUpToLockProcessedInjectedFlag, 1)
for found := false; !found; {
select {
case <-timeout.C:
{
return log.Errorf("Timeout while waiting for events up to lock")
return this.migrationContext.Log.Errorf("Timeout while waiting for events up to lock")
}
case state := <-this.allEventsUpToLockProcessed:
{
if state == allEventsUpToLockProcessedChallenge {
log.Infof("Waiting for events up to lock: got %s", state)
this.migrationContext.Log.Infof("Waiting for events up to lock: got %s", state)
found = true
} else {
log.Infof("Waiting for events up to lock: skipping %s", state)
this.migrationContext.Log.Infof("Waiting for events up to lock: skipping %s", state)
}
}
}
}
waitForEventsUpToLockDuration := time.Since(waitForEventsUpToLockStartTime)
log.Infof("Done waiting for events up to lock; duration=%+v", waitForEventsUpToLockDuration)
this.migrationContext.Log.Infof("Done waiting for events up to lock; duration=%+v", waitForEventsUpToLockDuration)
this.printStatus(ForcePrintStatusAndHintRule)
return nil
@ -560,7 +596,7 @@ func (this *Migrator) cutOverTwoStep() (err error) {
lockAndRenameDuration := this.migrationContext.RenameTablesEndTime.Sub(this.migrationContext.LockTablesStartTime)
renameDuration := this.migrationContext.RenameTablesEndTime.Sub(this.migrationContext.RenameTablesStartTime)
log.Debugf("Lock & rename duration: %s (rename only: %s). During this time, queries on %s were locked or failing", lockAndRenameDuration, renameDuration, sql.EscapeName(this.migrationContext.OriginalTableName))
this.migrationContext.Log.Debugf("Lock & rename duration: %s (rename only: %s). During this time, queries on %s were locked or failing", lockAndRenameDuration, renameDuration, sql.EscapeName(this.migrationContext.OriginalTableName))
return nil
}
@ -582,18 +618,18 @@ func (this *Migrator) atomicCutOver() (err error) {
tableUnlocked := make(chan error, 2)
go func() {
if err := this.applier.AtomicCutOverMagicLock(lockOriginalSessionIdChan, tableLocked, okToUnlockTable, tableUnlocked); err != nil {
log.Errore(err)
this.migrationContext.Log.Errore(err)
}
}()
if err := <-tableLocked; err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
lockOriginalSessionId := <-lockOriginalSessionIdChan
log.Infof("Session locking original & magic tables is %+v", lockOriginalSessionId)
this.migrationContext.Log.Infof("Session locking original & magic tables is %+v", lockOriginalSessionId)
// At this point we know the original table is locked.
// We know any newly incoming DML on original table is blocked.
if err := this.waitForEventsUpToLock(); err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
// Step 2
@ -611,7 +647,7 @@ func (this *Migrator) atomicCutOver() (err error) {
}
}()
renameSessionId := <-renameSessionIdChan
log.Infof("Session renaming tables is %+v", renameSessionId)
this.migrationContext.Log.Infof("Session renaming tables is %+v", renameSessionId)
waitForRename := func() error {
if atomic.LoadInt64(&tableRenameKnownToHaveFailed) == 1 {
@ -628,13 +664,13 @@ func (this *Migrator) atomicCutOver() (err error) {
return err
}
if atomic.LoadInt64(&tableRenameKnownToHaveFailed) == 0 {
log.Infof("Found atomic RENAME to be blocking, as expected. Double checking the lock is still in place (though I don't strictly have to)")
this.migrationContext.Log.Infof("Found atomic RENAME to be blocking, as expected. Double checking the lock is still in place (though I don't strictly have to)")
}
if err := this.applier.ExpectUsedLock(lockOriginalSessionId); err != nil {
// Abort operation. Just make sure to drop the magic table.
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
log.Infof("Connection holding lock on original table still exists")
this.migrationContext.Log.Infof("Connection holding lock on original table still exists")
// Now that we've found the RENAME to be blocking, AND the locking connection is still alive,
// we know it is safe to proceed to release the lock
@ -643,16 +679,16 @@ func (this *Migrator) atomicCutOver() (err error) {
// BAM! magic table dropped, original table lock is released
// -> RENAME released -> queries on original are unblocked.
if err := <-tableUnlocked; err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
if err := <-tablesRenamed; err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
this.migrationContext.RenameTablesEndTime = time.Now()
// ooh nice! We're actually truly and thankfully done
lockAndRenameDuration := this.migrationContext.RenameTablesEndTime.Sub(this.migrationContext.LockTablesStartTime)
log.Infof("Lock & rename duration: %s. During this time, queries on %s were blocked", lockAndRenameDuration, sql.EscapeName(this.migrationContext.OriginalTableName))
this.migrationContext.Log.Infof("Lock & rename duration: %s. During this time, queries on %s were blocked", lockAndRenameDuration, sql.EscapeName(this.migrationContext.OriginalTableName))
return nil
}
@ -698,7 +734,7 @@ func (this *Migrator) initiateInspector() (err error) {
if this.migrationContext.ApplierConnectionConfig, err = this.inspector.getMasterConnectionConfig(); err != nil {
return err
}
log.Infof("Master found to be %+v", *this.migrationContext.ApplierConnectionConfig.ImpliedKey)
this.migrationContext.Log.Infof("Master found to be %+v", *this.migrationContext.ApplierConnectionConfig.ImpliedKey)
} else {
// Forced master host.
key, err := mysql.ParseRawInstanceKeyLoose(this.migrationContext.AssumeMasterHostname)
@ -712,14 +748,14 @@ func (this *Migrator) initiateInspector() (err error) {
if this.migrationContext.CliMasterPassword != "" {
this.migrationContext.ApplierConnectionConfig.Password = this.migrationContext.CliMasterPassword
}
log.Infof("Master forced to be %+v", *this.migrationContext.ApplierConnectionConfig.ImpliedKey)
this.migrationContext.Log.Infof("Master forced to be %+v", *this.migrationContext.ApplierConnectionConfig.ImpliedKey)
}
// validate configs
if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica {
if this.migrationContext.InspectorIsAlsoApplier() {
return fmt.Errorf("Instructed to --test-on-replica or --migrate-on-replica, but the server we connect to doesn't seem to be a replica")
}
log.Infof("--test-on-replica or --migrate-on-replica given. Will not execute on master %+v but rather on replica %+v itself",
this.migrationContext.Log.Infof("--test-on-replica or --migrate-on-replica given. Will not execute on master %+v but rather on replica %+v itself",
*this.migrationContext.ApplierConnectionConfig.ImpliedKey, *this.migrationContext.InspectorConnectionConfig.ImpliedKey,
)
this.migrationContext.ApplierConnectionConfig = this.migrationContext.InspectorConnectionConfig.Duplicate()
@ -857,6 +893,8 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
} else {
progressPct = 100.0 * float64(totalRowsCopied) / float64(rowsEstimate)
}
// we take the opportunity to update migration context with progressPct
this.migrationContext.SetProgressPct(progressPct)
// Before status, let's see if we should print a nice reminder for what exactly we're doing here.
shouldPrintMigrationStatusHint := (elapsedSeconds%600 == 0)
if rule == ForcePrintStatusAndHintRule {
@ -873,7 +911,7 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
eta := "N/A"
if progressPct >= 100.0 {
eta = "due"
} else if progressPct >= 1.0 {
} else if progressPct >= 0.1 {
elapsedRowCopySeconds := this.migrationContext.ElapsedRowCopyTime().Seconds()
totalExpectedSeconds := elapsedRowCopySeconds * float64(rowsEstimate) / float64(totalRowsCopied)
etaSeconds = totalExpectedSeconds - elapsedRowCopySeconds
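// A worked example of the ETA arithmetic above (the threshold change from 1.0% to 0.1%
// only means an ETA is shown much earlier in the copy): with 600s of row copy elapsed and
// 200,000 of an estimated 1,000,000 rows copied,
//
//	totalExpectedSeconds = 600 * 1000000 / 200000 = 3000
//	etaSeconds           = 3000 - 600             = 2400   // ~40 minutes remaining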
@ -920,12 +958,13 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
currentBinlogCoordinates := *this.eventsStreamer.GetCurrentBinlogCoordinates()
status := fmt.Sprintf("Copy: %d/%d %.1f%%; Applied: %d; Backlog: %d/%d; Time: %+v(total), %+v(copy); streamer: %+v; State: %s; ETA: %s",
status := fmt.Sprintf("Copy: %d/%d %.1f%%; Applied: %d; Backlog: %d/%d; Time: %+v(total), %+v(copy); streamer: %+v; Lag: %.2fs, State: %s; ETA: %s",
totalRowsCopied, rowsEstimate, progressPct,
atomic.LoadInt64(&this.migrationContext.TotalDMLEventsApplied),
len(this.applyEventsQueue), cap(this.applyEventsQueue),
base.PrettifyDurationOutput(elapsedTime), base.PrettifyDurationOutput(this.migrationContext.ElapsedRowCopyTime()),
currentBinlogCoordinates,
this.migrationContext.GetCurrentLagDuration().Seconds(),
state,
eta,
)
@ -957,12 +996,12 @@ func (this *Migrator) initiateStreaming() error {
)
go func() {
log.Debugf("Beginning streaming")
this.migrationContext.Log.Debugf("Beginning streaming")
err := this.eventsStreamer.StreamEvents(this.canStopStreaming)
if err != nil {
this.migrationContext.PanicAbort <- err
}
log.Debugf("Done streaming")
this.migrationContext.Log.Debugf("Done streaming")
}()
go func() {
@ -997,11 +1036,11 @@ func (this *Migrator) initiateThrottler() error {
this.throttler = NewThrottler(this.migrationContext, this.applier, this.inspector)
go this.throttler.initiateThrottlerCollection(this.firstThrottlingCollected)
log.Infof("Waiting for first throttle metrics to be collected")
this.migrationContext.Log.Infof("Waiting for first throttle metrics to be collected")
<-this.firstThrottlingCollected // replication lag
<-this.firstThrottlingCollected // HTTP status
<-this.firstThrottlingCollected // other, general metrics
log.Infof("First throttle metrics collected")
this.migrationContext.Log.Infof("First throttle metrics collected")
go this.throttler.initiateThrottlerChecks()
return nil
@ -1016,16 +1055,16 @@ func (this *Migrator) initiateApplier() error {
return err
}
if err := this.applier.CreateChangelogTable(); err != nil {
log.Errorf("Unable to create changelog table, see further error details. Perhaps a previous migration failed without dropping the table? OR is there a running migration? Bailing out")
this.migrationContext.Log.Errorf("Unable to create changelog table, see further error details. Perhaps a previous migration failed without dropping the table? OR is there a running migration? Bailing out")
return err
}
if err := this.applier.CreateGhostTable(); err != nil {
log.Errorf("Unable to create ghost table, see further error details. Perhaps a previous migration failed without dropping the table? Bailing out")
this.migrationContext.Log.Errorf("Unable to create ghost table, see further error details. Perhaps a previous migration failed without dropping the table? Bailing out")
return err
}
if err := this.applier.AlterGhost(); err != nil {
log.Errorf("Unable to ALTER ghost table, see further error details. Bailing out")
this.migrationContext.Log.Errorf("Unable to ALTER ghost table, see further error details. Bailing out")
return err
}
@ -1039,34 +1078,43 @@ func (this *Migrator) initiateApplier() error {
func (this *Migrator) iterateChunks() error {
terminateRowIteration := func(err error) error {
this.rowCopyComplete <- err
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
if this.migrationContext.Noop {
log.Debugf("Noop operation; not really copying data")
this.migrationContext.Log.Debugf("Noop operation; not really copying data")
return terminateRowIteration(nil)
}
if this.migrationContext.MigrationRangeMinValues == nil {
log.Debugf("No rows found in table. Rowcopy will be implicitly empty")
this.migrationContext.Log.Debugf("No rows found in table. Rowcopy will be implicitly empty")
return terminateRowIteration(nil)
}
var hasNoFurtherRangeFlag int64
// Iterate per chunk:
for {
if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 {
if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 || atomic.LoadInt64(&hasNoFurtherRangeFlag) == 1 {
// Done
// There's another such check down the line
return nil
}
copyRowsFunc := func() error {
if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 {
if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 || atomic.LoadInt64(&hasNoFurtherRangeFlag) == 1 {
// Done.
// There's another such check down the line
return nil
}
hasFurtherRange, err := this.applier.CalculateNextIterationRangeEndValues()
if err != nil {
// When hasFurtherRange is false, the original table might be write-locked and CalculateNextIterationRangeEndValues would hang forever
hasFurtherRange := false
if err := this.retryOperation(func() (e error) {
hasFurtherRange, e = this.applier.CalculateNextIterationRangeEndValues()
return e
}); err != nil {
return terminateRowIteration(err)
}
if !hasFurtherRange {
atomic.StoreInt64(&hasNoFurtherRangeFlag, 1)
return terminateRowIteration(nil)
}
// Copy task:
@ -1084,7 +1132,7 @@ func (this *Migrator) iterateChunks() error {
}
_, rowsAffected, _, err := this.applier.ApplyIterationInsertQuery()
if err != nil {
return terminateRowIteration(err)
return err // wrapping call will retry
}
atomic.AddInt64(&this.migrationContext.TotalRowsCopied, rowsAffected)
atomic.AddInt64(&this.migrationContext.Iteration, 1)
@ -1105,7 +1153,7 @@ func (this *Migrator) onApplyEventStruct(eventStruct *applyEventStruct) error {
handleNonDMLEventStruct := func(eventStruct *applyEventStruct) error {
if eventStruct.writeFunc != nil {
if err := this.retryOperation(*eventStruct.writeFunc); err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
}
return nil
@ -1139,13 +1187,13 @@ func (this *Migrator) onApplyEventStruct(eventStruct *applyEventStruct) error {
return this.applier.ApplyDMLEventQueries(dmlEvents)
}
if err := this.retryOperation(applyEventFunc); err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
if nonDmlStructToApply != nil {
// We pulled DML events from the queue, and then we hit a non-DML event. Wait!
// We need to handle it!
if err := handleNonDMLEventStruct(nonDmlStructToApply); err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
}
}
@ -1157,7 +1205,7 @@ func (this *Migrator) onApplyEventStruct(eventStruct *applyEventStruct) error {
// Both event backlog and rowcopy events are polled; the backlog events have precedence.
func (this *Migrator) executeWriteFuncs() error {
if this.migrationContext.Noop {
log.Debugf("Noop operation; not really executing write funcs")
this.migrationContext.Log.Debugf("Noop operation; not really executing write funcs")
return nil
}
for {
@ -1184,7 +1232,7 @@ func (this *Migrator) executeWriteFuncs() error {
copyRowsStartTime := time.Now()
// Retries are handled within the copyRowsFunc
if err := copyRowsFunc(); err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
if niceRatio := this.migrationContext.GetNiceRatio(); niceRatio > 0 {
copyRowsDuration := time.Since(copyRowsStartTime)
@ -1197,7 +1245,7 @@ func (this *Migrator) executeWriteFuncs() error {
{
// Hmmmmm... nothing in the queue; no events, but also no row copy.
// This is possible upon load. Let's just sleep it over.
log.Debugf("Getting nothing in the write queue. Sleeping...")
this.migrationContext.Log.Debugf("Getting nothing in the write queue. Sleeping...")
time.Sleep(time.Second)
}
}
@ -1213,14 +1261,14 @@ func (this *Migrator) finalCleanup() error {
if this.migrationContext.Noop {
if createTableStatement, err := this.inspector.showCreateTable(this.migrationContext.GetGhostTableName()); err == nil {
log.Infof("New table structure follows")
this.migrationContext.Log.Infof("New table structure follows")
fmt.Println(createTableStatement)
} else {
log.Errore(err)
this.migrationContext.Log.Errore(err)
}
}
if err := this.eventsStreamer.Close(); err != nil {
log.Errore(err)
this.migrationContext.Log.Errore(err)
}
if err := this.retryOperation(this.applier.DropChangelogTable); err != nil {
@ -1232,8 +1280,8 @@ func (this *Migrator) finalCleanup() error {
}
} else {
if !this.migrationContext.Noop {
log.Infof("Am not dropping old table because I want this operation to be as live as possible. If you insist I should do it, please add `--ok-to-drop-table` next time. But I prefer you do not. To drop the old table, issue:")
log.Infof("-- drop table %s.%s", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.GetOldTableName()))
this.migrationContext.Log.Infof("Am not dropping old table because I want this operation to be as live as possible. If you insist I should do it, please add `--ok-to-drop-table` next time. But I prefer you do not. To drop the old table, issue:")
this.migrationContext.Log.Infof("-- drop table %s.%s", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.GetOldTableName()))
}
}
if this.migrationContext.Noop {
@ -1249,22 +1297,22 @@ func (this *Migrator) teardown() {
atomic.StoreInt64(&this.finishedMigrating, 1)
if this.inspector != nil {
log.Infof("Tearing down inspector")
this.migrationContext.Log.Infof("Tearing down inspector")
this.inspector.Teardown()
}
if this.applier != nil {
log.Infof("Tearing down applier")
this.migrationContext.Log.Infof("Tearing down applier")
this.applier.Teardown()
}
if this.eventsStreamer != nil {
log.Infof("Tearing down streamer")
this.migrationContext.Log.Infof("Tearing down streamer")
this.eventsStreamer.Teardown()
}
if this.throttler != nil {
log.Infof("Tearing down throttler")
this.migrationContext.Log.Infof("Tearing down throttler")
this.throttler.Teardown()
}
}

View File

@ -16,7 +16,6 @@ import (
"sync/atomic"
"github.com/github/gh-ost/go/base"
"github.com/outbrain/golib/log"
)
type printStatusFunc func(PrintStatusRule, io.Writer)
@ -49,12 +48,12 @@ func (this *Server) BindSocketFile() (err error) {
if err != nil {
return err
}
log.Infof("Listening on unix socket file: %s", this.migrationContext.ServeSocketFile)
this.migrationContext.Log.Infof("Listening on unix socket file: %s", this.migrationContext.ServeSocketFile)
return nil
}
func (this *Server) RemoveSocketFile() (err error) {
log.Infof("Removing socket file: %s", this.migrationContext.ServeSocketFile)
this.migrationContext.Log.Infof("Removing socket file: %s", this.migrationContext.ServeSocketFile)
return os.Remove(this.migrationContext.ServeSocketFile)
}
@ -66,7 +65,7 @@ func (this *Server) BindTCPPort() (err error) {
if err != nil {
return err
}
log.Infof("Listening on tcp port: %d", this.migrationContext.ServeTCPPort)
this.migrationContext.Log.Infof("Listening on tcp port: %d", this.migrationContext.ServeTCPPort)
return nil
}
@ -76,7 +75,7 @@ func (this *Server) Serve() (err error) {
for {
conn, err := this.unixListener.Accept()
if err != nil {
log.Errore(err)
this.migrationContext.Log.Errore(err)
}
go this.handleConnection(conn)
}
@ -88,7 +87,7 @@ func (this *Server) Serve() (err error) {
for {
conn, err := this.tcpListener.Accept()
if err != nil {
log.Errore(err)
this.migrationContext.Log.Errore(err)
}
go this.handleConnection(conn)
}
@ -118,7 +117,7 @@ func (this *Server) onServerCommand(command string, writer *bufio.Writer) (err e
} else {
fmt.Fprintf(writer, "%s\n", err.Error())
}
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
// applyServerCommand parses and executes commands by user
@ -144,7 +143,7 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
switch command {
case "help":
{
fmt.Fprintln(writer, `available commands:
fmt.Fprint(writer, `available commands:
status # Print a detailed status message
sup # Print a short status message
coordinates # Print the currently inspected coordinates
@ -292,12 +291,22 @@ help # This message
}
case "throttle", "pause", "suspend":
{
if arg != "" && arg != this.migrationContext.OriginalTableName {
// User explicitly provided table name. This is a courtesy protection mechanism
err := fmt.Errorf("User commanded 'throttle' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
return NoPrintStatusRule, err
}
atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 1)
fmt.Fprintf(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "no-throttle", "unthrottle", "resume", "continue":
{
if arg != "" && arg != this.migrationContext.OriginalTableName {
// User explicitly provided table name. This is a courtesy protection mechanism
err := fmt.Errorf("User commanded 'no-throttle' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
return NoPrintStatusRule, err
}
atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 0)
return ForcePrintStatusAndHintRule, nil
}
@ -322,7 +331,16 @@ help # This message
}
case "panic":
{
err := fmt.Errorf("User commanded 'panic'. I will now panic, without cleanup. PANIC!")
if arg == "" && this.migrationContext.ForceNamedPanicCommand {
err := fmt.Errorf("User commanded 'panic' without specifying table name, but --force-named-panic is set")
return NoPrintStatusRule, err
}
if arg != "" && arg != this.migrationContext.OriginalTableName {
// User explicitly provided table name. This is a courtesy protection mechanism
err := fmt.Errorf("User commanded 'panic' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
return NoPrintStatusRule, err
}
err := fmt.Errorf("User commanded 'panic'. The migration will be aborted without cleanup. Please drop the gh-ost tables before trying again.")
this.migrationContext.PanicAbort <- err
return NoPrintStatusRule, err
}

View File

@ -16,7 +16,6 @@ import (
"github.com/github/gh-ost/go/binlog"
"github.com/github/gh-ost/go/mysql"
"github.com/outbrain/golib/log"
"github.com/outbrain/golib/sqlutils"
)
@ -107,7 +106,7 @@ func (this *EventsStreamer) InitDBConnections() (err error) {
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, EventsStreamerUri); err != nil {
return err
}
if _, err := base.ValidateConnection(this.db, this.connectionConfig); err != nil {
if _, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext); err != nil {
return err
}
if err := this.readCurrentBinlogCoordinates(); err != nil {
@ -160,7 +159,7 @@ func (this *EventsStreamer) readCurrentBinlogCoordinates() error {
if !foundMasterStatus {
return fmt.Errorf("Got no results from SHOW MASTER STATUS. Bailing out")
}
log.Debugf("Streamer binlog coordinates: %+v", *this.initialBinlogCoordinates)
this.migrationContext.Log.Debugf("Streamer binlog coordinates: %+v", *this.initialBinlogCoordinates)
return nil
}
@ -186,7 +185,7 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error {
return nil
}
log.Infof("StreamEvents encountered unexpected error: %+v", err)
this.migrationContext.Log.Infof("StreamEvents encountered unexpected error: %+v", err)
this.migrationContext.MarkPointOfInterest()
time.Sleep(ReconnectStreamerSleepSeconds * time.Second)
@ -202,7 +201,7 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error {
// Reposition at same binlog file.
lastAppliedRowsEventHint = this.binlogReader.LastAppliedRowsEventHint
log.Infof("Reconnecting... Will resume at %+v", lastAppliedRowsEventHint)
this.migrationContext.Log.Infof("Reconnecting... Will resume at %+v", lastAppliedRowsEventHint)
if err := this.initBinlogReader(this.GetReconnectBinlogCoordinates()); err != nil {
return err
}
@ -213,7 +212,7 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error {
func (this *EventsStreamer) Close() (err error) {
err = this.binlogReader.Close()
log.Infof("Closed streamer connection. err=%+v", err)
this.migrationContext.Log.Infof("Closed streamer connection. err=%+v", err)
return err
}

View File

@ -15,24 +15,25 @@ import (
"github.com/github/gh-ost/go/base"
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
)
var (
httpStatusMessages map[int]string = map[int]string{
httpStatusMessages = map[int]string{
200: "OK",
404: "Not found",
417: "Expectation failed",
429: "Too many requests",
500: "Internal server error",
-1: "Connection error",
}
// See https://github.com/github/freno/blob/master/doc/http.md
httpStatusFrenoMessages map[int]string = map[int]string{
httpStatusFrenoMessages = map[int]string{
200: "OK",
404: "freno: unknown metric",
417: "freno: access forbidden",
429: "freno: threshold exceeded",
500: "freno: internal error",
-1: "freno: connection error",
}
)
@ -84,6 +85,7 @@ func (this *Throttler) shouldThrottle() (result bool, reason string, reasonHint
if statusCode != 0 && statusCode != http.StatusOK {
return true, this.throttleHttpMessage(int(statusCode)), base.NoThrottleReasonHint
}
// Replication lag throttle
maxLagMillisecondsThrottleThreshold := atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold)
lag := atomic.LoadInt64(&this.migrationContext.CurrentLag)
@ -120,7 +122,7 @@ func parseChangelogHeartbeat(heartbeatValue string) (lag time.Duration, err erro
// parseChangelogHeartbeat parses a string timestamp and deduces replication lag
func (this *Throttler) parseChangelogHeartbeat(heartbeatValue string) (err error) {
if lag, err := parseChangelogHeartbeat(heartbeatValue); err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
} else {
atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag))
return nil
@ -140,15 +142,15 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica {
// when running on replica, the heartbeat injection is also done on the replica.
// This means we will always get a good heartbeat value.
// When runnign on replica, we should instead check the `SHOW SLAVE STATUS` output.
if lag, err := mysql.GetReplicationLag(this.inspector.informationSchemaDb, this.inspector.connectionConfig); err != nil {
return log.Errore(err)
// When running on replica, we should instead check the `SHOW SLAVE STATUS` output.
if lag, err := mysql.GetReplicationLagFromSlaveStatus(this.inspector.informationSchemaDb); err != nil {
return this.migrationContext.Log.Errore(err)
} else {
atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag))
}
} else {
if heartbeatValue, err := this.inspector.readChangelogState("heartbeat"); err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
} else {
this.parseChangelogHeartbeat(heartbeatValue)
}
@ -288,7 +290,14 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<-
return false, nil
}
collectFunc()
_, err := collectFunc()
if err != nil {
// If not told to ignore errors, we'll throttle on HTTP connection issues
if !this.migrationContext.IgnoreHTTPErrors {
atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(-1))
}
}
firstThrottlingCollected <- true
ticker := time.Tick(100 * time.Millisecond)
@ -297,7 +306,15 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<-
return
}
if sleep, _ := collectFunc(); sleep {
sleep, err := collectFunc()
if err != nil {
// If not told to ignore errors, we'll throttle on HTTP connection issues
if !this.migrationContext.IgnoreHTTPErrors {
atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(-1))
}
}
if sleep {
time.Sleep(1 * time.Second)
}
}
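// A short sketch of how the new -1 sentinel is consumed, assuming the throttle HTTP endpoint
// is unreachable and migrationContext.IgnoreHTTPErrors is false:
//
//	// collectThrottleHTTPStatus stores the sentinel when collectFunc returns an error:
//	atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(-1))
//	// shouldThrottle then sees a non-zero, non-200 code and throttles, reporting the
//	// "Connection error" / "freno: connection error" messages added to the maps above.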
@ -330,7 +347,7 @@ func (this *Throttler) collectGeneralThrottleMetrics() error {
hibernateDuration := time.Duration(this.migrationContext.CriticalLoadHibernateSeconds) * time.Second
hibernateUntilTime := time.Now().Add(hibernateDuration)
atomic.StoreInt64(&this.migrationContext.HibernateUntil, hibernateUntilTime.UnixNano())
log.Errorf("critical-load met: %s=%d, >=%d. Will hibernate for the duration of %+v, until %+v", variableName, value, threshold, hibernateDuration, hibernateUntilTime)
this.migrationContext.Log.Errorf("critical-load met: %s=%d, >=%d. Will hibernate for the duration of %+v, until %+v", variableName, value, threshold, hibernateDuration, hibernateUntilTime)
go func() {
time.Sleep(hibernateDuration)
this.migrationContext.SetThrottleGeneralCheckResult(base.NewThrottleCheckResult(true, "leaving hibernation", base.LeavingHibernationThrottleReasonHint))
@ -343,7 +360,7 @@ func (this *Throttler) collectGeneralThrottleMetrics() error {
this.migrationContext.PanicAbort <- fmt.Errorf("critical-load met: %s=%d, >=%d", variableName, value, threshold)
}
if criticalLoadMet && this.migrationContext.CriticalLoadIntervalMilliseconds > 0 {
log.Errorf("critical-load met once: %s=%d, >=%d. Will check again in %d millis", variableName, value, threshold, this.migrationContext.CriticalLoadIntervalMilliseconds)
this.migrationContext.Log.Errorf("critical-load met once: %s=%d, >=%d. Will check again in %d millis", variableName, value, threshold, this.migrationContext.CriticalLoadIntervalMilliseconds)
go func() {
timer := time.NewTimer(time.Millisecond * time.Duration(this.migrationContext.CriticalLoadIntervalMilliseconds))
<-timer.C
@ -461,6 +478,6 @@ func (this *Throttler) throttle(onThrottled func()) {
}
func (this *Throttler) Teardown() {
log.Debugf("Tearing down...")
this.migrationContext.Log.Debugf("Tearing down...")
atomic.StoreInt64(&this.finishedMigrating, 1)
}

View File

@ -6,8 +6,18 @@
package mysql
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"net"
"github.com/go-sql-driver/mysql"
)
const (
TLS_CONFIG_KEY = "ghost"
)
// ConnectionConfig is the minimal configuration required to connect to a MySQL server
@ -16,6 +26,8 @@ type ConnectionConfig struct {
User string
Password string
ImpliedKey *InstanceKey
tlsConfig *tls.Config
Timeout float64
}
func NewConnectionConfig() *ConnectionConfig {
@ -29,9 +41,11 @@ func NewConnectionConfig() *ConnectionConfig {
// DuplicateCredentials creates a new connection config with given key and with same credentials as this config
func (this *ConnectionConfig) DuplicateCredentials(key InstanceKey) *ConnectionConfig {
config := &ConnectionConfig{
Key: key,
User: this.User,
Password: this.Password,
Key: key,
User: this.User,
Password: this.Password,
tlsConfig: this.tlsConfig,
Timeout: this.Timeout,
}
config.ImpliedKey = &config.Key
return config
@ -42,13 +56,54 @@ func (this *ConnectionConfig) Duplicate() *ConnectionConfig {
}
func (this *ConnectionConfig) String() string {
return fmt.Sprintf("%s, user=%s", this.Key.DisplayString(), this.User)
return fmt.Sprintf("%s, user=%s, usingTLS=%t", this.Key.DisplayString(), this.User, this.tlsConfig != nil)
}
func (this *ConnectionConfig) Equals(other *ConnectionConfig) bool {
return this.Key.Equals(&other.Key) || this.ImpliedKey.Equals(other.ImpliedKey)
}
func (this *ConnectionConfig) UseTLS(caCertificatePath, clientCertificate, clientKey string, allowInsecure bool) error {
var rootCertPool *x509.CertPool
var certs []tls.Certificate
var err error
if caCertificatePath == "" {
rootCertPool, err = x509.SystemCertPool()
if err != nil {
return err
}
} else {
rootCertPool = x509.NewCertPool()
pem, err := ioutil.ReadFile(caCertificatePath)
if err != nil {
return err
}
if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
return errors.New("could not add ca certificate to cert pool")
}
}
if clientCertificate != "" || clientKey != "" {
cert, err := tls.LoadX509KeyPair(clientCertificate, clientKey)
if err != nil {
return err
}
certs = []tls.Certificate{cert}
}
this.tlsConfig = &tls.Config{
Certificates: certs,
RootCAs: rootCertPool,
InsecureSkipVerify: allowInsecure,
}
return mysql.RegisterTLSConfig(TLS_CONFIG_KEY, this.tlsConfig)
}
func (this *ConnectionConfig) TLSConfig() *tls.Config {
return this.tlsConfig
}
func (this *ConnectionConfig) GetDBUri(databaseName string) string {
hostname := this.Key.Hostname
var ip = net.ParseIP(hostname)
@ -56,5 +111,12 @@ func (this *ConnectionConfig) GetDBUri(databaseName string) string {
// Wrap IPv6 literals in square brackets
hostname = fmt.Sprintf("[%s]", hostname)
}
return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1", this.User, this.Password, hostname, this.Key.Port, databaseName)
interpolateParams := true
// go-mysql-driver defaults to false if tls param is not provided; explicitly setting here to
// simplify construction of the DSN below.
tlsOption := "false"
if this.tlsConfig != nil {
tlsOption = TLS_CONFIG_KEY
}
return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?timeout=%fs&readTimeout=%fs&writeTimeout=%fs&interpolateParams=%t&autocommit=true&charset=utf8mb4,utf8,latin1&tls=%s", this.User, this.Password, hostname, this.Key.Port, databaseName, this.Timeout, this.Timeout, this.Timeout, interpolateParams, tlsOption)
}
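// A minimal usage sketch, with hypothetical certificate paths, of the new TLS support:
// UseTLS registers the tls.Config under the "ghost" key, and GetDBUri then references it
// through the tls= DSN parameter:
//
//	cfg := NewConnectionConfig()
//	cfg.Key = InstanceKey{Hostname: "replica.example.com", Port: 3306}
//	cfg.User = "gh-ost"
//	cfg.Password = "secret"
//	cfg.Timeout = 3.0
//	if err := cfg.UseTLS("/etc/mysql/ca.pem", "/etc/mysql/client-cert.pem", "/etc/mysql/client-key.pem", false); err != nil {
//		// handle error
//	}
//	uri := cfg.GetDBUri("test") // ...timeout=3.000000s&...&interpolateParams=true&...&tls=ghost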

View File

@ -6,6 +6,7 @@
package mysql
import (
"crypto/tls"
"testing"
"github.com/outbrain/golib/log"
@ -31,6 +32,10 @@ func TestDuplicateCredentials(t *testing.T) {
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
c.tlsConfig = &tls.Config{
InsecureSkipVerify: true,
ServerName: "feathers",
}
dup := c.DuplicateCredentials(InstanceKey{Hostname: "otherhost", Port: 3310})
test.S(t).ExpectEquals(dup.Key.Hostname, "otherhost")
@ -39,6 +44,7 @@ func TestDuplicateCredentials(t *testing.T) {
test.S(t).ExpectEquals(dup.ImpliedKey.Port, 3310)
test.S(t).ExpectEquals(dup.User, "gromit")
test.S(t).ExpectEquals(dup.Password, "penguin")
test.S(t).ExpectEquals(dup.tlsConfig, c.tlsConfig)
}
func TestDuplicate(t *testing.T) {
@ -55,3 +61,24 @@ func TestDuplicate(t *testing.T) {
test.S(t).ExpectEquals(dup.User, "gromit")
test.S(t).ExpectEquals(dup.Password, "penguin")
}
func TestGetDBUri(t *testing.T) {
c := NewConnectionConfig()
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
uri := c.GetDBUri("test")
test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?timeout=0.000000s&readTimeout=0.000000s&writeTimeout=0.000000s&interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1&tls=false")
}
func TestGetDBUriWithTLSSetup(t *testing.T) {
c := NewConnectionConfig()
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
c.tlsConfig = &tls.Config{}
uri := c.GetDBUri("test")
test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?timeout=0.000000s&readTimeout=0.000000s&writeTimeout=0.000000s&interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1&tls=ghost")
}

View File

@ -8,6 +8,7 @@ package mysql
import (
gosql "database/sql"
"fmt"
"strings"
"sync"
"time"
@ -57,9 +58,8 @@ func GetDB(migrationUuid string, mysql_uri string) (*gosql.DB, bool, error) {
return knownDBs[cacheKey], exists, nil
}
// GetReplicationLag returns replication lag for a given connection config; either by explicit query
// or via SHOW SLAVE STATUS
func GetReplicationLag(informationSchemaDb *gosql.DB, connectionConfig *ConnectionConfig) (replicationLag time.Duration, err error) {
// GetReplicationLagFromSlaveStatus returns replication lag for a given db; via SHOW SLAVE STATUS
func GetReplicationLagFromSlaveStatus(informationSchemaDb *gosql.DB) (replicationLag time.Duration, err error) {
err = sqlutils.QueryRowsMap(informationSchemaDb, `show slave status`, func(m sqlutils.RowMap) error {
slaveIORunning := m.GetString("Slave_IO_Running")
slaveSQLRunning := m.GetString("Slave_SQL_Running")
@ -83,9 +83,6 @@ func GetMasterKeyFromSlaveStatus(connectionConfig *ConnectionConfig) (masterKey
}
defer db.Close()
if err != nil {
return nil, err
}
err = sqlutils.QueryRowsMap(db, `show slave status`, func(rowMap sqlutils.RowMap) error {
// We wish to recognize the case where the topology's master actually has replication configuration.
// This can happen when a DBA issues a `RESET SLAVE` instead of `RESET SLAVE ALL`.
@ -98,7 +95,6 @@ func GetMasterKeyFromSlaveStatus(connectionConfig *ConnectionConfig) (masterKey
slaveIORunning := rowMap.GetString("Slave_IO_Running")
slaveSQLRunning := rowMap.GetString("Slave_SQL_Running")
//
if slaveIORunning != "Yes" || slaveSQLRunning != "Yes" {
return fmt.Errorf("Replication on %+v is broken: Slave_IO_Running: %s, Slave_SQL_Running: %s. Please make sure replication runs before using gh-ost.",
connectionConfig.Key,
@ -178,7 +174,7 @@ func GetInstanceKey(db *gosql.DB) (instanceKey *InstanceKey, err error) {
}
// GetTableColumns reads column list from given table
func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnList, error) {
func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnList, *sql.ColumnList, error) {
query := fmt.Sprintf(`
show columns from %s.%s
`,
@ -186,18 +182,24 @@ func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnL
sql.EscapeName(tableName),
)
columnNames := []string{}
virtualColumnNames := []string{}
err := sqlutils.QueryRowsMap(db, query, func(rowMap sqlutils.RowMap) error {
columnNames = append(columnNames, rowMap.GetString("Field"))
columnName := rowMap.GetString("Field")
columnNames = append(columnNames, columnName)
if strings.Contains(rowMap.GetString("Extra"), " GENERATED") {
log.Debugf("%s is a generated column", columnName)
virtualColumnNames = append(virtualColumnNames, columnName)
}
return nil
})
if err != nil {
return nil, err
return nil, nil, err
}
if len(columnNames) == 0 {
return nil, log.Errorf("Found 0 columns on %s.%s. Bailing out",
return nil, nil, log.Errorf("Found 0 columns on %s.%s. Bailing out",
sql.EscapeName(databaseName),
sql.EscapeName(tableName),
)
}
return sql.NewColumnList(columnNames), nil
return sql.NewColumnList(columnNames), sql.NewColumnList(virtualColumnNames), nil
}
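// A brief caller sketch for the new two-list return value (database and table names are
// illustrative; db is an open *gosql.DB): the second list holds generated columns, whose
// "Extra" value contains " GENERATED", and which gh-ost excludes from the shared columns:
//
//	columns, virtualColumns, err := GetTableColumns(db, "test", "sample_table")
//	if err != nil {
//		return err
//	}
//	// columns holds every column of test.sample_table; virtualColumns only the generated ones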

View File

@ -15,11 +15,11 @@ type ValueComparisonSign string
const (
LessThanComparisonSign ValueComparisonSign = "<"
LessThanOrEqualsComparisonSign = "<="
EqualsComparisonSign = "="
GreaterThanOrEqualsComparisonSign = ">="
GreaterThanComparisonSign = ">"
NotEqualsComparisonSign = "!="
LessThanOrEqualsComparisonSign ValueComparisonSign = "<="
EqualsComparisonSign ValueComparisonSign = "="
GreaterThanOrEqualsComparisonSign ValueComparisonSign = ">="
GreaterThanComparisonSign ValueComparisonSign = ">"
NotEqualsComparisonSign ValueComparisonSign = "!="
)
// EscapeName will escape a db/table/column/... name by wrapping with backticks.
@ -140,13 +140,12 @@ func BuildRangeComparison(columns []string, values []string, args []interface{},
comparisons := []string{}
for i, column := range columns {
//
value := values[i]
rangeComparison, err := BuildValueComparison(column, value, comparisonSign)
if err != nil {
return "", explodedArgs, err
}
if len(columns[0:i]) > 0 {
if i > 0 {
equalitiesComparison, err := BuildEqualsComparison(columns[0:i], values[0:i])
if err != nil {
return "", explodedArgs, err
@ -493,6 +492,9 @@ func BuildDMLUpdateQuery(databaseName, tableName string, tableColumns, sharedCol
}
setClause, err := BuildSetPreparedClause(mappedSharedColumns)
if err != nil {
return "", sharedArgs, uniqueKeyArgs, err
}
equalsComparison, err := BuildEqualsPreparedComparison(uniqueKeyColumns.Names())
result = fmt.Sprintf(`

View File

@ -8,6 +8,7 @@ package sql
import (
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/encoding/simplifiedchinese"
)
type charsetEncoding map[string]encoding.Encoding
@ -18,4 +19,5 @@ func init() {
charsetEncodingMap = make(map[string]encoding.Encoding)
// Begin mappings
charsetEncodingMap["latin1"] = charmap.Windows1252
charsetEncodingMap["gbk"] = simplifiedchinese.GBK
}

View File

@ -12,24 +12,54 @@ import (
)
var (
sanitizeQuotesRegexp = regexp.MustCompile("('[^']*')")
renameColumnRegexp = regexp.MustCompile(`(?i)\bchange\s+(column\s+|)([\S]+)\s+([\S]+)\s+`)
dropColumnRegexp = regexp.MustCompile(`(?i)\bdrop\s+(column\s+|)([\S]+)$`)
sanitizeQuotesRegexp = regexp.MustCompile("('[^']*')")
renameColumnRegexp = regexp.MustCompile(`(?i)\bchange\s+(column\s+|)([\S]+)\s+([\S]+)\s+`)
dropColumnRegexp = regexp.MustCompile(`(?i)\bdrop\s+(column\s+|)([\S]+)$`)
renameTableRegexp = regexp.MustCompile(`(?i)\brename\s+(to|as)\s+`)
alterTableExplicitSchemaTableRegexps = []*regexp.Regexp{
// ALTER TABLE `scm`.`tbl` something
regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `[.]` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
// ALTER TABLE `scm`.tbl something
regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `[.]([\S]+)\s+(.*$)`),
// ALTER TABLE scm.`tbl` something
regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)[.]` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
// ALTER TABLE scm.tbl something
regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)[.]([\S]+)\s+(.*$)`),
}
alterTableExplicitTableRegexps = []*regexp.Regexp{
// ALTER TABLE `tbl` something
regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
// ALTER TABLE tbl something
regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)\s+(.*$)`),
}
)
type Parser struct {
type AlterTableParser struct {
columnRenameMap map[string]string
droppedColumns map[string]bool
isRenameTable bool
alterStatementOptions string
alterTokens []string
explicitSchema string
explicitTable string
}
func NewParser() *Parser {
return &Parser{
func NewAlterTableParser() *AlterTableParser {
return &AlterTableParser{
columnRenameMap: make(map[string]string),
droppedColumns: make(map[string]bool),
}
}
func (this *Parser) tokenizeAlterStatement(alterStatement string) (tokens []string, err error) {
func NewParserFromAlterStatement(alterStatement string) *AlterTableParser {
parser := NewAlterTableParser()
parser.ParseAlterStatement(alterStatement)
return parser
}
func (this *AlterTableParser) tokenizeAlterStatement(alterStatement string) (tokens []string, err error) {
terminatingQuote := rune(0)
f := func(c rune) bool {
switch {
@ -56,13 +86,13 @@ func (this *Parser) tokenizeAlterStatement(alterStatement string) (tokens []stri
return tokens, nil
}
func (this *Parser) sanitizeQuotesFromAlterStatement(alterStatement string) (strippedStatement string) {
func (this *AlterTableParser) sanitizeQuotesFromAlterStatement(alterStatement string) (strippedStatement string) {
strippedStatement = alterStatement
strippedStatement = sanitizeQuotesRegexp.ReplaceAllString(strippedStatement, "''")
return strippedStatement
}
func (this *Parser) parseAlterToken(alterToken string) (err error) {
func (this *AlterTableParser) parseAlterToken(alterToken string) (err error) {
{
// rename
allStringSubmatch := renameColumnRegexp.FindAllStringSubmatch(alterToken, -1)
@ -86,19 +116,43 @@ func (this *Parser) parseAlterToken(alterToken string) (err error) {
this.droppedColumns[submatch[2]] = true
}
}
return nil
}
func (this *Parser) ParseAlterStatement(alterStatement string) (err error) {
alterTokens, _ := this.tokenizeAlterStatement(alterStatement)
for _, alterToken := range alterTokens {
alterToken = this.sanitizeQuotesFromAlterStatement(alterToken)
this.parseAlterToken(alterToken)
{
// rename table
if renameTableRegexp.MatchString(alterToken) {
this.isRenameTable = true
}
}
return nil
}
func (this *Parser) GetNonTrivialRenames() map[string]string {
func (this *AlterTableParser) ParseAlterStatement(alterStatement string) (err error) {
this.alterStatementOptions = alterStatement
for _, alterTableRegexp := range alterTableExplicitSchemaTableRegexps {
if submatch := alterTableRegexp.FindStringSubmatch(this.alterStatementOptions); len(submatch) > 0 {
this.explicitSchema = submatch[1]
this.explicitTable = submatch[2]
this.alterStatementOptions = submatch[3]
break
}
}
for _, alterTableRegexp := range alterTableExplicitTableRegexps {
if submatch := alterTableRegexp.FindStringSubmatch(this.alterStatementOptions); len(submatch) > 0 {
this.explicitTable = submatch[1]
this.alterStatementOptions = submatch[2]
break
}
}
alterTokens, _ := this.tokenizeAlterStatement(this.alterStatementOptions)
for _, alterToken := range alterTokens {
alterToken = this.sanitizeQuotesFromAlterStatement(alterToken)
this.parseAlterToken(alterToken)
this.alterTokens = append(this.alterTokens, alterToken)
}
return nil
}
func (this *AlterTableParser) GetNonTrivialRenames() map[string]string {
result := make(map[string]string)
for column, renamed := range this.columnRenameMap {
if column != renamed {
@ -108,10 +162,33 @@ func (this *Parser) GetNonTrivialRenames() map[string]string {
return result
}
func (this *Parser) HasNonTrivialRenames() bool {
func (this *AlterTableParser) HasNonTrivialRenames() bool {
return len(this.GetNonTrivialRenames()) > 0
}
func (this *Parser) DroppedColumnsMap() map[string]bool {
func (this *AlterTableParser) DroppedColumnsMap() map[string]bool {
return this.droppedColumns
}
func (this *AlterTableParser) IsRenameTable() bool {
return this.isRenameTable
}
func (this *AlterTableParser) GetExplicitSchema() string {
return this.explicitSchema
}
func (this *AlterTableParser) HasExplicitSchema() bool {
return this.GetExplicitSchema() != ""
}
func (this *AlterTableParser) GetExplicitTable() string {
return this.explicitTable
}
func (this *AlterTableParser) HasExplicitTable() bool {
return this.GetExplicitTable() != ""
}
func (this *AlterTableParser) GetAlterStatementOptions() string {
return this.alterStatementOptions
}
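// A minimal usage sketch of the renamed parser and its new explicit schema/table handling
// (the statement is illustrative):
//
//	parser := NewAlterTableParser()
//	if err := parser.ParseAlterStatement("ALTER TABLE `scm`.`tbl` drop column b, change i counter int"); err != nil {
//		// handle error
//	}
//	parser.GetExplicitSchema()        // "scm"
//	parser.GetExplicitTable()         // "tbl"
//	parser.GetAlterStatementOptions() // "drop column b, change i counter int"
//	parser.DroppedColumnsMap()        // map[b:true]
//	parser.GetNonTrivialRenames()     // map[i:counter]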

View File

@ -19,17 +19,19 @@ func init() {
func TestParseAlterStatement(t *testing.T) {
statement := "add column t int, engine=innodb"
parser := NewParser()
parser := NewAlterTableParser()
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
}
func TestParseAlterStatementTrivialRename(t *testing.T) {
statement := "add column t int, change ts ts timestamp, engine=innodb"
parser := NewParser()
parser := NewAlterTableParser()
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
test.S(t).ExpectEquals(len(parser.columnRenameMap), 1)
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
@ -37,9 +39,10 @@ func TestParseAlterStatementTrivialRename(t *testing.T) {
func TestParseAlterStatementTrivialRenames(t *testing.T) {
statement := "add column t int, change ts ts timestamp, CHANGE f `f` float, engine=innodb"
parser := NewParser()
parser := NewAlterTableParser()
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
test.S(t).ExpectEquals(len(parser.columnRenameMap), 2)
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
@ -58,9 +61,10 @@ func TestParseAlterStatementNonTrivial(t *testing.T) {
}
for _, statement := range statements {
parser := NewParser()
parser := NewAlterTableParser()
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
renames := parser.GetNonTrivialRenames()
test.S(t).ExpectEquals(len(renames), 2)
test.S(t).ExpectEquals(renames["i"], "count")
@ -69,7 +73,7 @@ func TestParseAlterStatementNonTrivial(t *testing.T) {
}
func TestTokenizeAlterStatement(t *testing.T) {
parser := NewParser()
parser := NewAlterTableParser()
{
alterStatement := "add column t int"
tokens, _ := parser.tokenizeAlterStatement(alterStatement)
@ -108,7 +112,7 @@ func TestTokenizeAlterStatement(t *testing.T) {
}
func TestSanitizeQuotesFromAlterStatement(t *testing.T) {
parser := NewParser()
parser := NewAlterTableParser()
{
alterStatement := "add column e enum('a','b','c')"
strippedStatement := parser.sanitizeQuotesFromAlterStatement(alterStatement)
@ -124,7 +128,7 @@ func TestSanitizeQuotesFromAlterStatement(t *testing.T) {
func TestParseAlterStatementDroppedColumns(t *testing.T) {
{
parser := NewParser()
parser := NewAlterTableParser()
statement := "drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
@ -132,16 +136,17 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) {
test.S(t).ExpectTrue(parser.droppedColumns["b"])
}
{
parser := NewParser()
parser := NewAlterTableParser()
statement := "drop column b, drop key c_idx, drop column `d`"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectEquals(len(parser.droppedColumns), 2)
test.S(t).ExpectTrue(parser.droppedColumns["b"])
test.S(t).ExpectTrue(parser.droppedColumns["d"])
}
{
parser := NewParser()
parser := NewAlterTableParser()
statement := "drop column b, drop key c_idx, drop column `d`, drop `e`, drop primary key, drop foreign key fk_1"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
@ -151,7 +156,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) {
test.S(t).ExpectTrue(parser.droppedColumns["e"])
}
{
parser := NewParser()
parser := NewAlterTableParser()
statement := "drop column b, drop bad statement, add column i int"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
@ -159,3 +164,137 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) {
test.S(t).ExpectTrue(parser.droppedColumns["b"])
}
}
func TestParseAlterStatementRenameTable(t *testing.T) {
{
parser := NewAlterTableParser()
statement := "drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectFalse(parser.isRenameTable)
}
{
parser := NewAlterTableParser()
statement := "rename as something_else"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectTrue(parser.isRenameTable)
}
{
parser := NewAlterTableParser()
statement := "drop column b, rename as something_else"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectTrue(parser.isRenameTable)
}
{
parser := NewAlterTableParser()
statement := "engine=innodb rename as something_else"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectTrue(parser.isRenameTable)
}
{
parser := NewAlterTableParser()
statement := "rename as something_else, engine=innodb"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectTrue(parser.isRenameTable)
}
}
func TestParseAlterStatementExplicitTable(t *testing.T) {
{
parser := NewAlterTableParser()
statement := "drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "")
test.S(t).ExpectEquals(parser.explicitTable, "")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table tbl drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "")
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table `tbl` drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "")
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table `scm with spaces`.`tbl` drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "scm with spaces")
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table `scm`.`tbl with spaces` drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
test.S(t).ExpectEquals(parser.explicitTable, "tbl with spaces")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table `scm`.tbl drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table scm.`tbl` drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table scm.tbl drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table scm.tbl drop column b, add index idx(i)"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b, add index idx(i)")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b", "add index idx(i)"}))
}
}

View File

@ -15,13 +15,13 @@ import (
type ColumnType int
const (
UnknownColumnType ColumnType = iota
TimestampColumnType = iota
DateTimeColumnType = iota
EnumColumnType = iota
MediumIntColumnType = iota
JSONColumnType = iota
FloatColumnType = iota
UnknownColumnType ColumnType = iota
TimestampColumnType
DateTimeColumnType
EnumColumnType
MediumIntColumnType
JSONColumnType
FloatColumnType
)
const maxMediumintUnsigned int32 = 16777215
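
As a side note, the rewritten const block above leans on Go's implicit repetition inside grouped constants: omitting the expression repeats the previous one, so `iota` keeps incrementing. A standalone sketch (with a stand-in type) illustrating the equivalence:

```go
package main

import "fmt"

type columnType int

const (
	unknownColumnType   columnType = iota // 0
	timestampColumnType                   // 1; "columnType = iota" is repeated implicitly
	dateTimeColumnType                    // 2
)

func main() {
	fmt.Println(unknownColumnType, timestampColumnType, dateTimeColumnType) // 0 1 2
}
```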

View File

@ -0,0 +1,9 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
primary key(id)
) auto_increment=1;
set session sql_mode='NO_AUTO_VALUE_ON_ZERO';
insert into gh_ost_test values (0, 23);

View File

@ -0,0 +1,21 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id bigint auto_increment,
val bigint not null,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 18446744073709551615);
insert into gh_ost_test values (null, 18446744073709551614);
insert into gh_ost_test values (null, 18446744073709551613);
end ;;

View File

@ -0,0 +1 @@
--alter="change val val bigint"

View File

@ -0,0 +1,20 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 11);
insert into gh_ost_test values (null, 13);
end ;;

View File

@ -0,0 +1 @@
--alter="add column is_good bit null default 0"

View File

@ -0,0 +1 @@
id, i

View File

@ -0,0 +1 @@
id, i

View File

@ -0,0 +1,24 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
is_good bit null default 0,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 11, 0);
insert into gh_ost_test values (null, 13, 1);
insert into gh_ost_test values (null, 17, 1);
update gh_ost_test set is_good=0 where i=13 order by id desc limit 1;
end ;;

View File

@ -0,0 +1 @@
--alter="modify column is_good bit not null default 0" --approve-renamed-columns

View File

@ -0,0 +1,31 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
t varchar(128) charset utf8 collate utf8_general_ci,
tl varchar(128) charset latin1 not null,
ta varchar(128) charset ascii not null,
primary key(id)
) auto_increment=1;
insert into gh_ost_test values (null, 'átesting', 'átesting', 'initial');
insert into gh_ost_test values (null, 'Hello world, Καλημέρα κόσμε, コンニチハ', 'átesting0', 'initial');
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, md5(rand()), 'átesting-a', 'a');
insert into gh_ost_test values (null, 'novo proprietário', 'átesting-b', 'b');
insert into gh_ost_test values (null, '2H₂ + O₂ ⇌ 2H₂O, R = 4.7 kΩ, ⌀ 200 mm', 'átesting-c', 'c');
insert into gh_ost_test values (null, 'usuário', 'átesting-x', 'x');
delete from gh_ost_test where ta='x' order by id desc limit 1;
end ;;

View File

@ -0,0 +1 @@
--alter='convert to character set utf8mb4'

View File

@ -0,0 +1,27 @@
set session time_zone='+00:00';
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
create_time timestamp NULL DEFAULT '0000-00-00 00:00:00',
update_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
counter int(10) unsigned DEFAULT NULL,
primary key(id)
) auto_increment=1;
set session time_zone='+00:00';
insert into gh_ost_test values (1, '0000-00-00 00:00:00', now(), 0);
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
set session time_zone='+00:00';
update gh_ost_test set counter = counter + 1 where id = 1;
end ;;

View File

@ -0,0 +1 @@
--alter='add column name varchar(1)'

View File

@ -0,0 +1 @@
id, create_time, update_time, counter

View File

@ -0,0 +1 @@
id, create_time, update_time, counter

View File

View File

@ -17,7 +17,7 @@ create event gh_ost_test
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
disable on slave
do
begin
insert into gh_ost_test values (null, 11, now(), now(), now(), 0);

View File

@ -0,0 +1,23 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
dec0 decimal(65,30) unsigned NOT NULL DEFAULT '0.000000000000000000000000000000',
dec1 decimal(65,30) unsigned NOT NULL DEFAULT '1.000000000000000000000000000000',
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 0.0, 0.0);
insert into gh_ost_test values (null, 2.0, 4.0);
insert into gh_ost_test values (null, 99999999999999999999999999999999999.000, 6.0);
update gh_ost_test set dec1=4.5 where dec1=4.0 order by id desc limit 1;
end ;;

View File

@ -0,0 +1,22 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
ts timestamp,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 11, now());
insert into gh_ost_test values (null, 13, now());
insert into gh_ost_test values (null, 17, now());
end ;;

View File

@ -0,0 +1 @@
ALTER statement seems to RENAME the table

View File

@ -0,0 +1 @@
--alter="rename as something_else"

View File

@ -0,0 +1,25 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int(11) NOT NULL AUTO_INCREMENT,
name varchar(512) DEFAULT NULL,
v varchar(255) DEFAULT NULL COMMENT '添加普通列测试',
PRIMARY KEY (id)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=gbk;
insert into gh_ost_test values (null, 'gbk-test-initial', '添加普通列测试-添加普通列测试');
insert into gh_ost_test values (null, 'gbk-test-initial', '添加普通列测试-添加普通列测试');
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test (name) values ('gbk-test-default');
insert into gh_ost_test values (null, 'gbk-test', '添加普通列测试-添加普通列测试');
update gh_ost_test set v='添加普通列测试' where v='添加普通列测试-添加普通列测试' order by id desc limit 1;
end ;;

View File

View File

@ -0,0 +1,29 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
a int not null,
b int not null,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test (id, a, b) values (null, 2,3);
insert into gh_ost_test (id, a, b) values (null, 2,4);
insert into gh_ost_test (id, a, b) values (null, 2,5);
insert into gh_ost_test (id, a, b) values (null, 2,6);
insert into gh_ost_test (id, a, b) values (null, 2,7);
insert into gh_ost_test (id, a, b) values (null, 2,8);
insert into gh_ost_test (id, a, b) values (null, 2,9);
insert into gh_ost_test (id, a, b) values (null, 2,0);
insert into gh_ost_test (id, a, b) values (null, 2,1);
insert into gh_ost_test (id, a, b) values (null, 2,2);
end ;;

View File

@ -0,0 +1 @@
--alter="add column sum_ab int as (a + b) virtual not null"

View File

@ -0,0 +1 @@
id, a, b

View File

@ -0,0 +1 @@
(5.5|5.6)

View File

@ -0,0 +1 @@
id

View File

@ -0,0 +1 @@
id, a, b

View File

@ -0,0 +1,30 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
a int not null,
b int not null,
sum_ab int as (a + b) virtual not null,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test (id, a, b) values (null, 2,3);
insert into gh_ost_test (id, a, b) values (null, 2,4);
insert into gh_ost_test (id, a, b) values (null, 2,5);
insert into gh_ost_test (id, a, b) values (null, 2,6);
insert into gh_ost_test (id, a, b) values (null, 2,7);
insert into gh_ost_test (id, a, b) values (null, 2,8);
insert into gh_ost_test (id, a, b) values (null, 2,9);
insert into gh_ost_test (id, a, b) values (null, 2,0);
insert into gh_ost_test (id, a, b) values (null, 2,1);
insert into gh_ost_test (id, a, b) values (null, 2,2);
end ;;

View File

@ -0,0 +1 @@
--alter="change sum_ab total_ab int as (a + b) virtual not null" --approve-renamed-columns

View File

@ -0,0 +1 @@
(5.5|5.6)

View File

@ -0,0 +1,30 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
a int not null,
b int not null,
sum_ab int as (a + b) virtual not null,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test (id, a, b) values (null, 2,3);
insert into gh_ost_test (id, a, b) values (null, 2,4);
insert into gh_ost_test (id, a, b) values (null, 2,5);
insert into gh_ost_test (id, a, b) values (null, 2,6);
insert into gh_ost_test (id, a, b) values (null, 2,7);
insert into gh_ost_test (id, a, b) values (null, 2,8);
insert into gh_ost_test (id, a, b) values (null, 2,9);
insert into gh_ost_test (id, a, b) values (null, 2,0);
insert into gh_ost_test (id, a, b) values (null, 2,1);
insert into gh_ost_test (id, a, b) values (null, 2,2);
end ;;

View File

@ -0,0 +1 @@
(5.5|5.6)

View File

@ -20,6 +20,7 @@ begin
insert into gh_ost_test (id, i, j) values (null, 11, '"sometext"');
insert into gh_ost_test (id, i, j) values (null, 13, '{"key":"val"}');
insert into gh_ost_test (id, i, j) values (null, 17, '{"is-it": true, "count": 3, "elements": []}');
insert into gh_ost_test (id, i, j) values (null, 19, '{"text":"Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu. In enim justo, rhoncus ut, imperdiet a, venenatis vitae, justo. Nullam dictum felis eu pede mollis pretium. Integer tincidunt. Cras dapibus. Vivamus elementum semper nisi. Aenean vulputate eleifend tellus. Aenean leo ligula, porttitor eu, consequat vitae, eleifend ac, enim. Aliquam lorem ante, dapibus in, viverra quis, feugiat a, tellus. Phasellus viverra nulla ut metus varius laoreet. Quisque rutrum. Aenean imperdiet. Etiam ultricies nisi vel augue. Curabitur ullamcorper ultricies nisi. Nam eget dui. Etiam rhoncus. Maecenas tempus, tellus eget condimentum rhoncus, sem quam semper libero, sit amet adipiscing sem neque sed ipsum. Nam quam nunc, blandit vel, luctus pulvinar, hendrerit id, lorem. Maecenas nec odio et ante tincidunt tempus. Donec vitae sapien ut libero venenatis faucibus. Nullam quis ante. Etiam sit amet orci eget eros faucibus tincidunt. Duis leo. Sed fringilla mauris sit amet nibh. Donec sodales sagittis magna. Sed consequat, leo eget bibendum sodales, augue velit cursus nunc, quis gravida magna mi a libero. Fusce vulputate eleifend sapien. Vestibulum purus quam, scelerisque ut, mollis sed, nonummy id, metus. Nullam accumsan lorem in dui. Cras ultricies mi eu turpis hendrerit fringilla. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; In ac dui quis mi consectetuer lacinia. Nam pretium turpis et arcu. Duis arcu tortor, suscipit eget, imperdiet nec, imperdiet iaculis, ipsum. Sed aliquam ultrices mauris. Integer ante arcu, accumsan a, consectetuer eget, posuere ut, mauris. Praesent adipiscing. Phasellus ullamcorper ipsum rutrum nunc. Nunc nonummy metus. Vestibulum volutpat pretium libero. Cras id dui. Aenean ut eros et nisl sagittis vestibulum. Nullam nulla eros, ultricies sit amet, nonummy id, imperdiet feugiat, pede. Sed lectus. Donec mollis hendrerit risus. Phasellus nec sem in justo pellentesque facilisis. Etiam imperdiet imperdiet orci. Nunc nec neque. Phasellus leo dolor, tempus non, auctor et, hendrerit quis, nisi. Curabitur ligula sapien, tincidunt non, euismod vitae, posuere imperdiet, leo. Maecenas malesuada. Praesent congue erat at massa. Sed cursus turpis vitae tortor. Donec posuere vulputate arcu. Phasellus accumsan cursus velit. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Sed aliquam, nisi quis porttitor congue, elit erat euismod orci, ac"}');
update gh_ost_test set j = '{"updated": 11}', updated = 1 where i = 11 and updated = 0;
update gh_ost_test set j = json_set(j, '$.count', 13, '$.id', id), updated = 1 where i = 13 and updated = 0;

View File

@ -0,0 +1,25 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
t text charset latin1 collate latin1_swedish_ci,
primary key(id)
) auto_increment=1 charset latin1 collate latin1_swedish_ci;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, md5(rand()));
insert into gh_ost_test values (null, 'átesting');
insert into gh_ost_test values (null, 'ádelete');
insert into gh_ost_test values (null, 'testátest');
update gh_ost_test set t='áupdated' order by id desc limit 1;
update gh_ost_test set t='áupdated1' where t='áupdated' order by id desc limit 1;
delete from gh_ost_test where t='ádelete';
end ;;

View File

@ -1,8 +1,8 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id bigint,
id bigint not null,
i int not null,
ts timestamp(6),
ts timestamp(6) not null,
unique key id_uidx(id),
unique key its_uidx(i, ts)
) ;

View File

@ -14,11 +14,13 @@ ghost_binary=""
exec_command_file=/tmp/gh-ost-test.bash
orig_content_output_file=/tmp/gh-ost-test.orig.content.csv
ghost_content_output_file=/tmp/gh-ost-test.ghost.content.csv
throttle_flag_file=/tmp/gh-ost-test.ghost.throttle.flag
master_host=
master_port=
replica_host=
replica_port=
original_sql_mode=
OPTIND=1
while getopts "b:" OPTION
@ -45,6 +47,11 @@ verify_master_and_replica() {
echo "Cannot enable event_scheduler on master"
exit 1
fi
original_sql_mode="$(gh-ost-test-mysql-master -e "select @@global.sql_mode" -s -s)"
echo "sql_mode on master is ${original_sql_mode}"
echo "Gracefully sleeping for 3 seconds while replica is setting up..."
sleep 3
if [ "$(gh-ost-test-mysql-replica -e "select 1" -ss)" != "1" ] ; then
echo "Cannot verify gh-ost-test-mysql-replica"
@ -102,6 +109,12 @@ test_single() {
echo_dot
start_replication
echo_dot
if [ -f $tests_path/$test_name/sql_mode ] ; then
gh-ost-test-mysql-master --default-character-set=utf8mb4 test -e "set @@global.sql_mode='$(cat $tests_path/$test_name/sql_mode)'"
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "set @@global.sql_mode='$(cat $tests_path/$test_name/sql_mode)'"
fi
gh-ost-test-mysql-master --default-character-set=utf8mb4 test < $tests_path/$test_name/create.sql
extra_args=""
@ -138,10 +151,11 @@ test_single() {
--initially-drop-old-table \
--initially-drop-ghost-table \
--throttle-query='select timestampdiff(second, min(last_update), now()) < 5 from _gh_ost_test_ghc' \
--throttle-flag-file=$throttle_flag_file \
--serve-socket-file=/tmp/gh-ost.test.sock \
--initially-drop-socket-file \
--test-on-replica \
--default-retries=1 \
--default-retries=3 \
--chunk-size=10 \
--verbose \
--debug \
@ -154,6 +168,11 @@ test_single() {
execution_result=$?
if [ -f $tests_path/$test_name/sql_mode ] ; then
gh-ost-test-mysql-master --default-character-set=utf8mb4 test -e "set @@global.sql_mode='${original_sql_mode}'"
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "set @@global.sql_mode='${original_sql_mode}'"
fi
if [ -f $tests_path/$test_name/destroy.sql ] ; then
gh-ost-test-mysql-master --default-character-set=utf8mb4 test < $tests_path/$test_name/destroy.sql
fi
@ -197,6 +216,7 @@ test_single() {
diff $orig_content_output_file $ghost_content_output_file
echo "diff $orig_content_output_file $ghost_content_output_file"
return 1
fi
}

View File

@ -0,0 +1,40 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id binary(16) NOT NULL,
info varchar(255) COLLATE utf8_unicode_ci NOT NULL,
data binary(8) NOT NULL,
primary key (id),
unique key info_uidx (info)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
replace into gh_ost_test (id, info, data) values (X'12ffffffffffffffffffffffffffff00', 'item 1a', X'12ffffffffffffff');
replace into gh_ost_test (id, info, data) values (X'34ffffffffffffffffffffffffffffff', 'item 3a', X'34ffffffffffffff');
replace into gh_ost_test (id, info, data) values (X'90ffffffffffffffffffffffffffffff', 'item 9a', X'90ffffffffffff00');
DELETE FROM gh_ost_test WHERE id = X'11ffffffffffffffffffffffffffff00';
UPDATE gh_ost_test SET info = 'item 2++' WHERE id = X'22ffffffffffffffffffffffffffff00';
UPDATE gh_ost_test SET info = 'item 3++', data = X'33ffffffffffff00' WHERE id = X'33ffffffffffffffffffffffffffffff';
DELETE FROM gh_ost_test WHERE id = X'44ffffffffffffffffffffffffffffff';
UPDATE gh_ost_test SET info = 'item 5++', data = X'55ffffffffffffee' WHERE id = X'55ffffffffffffffffffffffffffffff';
INSERT INTO gh_ost_test (id, info, data) VALUES (X'66ffffffffffffffffffffffffffff00', 'item 6', X'66ffffffffffffff');
INSERT INTO gh_ost_test (id, info, data) VALUES (X'77ffffffffffffffffffffffffffffff', 'item 7', X'77ffffffffffff00');
INSERT INTO gh_ost_test (id, info, data) VALUES (X'88ffffffffffffffffffffffffffffff', 'item 8', X'88ffffffffffffff');
end ;;
INSERT INTO gh_ost_test (id, info, data) VALUES
(X'11ffffffffffffffffffffffffffff00', 'item 1', X'11ffffffffffffff'), -- id ends in 00
(X'22ffffffffffffffffffffffffffff00', 'item 2', X'22ffffffffffffff'), -- id ends in 00
(X'33ffffffffffffffffffffffffffffff', 'item 3', X'33ffffffffffffff'),
(X'44ffffffffffffffffffffffffffffff', 'item 4', X'44ffffffffffffff'),
(X'55ffffffffffffffffffffffffffffff', 'item 5', X'55ffffffffffffff'),
(X'99ffffffffffffffffffffffffffffff', 'item 9', X'99ffffffffffff00'); -- data ends in 00

View File

@ -4,6 +4,7 @@ set -e
# Make sure we have the version of Go we want to depend on, either from the
# system or one we grab ourselves.
# If executing from within Dockerfile then this assumption is inherently true, since we use a `golang` docker image.
. script/ensure-go-installed
# Since we want to be able to build this outside of GOPATH, we set it

35
script/build-deploy-tarball Executable file
View File

@ -0,0 +1,35 @@
#!/bin/sh
set -e
script/build
# Get a fresh directory and make sure to delete it afterwards
build_dir=tmp/build
rm -rf $build_dir
mkdir -p $build_dir
trap "rm -rf $build_dir" EXIT
commit_sha=$(git rev-parse HEAD)
if [ $(uname -s) = "Darwin" ]; then
build_arch="$(uname -sr | tr -d ' ' | tr '[:upper:]' '[:lower:]')-$(uname -m)"
else
build_arch="$(lsb_release -sc | tr -d ' ' | tr '[:upper:]' '[:lower:]')-$(uname -m)"
fi
tarball=$build_dir/${commit_sha}-${build_arch}.tar
# Create the tarball
tar cvf $tarball --mode="ugo=rx" bin/
# Compress it and copy it to the directory for the CI to upload it
gzip $tarball
mkdir -p "$BUILD_ARTIFACT_DIR"/gh-ost
cp ${tarball}.gz "$BUILD_ARTIFACT_DIR"/gh-ost/
### HACK HACK HACK ###
# blame @carlosmn and @mattr-
# Allow builds on stretch to also be used for jessie
jessie_tarball_name=$(echo $(basename "${tarball}") | sed s/-stretch-/-jessie-/)
cp ${tarball}.gz "$BUILD_ARTIFACT_DIR/gh-ost/${jessie_tarball_name}.gz"

View File

@ -1,17 +1,3 @@
#!/bin/bash
set -e
. script/bootstrap
echo "Verifying code is formatted via 'gofmt -s -w go/'"
gofmt -s -w go/
git diff --exit-code --quiet
echo "Building"
script/build
cd .gopath/src/github.com/github/gh-ost
echo "Running unit tests"
go test ./go/...
script/test

View File

@ -1,37 +1,47 @@
#!/bin/sh
#!/bin/bash
set -e
output_fold() {
# Exit early if no label provided
if [ -z "$1" ]; then
echo "output_fold(): requires a label argument."
return
fi
script/cibuild
exit_value=0 # exit_value is used to record exit status of the given command
label=$1 # human-readable label describing what's being folded up
shift 1 # having retrieved the output_fold()-specific arguments, strip them off $@
# Get a fresh directory and make sure to delete it afterwards
build_dir=tmp/build
rm -rf $build_dir
mkdir -p $build_dir
trap "rm -rf $build_dir" EXIT
# Only echo the tags when in CI_MODE
if [ "$CI_MODE" ]; then
echo "%%%FOLD {$label}%%%"
fi
commit_sha=$(git rev-parse HEAD)
# run the remaining arguments. If the command exits non-0, the `||` will
# prevent the `-e` flag from seeing the failure exit code, and we'll see
# the second echo execute
"$@" || exit_value=$?
if [ $(uname -s) = "Darwin" ]; then
build_arch="$(uname -sr | tr -d ' ' | tr '[:upper:]' '[:lower:]')-$(uname -m)"
else
build_arch="$(lsb_release -sc | tr -d ' ' | tr '[:upper:]' '[:lower:]')-$(uname -m)"
fi
# Only echo the tags when in CI_MODE
if [ "$CI_MODE" ]; then
echo "%%%END FOLD%%%"
fi
tarball=$build_dir/${commit_sha}-${build_arch}.tar
# preserve the exit code from the subcommand.
return $exit_value
}
# Create the tarball
tar cvf $tarball --mode="ugo=rx" bin/
function cleanup() {
echo
echo "%%%FOLD {Shutting down services...}%%%"
docker-compose down
echo "%%%END FOLD%%%"
}
# Compress it and copy it to the directory for the CI to upload it
gzip $tarball
mkdir -p "$BUILD_ARTIFACT_DIR"/gh-ost
cp ${tarball}.gz "$BUILD_ARTIFACT_DIR"/gh-ost/
trap cleanup EXIT
### HACK HACK HACK ###
# Blame @carlosmn. In the good way.
# We don't have any jessie machines for building, but a pure-Go binary depends
# on a version of libc and ld which are widely available, so we can copy the
# tarball over with jessie in its name so we can deploy it on jessie machines.
jessie_tarball_name=$(echo $(basename "${tarball}") | sed s/-precise-/-jessie-/)
cp ${tarball}.gz "$BUILD_ARTIFACT_DIR/gh-ost/${jessie_tarball_name}.gz"
export CI_MODE=true
output_fold "Bootstrapping container..." docker-compose build
output_fold "Running tests..." docker-compose run --rm app
docker-compose run -e BUILD_ARTIFACT_DIR=$BUILD_ARTIFACT_DIR -v $BUILD_ARTIFACT_DIR:$BUILD_ARTIFACT_DIR app script/build-deploy-tarball

View File

@ -50,7 +50,8 @@ test_mysql_version() {
export PATH="${PWD}/gh-ost-ci-env/bin/:${PATH}"
gh-ost-test-mysql-master -uroot -e "grant all on *.* to 'gh-ost'@'%' identified by 'gh-ost'"
gh-ost-test-mysql-master -uroot -e "create user 'gh-ost'@'%' identified by 'gh-ost'"
gh-ost-test-mysql-master -uroot -e "grant all on *.* to 'gh-ost'@'%'"
echo "### Running gh-ost tests for $mysql_version"
./localtests/test.sh -b bin/gh-ost
@ -61,6 +62,9 @@ test_mysql_version() {
echo "Building..."
. script/build
# Test all versions:
find gh-ost-ci-env/mysql-tarballs/ -name "*.tar.gz" | while read f ; do basename $f ".tar.gz" ; done | sort -r | while read mysql_version ; do
echo "found MySQL version: $mysql_version"
done
find gh-ost-ci-env/mysql-tarballs/ -name "*.tar.gz" | while read f ; do basename $f ".tar.gz" ; done | sort -r | while read mysql_version ; do
test_mysql_version "$mysql_version"
done

25
script/dock Executable file
View File

@ -0,0 +1,25 @@
#!/bin/bash
# Usage:
# dock <test|pkg> [arg]
# dock test: build gh-ost & run unit and integration tests
# dock pkg [target-path]: build gh-ost release packages and copy to target path (default path: /tmp/gh-ost-release)
command="$1"
case "$command" in
"test")
docker_target="gh-ost-test"
docker build . -f Dockerfile.test -t "${docker_target}" && docker run --rm -it "${docker_target}:latest"
;;
"pkg")
packages_path="${2:-/tmp/gh-ost-release}"
docker_target="gh-ost-packaging"
docker build . -f Dockerfile.packaging -t "${docker_target}" && docker run --rm -it -v "${packages_path}:/tmp/pkg" "${docker_target}:latest" bash -c 'find /tmp/gh-ost-release/ -maxdepth 1 -type f | xargs cp -t /tmp/pkg'
echo "packages generated on ${packages_path}:"
ls -l "${packages_path}"
;;
*)
>&2 echo "Usage: dock dock <test|alpine|packages> [arg]"
exit 1
esac

View File

@ -1,20 +1,20 @@
#!/bin/bash
PREFERRED_GO_VERSION=go1.9.2
SUPPORTED_GO_VERSIONS='go1.[89]'
PREFERRED_GO_VERSION=go1.14.7
SUPPORTED_GO_VERSIONS='go1.1[456]'
GO_PKG_DARWIN=${PREFERRED_GO_VERSION}.darwin-amd64.pkg
GO_PKG_DARWIN_SHA=73fd5840d55f5566d8db6c0ffdd187577e8ebe650c783f68bd27cbf95bde6743
GO_PKG_DARWIN_SHA=0f215de06019a054a3da46a0722989986c956d719c7a0a8fc38a5f3c216d6f6b
GO_PKG_LINUX=${PREFERRED_GO_VERSION}.linux-amd64.tar.gz
GO_PKG_LINUX_SHA=de874549d9a8d8d8062be05808509c09a88a248e77ec14eb77453530829ac02b
GO_PKG_LINUX_SHA=4a7fa60f323ee1416a4b1425aefc37ea359e9d64df19c326a58953a97ad41ea5
export ROOTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
cd $ROOTDIR
# If Go isn't installed globally, setup environment variables for local install.
if [ -z "$(which go)" ] || [ -z "$(go version | grep "$SUPPORTED_GO_VERSIONS")" ]; then
GODIR="$ROOTDIR/.vendor/go19"
GODIR="$ROOTDIR/.vendor/golocal"
if [ $(uname -s) = "Darwin" ]; then
export GOROOT="$GODIR/usr/local/go"
@ -32,12 +32,12 @@ if [ -z "$(which go)" ] || [ -z "$(go version | grep "$SUPPORTED_GO_VERSIONS")"
cd "$GODIR";
if [ $(uname -s) = "Darwin" ]; then
curl -L -O https://storage.googleapis.com/golang/$GO_PKG_DARWIN
curl -L -O https://dl.google.com/go/$GO_PKG_DARWIN
shasum -a256 $GO_PKG_DARWIN | grep $GO_PKG_DARWIN_SHA
xar -xf $GO_PKG_DARWIN
cpio -i < com.googlecode.go.pkg/Payload
else
curl -L -O https://storage.googleapis.com/golang/$GO_PKG_LINUX
curl -L -O https://dl.google.com/go/$GO_PKG_LINUX
shasum -a256 $GO_PKG_LINUX | grep $GO_PKG_LINUX_SHA
tar xf $GO_PKG_LINUX
fi

17
script/test Executable file
View File

@ -0,0 +1,17 @@
#!/bin/bash
set -e
. script/bootstrap
echo "Verifying code is formatted via 'gofmt -s -w go/'"
gofmt -s -w go/
git diff --exit-code --quiet
echo "Building"
script/build
cd .gopath/src/github.com/github/gh-ost
echo "Running unit tests"
go test ./go/...

View File

@ -4,28 +4,11 @@
Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).
Please provide the following minimum information:
* Your Go-MySQL-Driver version (or git SHA)
* Your Go version (run `go version` in your console)
* A detailed issue description
* Error Log if present
* If possible, a short example
## Contributing Code
By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
Don't forget to add yourself to the AUTHORS file.
### Pull Requests Checklist
Please check the following points before submitting your pull request:
- [x] Code compiles correctly
- [x] Created tests, if possible
- [x] All tests pass
- [x] Extended the README / documentation, if necessary
- [x] Added yourself to the AUTHORS file
### Code Review
Everyone is invited to review and comment on pull requests.

View File

@ -0,0 +1,21 @@
### Issue description
Tell us what should happen and what happens instead
### Example code
```go
If possible, please enter some example code here to reproduce the issue.
```
### Error log
```
If you have an error log, please paste it here.
```
### Configuration
*Driver version (or git SHA):*
*Go version:* run `go version` in your console
*Server version:* E.g. MySQL 5.6, MariaDB 10.0.20
*Server OS:* E.g. Debian 8.1 (Jessie), Windows 10

View File

@ -0,0 +1,9 @@
### Description
Please explain the changes you made here.
### Checklist
- [ ] Code compiles correctly
- [ ] Created tests which fail without the change (if possible)
- [ ] All tests passing
- [ ] Extended the README / documentation, if necessary
- [ ] Added myself / the copyright holder to the AUTHORS file

View File

@ -6,3 +6,4 @@
Icon?
ehthumbs.db
Thumbs.db
.idea

View File

@ -1,10 +1,129 @@
sudo: false
language: go
go:
- 1.2
- 1.3
- 1.4
- tip
- 1.10.x
- 1.11.x
- 1.12.x
- 1.13.x
- master
before_install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
before_script:
- echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB" | sudo tee -a /etc/mysql/my.cnf
- sudo service mysql restart
- .travis/wait_mysql.sh
- mysql -e 'create database gotest;'
matrix:
include:
- env: DB=MYSQL8
sudo: required
dist: trusty
go: 1.10.x
services:
- docker
before_install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
- docker pull mysql:8.0
- docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
mysql:8.0 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
- cp .travis/docker.cnf ~/.my.cnf
- .travis/wait_mysql.sh
before_script:
- export MYSQL_TEST_USER=gotest
- export MYSQL_TEST_PASS=secret
- export MYSQL_TEST_ADDR=127.0.0.1:3307
- export MYSQL_TEST_CONCURRENT=1
- env: DB=MYSQL57
sudo: required
dist: trusty
go: 1.10.x
services:
- docker
before_install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
- docker pull mysql:5.7
- docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
mysql:5.7 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
- cp .travis/docker.cnf ~/.my.cnf
- .travis/wait_mysql.sh
before_script:
- export MYSQL_TEST_USER=gotest
- export MYSQL_TEST_PASS=secret
- export MYSQL_TEST_ADDR=127.0.0.1:3307
- export MYSQL_TEST_CONCURRENT=1
- env: DB=MARIA55
sudo: required
dist: trusty
go: 1.10.x
services:
- docker
before_install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
- docker pull mariadb:5.5
- docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
mariadb:5.5 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
- cp .travis/docker.cnf ~/.my.cnf
- .travis/wait_mysql.sh
before_script:
- export MYSQL_TEST_USER=gotest
- export MYSQL_TEST_PASS=secret
- export MYSQL_TEST_ADDR=127.0.0.1:3307
- export MYSQL_TEST_CONCURRENT=1
- env: DB=MARIA10_1
sudo: required
dist: trusty
go: 1.10.x
services:
- docker
before_install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
- docker pull mariadb:10.1
- docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
mariadb:10.1 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
- cp .travis/docker.cnf ~/.my.cnf
- .travis/wait_mysql.sh
before_script:
- export MYSQL_TEST_USER=gotest
- export MYSQL_TEST_PASS=secret
- export MYSQL_TEST_ADDR=127.0.0.1:3307
- export MYSQL_TEST_CONCURRENT=1
- os: osx
osx_image: xcode10.1
addons:
homebrew:
packages:
- mysql
update: true
go: 1.12.x
before_install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
before_script:
- echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB\nlocal_infile=1" >> /usr/local/etc/my.cnf
- mysql.server start
- mysql -uroot -e 'CREATE USER gotest IDENTIFIED BY "secret"'
- mysql -uroot -e 'GRANT ALL ON *.* TO gotest'
- mysql -uroot -e 'create database gotest;'
- export MYSQL_TEST_USER=gotest
- export MYSQL_TEST_PASS=secret
- export MYSQL_TEST_ADDR=127.0.0.1:3306
- export MYSQL_TEST_CONCURRENT=1
script:
- go test -v -covermode=count -coverprofile=coverage.out
- go vet ./...
- .travis/gofmt.sh
after_script:
- $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci

View File

@ -0,0 +1,5 @@
[client]
user = gotest
password = secret
host = 127.0.0.1
port = 3307

7
vendor/github.com/go-sql-driver/mysql/.travis/gofmt.sh generated vendored Executable file
View File

@ -0,0 +1,7 @@
#!/bin/bash
set -ev
# Only check for go1.10+ since the gofmt style changed
if [[ $(go version) =~ go1\.([0-9]+) ]] && ((${BASH_REMATCH[1]} >= 10)); then
test -z "$(gofmt -d -s . | tee /dev/stderr)"
fi

View File

@ -0,0 +1,8 @@
#!/bin/sh
while :
do
if mysql -e 'select version()' 2>&1 | grep 'version()\|ERROR 2059 (HY000):'; then
break
fi
sleep 3
done

View File

@ -12,35 +12,95 @@
# Individual Persons
Aaron Hopkins <go-sql-driver at die.net>
Achille Roussel <achille.roussel at gmail.com>
Alex Snast <alexsn at fb.com>
Alexey Palazhchenko <alexey.palazhchenko at gmail.com>
Andrew Reid <andrew.reid at tixtrack.com>
Arne Hormann <arnehormann at gmail.com>
Asta Xie <xiemengjun at gmail.com>
Bulat Gaifullin <gaifullinbf at gmail.com>
Carlos Nieto <jose.carlos at menteslibres.net>
Chris Moos <chris at tech9computers.com>
Craig Wilson <craiggwilson at gmail.com>
Daniel Montoya <dsmontoyam at gmail.com>
Daniel Nichter <nil at codenode.com>
Daniël van Eeden <git at myname.nl>
Dave Protasowski <dprotaso at gmail.com>
DisposaBoy <disposaboy at dby.me>
Egor Smolyakov <egorsmkv at gmail.com>
Erwan Martin <hello at erwan.io>
Evan Shaw <evan at vendhq.com>
Frederick Mayle <frederickmayle at gmail.com>
Gustavo Kristic <gkristic at gmail.com>
Hajime Nakagami <nakagami at gmail.com>
Hanno Braun <mail at hannobraun.com>
Henri Yandell <flamefew at gmail.com>
Hirotaka Yamamoto <ymmt2005 at gmail.com>
Huyiguang <hyg at webterren.com>
ICHINOSE Shogo <shogo82148 at gmail.com>
Ilia Cimpoes <ichimpoesh at gmail.com>
INADA Naoki <songofacandy at gmail.com>
Jacek Szwec <szwec.jacek at gmail.com>
James Harr <james.harr at gmail.com>
Jeff Hodges <jeff at somethingsimilar.com>
Jeffrey Charles <jeffreycharles at gmail.com>
Jerome Meyer <jxmeyer at gmail.com>
Jiajia Zhong <zhong2plus at gmail.com>
Jian Zhen <zhenjl at gmail.com>
Joshua Prunier <joshua.prunier at gmail.com>
Julien Lefevre <julien.lefevr at gmail.com>
Julien Schmidt <go-sql-driver at julienschmidt.com>
Justin Li <jli at j-li.net>
Justin Nuß <nuss.justin at gmail.com>
Kamil Dziedzic <kamil at klecza.pl>
Kevin Malachowski <kevin at chowski.com>
Kieron Woodhouse <kieron.woodhouse at infosum.com>
Lennart Rudolph <lrudolph at hmc.edu>
Leonardo YongUk Kim <dalinaum at gmail.com>
Linh Tran Tuan <linhduonggnu at gmail.com>
Lion Yang <lion at aosc.xyz>
Luca Looz <luca.looz92 at gmail.com>
Lucas Liu <extrafliu at gmail.com>
Luke Scott <luke at webconnex.com>
Maciej Zimnoch <maciej.zimnoch at codilime.com>
Michael Woolnough <michael.woolnough at gmail.com>
Nathanial Murphy <nathanial.murphy at gmail.com>
Nicola Peduzzi <thenikso at gmail.com>
Olivier Mengué <dolmen at cpan.org>
oscarzhao <oscarzhaosl at gmail.com>
Paul Bonser <misterpib at gmail.com>
Peter Schultz <peter.schultz at classmarkets.com>
Rebecca Chin <rchin at pivotal.io>
Reed Allman <rdallman10 at gmail.com>
Richard Wilkes <wilkes at me.com>
Robert Russell <robert at rrbrussell.com>
Runrioter Wung <runrioter at gmail.com>
Shuode Li <elemount at qq.com>
Simon J Mudd <sjmudd at pobox.com>
Soroush Pour <me at soroushjp.com>
Stan Putrya <root.vagner at gmail.com>
Stanley Gunawan <gunawan.stanley at gmail.com>
Steven Hartland <steven.hartland at multiplay.co.uk>
Thomas Wodarek <wodarekwebpage at gmail.com>
Tim Ruffles <timruffles at gmail.com>
Tom Jenkinson <tom at tjenkinson.me>
Vladimir Kovpak <cn007b at gmail.com>
Xiangyu Hu <xiangyu.hu at outlook.com>
Xiaobing Jiang <s7v7nislands at gmail.com>
Xiuming Chen <cc at cxm.cc>
Julien Lefevre <julien.lefevr at gmail.com>
Zhenye Xie <xiezhenye at gmail.com>
# Organizations
Barracuda Networks, Inc.
Counting Ltd.
DigitalOcean Inc.
Facebook Inc.
GitHub Inc.
Google Inc.
InfoSum Ltd.
Keybase Inc.
Multiplay Ltd.
Percona LLC
Pivotal Inc.
Stripe Inc.

View File

@ -1,21 +1,135 @@
## HEAD
## Version 1.5 (2020-01-07)
Changes:
- Dropped support Go 1.9 and lower (#823, #829, #886, #1016, #1017)
- Improve buffer handling (#890)
- Document potentially insecure TLS configs (#901)
- Use a double-buffering scheme to prevent data races (#943)
- Pass uint64 values without converting them to string (#838, #955)
- Update collations and make utf8mb4 default (#877, #1054)
- Make NullTime compatible with sql.NullTime in Go 1.13+ (#995)
- Removed CloudSQL support (#993, #1007)
- Add Go Module support (#1003)
New Features:
- Implement support of optional TLS (#900)
- Check connection liveness (#934, #964, #997, #1048, #1051, #1052)
- Implement Connector Interface (#941, #958, #1020, #1035)
Bugfixes:
- Mark connections as bad on error during ping (#875)
- Mark connections as bad on error during dial (#867)
- Fix connection leak caused by rapid context cancellation (#1024)
- Mark connections as bad on error during Conn.Prepare (#1030)
## Version 1.4.1 (2018-11-14)
Bugfixes:
- Fix TIME format for binary columns (#818)
- Fix handling of empty auth plugin names (#835)
- Fix caching_sha2_password with empty password (#826)
- Fix canceled context broke mysqlConn (#862)
- Fix OldAuthSwitchRequest support (#870)
- Fix Auth Response packet for cleartext password (#887)
## Version 1.4 (2018-06-03)
Changes:
- Documentation fixes (#530, #535, #567)
- Refactoring (#575, #579, #580, #581, #603, #615, #704)
- Cache column names (#444)
- Sort the DSN parameters in DSNs generated from a config (#637)
- Allow native password authentication by default (#644)
- Use the default port if it is missing in the DSN (#668)
- Removed the `strict` mode (#676)
- Do not query `max_allowed_packet` by default (#680)
- Dropped support Go 1.6 and lower (#696)
- Updated `ConvertValue()` to match the database/sql/driver implementation (#760)
- Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783)
- Improved the compatibility of the authentication system (#807)
New Features:
- Multi-Results support (#537)
- `rejectReadOnly` DSN option (#604)
- `context.Context` support (#608, #612, #627, #761)
- Transaction isolation level support (#619, #744)
- Read-Only transactions support (#618, #634)
- `NewConfig` function which initializes a config with default values (#679)
- Implemented the `ColumnType` interfaces (#667, #724)
- Support for custom string types in `ConvertValue` (#623)
- Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710)
- `caching_sha2_password` authentication plugin support (#794, #800, #801, #802)
- Implemented `driver.SessionResetter` (#779)
- `sha256_password` authentication plugin support (#808)
Bugfixes:
- Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718)
- Fixed LOAD LOCAL DATA INFILE for empty files (#590)
- Removed columns definition cache since it sometimes cached invalid data (#592)
- Don't mutate registered TLS configs (#600)
- Make RegisterTLSConfig concurrency-safe (#613)
- Handle missing auth data in the handshake packet correctly (#646)
- Do not retry queries when data was written to avoid data corruption (#302, #736)
- Cache the connection pointer for error handling before invalidating it (#678)
- Fixed imports for appengine/cloudsql (#700)
- Fix sending STMT_LONG_DATA for 0 byte data (#734)
- Set correct capacity for []bytes read from length-encoded strings (#766)
- Make RegisterDial concurrency-safe (#773)
## Version 1.3 (2016-12-01)
Changes:
- Go 1.1 is no longer supported
- Use decimals field from MySQL to format time types (#249)
- Use decimals fields in MySQL to format time types (#249)
- Buffer optimizations (#269)
- TLS ServerName defaults to the host (#283)
- Refactoring (#400, #410, #437)
- Adjusted documentation for second generation CloudSQL (#485)
- Documented DSN system var quoting rules (#502)
- Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
New Features:
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- Support for returning table alias on Columns() (#289, #359, #382)
- Placeholder interpolation, which can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
- Support for uint64 parameters with high bit set (#332, #345)
- Cleartext authentication plugin support (#327)
- Exported ParseDSN function and the Config struct (#403, #419, #429)
- Read / Write timeouts (#401)
- Support for JSON field type (#414)
- Support for multi-statements and multi-results (#411, #431)
- DSN parameter to set the driver-side max_allowed_packet value manually (#489)
- Native password authentication plugin support (#494, #524)
Bugfixes:
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- Fixed handling of queries without columns and rows (#255)
- Fixed a panic when SetKeepAlive() failed (#298)
New Features:
- Support for returning table alias on Columns() (#289)
- Placeholder interpolation, which can be activated with the DSN parameter `interpolateParams=true` (#309, #318)
- Handle ERR packets while reading rows (#321)
- Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
- Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
- Actually zero out bytes in handshake response (#378)
- Fixed race condition in registering LOAD DATA INFILE handler (#383)
- Fixed tests with MySQL 5.7.9+ (#380)
- QueryUnescape TLS config names (#397)
- Fixed "broken pipe" error by writing to closed socket (#390)
- Fixed LOAD LOCAL DATA INFILE buffering (#424)
- Fixed parsing of floats into float64 when placeholders are used (#434)
- Fixed DSN tests with Go 1.7+ (#459)
- Handle ERR packets while waiting for EOF (#473)
- Invalidate connection on error while discarding additional results (#513)
- Allow terminating packets of length 0 (#516)
## Version 1.2 (2014-06-03)

View File

@ -1,13 +1,9 @@
# Go-MySQL-Driver
A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package
A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
**Latest stable Release:** [Version 1.2 (June 03, 2014)](https://github.com/go-sql-driver/mysql/releases)
[![Build Status](https://travis-ci.org/go-sql-driver/mysql.png?branch=master)](https://travis-ci.org/go-sql-driver/mysql)
---------------------------------------
* [Features](#features)
* [Requirements](#requirements)
@ -19,6 +15,9 @@ A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) packa
* [Address](#address)
* [Parameters](#parameters)
* [Examples](#examples)
* [Connection pool and timeouts](#connection-pool-and-timeouts)
* [context.Context Support](#contextcontext-support)
* [ColumnType Support](#columntype-support)
* [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
* [time.Time support](#timetime-support)
* [Unicode support](#unicode-support)
@ -30,31 +29,31 @@ A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) packa
## Features
* Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
* Native Go implementation. No C-bindings, just pure Go
* Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
* Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
* Automatic handling of broken connections
* Automatic Connection Pooling *(by database/sql package)*
* Supports queries larger than 16MB
* Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support.
* Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
* Intelligent `LONG DATA` handling in prepared statements
* Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
* Optional `time.Time` parsing
* Optional placeholder interpolation
## Requirements
* Go 1.2 or higher
* Go 1.10 or higher. We aim to support the 3 latest versions of Go.
* MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
---------------------------------------
## Installation
Simply install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell:
Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
```bash
$ go get github.com/go-sql-driver/mysql
$ go get -u github.com/go-sql-driver/mysql
```
Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`.
Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
## Usage
_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](http://golang.org/pkg/database/sql) API then.
_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](https://golang.org/pkg/database/sql/) API then.
Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
```go
@ -93,17 +92,20 @@ This has the same effect as an empty DSN string:
```
Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
#### Password
Passwords can consist of any character. Escaping is **not** necessary.
#### Protocol
See [net.Dial](http://golang.org/pkg/net/#Dial) for more information which networks are available.
See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
In general you should use a Unix domain socket if available and TCP otherwise for best performance.
#### Address
For TCP and UDP networks, addresses have the form `host:port`.
For TCP and UDP networks, addresses have the form `host[:port]`.
If `port` is omitted, the default port will be used.
If `host` is a literal IPv6 address, it must be enclosed in square brackets.
The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
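
As an illustrative sketch (host, credentials and database name are placeholders), `net.JoinHostPort` can be used to build the address part of a DSN, since it adds the square brackets that literal IPv6 hosts require:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// JoinHostPort wraps literal IPv6 hosts in the required square brackets.
	addr := net.JoinHostPort("2001:db8::1", "3306")
	dsn := fmt.Sprintf("user:password@tcp(%s)/dbname", addr)
	fmt.Println(dsn) // user:password@tcp([2001:db8::1]:3306)/dbname
}
```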
@ -133,6 +135,15 @@ Default: false
`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
##### `allowNativePasswords`
```
Type: bool
Valid Values: true, false
Default: true
```
`allowNativePasswords=false` disallows the usage of the MySQL native password method.
##### `allowOldPasswords`
```
@ -155,18 +166,34 @@ Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If
Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
Unless you need the fallback behavior, please use `collation` instead.
##### `checkConnLiveness`
```
Type: bool
Valid Values: true, false
Default: true
```
On supported platforms, connections retrieved from the connection pool are checked for liveness before they are used. If the check fails, the respective connection is marked as bad and the query is retried with another connection.
`checkConnLiveness=false` disables this liveness check of connections.
##### `collation`
```
Type: string
Valid Values: <name>
Default: utf8_general_ci
Default: utf8mb4_general_ci
```
Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
A list of valid charsets for a server is retrievable with `SHOW COLLATION`.
The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You should use an older collation (e.g. `utf8_general_ci`) for older MySQL.
Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
##### `clientFoundRows`
```
@ -213,12 +240,31 @@ Valid Values: <escaped name>
Default: UTC
```
Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details.
Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
Please keep in mind, that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
Please keep in mind that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
##### `maxAllowedPacket`
```
Type: decimal number
Default: 4194304
```
Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
##### `multiStatements`
```
Type: bool
Valid Values: true, false
Default: false
```
Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned; all other results are silently discarded.
When `multiStatements` is used, `?` parameters must only be used in the first statement.
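As a rough sketch of a batch query (credentials and table name are placeholders), note that this only works if the DSN sets `multiStatements=true`:
```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname?multiStatements=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Both statements are sent in one call; only the first result is
	// returned, the result of the INSERT is discarded.
	_, err = db.Exec(`
		CREATE TABLE IF NOT EXISTS t (id INT PRIMARY KEY, v TEXT);
		INSERT INTO t (id, v) VALUES (1, 'a');
	`)
	if err != nil {
		log.Fatal(err)
	}
}
```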
##### `parseTime`
@ -229,9 +275,19 @@ Default: false
```
`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`.
A date or datetime value like `0000-00-00 00:00:00` is converted into the zero value of `time.Time`.
##### `strict`
##### `readTimeout`
```
Type: duration
Default: 0
```
I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
##### `rejectReadOnly`
```
Type: bool
@ -239,41 +295,89 @@ Valid Values: true, false
Default: false
```
`strict=true` enables the strict mode in which MySQL warnings are treated as errors.
By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. See the [examples](#examples) for an DSN example.
`rejectReadOnly=true` causes the driver to reject read-only connections. This
guards against a possible race condition during an automatic failover, where the
MySQL client gets connected to a read-only replica after the failover.
Note that this should be a fairly rare case, as an automatic failover normally
happens when the primary is down, and the race condition shouldn't happen
unless it comes back up online as soon as the failover is kicked off. On the
other hand, when this happens, a MySQL application can get stuck on a
read-only connection until restarted. It is however fairly easy to reproduce,
for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
If you are not relying on read-only transactions to reject writes that aren't
supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
is safer for failovers.
Note that ERROR 1290 can be returned for a `read-only` server and this option will
cause a retry for that error. However, the same error number is used for some
other cases. You should ensure your application will never cause an ERROR 1290
except for `read-only` mode when enabling this option.
##### `serverPubKey`
```
Type: string
Valid Values: <name>
Default: none
```
Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN.
Public keys are used to transmit encrypted data, e.g. for authentication.
If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
##### `timeout`
```
Type: decimal number
Type: duration
Default: OS default
```
*Driver* side connection timeout. The value must be a string of decimal numbers, each with optional fraction and a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
##### `tls`
```
Type: bool / string
Valid Values: true, false, skip-verify, <name>
Valid Values: true, false, skip-verify, preferred, <name>
Default: false
```
`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side) or use `preferred` to use TLS only when advertised by the server. This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
##### `writeTimeout`
```
Type: duration
Default: 0
```
I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
##### System Variables
All other parameters are interpreted as system variables:
* `autocommit`: `"SET autocommit=<value>"`
* [`time_zone`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `"SET time_zone=<value>"`
* [`tx_isolation`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `"SET tx_isolation=<value>"`
* `param`: `"SET <param>=<value>"`
Any other parameters are interpreted as system variables:
* `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
* `<enum_var>=<value>`: `SET <enum_var>=<value>`
* `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
Rules:
* The values for string variables must be quoted with `'`.
* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
(which implies values of string variables must be wrapped with `%27`).
Examples:
* `autocommit=1`: `SET autocommit=1`
* [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
* [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
*The values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!*
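A small sketch of assembling such a DSN programmatically, using `url.QueryEscape` for the required escaping (credentials and database name are placeholders):
```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// String-valued system variables must be wrapped in single quotes and
	// URL-escaped; QueryEscape turns '...' into %27...%27 and / into %2F.
	tz := url.QueryEscape("'Europe/Paris'")
	iso := url.QueryEscape("'REPEATABLE-READ'")

	dsn := "user:password@/dbname?autocommit=1&time_zone=" + tz + "&tx_isolation=" + iso
	fmt.Println(dsn)
	// Prints:
	// user:password@/dbname?autocommit=1&time_zone=%27Europe%2FParis%27&tx_isolation=%27REPEATABLE-READ%27
}
```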
#### Examples
```
@ -288,9 +392,9 @@ root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
```
Use the [strict mode](#strict) but ignore notes:
Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
```
user:password@/dbname?strict=true&sql_notes=false
user:password@/dbname?sql_mode=TRADITIONAL
```
TCP via IPv6:
@ -305,7 +409,7 @@ id:password@tcp(your-amazonaws-uri.com:3306)/dbname
Google Cloud SQL on App Engine:
```
user@cloudsql(project-id:instance-name)/dbname
user:password@unix(/cloudsql/project-id:region-name:instance-name)/dbname
```
TCP using default port (3306) on localhost:
@ -323,6 +427,18 @@ No Database preselected:
user:password@/
```
### Connection pool and timeouts
The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
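A minimal sketch combining both: per-connection timeouts in the DSN and pool settings on `*sql.DB` (all values below are arbitrary placeholders, not recommendations):
```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Dial, read and write timeouts apply to each individual connection.
	db, err := sql.Open("mysql", "user:password@/dbname?timeout=5s&readTimeout=30s&writeTimeout=30s")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pool sizing and connection lifetime are configured on *sql.DB.
	db.SetMaxOpenConns(25)
	db.SetMaxIdleConns(25)
	db.SetConnMaxLifetime(5 * time.Minute)
}
```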
## `ColumnType` Support
This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
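As a short sketch (query and table name are placeholders), column metadata can be inspected like this:
```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT id, name FROM users") // placeholder query
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	// ColumnTypes exposes the driver-reported metadata for each result column.
	types, err := rows.ColumnTypes()
	if err != nil {
		log.Fatal(err)
	}
	for _, ct := range types {
		nullable, ok := ct.Nullable()
		log.Printf("column=%s dbtype=%s nullable=%v (known=%v)",
			ct.Name(), ct.DatabaseTypeName(), nullable, ok)
	}
}
```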
## `context.Context` Support
Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
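A brief sketch of a per-query timeout via `context` (DSN and query are placeholders):
```go
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The query is cancelled if it does not finish within two seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	var now string
	if err := db.QueryRowContext(ctx, "SELECT NOW()").Scan(&now); err != nil {
		log.Fatal(err)
	}
	log.Println(now)
}
```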
### `LOAD DATA LOCAL INFILE` support
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
```go
@ -333,28 +449,27 @@ Files must be whitelisted by registering them with `mysql.RegisterLocalFile(file
To use an `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns an `io.Reader` or `io.ReadCloser`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when you don't need them anymore.
See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
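A rough sketch of both variants, assuming a placeholder file path, DSN and table name, and a server configured to permit `LOCAL INFILE`:
```go
package main

import (
	"database/sql"
	"io"
	"log"
	"strings"

	"github.com/go-sql-driver/mysql" // direct import (no "_") to reach the helpers
)

func main() {
	// Variant 1: whitelist a concrete file on disk (placeholder path).
	mysql.RegisterLocalFile("/tmp/users.csv")

	// Variant 2: serve the data from an io.Reader under a virtual name.
	mysql.RegisterReaderHandler("csv", func() io.Reader {
		return strings.NewReader("1,alice\n2,bob\n")
	})
	defer mysql.DeregisterReaderHandler("csv")

	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Table name is a placeholder; the server must also allow LOCAL INFILE.
	_, err = db.Exec("LOAD DATA LOCAL INFILE 'Reader::csv' INTO TABLE users FIELDS TERMINATED BY ','")
	if err != nil {
		log.Fatal(err)
	}
}
```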
### `time.Time` support
The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your programm.
The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
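A small sketch of scanning into `time.Time` and `NullTime` (table and column names are placeholders; the DSN must include `parseTime=true`):
```go
package main

import (
	"database/sql"
	"log"
	"time"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// parseTime=true makes DATE/DATETIME scan into time.Time;
	// loc sets the location used for those values.
	db, err := sql.Open("mysql", "user:password@/dbname?parseTime=true&loc=Local")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var created time.Time
	var deleted mysql.NullTime // handles NULL values as well
	err = db.QueryRow("SELECT created_at, deleted_at FROM users WHERE id = ?", 1).
		Scan(&created, &deleted)
	if err != nil {
		log.Fatal(err)
	}
	if deleted.Valid {
		log.Println("deleted at", deleted.Time)
	}
	log.Println("created at", created)
}
```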
### Unicode support
Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default.
Since version 1.5 Go-MySQL-Driver automatically uses the collation `utf8mb4_general_ci` by default.
Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.
## Testing / Development
To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
@ -374,13 +489,13 @@ Mozilla summarizes the license scope as follows:
That means:
* You can **use** the **unchanged** source code both in private and commercially
* When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0)
* You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**
* You can **use** the **unchanged** source code both in private and commercially.
* When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
* You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license.
Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")

422
vendor/github.com/go-sql-driver/mysql/auth.go generated vendored Normal file
View File

@ -0,0 +1,422 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"crypto/x509"
"encoding/pem"
"sync"
)
// server pub keys registry
var (
serverPubKeyLock sync.RWMutex
serverPubKeyRegistry map[string]*rsa.PublicKey
)
// RegisterServerPubKey registers a server RSA public key which can be used to
// send data in a secure manner to the server without receiving the public key
// in a potentially insecure way from the server first.
// Registered keys can afterwards be used adding serverPubKey=<name> to the DSN.
//
// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
// after registering it and may not be modified.
//
// data, err := ioutil.ReadFile("mykey.pem")
// if err != nil {
// log.Fatal(err)
// }
//
// block, _ := pem.Decode(data)
// if block == nil || block.Type != "PUBLIC KEY" {
// log.Fatal("failed to decode PEM block containing public key")
// }
//
// pub, err := x509.ParsePKIXPublicKey(block.Bytes)
// if err != nil {
// log.Fatal(err)
// }
//
// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok {
// mysql.RegisterServerPubKey("mykey", rsaPubKey)
// } else {
// log.Fatal("not a RSA public key")
// }
//
func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) {
serverPubKeyLock.Lock()
if serverPubKeyRegistry == nil {
serverPubKeyRegistry = make(map[string]*rsa.PublicKey)
}
serverPubKeyRegistry[name] = pubKey
serverPubKeyLock.Unlock()
}
// DeregisterServerPubKey removes the public key registered with the given name.
func DeregisterServerPubKey(name string) {
serverPubKeyLock.Lock()
if serverPubKeyRegistry != nil {
delete(serverPubKeyRegistry, name)
}
serverPubKeyLock.Unlock()
}
func getServerPubKey(name string) (pubKey *rsa.PublicKey) {
serverPubKeyLock.RLock()
if v, ok := serverPubKeyRegistry[name]; ok {
pubKey = v
}
serverPubKeyLock.RUnlock()
return
}
// Hash password using pre 4.1 (old password) method
// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
type myRnd struct {
seed1, seed2 uint32
}
const myRndMaxVal = 0x3FFFFFFF
// Pseudo random number generator
func newMyRnd(seed1, seed2 uint32) *myRnd {
return &myRnd{
seed1: seed1 % myRndMaxVal,
seed2: seed2 % myRndMaxVal,
}
}
// Tested to be equivalent to MariaDB's floating point variant
// http://play.golang.org/p/QHvhd4qved
// http://play.golang.org/p/RG0q4ElWDx
func (r *myRnd) NextByte() byte {
r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
return byte(uint64(r.seed1) * 31 / myRndMaxVal)
}
// Generate binary hash from byte string using insecure pre 4.1 method
func pwHash(password []byte) (result [2]uint32) {
var add uint32 = 7
var tmp uint32
result[0] = 1345345333
result[1] = 0x12345671
for _, c := range password {
// skip spaces and tabs in password
if c == ' ' || c == '\t' {
continue
}
tmp = uint32(c)
result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
result[1] += (result[1] << 8) ^ result[0]
add += tmp
}
	// Remove sign bit by masking with (1<<31)-1
result[0] &= 0x7FFFFFFF
result[1] &= 0x7FFFFFFF
return
}
// Hash password using insecure pre 4.1 method
func scrambleOldPassword(scramble []byte, password string) []byte {
if len(password) == 0 {
return nil
}
scramble = scramble[:8]
hashPw := pwHash([]byte(password))
hashSc := pwHash(scramble)
r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
var out [8]byte
for i := range out {
out[i] = r.NextByte() + 64
}
mask := r.NextByte()
for i := range out {
out[i] ^= mask
}
return out[:]
}
// Hash password using 4.1+ method (SHA1)
func scramblePassword(scramble []byte, password string) []byte {
if len(password) == 0 {
return nil
}
// stage1Hash = SHA1(password)
crypt := sha1.New()
crypt.Write([]byte(password))
stage1 := crypt.Sum(nil)
// scrambleHash = SHA1(scramble + SHA1(stage1Hash))
// inner Hash
crypt.Reset()
crypt.Write(stage1)
hash := crypt.Sum(nil)
// outer Hash
crypt.Reset()
crypt.Write(scramble)
crypt.Write(hash)
scramble = crypt.Sum(nil)
// token = scrambleHash XOR stage1Hash
for i := range scramble {
scramble[i] ^= stage1[i]
}
return scramble
}
// Hash password using MySQL 8+ method (SHA256)
func scrambleSHA256Password(scramble []byte, password string) []byte {
if len(password) == 0 {
return nil
}
// XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble))
crypt := sha256.New()
crypt.Write([]byte(password))
message1 := crypt.Sum(nil)
crypt.Reset()
crypt.Write(message1)
message1Hash := crypt.Sum(nil)
crypt.Reset()
crypt.Write(message1Hash)
crypt.Write(scramble)
message2 := crypt.Sum(nil)
for i := range message1 {
message1[i] ^= message2[i]
}
return message1
}
func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
plain := make([]byte, len(password)+1)
copy(plain, password)
for i := range plain {
j := i % len(seed)
plain[i] ^= seed[j]
}
sha1 := sha1.New()
return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
}
func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
if err != nil {
return err
}
return mc.writeAuthSwitchPacket(enc)
}
func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
switch plugin {
case "caching_sha2_password":
authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
return authResp, nil
case "mysql_old_password":
if !mc.cfg.AllowOldPasswords {
return nil, ErrOldPassword
}
// Note: there are edge cases where this should work but doesn't;
// this is currently "wontfix":
// https://github.com/go-sql-driver/mysql/issues/184
authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0)
return authResp, nil
case "mysql_clear_password":
if !mc.cfg.AllowCleartextPasswords {
return nil, ErrCleartextPassword
}
// http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
// http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
return append([]byte(mc.cfg.Passwd), 0), nil
case "mysql_native_password":
if !mc.cfg.AllowNativePasswords {
return nil, ErrNativePassword
}
// https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
		// Native password authentication only needs (and will only ever need) a 20-byte challenge.
authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
return authResp, nil
case "sha256_password":
if len(mc.cfg.Passwd) == 0 {
return []byte{0}, nil
}
if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
// write cleartext auth packet
return append([]byte(mc.cfg.Passwd), 0), nil
}
pubKey := mc.cfg.pubKey
if pubKey == nil {
// request public key from server
return []byte{1}, nil
}
// encrypted password
enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
return enc, err
default:
errLog.Print("unknown auth plugin:", plugin)
return nil, ErrUnknownPlugin
}
}
func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
// Read Result Packet
authData, newPlugin, err := mc.readAuthResult()
if err != nil {
return err
}
// handle auth plugin switch, if requested
if newPlugin != "" {
// If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
// sent and we have to keep using the cipher sent in the init packet.
if authData == nil {
authData = oldAuthData
} else {
// copy data from read buffer to owned slice
copy(oldAuthData, authData)
}
plugin = newPlugin
authResp, err := mc.auth(authData, plugin)
if err != nil {
return err
}
if err = mc.writeAuthSwitchPacket(authResp); err != nil {
return err
}
// Read Result Packet
authData, newPlugin, err = mc.readAuthResult()
if err != nil {
return err
}
// Do not allow to change the auth plugin more than once
if newPlugin != "" {
return ErrMalformPkt
}
}
switch plugin {
// https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
case "caching_sha2_password":
switch len(authData) {
case 0:
return nil // auth successful
case 1:
switch authData[0] {
case cachingSha2PasswordFastAuthSuccess:
if err = mc.readResultOK(); err == nil {
return nil // auth successful
}
case cachingSha2PasswordPerformFullAuthentication:
if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
// write cleartext auth packet
err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0))
if err != nil {
return err
}
} else {
pubKey := mc.cfg.pubKey
if pubKey == nil {
// request public key from server
data, err := mc.buf.takeSmallBuffer(4 + 1)
if err != nil {
return err
}
data[4] = cachingSha2PasswordRequestPublicKey
mc.writePacket(data)
// parse public key
if data, err = mc.readPacket(); err != nil {
return err
}
block, _ := pem.Decode(data[1:])
pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
return err
}
pubKey = pkix.(*rsa.PublicKey)
}
// send encrypted password
err = mc.sendEncryptedPassword(oldAuthData, pubKey)
if err != nil {
return err
}
}
return mc.readResultOK()
default:
return ErrMalformPkt
}
default:
return ErrMalformPkt
}
case "sha256_password":
switch len(authData) {
case 0:
return nil // auth successful
default:
block, _ := pem.Decode(authData)
pub, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
return err
}
// send encrypted password
err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey))
if err != nil {
return err
}
return mc.readResultOK()
}
default:
return nil // auth successful
}
return err
}

1330
vendor/github.com/go-sql-driver/mysql/auth_test.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More