Merge branch 'master' into test-latin1text

commit 3cc41e24a8

.github/PULL_REQUEST_TEMPLATE.md (vendored) | 2
@@ -11,7 +11,7 @@ Related issue: https://github.com/github/gh-ost/issues/0123456789
 
 ### Description
 
-This PR [briefly explain what is does]
+This PR [briefly explain what it does]
 
 > In case this PR introduced Go code changes:
 

.github/workflows/ci.yml (vendored, new file) | 20
@@ -0,0 +1,20 @@
+name: CI
+
+on: [pull_request]
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@master
+
+    - name: Set up Go 1.12
+      uses: actions/setup-go@v1
+      with:
+        version: 1.12
+      id: go
+
+    - name: Build
+      run: script/cibuild

.github/workflows/replica-tests.yml (vendored, new file) | 20
@@ -0,0 +1,20 @@
+name: migration tests
+
+on: [pull_request]
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@master
+
+    - name: Set up Go 1.12
+      uses: actions/setup-go@v1
+      with:
+        version: 1.12
+      id: go
+
+    - name: migration tests
+      run: script/cibuild-gh-ost-replica-tests

.travis.yml (deleted) | 20
@@ -1,20 +0,0 @@
-# http://docs.travis-ci.com/user/languages/go/
-language: go
-
-go: 1.8
-
-os:
-- linux
-
-env:
-- MYSQL_USER=root
-
-before_install:
-- mysql -e 'CREATE DATABASE IF NOT EXISTS test;'
-
-install: true
-
-script: script/cibuild
-
-notifications:
-  email: false

Dockerfile.packaging (new file) | 22
@@ -0,0 +1,22 @@
+#
+
+FROM golang:1.12.6
+
+RUN apt-get update
+RUN apt-get install -y ruby ruby-dev rubygems build-essential
+RUN gem install --no-ri --no-rdoc fpm
+ENV GOPATH=/tmp/go
+
+RUN apt-get install -y curl
+RUN apt-get install -y rsync
+RUN apt-get install -y gcc
+RUN apt-get install -y g++
+RUN apt-get install -y bash
+RUN apt-get install -y git
+RUN apt-get install -y tar
+RUN apt-get install -y rpm
+
+RUN mkdir -p $GOPATH/src/github.com/github/gh-ost
+WORKDIR $GOPATH/src/github.com/github/gh-ost
+COPY . .
+RUN bash build.sh
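For readers who want to exercise this packaging image, a sketch (the image name `gh-ost-packaging`, container name `gh-ost-pkg` and `./dist` directory are made up; the `/tmp/gh-ost-release` path comes from the reworked build.sh later in this diff):

```bash
# Build the packaging image; build.sh runs during the image build.
docker build -f Dockerfile.packaging -t gh-ost-packaging .

# Copy the produced artifacts (tar.gz, rpm, deb) out of the image.
docker create --name gh-ost-pkg gh-ost-packaging
docker cp gh-ost-pkg:/tmp/gh-ost-release ./dist
docker rm gh-ost-pkg
```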

Dockerfile.test (new file) | 11
@@ -0,0 +1,11 @@
+FROM golang:1.12.1
+LABEL maintainer="github@github.com"
+
+RUN apt-get update
+RUN apt-get install -y lsb-release
+RUN rm -rf /var/lib/apt/lists/*
+
+COPY . /go/src/github.com/github/gh-ost
+WORKDIR /go/src/github.com/github/gh-ost
+
+CMD ["script/test"]

README.md

@@ -94,7 +94,7 @@ Please see [Coding gh-ost](doc/coding-ghost.md) for a guide to getting started d
 
 [Download latest release here](https://github.com/github/gh-ost/releases/latest)
 
-`gh-ost` is a Go project; it is built with Go `1.8` (though `1.7` should work as well). To build on your own, use either:
+`gh-ost` is a Go project; it is built with Go `1.12` and above. To build on your own, use either:
 - [script/build](https://github.com/github/gh-ost/blob/master/script/build) - this is the same build script used by CI hence the authoritative; artifact is `./bin/gh-ost` binary.
 - [build.sh](https://github.com/github/gh-ost/blob/master/build.sh) for building `tar.gz` artifacts in `/tmp/gh-ost`
 
@@ -107,3 +107,5 @@ Generally speaking, `master` branch is stable, but only [releases](https://githu
 - [@ggunson](https://github.com/ggunson)
 - [@tomkrouper](https://github.com/tomkrouper)
 - [@shlomi-noach](https://github.com/shlomi-noach)
+- [@jessbreckenridge](https://github.com/jessbreckenridge)
+- [@gtowey](https://github.com/gtowey)

RELEASE_VERSION

@@ -1 +1 @@
-1.0.42
+1.0.49

build.sh | 90
@@ -2,36 +2,70 @@
 #
 #
 
-RELEASE_VERSION=$(cat RELEASE_VERSION)
+RELEASE_VERSION=
+buildpath=
 
-function build {
-  osname=$1
-  osshort=$2
-  GOOS=$3
-  GOARCH=$4
-
-  echo "Building ${osname} binary"
-  export GOOS
-  export GOARCH
-  go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
-
-  if [ $? -ne 0 ]; then
-    echo "Build failed for ${osname}"
-    exit 1
-  fi
-
-  (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${timestamp}.tar.gz $target)
-}
-
-buildpath=/tmp/gh-ost
-target=gh-ost
-timestamp=$(date "+%Y%m%d%H%M%S")
-ldflags="-X main.AppVersion=${RELEASE_VERSION}"
-export GO15VENDOREXPERIMENT=1
-
-mkdir -p ${buildpath}
-build macOS osx darwin amd64
-build GNU/Linux linux linux amd64
-
-echo "Binaries found in:"
-ls -1 $buildpath/gh-ost-binary*${timestamp}.tar.gz
+function setuptree() {
+  b=$( mktemp -d $buildpath/gh-ostXXXXXX ) || return 1
+  mkdir -p $b/gh-ost
+  mkdir -p $b/gh-ost/usr/bin
+  echo $b
+}
+
+function build {
+  osname=$1
+  osshort=$2
+  GOOS=$3
+  GOARCH=$4
+
+  if ! go version | egrep -q 'go(1\.1[234])' ; then
+    echo "go version must be 1.12 or above"
+    exit 1
+  fi
+
+  echo "Building ${osname} binary"
+  export GOOS
+  export GOARCH
+  go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
+
+  if [ $? -ne 0 ]; then
+    echo "Build failed for ${osname}"
+    exit 1
+  fi
+
+  (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${timestamp}.tar.gz $target)
+
+  if [ "$GOOS" == "linux" ] ; then
+    echo "Creating Distro full packages"
+    builddir=$(setuptree)
+    cp $buildpath/$target $builddir/gh-ost/usr/bin
+    cd $buildpath
+    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm .
+    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
+  fi
+}
+
+main() {
+  if [ -z "${RELEASE_VERSION}" ] ; then
+    RELEASE_VERSION=$(git describe --abbrev=0 --tags | tr -d 'v')
+  fi
+  if [ -z "${RELEASE_VERSION}" ] ; then
+    RELEASE_VERSION=$(cat RELEASE_VERSION)
+  fi
+
+  buildpath=/tmp/gh-ost-release
+  target=gh-ost
+  timestamp=$(date "+%Y%m%d%H%M%S")
+  ldflags="-X main.AppVersion=${RELEASE_VERSION}"
+
+  mkdir -p ${buildpath}
+  rm -rf ${buildpath:?}/*
+  build GNU/Linux linux linux amd64
+  # build macOS osx darwin amd64
+
+  echo "Binaries found in:"
+  find $buildpath/gh-ost* -type f -maxdepth 1
+}
+
+main "$@"
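As a usage sketch for the reworked script (not part of the diff; output paths and tarball naming come from the script itself):

```bash
# Build release artifacts; the version comes from the latest git tag,
# falling back to the RELEASE_VERSION file (see main() above).
bash build.sh

# Inspect what was produced: a tar.gz plus .rpm/.deb packages.
ls -l /tmp/gh-ost-release/
```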

doc/command-line-flags.md

@@ -2,6 +2,14 @@
 
 A more in-depth discussion of various `gh-ost` command line flags: implementation, implication, use cases.
 
+### aliyun-rds
+
+Add this flag when executing on Aliyun RDS.
+
+### allow-master-master
+
+See [`--assume-master-host`](#assume-master-host).
+
 ### allow-on-master
 
 By default, `gh-ost` would like you to connect to a replica, from where it figures out the master by itself. This wiring is required should your master execute using `binlog_format=STATEMENT`.
@@ -10,24 +18,24 @@ If, for some reason, you do not wish `gh-ost` to connect to a replica, you may c
 
 ### approve-renamed-columns
 
-When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try an associate the old column name with new column name. Otherwise the new structure may also look like some column was dropped and another was added.
+When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise the new structure may also look like some column was dropped and another was added.
 
 `gh-ost` will print out what it thinks the _rename_ implied, but will not issue the migration unless you provide with `--approve-renamed-columns`.
 
-If you think `gh-ost` is mistaken and that there's actually no _rename_ involved, you may pass `--skip-renamed-columns` instead. This will cause `gh-ost` to disassociate the column values; data will not be copied between those columns.
+If you think `gh-ost` is mistaken and that there's actually no _rename_ involved, you may pass [`--skip-renamed-columns`](#skip-renamed-columns) instead. This will cause `gh-ost` to disassociate the column values; data will not be copied between those columns.
 
 ### assume-master-host
 
 `gh-ost` infers the identity of the master server by crawling up the replication topology. You may explicitly tell `gh-ost` the identity of the master host via `--assume-master-host=the.master.com`. This is useful in:
 
-- master-master topologies (together with `--allow-master-master`), where `gh-ost` can arbitrarily pick one of the co-master and you prefer that it picks a specific one
-- _tungsten replicator_ topologies (together with `--tungsten`), where `gh-ost` is unable to crawl and detect the master
+- _master-master_ topologies (together with [`--allow-master-master`](#allow-master-master)), where `gh-ost` can arbitrarily pick one of the co-masters and you prefer that it picks a specific one
+- _tungsten replicator_ topologies (together with [`--tungsten`](#tungsten)), where `gh-ost` is unable to crawl and detect the master
 
 ### assume-rbr
 
 If you happen to _know_ your servers use RBR (Row Based Replication, i.e. `binlog_format=ROW`), you may specify `--assume-rbr`. This skips a verification step where `gh-ost` would issue a `STOP SLAVE; START SLAVE`.
 Skipping this step means `gh-ost` would not need the `SUPER` privilege in order to operate.
-You may want to use this on Amazon RDS
+You may want to use this on Amazon RDS.
 
 ### conf
 
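An illustrative invocation tying `--allow-master-master` and `--assume-master-host` together (hostname, schema and table names are made up; connection flags omitted):

```bash
# Pin gh-ost to a specific co-master in a master-master topology.
gh-ost \
  --allow-master-master \
  --assume-master-host=master1.example.com \
  --database=mydb --table=mytable \
  --alter="ADD COLUMN ts timestamp" \
  --execute
```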
@@ -41,21 +49,29 @@ password=123456
 
 ### concurrent-rowcount
 
-See `exact-rowcount`
+Defaults to `true`. See [`exact-rowcount`](#exact-rowcount)
 
-### critical-load-interval-millis
+### critical-load
+
+Comma delimited status-name=threshold, same format as [`--max-load`](#max-load).
 
 `--critical-load` defines a threshold that, when met, `gh-ost` panics and bails out. The default behavior is to bail out immediately when meeting this threshold.
 
 This may sometimes lead to migrations bailing out on a very short spike, that, while in itself is impacting production and is worth investigating, isn't reason enough to kill a 10 hour migration.
 
+### critical-load-interval-millis
+
 When `--critical-load-interval-millis` is specified (e.g. `--critical-load-interval-millis=2500`), `gh-ost` gives a second chance: when it meets `critical-load` threshold, it doesn't bail out. Instead, it starts a timer (in this example: `2.5` seconds) and re-checks `critical-load` when the timer expires. If `critical-load` is met again, `gh-ost` panics and bails out. If not, execution continues.
 
 This is somewhat similar to a Nagios `n`-times test, where `n` in our case is always `2`.
 
 ### cut-over
 
-Optional. Default is `safe`. See more discussion in [cut-over](cut-over.md)
+Optional. Default is `safe`. See more discussion in [`cut-over`](cut-over.md)
 
+### cut-over-lock-timeout-seconds
+
+Default `3`. Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout).
+
 ### discard-foreign-keys
 
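An illustrative combination of the two critical-load flags (threshold, schema and table names are made up; connection flags omitted):

```bash
# Panic only if Threads_running stays above 1000 across a 2.5s re-check,
# rather than bailing out on a momentary spike.
gh-ost \
  --critical-load="Threads_running=1000" \
  --critical-load-interval-millis=2500 \
  --database=mydb --table=mytable \
  --alter="ENGINE=InnoDB" \
  --execute
```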
@@ -74,7 +90,7 @@ The `--dml-batch-size` flag controls the size of the batched write. Allowed valu
 
 Why is this behavior configurable? Different workloads have different characteristics. Some workloads have very large writes, such that aggregating even `50` writes into a transaction makes for a significant transaction size. On other workloads write rate is high such that one just can't allow for a hundred more syncs to disk per second. The default value of `10` is a modest compromise that should probably work very well for most workloads. Your mileage may vary.
 
-Noteworthy is that setting `--dml-batch-size` to higher value _does not_ mean `gh-ost` blocks or waits on writes. The batch size is an upper limit on transaction size, not a minimal one. If `gh-ost` doesn't have "enough" events in the pipe, it does not wait on the binary log, it just writes what it already has. This conveniently suggests that if write load is light enough for `gh-ost` to only see a few events in the binary log at a given time, then it is also light neough for `gh-ost` to apply a fraction of the batch size.
+Noteworthy is that setting `--dml-batch-size` to higher value _does not_ mean `gh-ost` blocks or waits on writes. The batch size is an upper limit on transaction size, not a minimal one. If `gh-ost` doesn't have "enough" events in the pipe, it does not wait on the binary log, it just writes what it already has. This conveniently suggests that if write load is light enough for `gh-ost` to only see a few events in the binary log at a given time, then it is also light enough for `gh-ost` to apply a fraction of the batch size.
 
 ### exact-rowcount
 
@@ -84,8 +100,8 @@ A `gh-ost` execution need to copy whatever rows you have in your existing table
 `gh-ost` also supports the `--exact-rowcount` flag. When this flag is given, two things happen:
 - An initial, authoritative `select count(*) from your_table`.
   This query may take a long time to complete, but is performed before we begin the massive operations.
-  When `--concurrent-rowcount` is also specified, this runs in parallel to row copy.
-  Note: `--concurrent-rowcount` now defaults to `true`.
+  When [`--concurrent-rowcount`](#concurrent-rowcount) is also specified, this runs in parallel to row copy.
+  Note: [`--concurrent-rowcount`](#concurrent-rowcount) now defaults to `true`.
 - A continuous update to the estimate as we make progress applying events.
   We heuristically update the number of rows based on the queries we process from the binlogs.
 
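A sketch of requesting the exact, concurrently-maintained row count (schema and table names are made up; connection flags omitted):

```bash
# Run an authoritative count(*) up front, computed in parallel with row copy,
# so progress percentages are near-exact rather than estimated.
gh-ost \
  --exact-rowcount \
  --concurrent-rowcount \
  --database=mydb --table=mytable \
  --alter="ADD COLUMN i int" \
  --execute
```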
@@ -95,6 +111,26 @@ While the ongoing estimated number of rows is still heuristic, it's almost exact
 
 Without this parameter, migration is a _noop_: testing table creation and validity of migration, but not touching data.
 
+### force-named-cut-over
+
+If given, a `cut-over` command must name the migrated table, or else ignored.
+
+### force-named-panic
+
+If given, a `panic` command must name the migrated table, or else ignored.
+
+### force-table-names
+
+Table name prefix to be used on the temporary tables.
+
+### gcp
+
+Add this flag when executing on a 1st generation Google Cloud Platform (GCP).
+
+### heartbeat-interval-millis
+
+Default 100. See [`subsecond-lag`](subsecond-lag.md) for details.
+
 ### initially-drop-ghost-table
 
 `gh-ost` maintains two tables while migrating: the _ghost_ table (which is synced from your original table and finally replaces it) and a changelog table, which is used internally for bookkeeping. By default, it panics and aborts if it sees those tables upon startup. Provide `--initially-drop-ghost-table` and `--initially-drop-old-table` to let `gh-ost` know it's OK to drop them beforehand.
@@ -103,37 +139,87 @@ We think `gh-ost` should not take chances or make assumptions about the user's t
 
 ### initially-drop-old-table
 
-See #initially-drop-ghost-table
+See [`initially-drop-ghost-table`](#initially-drop-ghost-table)
 
+### initially-drop-socket-file
+
+Default False. Should `gh-ost` forcibly delete an existing socket file. Be careful: this might drop the socket file of a running migration!
+
 ### max-lag-millis
 
 On a replication topology, this is perhaps the most important migration throttling factor: the maximum lag allowed for migration to work. If lag exceeds this value, migration throttles.
 
-When using [Connect to replica, migrate on master](cheatsheet.md), this lag is primarily tested on the very replica `gh-ost` operates on. Lag is measured by checking the heartbeat events injected by `gh-ost` itself on the utility changelog table. That is, to measure this replica's lag, `gh-ost` doesn't need to issue `show slave status` nor have any external heartbeat mechanism.
+When using [Connect to replica, migrate on master](cheatsheet.md#a-connect-to-replica-migrate-on-master), this lag is primarily tested on the very replica `gh-ost` operates on. Lag is measured by checking the heartbeat events injected by `gh-ost` itself on the utility changelog table. That is, to measure this replica's lag, `gh-ost` doesn't need to issue `show slave status` nor have any external heartbeat mechanism.
 
-When `--throttle-control-replicas` is provided, throttling also considers lag on specified hosts. Lag measurements on listed hosts is done by querying `gh-ost`'s _changelog_ table, where `gh-ost` injects a heartbeat.
+When [`--throttle-control-replicas`](#throttle-control-replicas) is provided, throttling also considers lag on specified hosts. Lag measurements on listed hosts is done by querying `gh-ost`'s _changelog_ table, where `gh-ost` injects a heartbeat.
 
 See also: [Sub-second replication lag throttling](subsecond-lag.md)
 
+### max-load
+
+List of metrics and threshold values; topping the threshold of any will cause throttler to kick in. See also: [`throttling`](throttle.md#status-thresholds)
+
 ### migrate-on-replica
 
-Typically `gh-ost` is used to migrate tables on a master. If you wish to only perform the migration in full on a replica, connect `gh-ost` to said replica and pass `--migrate-on-replica`. `gh-ost` will briefly connect to the master but other issue no changes on the master. Migration will be fully executed on the replica, while making sure to maintain a small replication lag.
+Typically `gh-ost` is used to migrate tables on a master. If you wish to only perform the migration in full on a replica, connect `gh-ost` to said replica and pass `--migrate-on-replica`. `gh-ost` will briefly connect to the master but otherwise will make no changes on the master. Migration will be fully executed on the replica, while making sure to maintain a small replication lag.
 
+### postpone-cut-over-flag-file
+
+Indicate a file name, such that the final [cut-over](cut-over.md) step does not take place as long as the file exists.
+When this flag is set, `gh-ost` expects the file to exist on startup, or else tries to create it. `gh-ost` exits with error if the file does not exist and `gh-ost` is unable to create it.
+With this flag set, the migration will cut-over upon deletion of the file or upon `cut-over` [interactive command](interactive-commands.md).
+
+### replica-server-id
+
+Defaults to 99999. If you run multiple migrations then you must provide a different, unique `--replica-server-id` for each `gh-ost` process.
+Optionally involve the process ID, for example: `--replica-server-id=$((1000000000+$$))`.
+
+It's on you to choose a number that does not collide with another `gh-ost` or another running replica.
+See also: [`concurrent-migrations`](cheatsheet.md#concurrent-migrations) on the cheatsheet.
+
 ### skip-foreign-key-checks
 
-By default `gh-ost` verifies no foreign keys exist on the migrated table. On servers with large number of tables this check can take a long time. If you're absolutely certain no foreign keys exist (table does not referenece other table nor is referenced by other tables) and wish to save the check time, provide with `--skip-foreign-key-checks`.
+By default `gh-ost` verifies no foreign keys exist on the migrated table. On servers with large number of tables this check can take a long time. If you're absolutely certain no foreign keys exist (table does not reference other table nor is referenced by other tables) and wish to save the check time, provide with `--skip-foreign-key-checks`.
 
+### skip-strict-mode
+
+By default `gh-ost` enforces STRICT_ALL_TABLES sql_mode as a safety measure. In some cases this changes the behaviour of other modes (namely ERROR_FOR_DIVISION_BY_ZERO, NO_ZERO_DATE, and NO_ZERO_IN_DATE) which may lead to errors during migration. Use `--skip-strict-mode` to explicitly tell `gh-ost` not to enforce this. **Danger** This may have some unexpected disastrous side effects.
+
 ### skip-renamed-columns
 
-See `approve-renamed-columns`
+See [`approve-renamed-columns`](#approve-renamed-columns)
 
+### ssl
+
+By default `gh-ost` does not use ssl/tls connections to the database servers when performing migrations. This flag instructs `gh-ost` to use encrypted connections. If enabled, `gh-ost` will use the system's ca certificate pool for server certificate verification. If a different certificate is needed for server verification, see `--ssl-ca`. If you wish to skip server verification, but still use encrypted connections, use with `--ssl-allow-insecure`.
+
+### ssl-allow-insecure
+
+Allows `gh-ost` to connect to the MySQL servers using encrypted connections, but without verifying the validity of the certificate provided by the server during the connection. Requires `--ssl`.
+
+### ssl-ca
+
+`--ssl-ca=/path/to/ca-cert.pem`: ca certificate file (in PEM format) to use for server certificate verification. If specified, the default system ca cert pool will not be used for verification, only the ca cert provided here. Requires `--ssl`.
+
+### ssl-cert
+
+`--ssl-cert=/path/to/ssl-cert.crt`: SSL public key certificate file (in PEM format).
+
+### ssl-key
+
+`--ssl-key=/path/to/ssl-key.key`: SSL private key file (in PEM format).
+
 ### test-on-replica
 
-Issue the migration on a replica; do not modify data on master. Useful for validating, testing and benchmarking. See [testing-on-replica](testing-on-replica.md)
+Issue the migration on a replica; do not modify data on master. Useful for validating, testing and benchmarking. See [`testing-on-replica`](testing-on-replica.md)
 
+### test-on-replica-skip-replica-stop
+
+Default `False`. When `--test-on-replica` is enabled, do not issue commands stop replication (requires `--test-on-replica`).
+
 ### throttle-control-replicas
 
-Provide a command delimited list of replicas; `gh-ost` will throttle when any of the given replicas lag beyond `--max-lag-millis`. The list can be queried and updated dynamically via [interactive commands](interactive-commands.md)
+Provide a command delimited list of replicas; `gh-ost` will throttle when any of the given replicas lag beyond [`--max-lag-millis`](#max-lag-millis). The list can be queried and updated dynamically via [interactive commands](interactive-commands.md)
 
 ### throttle-http
 
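A sketch combining `--replica-server-id` and `--postpone-cut-over-flag-file` (the flag file path, schema and table names are made up; connection flags omitted):

```bash
# One of several concurrent migrations: a unique server id per process,
# and a cut-over that waits until the flag file is removed.
gh-ost \
  --replica-server-id=$((1000000000+$$)) \
  --postpone-cut-over-flag-file=/tmp/ghost.postpone.mytable.flag \
  --database=mydb --table=mytable \
  --alter="ADD COLUMN i int" \
  --execute

# When ready, trigger the postponed cut-over:
rm /tmp/ghost.postpone.mytable.flag
```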
@@ -142,3 +228,7 @@ Provide a HTTP endpoint; `gh-ost` will issue `HEAD` requests on given URL and th
 ### timestamp-old-table
 
 Makes the _old_ table include a timestamp value. The _old_ table is what the original table is renamed to at the end of a successful migration. For example, if the table is `gh_ost_test`, then the _old_ table would normally be `_gh_ost_test_del`. With `--timestamp-old-table` it would be, for example, `_gh_ost_test_20170221103147_del`.
+
+### tungsten
+
+See [`tungsten`](cheatsheet.md#tungsten) on the cheatsheet.

doc/hooks.md

@@ -65,10 +65,15 @@ The following variables are available on all hooks:
 - `GH_OST_ELAPSED_COPY_SECONDS` - row-copy time (excluding startup, row-count and postpone time)
 - `GH_OST_ESTIMATED_ROWS` - estimated total rows in table
 - `GH_OST_COPIED_ROWS` - number of rows copied by `gh-ost`
+- `GH_OST_INSPECTED_LAG` - lag in seconds (floating point) of inspected server
+- `GH_OST_PROGRESS` - progress pct ([0..100], floating point) of migration
 - `GH_OST_MIGRATED_HOST`
 - `GH_OST_INSPECTED_HOST`
 - `GH_OST_EXECUTING_HOST`
 - `GH_OST_HOOKS_HINT` - copy of `--hooks-hint` value
+- `GH_OST_HOOKS_HINT_OWNER` - copy of `--hooks-hint-owner` value
+- `GH_OST_HOOKS_HINT_TOKEN` - copy of `--hooks-hint-token` value
+- `GH_OST_DRY_RUN` - whether or not the `gh-ost` run is a dry run
 
 The following variable are available on particular hooks:
 
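For illustration, a minimal status hook consuming the variables added here (the hook file name and the extra `GH_OST_DATABASE_NAME`/`GH_OST_TABLE_NAME` variables are assumptions based on the full hooks doc, not shown in this diff):

```bash
#!/bin/bash
# Illustrative gh-ost status hook, saved as an executable under --hooks-path.
# GH_OST_PROGRESS and GH_OST_INSPECTED_LAG are the variables added in this diff.
echo "migration ${GH_OST_DATABASE_NAME}.${GH_OST_TABLE_NAME}: ${GH_OST_PROGRESS}% done, inspected lag ${GH_OST_INSPECTED_LAG}s"
```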

doc/interactive-commands.md

@@ -43,7 +43,7 @@ Both interfaces may serve at the same time. Both respond to simple text command,
 
 ### Querying for data
 
-For commands that accept an argumetn as value, pass `?` (question mark) to _get_ current value rather than _set_ a new one.
+For commands that accept an argument as value, pass `?` (question mark) to _get_ current value rather than _set_ a new one.
 
 ### Examples
 
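A hedged example of the get-vs-set form over the unix socket (the socket path follows the default `/tmp/gh-ost.<schema>.<table>.sock` pattern; schema and table names are made up):

```bash
# Query the current chunk size of a running migration, then set a new one;
# "?" gets the current value rather than setting it.
echo "chunk-size=?" | nc -U /tmp/gh-ost.mydb.mytable.sock
echo "chunk-size=2000" | nc -U /tmp/gh-ost.mydb.mytable.sock
```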

doc/questions.md

@@ -28,3 +28,9 @@ It is therefore unlikely that `gh-ost` will support this behavior.
 Yes. TL;DR if running all on same replica/master, make sure to provide `--replica-server-id`. [Read more](cheatsheet.md#concurrent-migrations)
 
 # Why
+
+### Why Is the "Connect to Replica" mode preferred?
+
+To avoid placing extra load on the master. `gh-ost` connects as a replication client. Each additional replica adds some load to the master.
+
+To monitor replication lag from a replica. This makes the replication lag throttle, `--max-lag-millis`, more representative of the lag experienced by other replicas following the master (perhaps N levels deep in a tree of replicas).

doc/rds.md | 12
@@ -1,4 +1,4 @@
-`gh-ost` has been updated to work with Amazon RDS however due to GitHub not relying using AWS for databases, this documentation is community driven so if you find a bug please [open an issue][new_issue]!
+`gh-ost` has been updated to work with Amazon RDS however due to GitHub not using AWS for databases, this documentation is community driven so if you find a bug please [open an issue][new_issue]!
 
 # Amazon RDS
 
@@ -26,6 +26,14 @@ If you use `pt-table-checksum` as a part of your data integrity checks, you migh
 This tool requires binlog_format=STATEMENT, but the current binlog_format is set to ROW and an error occurred while attempting to change it. If running MySQL 5.1.29 or newer, setting binlog_format requires the SUPER privilege. You will need to manually set binlog_format to 'STATEMENT' before running this tool.
 ```
 
+#### Binlog filtering
+
+In Aurora, the [binlog filtering feature][aws_replication_docs_bin_log_filtering] is enabled by default. This becomes an issue when gh-ost tries to do the cut-over, because gh-ost waits for an entry in the binlog to proceed but this entry will never end up in the binlog because it gets filtered out by the binlog filtering feature.
+You need to turn this feature off during the migration process.
+Set the `aurora_enable_repl_bin_log_filtering` parameter to 0 in the Parameter Group for your cluster.
+When the migration is done, set it back to 1 (default).
+
+
 #### Preflight checklist
 
 Before trying to run any `gh-ost` migrations you will want to confirm the following:
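Assuming the cluster is managed with the AWS CLI, toggling that parameter could look like this (the parameter group name is made up; verify the command form against current AWS documentation):

```bash
# Disable Aurora binlog filtering for the duration of the migration.
aws rds modify-db-cluster-parameter-group \
  --db-cluster-parameter-group-name my-aurora-params \
  --parameters "ParameterName=aurora_enable_repl_bin_log_filtering,ParameterValue=0,ApplyMethod=immediate"
```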
@@ -35,6 +43,7 @@ Before trying to run any `gh-ost` migrations you will want to confirm the follow
 - [ ] Executing `SHOW SLAVE STATUS\G` on your replica cluster displays the correct master host, binlog position, etc.
 - [ ] Database backup retention is greater than 1 day to enable binlogs
 - [ ] You have setup [`hooks`][ghost_hooks] to issue RDS procedures for stopping and starting replication. (see [github/gh-ost#163][ghost_rds_issue_tracking] for examples)
+- [ ] The parameter `aurora_enable_repl_bin_log_filtering` is set to 0
 
 [new_issue]: https://github.com/github/gh-ost/issues/new
 [assume_rbr_docs]: https://github.com/github/gh-ost/blob/master/doc/command-line-flags.md#assume-rbr
@@ -43,3 +52,4 @@ Before trying to run any `gh-ost` migrations you will want to confirm the follow
 [percona_toolkit_patch]: https://github.com/jacobbednarz/percona-toolkit/commit/0271ba6a094da446a5e5bb8d99b5c26f1777f2b9
 [ghost_hooks]: https://github.com/github/gh-ost/blob/master/doc/hooks.md
 [ghost_rds_issue_tracking]: https://github.com/github/gh-ost/issues/163
+[aws_replication_docs_bin_log_filtering]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Replication.html#AuroraMySQL.Replication.Performance

doc/requirements-and-limitations.md

@@ -22,14 +22,10 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
 
 ### Limitations
 
-- Foreign keys not supported. They may be supported in the future, to some extent.
+- Foreign key constraints are not supported. They may be supported in the future, to some extent.
 
 - Triggers are not supported. They may be supported in the future.
 
-- MySQL 5.7 generated columns are not supported. They may be supported in the future.
-
-- MySQL 5.7 `POINT` column type is not supported.
-
 - MySQL 5.7 `JSON` columns are supported but not as part of `PRIMARY KEY`
 
 - The two _before_ & _after_ tables must share a `PRIMARY KEY` or other `UNIQUE KEY`. This key will be used by `gh-ost` to iterate through the table rows when copying. [Read more](shared-key.md)
@@ -42,13 +38,16 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
 - It is not allowed to migrate a table where another table exists with same name and different upper/lower case.
   - For example, you may not migrate `MyTable` if another table called `MYtable` exists in the same schema.
 
-- Amazon RDS works, but has it's own [limitations](rds.md).
-- Google Cloud SQL is currently not supported
+- Amazon RDS works, but has its own [limitations](rds.md).
+- Google Cloud SQL works, `--gcp` flag required.
+- Aliyun RDS works, `--aliyun-rds` flag required.
 
 - Multisource is not supported when migrating via replica. It _should_ work (but never tested) when connecting directly to master (`--allow-on-master`)
 
 - Master-master setup is only supported in active-passive setup. Active-active (where table is being written to on both masters concurrently) is unsupported. It may be supported in the future.
 
-- If you have en `enum` field as part of your migration key (typically the `PRIMARY KEY`), migration performance will be degraded and potentially bad. [Read more](https://github.com/github/gh-ost/pull/277#issuecomment-254811520)
+- If you have an `enum` field as part of your migration key (typically the `PRIMARY KEY`), migration performance will be degraded and potentially bad. [Read more](https://github.com/github/gh-ost/pull/277#issuecomment-254811520)
 
 - Migrating a `FEDERATED` table is unsupported and is irrelevant to the problem `gh-ost` tackles.
+
+- `ALTER TABLE ... RENAME TO some_other_name` is not supported (and you shouldn't use `gh-ost` for such a trivial operation).

doc/shared-key.md

@@ -1,12 +1,12 @@
 # Shared key
 
-A requirement for a migration to run is that the two _before_ and _after_ tables have a shared unique key. This is to elaborate and illustrate on the matter.
+gh-ost requires for every migration that both the _before_ and _after_ versions of the table share the same unique not-null key columns. This page illustrates this rule.
 
 ### Introduction
 
-Consider a classic, simple migration. The table is any normal:
+Consider a simple migration, with a normal table,
 
-```
+```sql
 CREATE TABLE tbl (
   id bigint unsigned not null auto_increment,
   data varchar(255),
@@ -15,54 +15,72 @@ CREATE TABLE tbl (
 )
 ```
 
-And the migration is a simple `add column ts timestamp`.
-
-In such migration there is no change in indexes, and in particular no change to any unique key, and specifically no change to the `PRIMARY KEY`. To run this migration, `gh-ost` would iterate the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` by order of `id`, and then apply binlog events onto `_tbl_gho`.
-
-Applying the binlog events assumes the existence of a shared unique key. For example, an `UPDATE` statement in the binary log translate to a `REPLACE` statement which `gh-ost` applies to the _ghost_ table. Such statement expects to add or replace an existing row based on given row data. In particular, it would _replace_ an existing row if a unique key violation is met.
-
-So `gh-ost` correlates `tbl` and `_tbl_gho` rows using a unique key. In the above example that would be the `PRIMARY KEY`.
-
-### Rules
-
-There must be a shared set of not-null columns for which there is a unique constraint in both the original table and the migration (_ghost_) table.
-
-### Interpreting the rules
-
-The same columns must be covered by a unique key in both tables. This doesn't have to be the `PRIMARY KEY`. This doesn't have to be a key of the same name.
-
-Upon migration, `gh-ost` inspects both the original and _ghost_ table and attempts to find at least one such unique key (or rather, a set of columns) that is shared between the two. Typically this would just be the `PRIMARY KEY`, but sometimes you may change the `PRIMARY KEY` itself, in which case `gh-ost` will look for other options.
-
-`gh-ost` expects unique keys where no `NULL` values are found, i.e. all columns covered by the unique key are defined as `NOT NULL`. This is implicitly true for `PRIMARY KEY`s. If no such key can be found, `gh-ost` bails out. In the event there is no such key, but you happen to _know_ your columns have no `NULL` values even though they're `NULL`-able, you may take responsibility and pass the `--allow-nullable-unique-key`. The migration will run well as long as no `NULL` values are found in the unique key's columns. Any actual `NULL`s may corrupt the migration.
-
-### Examples: allowed and not allowed
+and the migration `add column ts timestamp`. The _after_ table version would be:
+
+```sql
+CREATE TABLE tbl (
+  id bigint unsigned not null auto_increment,
+  data varchar(255),
+  more_data int,
+  ts timestamp,
+  PRIMARY KEY(id)
+)
+```
+
+(This is also the definition of the _ghost_ table, except that that table would be called `_tbl_gho`).
+
+In this migration, the _before_ and _after_ versions contain the same unique not-null key (the PRIMARY KEY). To run this migration, `gh-ost` would iterate through the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` in primary key order, while also applying the binlog event writes from `tble` onto `_tbl_gho`.
+
+The applying of the binlog events is what requires the shared unique key. For example, an `UPDATE` statement to `tbl` translates to a `REPLACE` statement which `gh-ost` applies to `_tbl_gho`. A `REPLACE` statement expects to insert or replace an existing row based on its row's values and the table's unique key constraints. In particular, if inserting that row would result in a unique key violation (e.g., a row with that primary key already exists), it would _replace_ that existing row with the new values.
+
+So `gh-ost` correlates `tbl` and `_tbl_gho` rows one to one using a unique key. In the above example that would be the `PRIMARY KEY`.
+
+### Interpreting the rule
+
+The _before_ and _after_ versions of the table share the same unique not-null key, but:
+- the key doesn't have to be the PRIMARY KEY
+- the key can have a different name between the _before_ and _after_ versions (e.g., renamed via DROP INDEX and ADD INDEX) so long as it contains the exact same column(s)
+
+At the start of the migration, `gh-ost` inspects both the original and _ghost_ table it created, and attempts to find at least one such unique key (or rather, a set of columns) that is shared between the two. Typically this would just be the `PRIMARY KEY`, but some tables don't have primary keys, or sometimes it is the primary key that is being modified by the migration. In these cases `gh-ost` will look for other options.
+
+`gh-ost` expects unique keys where no `NULL` values are found, i.e. all columns contained in the unique key are defined as `NOT NULL`. This is implicitly true for primary keys. If no such key can be found, `gh-ost` bails out.
+
+If the table contains a unique key with nullable columns, but you know your columns contain no `NULL` values, use the `--allow-nullable-unique-key` option. The migration will run well as long as no `NULL` values are found in the unique key's columns. **Any actual `NULL`s may corrupt the migration.**
+
+### Examples: Allowed and Not Allowed
 
 ```sql
 create table some_table (
-  id int auto_increment,
+  id int not null auto_increment,
   ts timestamp,
   name varchar(128) not null,
   owner_id int not null,
-  loc_id int,
+  loc_id int not null,
   primary key(id),
   unique key name_uidx(name)
 )
 ```
 
-Following are examples of migrations that are _good to run_:
+Note the two unique, not-null indexes: the primary key and `name_uidx`.
+
+Allowed migrations:
 
 - `add column i int`
-- `add key owner_idx(owner_id)`
-- `add unique key owner_name_idx(owner_id, name)` - though you need to make sure to not write conflicting rows while this migration runs
+- `add key owner_idx (owner_id)`
+- `add unique key owner_name_idx (owner_id, name)` - **be careful not to write conflicting rows while this migration runs**
 - `drop key name_uidx` - `primary key` is shared between the tables
-- `drop primary key, add primary key(owner_id, loc_id)` - `name_uidx` is shared between the tables and is used for migration
-- `change id bigint unsigned` - the `'primary key` is used. The change of type still makes the `primary key` workable.
-- `drop primary key, drop key name_uidx, create primary key(name), create unique key id_uidx(id)` - swapping the two keys. `gh-ost` is still happy because `id` is still unique in both tables. So is `name`.
+- `drop primary key, add primary key(owner_id, loc_id)` - `name_uidx` is shared between the tables
+- `change id bigint unsigned not null auto_increment` - the `primary key` changes datatype but not value, and can be used
+- `drop primary key, drop key name_uidx, add primary key(name), add unique key id_uidx(id)` - swapping the two keys. Either `id` or `name` could be used
 
-Following are examples of migrations that _cannot run_:
+Not allowed:
 
-- `drop primary key, drop key name_uidx` - no unique key to _ghost_ table, so clearly cannot run
-- `drop primary key, drop key name_uidx, create primary key(name, owner_id)` - no shared columns to both tables. Even though `name` exists in the _ghost_ table's `primary key`, it is only part of the key and in itself does not guarantee uniqueness in the _ghost_ table.
+- `drop primary key, drop key name_uidx` - the _ghost_ table has no unique key
+- `drop primary key, drop key name_uidx, create primary key(name, owner_id)` - no shared columns to the unique keys on both tables. Even though `name` exists in the _ghost_ table's `primary key`, it is only part of the key and in itself does not guarantee uniqueness in the _ghost_ table.
 
-Also, you cannot run a migration on a table that doesn't have some form of `unique key` in the first place, such as `some_table (id int, ts timestamp)`
+### Workarounds
+
+If you need to change your primary key or only not-null unique index to use different columns, you will want to do it as two separate migrations:
+1. `ADD UNIQUE KEY temp_pk (temp_pk_column,...)`
+1. `DROP PRIMARY KEY, DROP KEY temp_pk, ADD PRIMARY KEY (temp_pk_column,...)`
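A sketch of the two-step workaround applied to the `some_table` example above (column choices are illustrative; connection flags omitted):

```bash
# Step 1: introduce a temporary unique not-null key covering the new columns.
gh-ost --database=mydb --table=some_table \
  --alter="ADD UNIQUE KEY temp_pk (owner_id, loc_id)" --execute

# Step 2: promote it to the primary key. During this step temp_pk and the new
# primary key cover the same columns, giving gh-ost its shared unique key.
gh-ost --database=mydb --table=some_table \
  --alter="DROP PRIMARY KEY, DROP KEY temp_pk, ADD PRIMARY KEY (owner_id, loc_id)" --execute
```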

doc/throttle.md

@@ -46,6 +46,14 @@ Note that you may dynamically change both `--max-lag-millis` and the `throttle-c
 
 An example query could be: `--throttle-query="select hour(now()) between 8 and 17"` which implies throttling auto-starts `8:00am` and migration auto-resumes at `18:00pm`.
 
+#### HTTP Throttle
+
+The `--throttle-http` flag allows for throttling via HTTP. Every 100ms `gh-ost` issues a `HEAD` request to the provided URL. If the response status code is not `200` throttling will kick in until a `200` response status code is returned.
+
+If no URL is provided or the URL provided doesn't contain the scheme then the HTTP check will be disabled. For example `--throttle-http="http://1.2.3.4:6789/throttle"` will enable the HTTP check/throttling, but `--throttle-http="1.2.3.4:6789/throttle"` will not.
+
+The URL can be queried and updated dynamically via [interactive interface](interactive-commands.md).
+
 #### Manual control
 
 In addition to the above, you are able to take control and throttle the operation any time you like.
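An illustrative look at what the throttler polls (the endpoint, schema and table names are made up; connection flags omitted):

```bash
# gh-ost throttles while this HEAD request returns a non-200 status:
curl -I http://throttler.example.com:6789/throttle

# Point a migration at the endpoint:
gh-ost --throttle-http="http://throttler.example.com:6789/throttle" \
  --database=mydb --table=mytable --alter="ADD COLUMN i int" --execute
```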

doc/understanding-output.md

@@ -24,15 +24,15 @@ Initial output lines may look like this:
 2016-05-19 17:57:11 INFO connection validated on 127.0.0.1:3306
 2016-05-19 17:57:11 INFO rotate to next log name: mysql-bin.002587
 2016-05-19 17:57:11 INFO connection validated on 127.0.0.1:3306
-2016-05-19 17:57:11 INFO Droppping table `mydb`.`_mytable_gst`
+2016-05-19 17:57:11 INFO Dropping table `mydb`.`_mytable_gst`
 2016-05-19 17:57:11 INFO Table dropped
-2016-05-19 17:57:11 INFO Droppping table `mydb`.`_mytable_old`
+2016-05-19 17:57:11 INFO Dropping table `mydb`.`_mytable_old`
 2016-05-19 17:57:11 INFO Table dropped
 2016-05-19 17:57:11 INFO Creating ghost table `mydb`.`_mytable_gst`
 2016-05-19 17:57:11 INFO Ghost table created
 2016-05-19 17:57:11 INFO Altering ghost table `mydb`.`_mytable_gst`
 2016-05-19 17:57:11 INFO Ghost table altered
-2016-05-19 17:57:11 INFO Droppping table `mydb`.`_mytable_osc`
+2016-05-19 17:57:11 INFO Dropping table `mydb`.`_mytable_osc`
 2016-05-19 17:57:11 INFO Table dropped
 2016-05-19 17:57:11 INFO Creating changelog table `mydb`.`_mytable_osc`
 2016-05-19 17:57:11 INFO Changelog table created

doc/why-triggerless.md

@@ -16,7 +16,7 @@ Use of triggers simplifies a lot of the flow in doing a live table migration, bu
 
 Triggers are stored routines which are invoked on a per-row operation upon `INSERT`, `DELETE`, `UPDATE` on a table.
 They were introduced in MySQL `5.0`.
-A trigger may contain a set of queries, and these queries run in the same transaction space as the query that manipulates the table. This makes for an atomicy of both the original operation on the table and the trigger-invoked operations.
+A trigger may contain a set of queries, and these queries run in the same transaction space as the query that manipulates the table. This makes for an atomicity of both the original operation on the table and the trigger-invoked operations.
 
 ### Triggers, overhead
 
|
7
docker-compose.yml
Normal file
7
docker-compose.yml
Normal file
@@ -0,0 +1,7 @@
+version: "3.5"
+services:
+  app:
+    image: app
+    build:
+      context: .
+      dockerfile: Dockerfile.test
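A usage sketch for this compose file (standard docker-compose commands; the `app` service is the one defined above and its `CMD` is `script/test` per Dockerfile.test):

```bash
# Build the test image and run the test suite inside it.
docker-compose build app
docker-compose run --rm app
```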

go/base/context.go

@@ -7,6 +7,7 @@ package base
 
 import (
     "fmt"
+    "math"
     "os"
     "regexp"
     "strings"
@ -14,6 +15,8 @@ import (
|
|||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/satori/go.uuid"
|
||||||
|
|
||||||
"github.com/github/gh-ost/go/mysql"
|
"github.com/github/gh-ost/go/mysql"
|
||||||
"github.com/github/gh-ost/go/sql"
|
"github.com/github/gh-ost/go/sql"
|
||||||
|
|
||||||
@@ -26,23 +29,23 @@ type RowsEstimateMethod string
 
 const (
    TableStatusRowsEstimate RowsEstimateMethod = "TableStatusRowsEstimate"
-   ExplainRowsEstimate     = "ExplainRowsEstimate"
-   CountRowsEstimate       = "CountRowsEstimate"
+   ExplainRowsEstimate     RowsEstimateMethod = "ExplainRowsEstimate"
+   CountRowsEstimate       RowsEstimateMethod = "CountRowsEstimate"
 )
 
 type CutOver int
 
 const (
    CutOverAtomic CutOver = iota
-   CutOverTwoStep = iota
+   CutOverTwoStep
 )
 
 type ThrottleReasonHint string
 
 const (
    NoThrottleReasonHint ThrottleReasonHint = "NoThrottleReasonHint"
-   UserCommandThrottleReasonHint        = "UserCommandThrottleReasonHint"
-   LeavingHibernationThrottleReasonHint = "LeavingHibernationThrottleReasonHint"
+   UserCommandThrottleReasonHint        ThrottleReasonHint = "UserCommandThrottleReasonHint"
+   LeavingHibernationThrottleReasonHint ThrottleReasonHint = "LeavingHibernationThrottleReasonHint"
 )
 
 const (
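Context for the constant changes above (explanatory note, not part of the commit): in a Go const block, an identifier that omits both type and value repeats the previous line's expression and type, so the new bare `CutOverTwoStep` is a typed `CutOver` with the next `iota` value, whereas the old `= iota` and `= "..."` forms produced untyped constants. A minimal standalone sketch:

package main

import "fmt"

type CutOver int

const (
    CutOverAtomic  CutOver = iota // 0
    CutOverTwoStep                // 1: repeats "CutOver = iota", so it is typed CutOver too
)

type RowsEstimateMethod string

const (
    TableStatusRowsEstimate RowsEstimateMethod = "TableStatusRowsEstimate"
    // Before the fix, `ExplainRowsEstimate = "ExplainRowsEstimate"` was an
    // untyped string constant; restating the type keeps the whole set typed.
    ExplainRowsEstimate RowsEstimateMethod = "ExplainRowsEstimate"
)

func main() {
    fmt.Printf("%T=%v %T=%q\n", CutOverTwoStep, CutOverTwoStep, ExplainRowsEstimate, ExplainRowsEstimate)
    // main.CutOver=1 main.RowsEstimateMethod="ExplainRowsEstimate"
}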
@@ -71,6 +74,8 @@ func NewThrottleCheckResult(throttle bool, reason string, reasonHint ThrottleRea
 // MigrationContext has the general, global state of migration. It is used by
 // all components throughout the migration process.
 type MigrationContext struct {
+   Uuid string
+
    DatabaseName      string
    OriginalTableName string
    AlterStatement    string
@@ -82,17 +87,25 @@ type MigrationContext struct {
    SwitchToRowBinlogFormat  bool
    AssumeRBR                bool
    SkipForeignKeyChecks     bool
+   SkipStrictMode           bool
    NullableUniqueKeyAllowed bool
    ApproveRenamedColumns    bool
    SkipRenamedColumns       bool
    IsTungsten               bool
    DiscardForeignKeys       bool
+   AliyunRDS                bool
+   GoogleCloudPlatform      bool
 
    config      ContextConfig
    configMutex *sync.Mutex
    ConfigFile  string
    CliUser     string
    CliPassword string
+   UseTLS           bool
+   TLSAllowInsecure bool
+   TLSCACertificate string
+   TLSCertificate   string
+   TLSKey           string
    CliMasterUser     string
    CliMasterPassword string
 
@@ -114,10 +127,15 @@ type MigrationContext struct {
    CriticalLoadHibernateSeconds  int64
    PostponeCutOverFlagFile       string
    CutOverLockTimeoutSeconds     int64
+   CutOverExponentialBackoff     bool
+   ExponentialBackoffMaxInterval int64
    ForceNamedCutOverCommand      bool
+   ForceNamedPanicCommand        bool
    PanicFlagFile                 string
    HooksPath                     string
    HooksHintMessage              string
+   HooksHintOwner                string
+   HooksHintToken                string
 
    DropServeSocket bool
    ServeSocketFile string
@@ -157,6 +175,7 @@ type MigrationContext struct {
    pointOfInterestTime      time.Time
    pointOfInterestTimeMutex *sync.Mutex
    CurrentLag               int64
+   currentProgress          uint64
    ThrottleHTTPStatusCode   int64
    controlReplicasLagResult mysql.ReplicationLagResult
    TotalRowsCopied          int64
@@ -179,8 +198,10 @@ type MigrationContext struct {
 
    OriginalTableColumnsOnApplier *sql.ColumnList
    OriginalTableColumns          *sql.ColumnList
+   OriginalTableVirtualColumns   *sql.ColumnList
    OriginalTableUniqueKeys       [](*sql.UniqueKey)
    GhostTableColumns             *sql.ColumnList
+   GhostTableVirtualColumns      *sql.ColumnList
    GhostTableUniqueKeys          [](*sql.UniqueKey)
    UniqueKey                     *sql.UniqueKey
    SharedColumns                 *sql.ColumnList
@@ -195,8 +216,6 @@ type MigrationContext struct {
    ForceTmpTableName string
 
    recentBinlogCoordinates mysql.BinlogCoordinates
 
-   CanStopStreaming func() bool
-
 }
 
 type ContextConfig struct {
@@ -212,14 +231,9 @@ type ContextConfig struct {
    }
 }
 
-var context *MigrationContext
-
-func init() {
-   context = newMigrationContext()
-}
-
-func newMigrationContext() *MigrationContext {
+func NewMigrationContext() *MigrationContext {
    return &MigrationContext{
+       Uuid:                      uuid.NewV4().String(),
        defaultNumRetries:         60,
        ChunkSize:                 1000,
        InspectorConnectionConfig: mysql.NewConnectionConfig(),
@@ -239,11 +253,6 @@ func newMigrationContext() *MigrationContext {
    }
 }
 
-// GetMigrationContext
-func GetMigrationContext() *MigrationContext {
-   return context
-}
-
 func getSafeTableName(baseName string, suffix string) string {
    name := fmt.Sprintf("_%s_%s", baseName, suffix)
    if len(name) <= mysql.MaxTableNameLength {
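The two hunks above remove the package-level singleton (`var context`, the `init()` assignment, and `GetMigrationContext()`) in favor of an exported constructor; consumers throughout this commit now receive the context explicitly (see `NewMigrator(migrationContext)` and `NewApplier(migrationContext)` below, and the test changes that call `NewMigrationContext()`). A minimal sketch of the dependency-injection pattern, with a hypothetical `Worker` standing in for those consumers:

package main

import "fmt"

// MigrationContext stands in for base.MigrationContext; Worker is an
// illustrative consumer, not a type from the commit.
type MigrationContext struct{ DatabaseName string }

func NewMigrationContext() *MigrationContext { return &MigrationContext{} }

type Worker struct{ ctx *MigrationContext }

// NewWorker receives its context explicitly instead of reaching for a
// package-global singleton, so each test or concurrent migration can own
// an isolated context.
func NewWorker(ctx *MigrationContext) *Worker { return &Worker{ctx: ctx} }

func main() {
    ctx := NewMigrationContext()
    ctx.DatabaseName = "mydb"
    fmt.Println(NewWorker(ctx).ctx.DatabaseName) // mydb
}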
@@ -349,6 +358,14 @@ func (this *MigrationContext) SetCutOverLockTimeoutSeconds(timeoutSeconds int64)
    return nil
 }
 
+func (this *MigrationContext) SetExponentialBackoffMaxInterval(intervalSeconds int64) error {
+   if intervalSeconds < 2 {
+       return fmt.Errorf("Minimal maximum interval is 2sec. Timeout remains at %d", this.ExponentialBackoffMaxInterval)
+   }
+   this.ExponentialBackoffMaxInterval = intervalSeconds
+   return nil
+}
+
 func (this *MigrationContext) SetDefaultNumRetries(retries int64) {
    this.throttleMutex.Lock()
    defer this.throttleMutex.Unlock()
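The cap validated here feeds the new `--cut-over-exponential-backoff` behavior introduced later in this commit: failed cut-over attempts wait exponentially longer, never beyond the configured maximum. A self-contained sketch of capped exponential backoff; the function name, base interval and retry count are illustrative, not gh-ost's exact loop:

package main

import (
    "errors"
    "fmt"
    "time"
)

// retryWithBackoff doubles the wait after each failed attempt, clamping it
// to maxInterval seconds (the value SetExponentialBackoffMaxInterval accepts).
func retryWithBackoff(maxInterval int64, attempts int, op func() error) error {
    interval := int64(1)
    var err error
    for i := 0; i < attempts; i++ {
        if err = op(); err == nil {
            return nil
        }
        interval = interval * 2
        if interval > maxInterval {
            interval = maxInterval
        }
        time.Sleep(time.Duration(interval) * time.Second)
    }
    return err
}

func main() {
    calls := 0
    err := retryWithBackoff(64, 5, func() error {
        calls++
        if calls < 3 {
            return errors.New("cut-over blocked") // e.g. metadata lock not acquired
        }
        return nil
    })
    fmt.Println(calls, err) // 3 <nil>
}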
@@ -413,6 +430,20 @@ func (this *MigrationContext) MarkRowCopyEndTime() {
    this.RowCopyEndTime = time.Now()
 }
 
+func (this *MigrationContext) GetCurrentLagDuration() time.Duration {
+   return time.Duration(atomic.LoadInt64(&this.CurrentLag))
+}
+
+func (this *MigrationContext) GetProgressPct() float64 {
+   return math.Float64frombits(atomic.LoadUint64(&this.currentProgress))
+}
+
+func (this *MigrationContext) SetProgressPct(progressPct float64) {
+   atomic.StoreUint64(&this.currentProgress, math.Float64bits(progressPct))
+}
+
+// math.Float64bits([f=0..100])
+
 // GetTotalRowsCopied returns the accurate number of rows being copied (affected)
 // This is not exactly the same as the rows being iterated via chunks, but potentially close enough
 func (this *MigrationContext) GetTotalRowsCopied() int64 {
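Why `currentProgress` is a `uint64` rather than a `float64`: `sync/atomic` has no float operations, so the commit round-trips the percentage through its IEEE-754 bit pattern with `math.Float64bits`/`math.Float64frombits`. A standalone illustration of the same trick:

package main

import (
    "fmt"
    "math"
    "sync/atomic"
)

// progress holds a float64 percentage inside a uint64 so it can be read and
// written atomically from different goroutines without a mutex.
var progress uint64

func setProgressPct(pct float64) {
    atomic.StoreUint64(&progress, math.Float64bits(pct))
}

func getProgressPct() float64 {
    return math.Float64frombits(atomic.LoadUint64(&progress))
}

func main() {
    setProgressPct(42.5)
    fmt.Println(getProgressPct()) // 42.5
}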
@@ -689,6 +720,13 @@ func (this *MigrationContext) ApplyCredentials() {
    }
 }
 
+func (this *MigrationContext) SetupTLS() error {
+   if this.UseTLS {
+       return this.InspectorConnectionConfig.UseTLS(this.TLSCACertificate, this.TLSCertificate, this.TLSKey, this.TLSAllowInsecure)
+   }
+   return nil
+}
+
 // ReadConfigFile attempts to read the config file, if it exists
 func (this *MigrationContext) ReadConfigFile() error {
    this.configMutex.Lock()
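`ConnectionConfig.UseTLS` itself is defined elsewhere in gh-ost and is not shown in this diff; as a rough, assumption-laden sketch, a helper of that shape typically builds a `tls.Config` from an optional CA bundle plus an allow-insecure toggle, along these lines (client certificate/key handling omitted; `newTLSConfig` is a made-up name):

package main

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io/ioutil"
)

// newTLSConfig is an illustrative stand-in, not gh-ost's implementation:
// it loads an optional CA bundle and honors an allow-insecure toggle.
func newTLSConfig(caCertPath string, allowInsecure bool) (*tls.Config, error) {
    cfg := &tls.Config{InsecureSkipVerify: allowInsecure}
    if caCertPath != "" {
        pem, err := ioutil.ReadFile(caCertPath)
        if err != nil {
            return nil, err
        }
        pool := x509.NewCertPool()
        if !pool.AppendCertsFromPEM(pem) {
            return nil, fmt.Errorf("could not parse CA certificate at %s", caCertPath)
        }
        cfg.RootCAs = pool
    }
    return cfg, nil
}

func main() {
    cfg, err := newTLSConfig("", true)
    fmt.Println(cfg.InsecureSkipVerify, err) // true <nil>
}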
@@ -19,27 +19,27 @@ func init() {
 
 func TestGetTableNames(t *testing.T) {
    {
-       context = newMigrationContext()
+       context := NewMigrationContext()
        context.OriginalTableName = "some_table"
        test.S(t).ExpectEquals(context.GetOldTableName(), "_some_table_del")
        test.S(t).ExpectEquals(context.GetGhostTableName(), "_some_table_gho")
        test.S(t).ExpectEquals(context.GetChangelogTableName(), "_some_table_ghc")
    }
    {
-       context = newMigrationContext()
+       context := NewMigrationContext()
        context.OriginalTableName = "a123456789012345678901234567890123456789012345678901234567890"
        test.S(t).ExpectEquals(context.GetOldTableName(), "_a1234567890123456789012345678901234567890123456789012345678_del")
        test.S(t).ExpectEquals(context.GetGhostTableName(), "_a1234567890123456789012345678901234567890123456789012345678_gho")
        test.S(t).ExpectEquals(context.GetChangelogTableName(), "_a1234567890123456789012345678901234567890123456789012345678_ghc")
    }
    {
-       context = newMigrationContext()
+       context := NewMigrationContext()
        context.OriginalTableName = "a123456789012345678901234567890123456789012345678901234567890123"
        oldTableName := context.GetOldTableName()
        test.S(t).ExpectEquals(oldTableName, "_a1234567890123456789012345678901234567890123456789012345678_del")
    }
    {
-       context = newMigrationContext()
+       context := NewMigrationContext()
        context.OriginalTableName = "a123456789012345678901234567890123456789012345678901234567890123"
        context.TimestampOldTable = true
        longForm := "Jan 2, 2006 at 3:04pm (MST)"
@@ -48,7 +48,7 @@ func TestGetTableNames(t *testing.T) {
        test.S(t).ExpectEquals(oldTableName, "_a1234567890123456789012345678901234567890123_20130203195400_del")
    }
    {
-       context = newMigrationContext()
+       context := NewMigrationContext()
        context.OriginalTableName = "foo_bar_baz"
        context.ForceTmpTableName = "tmp"
        test.S(t).ExpectEquals(context.GetOldTableName(), "_tmp_del")
@@ -37,6 +37,14 @@ func FileExists(fileName string) bool {
    return false
 }
 
+func TouchFile(fileName string) error {
+   f, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE, 0755)
+   if err != nil {
+       return err
+   }
+   return f.Close()
+}
+
 // StringContainsAll returns true if `s` contains all non empty given `substrings`
 // The function returns `false` if no non-empty arguments are given.
 func StringContainsAll(s string, substrings ...string) bool {
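Note that `O_APPEND|O_CREATE` without `O_TRUNC` means `TouchFile` creates the file if absent and leaves existing content untouched — the semantics an operational flag file wants. A hypothetical caller; the path is made up for illustration:

package main

import (
    "fmt"
    "os"
)

// touchFile mirrors the TouchFile helper added above.
func touchFile(fileName string) error {
    f, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE, 0755)
    if err != nil {
        return err
    }
    return f.Close()
}

func main() {
    // Illustrative flag-file path, in the spirit of gh-ost's flag files
    // (e.g. postpone-cut-over / panic flag files).
    if err := touchFile("/tmp/ghost.postpone.flag"); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("flag file present")
}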
@@ -55,17 +63,27 @@ func StringContainsAll(s string, substrings ...string) bool {
    return nonEmptyStringsFound
 }
 
-func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig) (string, error) {
-   query := `select @@global.port, @@global.version`
+func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext *MigrationContext) (string, error) {
+   versionQuery := `select @@global.version`
    var port, extraPort int
    var version string
-   if err := db.QueryRow(query).Scan(&port, &version); err != nil {
+   if err := db.QueryRow(versionQuery).Scan(&version); err != nil {
        return "", err
    }
    extraPortQuery := `select @@global.extra_port`
    if err := db.QueryRow(extraPortQuery).Scan(&extraPort); err != nil {
        // swallow this error. not all servers support extra_port
    }
+   // AliyunRDS set users port to "NULL", replace it by gh-ost param
+   // GCP set users port to "NULL", replace it by gh-ost param
+   if migrationContext.AliyunRDS || migrationContext.GoogleCloudPlatform {
+       port = connectionConfig.Key.Port
+   } else {
+       portQuery := `select @@global.port`
+       if err := db.QueryRow(portQuery).Scan(&port); err != nil {
+           return "", err
+       }
+   }
 
    if connectionConfig.Key.Port == port || (extraPort > 0 && connectionConfig.Key.Port == extraPort) {
        log.Infof("connection validated on %+v", connectionConfig.Key)
@@ -7,17 +7,18 @@ package binlog
 
 import (
    "fmt"
-   "github.com/github/gh-ost/go/sql"
    "strings"
+
+   "github.com/github/gh-ost/go/sql"
 )
 
 type EventDML string
 
 const (
    NotDML    EventDML = "NoDML"
-   InsertDML          = "Insert"
-   UpdateDML          = "Update"
-   DeleteDML          = "Delete"
+   InsertDML EventDML = "Insert"
+   UpdateDML EventDML = "Update"
+   DeleteDML EventDML = "Delete"
 )
 
 func ToEventDML(description string) EventDML {
@@ -26,7 +26,7 @@ func NewBinlogEntry(logFile string, logPos uint64) *BinlogEntry {
    return binlogEntry
 }
 
-// NewBinlogEntry creates an empty, ready to go BinlogEntry object
+// NewBinlogEntryAt creates an empty, ready to go BinlogEntry object
 func NewBinlogEntryAt(coordinates mysql.BinlogCoordinates) *BinlogEntry {
    binlogEntry := &BinlogEntry{
        Coordinates: coordinates,
@@ -41,7 +41,7 @@ func (this *BinlogEntry) Duplicate() *BinlogEntry {
    return binlogEntry
 }
 
-// Duplicate creates and returns a new binlog entry, with some of the attributes pre-assigned
+// String() returns a string representation of this binlog entry
 func (this *BinlogEntry) String() string {
    return fmt.Sprintf("[BinlogEntry at %+v; dml:%+v]", this.Coordinates, this.DmlEvent)
 }
@@ -26,28 +26,28 @@ type GoMySQLReader struct {
    currentCoordinates       mysql.BinlogCoordinates
    currentCoordinatesMutex  *sync.Mutex
    LastAppliedRowsEventHint mysql.BinlogCoordinates
-   MigrationContext         *base.MigrationContext
 }
 
-func NewGoMySQLReader(connectionConfig *mysql.ConnectionConfig) (binlogReader *GoMySQLReader, err error) {
+func NewGoMySQLReader(migrationContext *base.MigrationContext) (binlogReader *GoMySQLReader, err error) {
    binlogReader = &GoMySQLReader{
-       connectionConfig:        connectionConfig,
+       connectionConfig:        migrationContext.InspectorConnectionConfig,
        currentCoordinates:      mysql.BinlogCoordinates{},
        currentCoordinatesMutex: &sync.Mutex{},
        binlogSyncer:            nil,
        binlogStreamer:          nil,
-       MigrationContext:        base.GetMigrationContext(),
    }
 
-   serverId := uint32(binlogReader.MigrationContext.ReplicaServerId)
+   serverId := uint32(migrationContext.ReplicaServerId)
 
-   binlogSyncerConfig := &replication.BinlogSyncerConfig{
-       ServerID: serverId,
-       Flavor:   "mysql",
-       Host:     connectionConfig.Key.Hostname,
-       Port:     uint16(connectionConfig.Key.Port),
-       User:     connectionConfig.User,
-       Password: connectionConfig.Password,
+   binlogSyncerConfig := replication.BinlogSyncerConfig{
+       ServerID:   serverId,
+       Flavor:     "mysql",
+       Host:       binlogReader.connectionConfig.Key.Hostname,
+       Port:       uint16(binlogReader.connectionConfig.Key.Port),
+       User:       binlogReader.connectionConfig.User,
+       Password:   binlogReader.connectionConfig.Password,
+       TLSConfig:  binlogReader.connectionConfig.TLSConfig(),
+       UseDecimal: true,
    }
    binlogReader.binlogSyncer = replication.NewBinlogSyncer(binlogSyncerConfig)
 
@@ -57,12 +57,12 @@ func NewGoMySQLReader(connectionConfig *mysql.ConnectionConfig) (binlogReader *G
 // ConnectBinlogStreamer
 func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordinates) (err error) {
    if coordinates.IsEmpty() {
-       return log.Errorf("Emptry coordinates at ConnectBinlogStreamer()")
+       return log.Errorf("Empty coordinates at ConnectBinlogStreamer()")
    }
 
    this.currentCoordinates = coordinates
    log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates)
-   // Start sync with sepcified binlog file and position
+   // Start sync with specified binlog file and position
    this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{this.currentCoordinates.LogFile, uint32(this.currentCoordinates.LogPos)})
 
    return err
@@ -113,8 +113,8 @@ func (this *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEven
            binlogEntry.DmlEvent.WhereColumnValues = sql.ToColumnValues(row)
        }
    }
-   // The channel will do the throttling. Whoever is reding from the channel
-   // decides whether action is taken sycnhronously (meaning we wait before
+   // The channel will do the throttling. Whoever is reading from the channel
+   // decides whether action is taken synchronously (meaning we wait before
    // next iteration) or asynchronously (we keep pushing more events)
    // In reality, reads will be synchronous
    entriesChannel <- binlogEntry
@@ -147,7 +147,7 @@ func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesCha
                defer this.currentCoordinatesMutex.Unlock()
                this.currentCoordinates.LogFile = string(rotateEvent.NextLogName)
            }()
-           log.Infof("rotate to next log name: %s", rotateEvent.NextLogName)
+           log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), rotateEvent.NextLogName)
        } else if rowsEvent, ok := ev.Event.(*replication.RowsEvent); ok {
            if err := this.handleRowsEvent(ev, rowsEvent, entriesChannel); err != nil {
                return err
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (this *GoMySQLReader) Close() error {
|
func (this *GoMySQLReader) Close() error {
|
||||||
// Historically there was a:
|
this.binlogSyncer.Close()
|
||||||
// this.binlogSyncer.Close()
|
|
||||||
// here. A new go-mysql version closes the binlog syncer connection independently.
|
|
||||||
// I will go against the sacred rules of comments and just leave this here.
|
|
||||||
// This is the year 2017. Let's see what year these comments get deleted.
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@@ -14,6 +14,7 @@ import (
 
    "github.com/github/gh-ost/go/base"
    "github.com/github/gh-ost/go/logic"
+   _ "github.com/go-sql-driver/mysql"
    "github.com/outbrain/golib/log"
 
    "golang.org/x/crypto/ssh/terminal"
@@ -43,10 +44,9 @@ func acceptSignals(migrationContext *base.MigrationContext) {
 
 // main is the application's entry point. It will either spawn a CLI or HTTP interfaces.
 func main() {
-   migrationContext := base.GetMigrationContext()
+   migrationContext := base.NewMigrationContext()
 
    flag.StringVar(&migrationContext.InspectorConnectionConfig.Key.Hostname, "host", "127.0.0.1", "MySQL hostname (preferably a replica, not the master)")
-   flag.StringVar(&migrationContext.AssumeMasterHostname, "assume-master-host", "", "(optional) explicitly tell gh-ost the identity of the master. Format: some.host.com[:port] This is useful in master-master setups where you wish to pick an explicit master, or in a tungsten-replicator where gh-ost is unabel to determine the master")
+   flag.StringVar(&migrationContext.AssumeMasterHostname, "assume-master-host", "", "(optional) explicitly tell gh-ost the identity of the master. Format: some.host.com[:port] This is useful in master-master setups where you wish to pick an explicit master, or in a tungsten-replicator where gh-ost is unable to determine the master")
    flag.IntVar(&migrationContext.InspectorConnectionConfig.Key.Port, "port", 3306, "MySQL port (preferably a replica, not the master)")
    flag.StringVar(&migrationContext.CliUser, "user", "", "MySQL user")
    flag.StringVar(&migrationContext.CliPassword, "password", "", "MySQL password")
@@ -55,6 +55,12 @@ func main() {
    flag.StringVar(&migrationContext.ConfigFile, "conf", "", "Config file")
    askPass := flag.Bool("ask-pass", false, "prompt for MySQL password")
 
+   flag.BoolVar(&migrationContext.UseTLS, "ssl", false, "Enable SSL encrypted connections to MySQL hosts")
+   flag.StringVar(&migrationContext.TLSCACertificate, "ssl-ca", "", "CA certificate in PEM format for TLS connections to MySQL hosts. Requires --ssl")
+   flag.StringVar(&migrationContext.TLSCertificate, "ssl-cert", "", "Certificate in PEM format for TLS connections to MySQL hosts. Requires --ssl")
+   flag.StringVar(&migrationContext.TLSKey, "ssl-key", "", "Key in PEM format for TLS connections to MySQL hosts. Requires --ssl")
+   flag.BoolVar(&migrationContext.TLSAllowInsecure, "ssl-allow-insecure", false, "Skips verification of MySQL hosts' certificate chain and host name. Requires --ssl")
+
    flag.StringVar(&migrationContext.DatabaseName, "database", "", "database name (mandatory)")
    flag.StringVar(&migrationContext.OriginalTableName, "table", "", "table name (mandatory)")
    flag.StringVar(&migrationContext.AlterStatement, "alter", "", "alter statement (mandatory)")
@@ -68,6 +74,9 @@ func main() {
    flag.BoolVar(&migrationContext.IsTungsten, "tungsten", false, "explicitly let gh-ost know that you are running on a tungsten-replication based topology (you are likely to also provide --assume-master-host)")
    flag.BoolVar(&migrationContext.DiscardForeignKeys, "discard-foreign-keys", false, "DANGER! This flag will migrate a table that has foreign keys and will NOT create foreign keys on the ghost table, thus your altered table will have NO foreign keys. This is useful for intentional dropping of foreign keys")
    flag.BoolVar(&migrationContext.SkipForeignKeyChecks, "skip-foreign-key-checks", false, "set to 'true' when you know for certain there are no foreign keys on your table, and wish to skip the time it takes for gh-ost to verify that")
+   flag.BoolVar(&migrationContext.SkipStrictMode, "skip-strict-mode", false, "explicitly tell gh-ost binlog applier not to enforce strict sql mode")
+   flag.BoolVar(&migrationContext.AliyunRDS, "aliyun-rds", false, "set to 'true' when you execute on Aliyun RDS.")
+   flag.BoolVar(&migrationContext.GoogleCloudPlatform, "gcp", false, "set to 'true' when you execute on a 1st generation Google Cloud Platform (GCP).")
 
    executeFlag := flag.Bool("execute", false, "actually execute the alter & migrate the table. Default is noop: do some tests and exit")
    flag.BoolVar(&migrationContext.TestOnReplica, "test-on-replica", false, "Have the migration run on a replica, not on the master. At the end of migration replication is stopped, and tables are swapped and immediately swap-revert. Replication remains stopped and you can compare the two tables for building trust")
@@ -80,9 +89,12 @@ func main() {
    flag.BoolVar(&migrationContext.TimestampOldTable, "timestamp-old-table", false, "Use a timestamp in old table name. This makes old table names unique and non conflicting cross migrations")
    cutOver := flag.String("cut-over", "atomic", "choose cut-over type (default|atomic, two-step)")
    flag.BoolVar(&migrationContext.ForceNamedCutOverCommand, "force-named-cut-over", false, "When true, the 'unpostpone|cut-over' interactive command must name the migrated table")
+   flag.BoolVar(&migrationContext.ForceNamedPanicCommand, "force-named-panic", false, "When true, the 'panic' interactive command must name the migrated table")
 
    flag.BoolVar(&migrationContext.SwitchToRowBinlogFormat, "switch-to-rbr", false, "let this tool automatically switch binary log format to 'ROW' on the replica, if needed. The format will NOT be switched back. I'm too scared to do that, and wish to protect you if you happen to execute another migration while this one is running")
    flag.BoolVar(&migrationContext.AssumeRBR, "assume-rbr", false, "set to 'true' when you know for certain your server uses 'ROW' binlog_format. gh-ost is unable to tell, event after reading binlog_format, whether the replication process does indeed use 'ROW', and restarts replication to be certain RBR setting is applied. Such operation requires SUPER privileges which you might not have. Setting this flag avoids restarting replication and you can proceed to use gh-ost without SUPER privileges")
+   flag.BoolVar(&migrationContext.CutOverExponentialBackoff, "cut-over-exponential-backoff", false, "Wait exponentially longer intervals between failed cut-over attempts. Wait intervals obey a maximum configurable with 'exponential-backoff-max-interval').")
+   exponentialBackoffMaxInterval := flag.Int64("exponential-backoff-max-interval", 64, "Maximum number of seconds to wait between attempts when performing various operations with exponential backoff.")
    chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 100-100,000)")
    dmlBatchSize := flag.Int64("dml-batch-size", 10, "batch size for DML events to apply in a single transaction (range 1-100)")
    defaultRetries := flag.Int64("default-retries", 60, "Default number of retries for various operations before panicking")
@@ -106,6 +118,8 @@ func main() {
 
    flag.StringVar(&migrationContext.HooksPath, "hooks-path", "", "directory where hook files are found (default: empty, ie. hooks disabled). Hook files found on this path, and conforming to hook naming conventions will be executed")
    flag.StringVar(&migrationContext.HooksHintMessage, "hooks-hint", "", "arbitrary message to be injected to hooks via GH_OST_HOOKS_HINT, for your convenience")
+   flag.StringVar(&migrationContext.HooksHintOwner, "hooks-hint-owner", "", "arbitrary name of owner to be injected to hooks via GH_OST_HOOKS_HINT_OWNER, for your convenience")
+   flag.StringVar(&migrationContext.HooksHintToken, "hooks-hint-token", "", "arbitrary token to be injected to hooks via GH_OST_HOOKS_HINT_TOKEN, for your convenience")
 
    flag.UintVar(&migrationContext.ReplicaServerId, "replica-server-id", 99999, "server id used by gh-ost process. Default: 99999")
 
@@ -121,6 +135,7 @@ func main() {
    version := flag.Bool("version", false, "Print version & exit")
    checkFlag := flag.Bool("check-flag", false, "Check if another flag exists/supported. This allows for cross-version scripting. Exits with 0 when all additional provided flags exist, nonzero otherwise. You must provide (dummy) values for flags that require a value. Example: gh-ost --check-flag --cut-over-lock-timeout-seconds --nice-ratio 0")
    flag.StringVar(&migrationContext.ForceTmpTableName, "force-table-names", "", "table name prefix to be used on the temporary tables")
+   flag.CommandLine.SetOutput(os.Stdout)
 
    flag.Parse()
 
@@ -128,7 +143,7 @@ func main() {
        return
    }
    if *help {
-       fmt.Fprintf(os.Stderr, "Usage of gh-ost:\n")
+       fmt.Fprintf(os.Stdout, "Usage of gh-ost:\n")
        flag.PrintDefaults()
        return
    }
@@ -190,6 +205,18 @@ func main() {
    if migrationContext.CliMasterPassword != "" && migrationContext.AssumeMasterHostname == "" {
        log.Fatalf("--master-password requires --assume-master-host")
    }
+   if migrationContext.TLSCACertificate != "" && !migrationContext.UseTLS {
+       log.Fatalf("--ssl-ca requires --ssl")
+   }
+   if migrationContext.TLSCertificate != "" && !migrationContext.UseTLS {
+       log.Fatalf("--ssl-cert requires --ssl")
+   }
+   if migrationContext.TLSKey != "" && !migrationContext.UseTLS {
+       log.Fatalf("--ssl-key requires --ssl")
+   }
+   if migrationContext.TLSAllowInsecure && !migrationContext.UseTLS {
+       log.Fatalf("--ssl-allow-insecure requires --ssl")
+   }
    if *replicationLagQuery != "" {
        log.Warningf("--replication-lag-query is deprecated")
    }
@@ -234,14 +261,20 @@ func main() {
    migrationContext.SetThrottleHTTP(*throttleHTTP)
    migrationContext.SetDefaultNumRetries(*defaultRetries)
    migrationContext.ApplyCredentials()
+   if err := migrationContext.SetupTLS(); err != nil {
+       log.Fatale(err)
+   }
    if err := migrationContext.SetCutOverLockTimeoutSeconds(*cutOverLockTimeoutSeconds); err != nil {
        log.Errore(err)
    }
+   if err := migrationContext.SetExponentialBackoffMaxInterval(*exponentialBackoffMaxInterval); err != nil {
+       log.Errore(err)
+   }
 
    log.Infof("starting gh-ost %+v", AppVersion)
    acceptSignals(migrationContext)
 
-   migrator := logic.NewMigrator()
+   migrator := logic.NewMigrator(migrationContext)
    err := migrator.Migrate()
    if err != nil {
        migrator.ExecOnFailureHook()
@@ -24,50 +24,77 @@ const (
    atomicCutOverMagicHint = "ghost-cut-over-sentry"
 )
 
+type dmlBuildResult struct {
+   query     string
+   args      []interface{}
+   rowsDelta int64
+   err       error
+}
+
+func newDmlBuildResult(query string, args []interface{}, rowsDelta int64, err error) *dmlBuildResult {
+   return &dmlBuildResult{
+       query:     query,
+       args:      args,
+       rowsDelta: rowsDelta,
+       err:       err,
+   }
+}
+
+func newDmlBuildResultError(err error) *dmlBuildResult {
+   return &dmlBuildResult{
+       err: err,
+   }
+}
+
 // Applier connects and writes the the applier-server, which is the server where migration
 // happens. This is typically the master, but could be a replica when `--test-on-replica` or
 // `--execute-on-replica` are given.
 // Applier is the one to actually write row data and apply binlog events onto the ghost table.
 // It is where the ghost & changelog tables get created. It is where the cut-over phase happens.
 type Applier struct {
    connectionConfig  *mysql.ConnectionConfig
    db                *gosql.DB
    singletonDB       *gosql.DB
    migrationContext  *base.MigrationContext
+   finishedMigrating int64
 }
 
-func NewApplier() *Applier {
+func NewApplier(migrationContext *base.MigrationContext) *Applier {
    return &Applier{
-       connectionConfig: base.GetMigrationContext().ApplierConnectionConfig,
-       migrationContext: base.GetMigrationContext(),
+       connectionConfig:  migrationContext.ApplierConnectionConfig,
+       migrationContext:  migrationContext,
+       finishedMigrating: 0,
    }
 }
 
 func (this *Applier) InitDBConnections() (err error) {
 
    applierUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName)
-   if this.db, _, err = sqlutils.GetDB(applierUri); err != nil {
+   if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, applierUri); err != nil {
        return err
    }
-   singletonApplierUri := fmt.Sprintf("%s?timeout=0", applierUri)
-   if this.singletonDB, _, err = sqlutils.GetDB(singletonApplierUri); err != nil {
+   singletonApplierUri := fmt.Sprintf("%s&timeout=0", applierUri)
+   if this.singletonDB, _, err = mysql.GetDB(this.migrationContext.Uuid, singletonApplierUri); err != nil {
        return err
    }
    this.singletonDB.SetMaxOpenConns(1)
-   version, err := base.ValidateConnection(this.db, this.connectionConfig)
+   version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
    if err != nil {
        return err
    }
-   if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig); err != nil {
+   if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig, this.migrationContext); err != nil {
        return err
    }
    this.migrationContext.ApplierMySQLVersion = version
    if err := this.validateAndReadTimeZone(); err != nil {
        return err
    }
-   if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
-       return err
-   } else {
-       this.connectionConfig.ImpliedKey = impliedKey
+   if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
+       if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
+           return err
+       } else {
+           this.connectionConfig.ImpliedKey = impliedKey
+       }
    }
    if err := this.readTableColumns(); err != nil {
        return err
@@ -90,7 +117,7 @@ func (this *Applier) validateAndReadTimeZone() error {
 // readTableColumns reads table columns on applier
 func (this *Applier) readTableColumns() (err error) {
    log.Infof("Examining table structure on applier")
-   this.migrationContext.OriginalTableColumnsOnApplier, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName)
+   this.migrationContext.OriginalTableColumnsOnApplier, _, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName)
    if err != nil {
        return err
    }
@@ -99,7 +126,6 @@ func (this *Applier) readTableColumns() (err error) {
 
 // showTableStatus returns the output of `show table status like '...'` command
 func (this *Applier) showTableStatus(tableName string) (rowMap sqlutils.RowMap) {
-   rowMap = nil
    query := fmt.Sprintf(`show /* gh-ost */ table status from %s like '%s'`, sql.EscapeName(this.migrationContext.DatabaseName), tableName)
    sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
        rowMap = m
@@ -213,7 +239,7 @@ func (this *Applier) dropTable(tableName string) error {
        sql.EscapeName(this.migrationContext.DatabaseName),
        sql.EscapeName(tableName),
    )
-   log.Infof("Droppping table %s.%s",
+   log.Infof("Dropping table %s.%s",
        sql.EscapeName(this.migrationContext.DatabaseName),
        sql.EscapeName(tableName),
    )
@@ -263,7 +289,7 @@ func (this *Applier) WriteChangelog(hint, value string) (string, error) {
        sql.EscapeName(this.migrationContext.DatabaseName),
        sql.EscapeName(this.migrationContext.GetChangelogTableName()),
    )
-   _, err := sqlutils.Exec(this.db, query, explicitId, hint, value)
+   _, err := sqlutils.ExecNoPrepare(this.db, query, explicitId, hint, value)
    return hint, err
 }
 
@@ -298,6 +324,9 @@ func (this *Applier) InitiateHeartbeat() {
 
    heartbeatTick := time.Tick(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
    for range heartbeatTick {
+       if atomic.LoadInt64(&this.finishedMigrating) > 0 {
+           return
+       }
        // Generally speaking, we would issue a goroutine, but I'd actually rather
        // have this block the loop rather than spam the master in the event something
        // goes wrong
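The `finishedMigrating` check is this commit's shutdown signal for long-lived goroutines: on completion the migrator stores 1 into the shared int64, and each heartbeat tick polls it before doing work, so the goroutine exits instead of writing heartbeats forever. A condensed, self-contained sketch of the pattern:

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

func main() {
    var finished int64
    done := make(chan struct{})

    go func() {
        tick := time.Tick(10 * time.Millisecond) // stands in for heartbeatTick
        for range tick {
            if atomic.LoadInt64(&finished) > 0 {
                close(done)
                return // exit cleanly once migration is flagged as done
            }
            // ... write heartbeat row ...
        }
    }()

    time.Sleep(35 * time.Millisecond)
    atomic.StoreInt64(&finished, 1) // what the migrator does on completion
    <-done
    fmt.Println("heartbeat goroutine stopped")
}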
@@ -380,7 +409,7 @@ func (this *Applier) ReadMigrationRangeValues() error {
 // CalculateNextIterationRangeEndValues reads the next-iteration-range-end unique key values,
 // which will be used for copying the next chunk of rows. Ir returns "false" if there is
 // no further chunk to work through, i.e. we're past the last chunk and are done with
-// itrating the range (and this done with copying row chunks)
+// iterating the range (and this done with copying row chunks)
 func (this *Applier) CalculateNextIterationRangeEndValues() (hasFurtherRange bool, err error) {
    this.migrationContext.MigrationIterationRangeMinValues = this.migrationContext.MigrationIterationRangeMaxValues
    if this.migrationContext.MigrationIterationRangeMinValues == nil {
@@ -452,10 +481,14 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected
    if err != nil {
        return nil, err
    }
-   sessionQuery := fmt.Sprintf(`SET
-       SESSION time_zone = '%s',
-       sql_mode = CONCAT(@@session.sql_mode, ',STRICT_ALL_TABLES')
-       `, this.migrationContext.ApplierTimeZone)
+   defer tx.Rollback()
+   sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
+   sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
+   if !this.migrationContext.SkipStrictMode {
+       sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
+   }
+   sessionQuery = fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)
+
    if _, err := tx.Exec(sessionQuery); err != nil {
        return nil, err
    }
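To make the string assembly above concrete, the same logic can be reproduced standalone; the printed strings are exactly what this code yields (the addendum string itself starts with a comma, which is carried into the CONCAT argument). Values here are assumed for illustration:

package main

import "fmt"

// buildSessionQuery mirrors the assembly logic from the hunk above.
func buildSessionQuery(applierTimeZone string, skipStrictMode bool) string {
    sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, applierTimeZone)
    sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
    if !skipStrictMode {
        sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
    }
    return fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)
}

func main() {
    fmt.Println(buildSessionQuery("+00:00", false))
    // SET SESSION time_zone = '+00:00', sql_mode = CONCAT(@@session.sql_mode, ',,NO_AUTO_VALUE_ON_ZERO,STRICT_ALL_TABLES')
    fmt.Println(buildSessionQuery("+00:00", true)) // with --skip-strict-mode
    // SET SESSION time_zone = '+00:00', sql_mode = CONCAT(@@session.sql_mode, ',,NO_AUTO_VALUE_ON_ZERO')
}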
@ -899,81 +932,52 @@ func (this *Applier) ShowStatusVariable(variableName string) (result int64, err
|
|||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// updateModifiesUniqueKeyColumns checks whether a UPDATE DML event actually
|
||||||
|
// modifies values of the migration's unique key (the iterated key). This will call
|
||||||
|
// for special handling.
|
||||||
|
func (this *Applier) updateModifiesUniqueKeyColumns(dmlEvent *binlog.BinlogDMLEvent) (modifiedColumn string, isModified bool) {
|
||||||
|
for _, column := range this.migrationContext.UniqueKey.Columns.Columns() {
|
||||||
|
tableOrdinal := this.migrationContext.OriginalTableColumns.Ordinals[column.Name]
|
||||||
|
whereColumnValue := dmlEvent.WhereColumnValues.AbstractValues()[tableOrdinal]
|
||||||
|
newColumnValue := dmlEvent.NewColumnValues.AbstractValues()[tableOrdinal]
|
||||||
|
if newColumnValue != whereColumnValue {
|
||||||
|
return column.Name, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
 // buildDMLEventQuery creates a query to operate on the ghost table, based on an intercepted binlog
 // event entry on the original table.
-func (this *Applier) buildDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) (query string, args []interface{}, rowsDelta int64, err error) {
+func (this *Applier) buildDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) (results [](*dmlBuildResult)) {
 	switch dmlEvent.DML {
 	case binlog.DeleteDML:
 		{
 			query, uniqueKeyArgs, err := sql.BuildDMLDeleteQuery(dmlEvent.DatabaseName, this.migrationContext.GetGhostTableName(), this.migrationContext.OriginalTableColumns, &this.migrationContext.UniqueKey.Columns, dmlEvent.WhereColumnValues.AbstractValues())
-			return query, uniqueKeyArgs, -1, err
+			return append(results, newDmlBuildResult(query, uniqueKeyArgs, -1, err))
 		}
 	case binlog.InsertDML:
 		{
 			query, sharedArgs, err := sql.BuildDMLInsertQuery(dmlEvent.DatabaseName, this.migrationContext.GetGhostTableName(), this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns, dmlEvent.NewColumnValues.AbstractValues())
-			return query, sharedArgs, 1, err
+			return append(results, newDmlBuildResult(query, sharedArgs, 1, err))
 		}
 	case binlog.UpdateDML:
 		{
+			if _, isModified := this.updateModifiesUniqueKeyColumns(dmlEvent); isModified {
+				dmlEvent.DML = binlog.DeleteDML
+				results = append(results, this.buildDMLEventQuery(dmlEvent)...)
+				dmlEvent.DML = binlog.InsertDML
+				results = append(results, this.buildDMLEventQuery(dmlEvent)...)
+				return results
+			}
 			query, sharedArgs, uniqueKeyArgs, err := sql.BuildDMLUpdateQuery(dmlEvent.DatabaseName, this.migrationContext.GetGhostTableName(), this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns, &this.migrationContext.UniqueKey.Columns, dmlEvent.NewColumnValues.AbstractValues(), dmlEvent.WhereColumnValues.AbstractValues())
+			args := sqlutils.Args()
 			args = append(args, sharedArgs...)
 			args = append(args, uniqueKeyArgs...)
-			return query, args, 0, err
+			return append(results, newDmlBuildResult(query, args, 0, err))
 		}
 	}
-	return "", args, 0, fmt.Errorf("Unknown dml event type: %+v", dmlEvent.DML)
+	return append(results, newDmlBuildResultError(fmt.Errorf("Unknown dml event type: %+v", dmlEvent.DML)))
 }
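Note: the sketch below is an editorial illustration, not part of this commit. It shows why buildDMLEventQuery now returns a list of results: an UPDATE that changes unique-key columns cannot be replayed as a single UPDATE on the ghost table, so it is decomposed into a DELETE of the old row plus an INSERT of the new row. The types here are hypothetical simplifications of gh-ost's dmlBuildResult.

package main

import "fmt"

type dmlKind int

const (
	deleteDML dmlKind = iota
	insertDML
	updateDML
)

type buildResult struct {
	query     string
	rowsDelta int64
}

// buildQueries mimics the shape of buildDMLEventQuery above: when the unique
// key changes, emit a delete for the old values and an insert for the new ones.
func buildQueries(kind dmlKind, keyChanged bool) (results []buildResult) {
	switch kind {
	case deleteDML:
		return append(results, buildResult{"DELETE FROM _tbl_gho WHERE ...", -1})
	case insertDML:
		return append(results, buildResult{"REPLACE INTO _tbl_gho VALUES (...)", +1})
	case updateDML:
		if keyChanged {
			results = append(results, buildQueries(deleteDML, false)...)
			results = append(results, buildQueries(insertDML, false)...)
			return results
		}
		return append(results, buildResult{"UPDATE _tbl_gho SET ... WHERE ...", 0})
	}
	return results
}

func main() {
	// An update touching the unique key yields two statements with a net rows delta of 0.
	for _, r := range buildQueries(updateDML, true) {
		fmt.Printf("%+d: %s\n", r.rowsDelta, r.query)
	}
}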
 
-// ApplyDMLEventQuery writes an entry to the ghost table, in response to an intercepted
-// original-table binlog event
-func (this *Applier) ApplyDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) error {
-	query, args, rowDelta, err := this.buildDMLEventQuery(dmlEvent)
-	if err != nil {
-		return err
-	}
-	// TODO The below is in preparation for transactional writes on the ghost tables.
-	// Such writes would be, for example:
-	// - prepended with sql_mode setup
-	// - prepended with time zone setup
-	// - prepended with SET SQL_LOG_BIN=0
-	// - prepended with SET FK_CHECKS=0
-	// etc.
-	//
-	// a known problem: https://github.com/golang/go/issues/9373 -- bitint unsigned values, not supported in database/sql
-	// is solved by silently converting unsigned bigints to string values.
-	//
-
-	err = func() error {
-		tx, err := this.db.Begin()
-		if err != nil {
-			return err
-		}
-		sessionQuery := `SET
-			SESSION time_zone = '+00:00',
-			sql_mode = CONCAT(@@session.sql_mode, ',STRICT_ALL_TABLES')
-		`
-		if _, err := tx.Exec(sessionQuery); err != nil {
-			return err
-		}
-		if _, err := tx.Exec(query, args...); err != nil {
-			return err
-		}
-		if err := tx.Commit(); err != nil {
-			return err
-		}
-		return nil
-	}()
-
-	if err != nil {
-		err = fmt.Errorf("%s; query=%s; args=%+v", err.Error(), query, args)
-		return log.Errore(err)
-	}
-	// no error
-	atomic.AddInt64(&this.migrationContext.TotalDMLEventsApplied, 1)
-	if this.migrationContext.CountTableRows {
-		atomic.AddInt64(&this.migrationContext.RowsDeltaEstimate, rowDelta)
-	}
-	return nil
 }
 
 // ApplyDMLEventQueries applies multiple DML queries onto the _ghost_ table
@@ -992,23 +996,28 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
 		return err
 	}
 
-	sessionQuery := `SET
-		SESSION time_zone = '+00:00',
-		sql_mode = CONCAT(@@session.sql_mode, ',STRICT_ALL_TABLES')
-	`
+	sessionQuery := "SET SESSION time_zone = '+00:00'"
+	sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
+	if !this.migrationContext.SkipStrictMode {
+		sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
+	}
+	sessionQuery = fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)
 
 	if _, err := tx.Exec(sessionQuery); err != nil {
 		return rollback(err)
 	}
 	for _, dmlEvent := range dmlEvents {
-		query, args, rowDelta, err := this.buildDMLEventQuery(dmlEvent)
-		if err != nil {
-			return rollback(err)
+		for _, buildResult := range this.buildDMLEventQuery(dmlEvent) {
+			if buildResult.err != nil {
+				return rollback(buildResult.err)
+			}
+			if _, err := tx.Exec(buildResult.query, buildResult.args...); err != nil {
+				err = fmt.Errorf("%s; query=%s; args=%+v", err.Error(), buildResult.query, buildResult.args)
+				return rollback(err)
+			}
+			totalDelta += buildResult.rowsDelta
 		}
-		if _, err := tx.Exec(query, args...); err != nil {
-			err = fmt.Errorf("%s; query=%s; args=%+v", err.Error(), query, args)
-			return rollback(err)
-		}
-		totalDelta += rowDelta
 	}
 	if err := tx.Commit(); err != nil {
 		return err
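A standalone sketch (not part of this commit) of how the session query above is assembled; it reproduces the same fmt.Sprintf calls from the new code so the resulting SET statement can be printed for both values of SkipStrictMode.

package main

import "fmt"

// buildSessionQuery mirrors the string construction in ApplyDMLEventQueries above.
func buildSessionQuery(skipStrictMode bool) string {
	sessionQuery := "SET SESSION time_zone = '+00:00'"
	sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
	if !skipStrictMode {
		sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
	}
	return fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)
}

func main() {
	fmt.Println(buildSessionQuery(false)) // strict mode appended
	fmt.Println(buildSessionQuery(true))  // strict mode skipped
}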
@@ -1027,3 +1036,10 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
 	log.Debugf("ApplyDMLEventQueries() applied %d events in one transaction", len(dmlEvents))
 	return nil
 }
+
+func (this *Applier) Teardown() {
+	log.Debugf("Tearing down...")
+	this.db.Close()
+	this.singletonDB.Close()
+	atomic.StoreInt64(&this.finishedMigrating, 1)
+}
@@ -37,9 +37,9 @@ type HooksExecutor struct {
 	migrationContext *base.MigrationContext
 }
 
-func NewHooksExecutor() *HooksExecutor {
+func NewHooksExecutor(migrationContext *base.MigrationContext) *HooksExecutor {
 	return &HooksExecutor{
-		migrationContext: base.GetMigrationContext(),
+		migrationContext: migrationContext,
 	}
 }
 
@@ -63,7 +63,12 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [
 	env = append(env, fmt.Sprintf("GH_OST_MIGRATED_HOST=%s", this.migrationContext.GetApplierHostname()))
 	env = append(env, fmt.Sprintf("GH_OST_INSPECTED_HOST=%s", this.migrationContext.GetInspectorHostname()))
 	env = append(env, fmt.Sprintf("GH_OST_EXECUTING_HOST=%s", this.migrationContext.Hostname))
+	env = append(env, fmt.Sprintf("GH_OST_INSPECTED_LAG=%f", this.migrationContext.GetCurrentLagDuration().Seconds()))
+	env = append(env, fmt.Sprintf("GH_OST_PROGRESS=%f", this.migrationContext.GetProgressPct()))
 	env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT=%s", this.migrationContext.HooksHintMessage))
+	env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_OWNER=%s", this.migrationContext.HooksHintOwner))
+	env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_TOKEN=%s", this.migrationContext.HooksHintToken))
+	env = append(env, fmt.Sprintf("GH_OST_DRY_RUN=%t", this.migrationContext.Noop))
 
 	for _, variable := range extraVariables {
 		env = append(env, variable)
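Hook processes receive these variables through their environment. The standalone sketch below (not part of this commit) is a hypothetical hook, written in Go for consistency with the codebase, that reads the newly added variables; real-world hooks are often shell scripts instead.

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// Parse errors are ignored here for brevity; a production hook should handle them.
	progress, _ := strconv.ParseFloat(os.Getenv("GH_OST_PROGRESS"), 64)
	lag, _ := strconv.ParseFloat(os.Getenv("GH_OST_INSPECTED_LAG"), 64)
	dryRun, _ := strconv.ParseBool(os.Getenv("GH_OST_DRY_RUN"))
	fmt.Printf("migration at %.2f%%, replica lag %.2fs, dry-run=%v\n", progress, lag, dryRun)
}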
@@ -26,30 +26,39 @@ const startSlavePostWaitMilliseconds = 500 * time.Millisecond
 // Inspector reads data from the read-MySQL-server (typically a replica, but can be the master)
 // It is used for gaining initial status and structure, and later also follow up on progress and changelog
 type Inspector struct {
 	connectionConfig *mysql.ConnectionConfig
 	db               *gosql.DB
-	migrationContext *base.MigrationContext
+	informationSchemaDb *gosql.DB
+	migrationContext    *base.MigrationContext
 }
 
-func NewInspector() *Inspector {
+func NewInspector(migrationContext *base.MigrationContext) *Inspector {
 	return &Inspector{
-		connectionConfig: base.GetMigrationContext().InspectorConnectionConfig,
-		migrationContext: base.GetMigrationContext(),
+		connectionConfig: migrationContext.InspectorConnectionConfig,
+		migrationContext: migrationContext,
 	}
 }
 
 func (this *Inspector) InitDBConnections() (err error) {
 	inspectorUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName)
-	if this.db, _, err = sqlutils.GetDB(inspectorUri); err != nil {
+	if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, inspectorUri); err != nil {
 		return err
 	}
+
+	informationSchemaUri := this.connectionConfig.GetDBUri("information_schema")
+	if this.informationSchemaDb, _, err = mysql.GetDB(this.migrationContext.Uuid, informationSchemaUri); err != nil {
+		return err
+	}
+
 	if err := this.validateConnection(); err != nil {
 		return err
 	}
-	if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
-		return err
-	} else {
-		this.connectionConfig.ImpliedKey = impliedKey
+	if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
+		if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
+			return err
+		} else {
+			this.connectionConfig.ImpliedKey = impliedKey
+		}
 	}
 	if err := this.validateGrants(); err != nil {
 		return err
@@ -80,24 +89,24 @@ func (this *Inspector) ValidateOriginalTable() (err error) {
 	return nil
 }
 
-func (this *Inspector) InspectTableColumnsAndUniqueKeys(tableName string) (columns *sql.ColumnList, uniqueKeys [](*sql.UniqueKey), err error) {
+func (this *Inspector) InspectTableColumnsAndUniqueKeys(tableName string) (columns *sql.ColumnList, virtualColumns *sql.ColumnList, uniqueKeys [](*sql.UniqueKey), err error) {
 	uniqueKeys, err = this.getCandidateUniqueKeys(tableName)
 	if err != nil {
-		return columns, uniqueKeys, err
+		return columns, virtualColumns, uniqueKeys, err
 	}
 	if len(uniqueKeys) == 0 {
-		return columns, uniqueKeys, fmt.Errorf("No PRIMARY nor UNIQUE key found in table! Bailing out")
+		return columns, virtualColumns, uniqueKeys, fmt.Errorf("No PRIMARY nor UNIQUE key found in table! Bailing out")
 	}
-	columns, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, tableName)
+	columns, virtualColumns, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, tableName)
 	if err != nil {
-		return columns, uniqueKeys, err
+		return columns, virtualColumns, uniqueKeys, err
 	}
 
-	return columns, uniqueKeys, nil
+	return columns, virtualColumns, uniqueKeys, nil
 }
 
 func (this *Inspector) InspectOriginalTable() (err error) {
-	this.migrationContext.OriginalTableColumns, this.migrationContext.OriginalTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.OriginalTableName)
+	this.migrationContext.OriginalTableColumns, this.migrationContext.OriginalTableVirtualColumns, this.migrationContext.OriginalTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.OriginalTableName)
 	if err != nil {
 		return err
 	}
@@ -113,7 +122,7 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
 		return fmt.Errorf("It seems like table structure is not identical between master and replica. This scenario is not supported.")
 	}
 
-	this.migrationContext.GhostTableColumns, this.migrationContext.GhostTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.GetGhostTableName())
+	this.migrationContext.GhostTableColumns, this.migrationContext.GhostTableVirtualColumns, this.migrationContext.GhostTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.GetGhostTableName())
 	if err != nil {
 		return err
 	}
@@ -156,21 +165,15 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
 			return fmt.Errorf("Chosen key (%s) has nullable columns. Bailing out. To force this operation to continue, supply --allow-nullable-unique-key flag. Only do so if you are certain there are no actual NULL values in this key. As long as there aren't, migration should be fine. NULL values in columns of this key will corrupt migration's data", this.migrationContext.UniqueKey)
 		}
 	}
-	if !this.migrationContext.UniqueKey.IsPrimary() {
-		if this.migrationContext.OriginalBinlogRowImage != "FULL" {
-			return fmt.Errorf("binlog_row_image is '%s' and chosen key is %s, which is not the primary key. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.migrationContext.OriginalBinlogRowImage, this.migrationContext.UniqueKey)
-		}
-	}
-
-	this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns = this.getSharedColumns(this.migrationContext.OriginalTableColumns, this.migrationContext.GhostTableColumns, this.migrationContext.ColumnRenameMap)
+	this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns = this.getSharedColumns(this.migrationContext.OriginalTableColumns, this.migrationContext.GhostTableColumns, this.migrationContext.OriginalTableVirtualColumns, this.migrationContext.GhostTableVirtualColumns, this.migrationContext.ColumnRenameMap)
 	log.Infof("Shared columns are %s", this.migrationContext.SharedColumns)
 	// By fact that a non-empty unique key exists we also know the shared columns are non-empty
 
 	// This additional step looks at which columns are unsigned. We could have merged this within
 	// the `getTableColumns()` function, but it's a later patch and introduces some complexity; I feel
 	// comfortable in doing this as a separate step.
-	this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns)
-	this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &this.migrationContext.UniqueKey.Columns)
+	this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns, &this.migrationContext.UniqueKey.Columns)
 	this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.GetGhostTableName(), this.migrationContext.GhostTableColumns, this.migrationContext.MappedSharedColumns)
 
 	for i := range this.migrationContext.SharedColumns.Columns() {
@@ -196,13 +199,13 @@ func (this *Inspector) validateConnection() error {
 		return fmt.Errorf("MySQL replication length limited to 32 characters. See https://dev.mysql.com/doc/refman/5.7/en/assigning-passwords.html")
 	}
 
-	version, err := base.ValidateConnection(this.db, this.connectionConfig)
+	version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
 	this.migrationContext.InspectorMySQLVersion = version
 	return err
 }
 
 // validateGrants verifies the user by which we're executing has necessary grants
-// to do its thang.
+// to do its thing.
 func (this *Inspector) validateGrants() error {
 	query := `show /* gh-ost */ grants for current_user()`
 	foundAll := false
@@ -229,6 +232,9 @@ func (this *Inspector) validateGrants() error {
 		if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", this.migrationContext.DatabaseName)) {
 			foundDBAll = true
 		}
+		if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", strings.Replace(this.migrationContext.DatabaseName, "_", "\\_", -1))) {
+			foundDBAll = true
+		}
 		if base.StringContainsAll(grant, `ALTER`, `CREATE`, `DELETE`, `DROP`, `INDEX`, `INSERT`, `LOCK TABLES`, `SELECT`, `TRIGGER`, `UPDATE`, ` ON *.*`) {
 			foundDBAll = true
 		}
@@ -261,7 +267,7 @@ func (this *Inspector) validateGrants() error {
 
 // restartReplication is required so that we are _certain_ the binlog format and
 // row image settings have actually been applied to the replication thread.
-// It is entriely possible, for example, that the replication is using 'STATEMENT'
+// It is entirely possible, for example, that the replication is using 'STATEMENT'
 // binlog format even as the variable says 'ROW'
 func (this *Inspector) restartReplication() error {
 	log.Infof("Restarting replication on %s:%d to make sure binlog settings apply to replication thread", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
@@ -349,6 +355,9 @@ func (this *Inspector) validateBinlogs() error {
 		this.migrationContext.OriginalBinlogRowImage = "FULL"
 	}
 	this.migrationContext.OriginalBinlogRowImage = strings.ToUpper(this.migrationContext.OriginalBinlogRowImage)
+	if this.migrationContext.OriginalBinlogRowImage != "FULL" {
+		return fmt.Errorf("%s:%d has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogRowImage)
+	}
 
 	log.Infof("binary logs validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
 	return nil
@@ -377,7 +386,7 @@ func (this *Inspector) validateLogSlaveUpdates() error {
 	}
 
 	if this.migrationContext.InspectorIsAlsoApplier() {
-		log.Warningf("log_slave_updates not found on %s:%d, but executing directly on master, so I'm proceeeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
+		log.Warningf("log_slave_updates not found on %s:%d, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
 		return nil
 	}
 
@@ -545,44 +554,35 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
 	err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
 		columnName := m.GetString("COLUMN_NAME")
 		columnType := m.GetString("COLUMN_TYPE")
-		if strings.Contains(columnType, "unsigned") {
-			for _, columnsList := range columnsLists {
-				columnsList.SetUnsigned(columnName)
-			}
-		}
-		if strings.Contains(columnType, "mediumint") {
-			for _, columnsList := range columnsLists {
-				columnsList.GetColumn(columnName).Type = sql.MediumIntColumnType
-			}
-		}
-		if strings.Contains(columnType, "timestamp") {
-			for _, columnsList := range columnsLists {
-				columnsList.GetColumn(columnName).Type = sql.TimestampColumnType
-			}
-		}
-		if strings.Contains(columnType, "datetime") {
-			for _, columnsList := range columnsLists {
-				columnsList.GetColumn(columnName).Type = sql.DateTimeColumnType
-			}
-		}
-		if strings.Contains(columnType, "json") {
-			for _, columnsList := range columnsLists {
-				columnsList.GetColumn(columnName).Type = sql.JSONColumnType
-			}
-		}
-		if strings.Contains(columnType, "float") {
-			for _, columnsList := range columnsLists {
-				columnsList.GetColumn(columnName).Type = sql.FloatColumnType
-			}
-		}
-		if strings.HasPrefix(columnType, "enum") {
-			for _, columnsList := range columnsLists {
-				columnsList.GetColumn(columnName).Type = sql.EnumColumnType
-			}
-		}
-		if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
-			for _, columnsList := range columnsLists {
-				columnsList.SetCharset(columnName, charset)
-			}
-		}
+		for _, columnsList := range columnsLists {
+			column := columnsList.GetColumn(columnName)
+			if column == nil {
+				continue
+			}
+
+			if strings.Contains(columnType, "unsigned") {
+				column.IsUnsigned = true
+			}
+			if strings.Contains(columnType, "mediumint") {
+				column.Type = sql.MediumIntColumnType
+			}
+			if strings.Contains(columnType, "timestamp") {
+				column.Type = sql.TimestampColumnType
+			}
+			if strings.Contains(columnType, "datetime") {
+				column.Type = sql.DateTimeColumnType
+			}
+			if strings.Contains(columnType, "json") {
+				column.Type = sql.JSONColumnType
+			}
+			if strings.Contains(columnType, "float") {
+				column.Type = sql.FloatColumnType
+			}
+			if strings.HasPrefix(columnType, "enum") {
+				column.Type = sql.EnumColumnType
+			}
+			if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
+				column.Charset = charset
+			}
+		}
 		return nil
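A standalone sketch (not part of this commit) of the refactored pattern above: resolve the column object once per list, skip lists that don't contain the column, then set attributes directly on the column. Column and ColumnList here are hypothetical simplifications of the sql package's types.

package main

import (
	"fmt"
	"strings"
)

type Column struct {
	Name       string
	Type       string
	IsUnsigned bool
}

type ColumnList map[string]*Column

func (l ColumnList) GetColumn(name string) *Column { return l[name] }

// applyColumnType mirrors the per-column loop above for two sample attributes.
func applyColumnType(columnName, columnType string, columnsLists ...ColumnList) {
	for _, columnsList := range columnsLists {
		column := columnsList.GetColumn(columnName)
		if column == nil {
			continue // this list doesn't carry the column; unguarded access would panic
		}
		if strings.Contains(columnType, "unsigned") {
			column.IsUnsigned = true
		}
		if strings.Contains(columnType, "mediumint") {
			column.Type = "mediumint"
		}
	}
}

func main() {
	shared := ColumnList{"id": {Name: "id"}}
	unique := ColumnList{} // "id" absent: safely skipped
	applyColumnType("id", "mediumint(9) unsigned", shared, unique)
	fmt.Printf("%+v\n", *shared.GetColumn("id"))
}

The nil guard matters now that applyColumnTypes can receive the unique-key column list alongside the full table lists, and not every list contains every column.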
@@ -622,8 +622,6 @@ func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*
       GROUP BY TABLE_SCHEMA, TABLE_NAME, INDEX_NAME
     ) AS UNIQUES
     ON (
-      COLUMNS.TABLE_SCHEMA = UNIQUES.TABLE_SCHEMA AND
-      COLUMNS.TABLE_NAME = UNIQUES.TABLE_NAME AND
       COLUMNS.COLUMN_NAME = UNIQUES.FIRST_COLUMN_NAME
     )
     WHERE
@@ -685,21 +683,34 @@ func (this *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys [
 }
 
 // getSharedColumns returns the intersection of two lists of columns in same order as the first list
-func (this *Inspector) getSharedColumns(originalColumns, ghostColumns *sql.ColumnList, columnRenameMap map[string]string) (*sql.ColumnList, *sql.ColumnList) {
+func (this *Inspector) getSharedColumns(originalColumns, ghostColumns *sql.ColumnList, originalVirtualColumns, ghostVirtualColumns *sql.ColumnList, columnRenameMap map[string]string) (*sql.ColumnList, *sql.ColumnList) {
 	sharedColumnNames := []string{}
 	for _, originalColumn := range originalColumns.Names() {
 		isSharedColumn := false
 		for _, ghostColumn := range ghostColumns.Names() {
 			if strings.EqualFold(originalColumn, ghostColumn) {
 				isSharedColumn = true
+				break
 			}
 			if strings.EqualFold(columnRenameMap[originalColumn], ghostColumn) {
 				isSharedColumn = true
+				break
 			}
 		}
 		for droppedColumn := range this.migrationContext.DroppedColumnsMap {
 			if strings.EqualFold(originalColumn, droppedColumn) {
 				isSharedColumn = false
+				break
+			}
+		}
+		for _, virtualColumn := range originalVirtualColumns.Names() {
+			if strings.EqualFold(originalColumn, virtualColumn) {
+				isSharedColumn = false
+			}
+		}
+		for _, virtualColumn := range ghostVirtualColumns.Names() {
+			if strings.EqualFold(originalColumn, virtualColumn) {
+				isSharedColumn = false
 			}
 		}
 		if isSharedColumn {
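A standalone sketch (not part of this commit) of the column-intersection rule above, using plain string slices as a hypothetical simplification of sql.ColumnList: a column is shared when it (or its rename target) exists in the ghost table, and is excluded when it is a virtual generated column on either side. The dropped-columns check is omitted here for brevity.

package main

import (
	"fmt"
	"strings"
)

func contains(names []string, name string) bool {
	for _, n := range names {
		if strings.EqualFold(n, name) {
			return true
		}
	}
	return false
}

func sharedColumns(original, ghost, originalVirtual, ghostVirtual []string, renameMap map[string]string) (shared []string) {
	for _, col := range original {
		isShared := contains(ghost, col) || contains(ghost, renameMap[col])
		if contains(originalVirtual, col) || contains(ghostVirtual, col) {
			isShared = false // virtual columns are computed by MySQL, never copied
		}
		if isShared {
			shared = append(shared, col)
		}
	}
	return shared
}

func main() {
	original := []string{"id", "name", "full_name", "old_price"}
	ghost := []string{"id", "name", "full_name", "price"}
	fmt.Println(sharedColumns(original, ghost,
		[]string{"full_name"}, // virtual generated: excluded even though present on both sides
		nil,
		map[string]string{"old_price": "price"})) // prints [id name old_price]
}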
@@ -748,8 +759,14 @@ func (this *Inspector) getMasterConnectionConfig() (applierConfig *mysql.Connect
 }
 
 func (this *Inspector) getReplicationLag() (replicationLag time.Duration, err error) {
-	replicationLag, err = mysql.GetReplicationLag(
-		this.migrationContext.InspectorConnectionConfig,
+	replicationLag, err = mysql.GetReplicationLagFromSlaveStatus(
+		this.informationSchemaDb,
 	)
 	return replicationLag, err
 }
+
+func (this *Inspector) Teardown() {
+	this.db.Close()
+	this.informationSchemaDb.Close()
+	return
+}
@@ -78,16 +78,18 @@ type Migrator struct {
 
 	rowCopyCompleteFlag int64
 	// copyRowsQueue should not be buffered; if buffered some non-damaging but
-	// excessive work happens at the end of the iteration as new copy-jobs arrive befroe realizing the copy is complete
+	// excessive work happens at the end of the iteration as new copy-jobs arrive before realizing the copy is complete
 	copyRowsQueue    chan tableWriteFunc
 	applyEventsQueue chan *applyEventStruct
 
 	handledChangelogStates map[string]bool
 
+	finishedMigrating int64
 }
 
-func NewMigrator() *Migrator {
+func NewMigrator(context *base.MigrationContext) *Migrator {
 	migrator := &Migrator{
-		migrationContext:         base.GetMigrationContext(),
+		migrationContext:         context,
 		parser:                   sql.NewParser(),
 		ghostTableMigrated:       make(chan bool),
 		firstThrottlingCollected: make(chan bool, 3),
@@ -97,13 +99,14 @@ func NewMigrator() *Migrator {
 		copyRowsQueue:          make(chan tableWriteFunc),
 		applyEventsQueue:       make(chan *applyEventStruct, base.MaxEventsBatchSize),
 		handledChangelogStates: make(map[string]bool),
+		finishedMigrating:      0,
 	}
 	return migrator
 }
 
 // initiateHooksExecutor
 func (this *Migrator) initiateHooksExecutor() (err error) {
-	this.hooksExecutor = NewHooksExecutor()
+	this.hooksExecutor = NewHooksExecutor(this.migrationContext)
 	if err := this.hooksExecutor.initHooks(); err != nil {
 		return err
 	}
@@ -146,6 +149,34 @@ func (this *Migrator) retryOperation(operation func() error, notFatalHint ...boo
 	return err
 }
 
+// `retryOperationWithExponentialBackoff` attempts running given function, waiting 2^(n-1)
+// seconds between each attempt, where `n` is the running number of attempts. Exits
+// as soon as the function returns with non-error, or as soon as `MaxRetries`
+// attempts are reached. Wait intervals between attempts obey a maximum of
+// `ExponentialBackoffMaxInterval`.
+func (this *Migrator) retryOperationWithExponentialBackoff(operation func() error, notFatalHint ...bool) (err error) {
+	var interval int64
+	maxRetries := int(this.migrationContext.MaxRetries())
+	maxInterval := this.migrationContext.ExponentialBackoffMaxInterval
+	for i := 0; i < maxRetries; i++ {
+		newInterval := int64(math.Exp2(float64(i - 1)))
+		if newInterval <= maxInterval {
+			interval = newInterval
+		}
+		if i != 0 {
+			time.Sleep(time.Duration(interval) * time.Second)
+		}
+		err = operation()
+		if err == nil {
+			return nil
+		}
+	}
+	if len(notFatalHint) == 0 {
+		this.migrationContext.PanicAbort <- err
+	}
+	return err
+}
+
 // executeAndThrottleOnError executes a given function. If it errors, it
 // throttles.
 func (this *Migrator) executeAndThrottleOnError(operation func() error) (err error) {
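A standalone sketch (not part of this commit) of the wait schedule the new retry helper produces: 2^(n-1) seconds before attempt n, capped at a maximum interval (64 here, standing in for ExponentialBackoffMaxInterval). It reuses the exact interval arithmetic from the diff, just printing instead of sleeping.

package main

import (
	"fmt"
	"math"
)

func main() {
	var interval int64
	const maxInterval = int64(64)
	for i := 0; i < 10; i++ {
		newInterval := int64(math.Exp2(float64(i - 1)))
		if newInterval <= maxInterval {
			interval = newInterval // once the cap is exceeded, interval stays at its last value
		}
		if i != 0 {
			fmt.Printf("attempt %d after sleeping %ds\n", i+1, interval)
		} else {
			fmt.Println("attempt 1 immediately")
		}
	}
}

Running this prints sleeps of 1, 2, 4, 8, 16, 32, 64 seconds, then stays at 64 once 2^(n-1) would exceed the cap.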
@@ -179,7 +210,7 @@ func (this *Migrator) canStopStreaming() bool {
 
 // onChangelogStateEvent is called when a binlog event operation on the changelog table is intercepted.
 func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (err error) {
-	// Hey, I created the changlog table, I know the type of columns it has!
+	// Hey, I created the changelog table, I know the type of columns it has!
 	if hint := dmlEvent.NewColumnValues.StringColumn(2); hint != "state" {
 		return nil
 	}
@@ -224,7 +255,11 @@ func (this *Migrator) listenOnPanicAbort() {
 // validateStatement validates the `alter` statement meets criteria.
 // At this time this means:
 // - column renames are approved
+// - no table rename allowed
 func (this *Migrator) validateStatement() (err error) {
+	if this.parser.IsRenameTable() {
+		return fmt.Errorf("ALTER statement seems to RENAME the table. This is not supported, and you should run your RENAME outside gh-ost.")
+	}
 	if this.parser.HasNonTrivialRenames() && !this.migrationContext.SkipRenamedColumns {
 		this.migrationContext.ColumnRenameMap = this.parser.GetNonTrivialRenames()
 		if !this.migrationContext.ApproveRenamedColumns {
@@ -265,6 +300,18 @@ func (this *Migrator) countTableRows() (err error) {
 	return countRowsFunc()
 }
 
+func (this *Migrator) createFlagFiles() (err error) {
+	if this.migrationContext.PostponeCutOverFlagFile != "" {
+		if !base.FileExists(this.migrationContext.PostponeCutOverFlagFile) {
+			if err := base.TouchFile(this.migrationContext.PostponeCutOverFlagFile); err != nil {
+				return log.Errorf("--postpone-cut-over-flag-file indicated by gh-ost is unable to create said file: %s", err.Error())
+			}
+			log.Infof("Created postpone-cut-over-flag-file: %s", this.migrationContext.PostponeCutOverFlagFile)
+		}
+	}
+	return nil
+}
+
 // Migrate executes the complete migration logic. This is *the* major gh-ost function.
 func (this *Migrator) Migrate() (err error) {
 	log.Infof("Migrating %s.%s", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
@@ -287,6 +334,11 @@ func (this *Migrator) Migrate() (err error) {
 	if err := this.validateStatement(); err != nil {
 		return err
 	}
+
+	// After this point, we'll need to teardown anything that's been started
+	// so we don't leave things hanging around
+	defer this.teardown()
+
 	if err := this.initiateInspector(); err != nil {
 		return err
 	}
@@ -296,6 +348,9 @@ func (this *Migrator) Migrate() (err error) {
 	if err := this.initiateApplier(); err != nil {
 		return err
 	}
+	if err := this.createFlagFiles(); err != nil {
+		return err
+	}
 
 	initialLag, _ := this.inspector.getReplicationLag()
 	log.Infof("Waiting for ghost table to be migrated. Current lag is %+v", initialLag)
@@ -349,7 +404,13 @@ func (this *Migrator) Migrate() (err error) {
 	if err := this.hooksExecutor.onBeforeCutOver(); err != nil {
 		return err
 	}
-	if err := this.retryOperation(this.cutOver); err != nil {
+	var retrier func(func() error, ...bool) error
+	if this.migrationContext.CutOverExponentialBackoff {
+		retrier = this.retryOperationWithExponentialBackoff
+	} else {
+		retrier = this.retryOperation
+	}
+	if err := retrier(this.cutOver); err != nil {
 		return err
 	}
 	atomic.StoreInt64(&this.migrationContext.CutOverCompleteFlag, 1)
@@ -372,7 +433,7 @@ func (this *Migrator) ExecOnFailureHook() (err error) {
 
 func (this *Migrator) handleCutOverResult(cutOverError error) (err error) {
 	if this.migrationContext.TestOnReplica {
-		// We're merly testing, we don't want to keep this state. Rollback the renames as possible
+		// We're merely testing, we don't want to keep this state. Rollback the renames as possible
 		this.applier.RenameTablesRollback()
 	}
 	if cutOverError == nil {
@@ -638,7 +699,7 @@ func (this *Migrator) initiateServer() (err error) {
 	var f printStatusFunc = func(rule PrintStatusRule, writer io.Writer) {
 		this.printStatus(rule, writer)
 	}
-	this.server = NewServer(this.hooksExecutor, f)
+	this.server = NewServer(this.migrationContext, this.hooksExecutor, f)
 	if err := this.server.BindSocketFile(); err != nil {
 		return err
 	}
@@ -658,7 +719,7 @@ func (this *Migrator) initiateServer() (err error) {
 // - heartbeat
 // When `--allow-on-master` is supplied, the inspector is actually the master.
 func (this *Migrator) initiateInspector() (err error) {
-	this.inspector = NewInspector()
+	this.inspector = NewInspector(this.migrationContext)
 	if err := this.inspector.InitDBConnections(); err != nil {
 		return err
 	}
@@ -718,6 +779,9 @@ func (this *Migrator) initiateStatus() error {
 	this.printStatus(ForcePrintStatusAndHintRule)
 	statusTick := time.Tick(1 * time.Second)
 	for range statusTick {
+		if atomic.LoadInt64(&this.finishedMigrating) > 0 {
+			return nil
+		}
 		go this.printStatus(HeuristicPrintStatusRule)
 	}
 
@@ -727,7 +791,7 @@ func (this *Migrator) initiateStatus() error {
 // printMigrationStatusHint prints a detailed configuration dump, that is useful
 // to keep in mind; such as the name of migrated table, throttle params etc.
 // This gets printed at beginning and end of migration, every 10 minutes throughout
-// migration, and as reponse to the "status" interactive command.
+// migration, and as response to the "status" interactive command.
 func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) {
 	w := io.MultiWriter(writers...)
 	fmt.Fprintln(w, fmt.Sprintf("# Migrating %s.%s; Ghost table is %s.%s",
@@ -805,7 +869,7 @@ func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) {
 	}
 }
 
-// printStatus prints the prgoress status, and optionally additionally detailed
+// printStatus prints the progress status, and optionally additionally detailed
 // dump of configuration.
 // `rule` indicates the type of output expected.
 // By default the status is written to standard output, but other writers can
@@ -831,6 +895,8 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
 	} else {
 		progressPct = 100.0 * float64(totalRowsCopied) / float64(rowsEstimate)
 	}
+	// we take the opportunity to update migration context with progressPct
+	this.migrationContext.SetProgressPct(progressPct)
 	// Before status, let's see if we should print a nice reminder for what exactly we're doing here.
 	shouldPrintMigrationStatusHint := (elapsedSeconds%600 == 0)
 	if rule == ForcePrintStatusAndHintRule {
@@ -847,7 +913,7 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
 	eta := "N/A"
 	if progressPct >= 100.0 {
 		eta = "due"
-	} else if progressPct >= 1.0 {
+	} else if progressPct >= 0.1 {
 		elapsedRowCopySeconds := this.migrationContext.ElapsedRowCopyTime().Seconds()
 		totalExpectedSeconds := elapsedRowCopySeconds * float64(rowsEstimate) / float64(totalRowsCopied)
 		etaSeconds = totalExpectedSeconds - elapsedRowCopySeconds
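A standalone sketch (not part of this commit) of the ETA arithmetic above: extrapolate total copy time from rows copied so far, then subtract time already spent. Lowering the threshold from 1.0% to 0.1% progress reports an ETA much earlier while still avoiding extrapolation from a near-zero sample.

package main

import "fmt"

func etaSeconds(elapsedRowCopySeconds float64, totalRowsCopied, rowsEstimate int64) float64 {
	totalExpectedSeconds := elapsedRowCopySeconds * float64(rowsEstimate) / float64(totalRowsCopied)
	return totalExpectedSeconds - elapsedRowCopySeconds
}

func main() {
	// 12s spent, 1,000 of 1,000,000 rows copied (0.1%) -> roughly 11988s remaining
	fmt.Printf("%.0fs\n", etaSeconds(12, 1000, 1000000))
}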
@@ -894,12 +960,13 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
 
 	currentBinlogCoordinates := *this.eventsStreamer.GetCurrentBinlogCoordinates()
 
-	status := fmt.Sprintf("Copy: %d/%d %.1f%%; Applied: %d; Backlog: %d/%d; Time: %+v(total), %+v(copy); streamer: %+v; State: %s; ETA: %s",
+	status := fmt.Sprintf("Copy: %d/%d %.1f%%; Applied: %d; Backlog: %d/%d; Time: %+v(total), %+v(copy); streamer: %+v; Lag: %.2fs, State: %s; ETA: %s",
 		totalRowsCopied, rowsEstimate, progressPct,
 		atomic.LoadInt64(&this.migrationContext.TotalDMLEventsApplied),
 		len(this.applyEventsQueue), cap(this.applyEventsQueue),
 		base.PrettifyDurationOutput(elapsedTime), base.PrettifyDurationOutput(this.migrationContext.ElapsedRowCopyTime()),
 		currentBinlogCoordinates,
+		this.migrationContext.GetCurrentLagDuration().Seconds(),
 		state,
 		eta,
 	)
@@ -917,7 +984,7 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
 
 // initiateStreaming begins streaming of binary log events and registers listeners for such events
 func (this *Migrator) initiateStreaming() error {
-	this.eventsStreamer = NewEventsStreamer()
+	this.eventsStreamer = NewEventsStreamer(this.migrationContext)
 	if err := this.eventsStreamer.InitDBConnections(); err != nil {
 		return err
 	}
@@ -942,6 +1009,9 @@ func (this *Migrator) initiateStreaming() error {
 	go func() {
 		ticker := time.Tick(1 * time.Second)
 		for range ticker {
+			if atomic.LoadInt64(&this.finishedMigrating) > 0 {
+				return
+			}
 			this.migrationContext.SetRecentBinlogCoordinates(*this.eventsStreamer.GetCurrentBinlogCoordinates())
 		}
 	}()
@@ -965,7 +1035,7 @@ func (this *Migrator) addDMLEventsListener() error {
 
 // initiateThrottler kicks in the throttling collection and the throttling checks.
 func (this *Migrator) initiateThrottler() error {
-	this.throttler = NewThrottler(this.applier, this.inspector)
+	this.throttler = NewThrottler(this.migrationContext, this.applier, this.inspector)
 
 	go this.throttler.initiateThrottlerCollection(this.firstThrottlingCollected)
 	log.Infof("Waiting for first throttle metrics to be collected")
@@ -979,7 +1049,7 @@ func (this *Migrator) initiateThrottler() error {
 }
 
 func (this *Migrator) initiateApplier() error {
-	this.applier = NewApplier()
+	this.applier = NewApplier(this.migrationContext)
 	if err := this.applier.InitDBConnections(); err != nil {
 		return err
 	}
@@ -1020,24 +1090,33 @@ func (this *Migrator) iterateChunks() error {
 		log.Debugf("No rows found in table. Rowcopy will be implicitly empty")
 		return terminateRowIteration(nil)
 	}
+
+	var hasNoFurtherRangeFlag int64
 	// Iterate per chunk:
 	for {
-		if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 {
+		if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 || atomic.LoadInt64(&hasNoFurtherRangeFlag) == 1 {
 			// Done
 			// There's another such check down the line
 			return nil
 		}
 		copyRowsFunc := func() error {
-			if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 {
+			if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 || atomic.LoadInt64(&hasNoFurtherRangeFlag) == 1 {
				// Done.
 				// There's another such check down the line
 				return nil
 			}
-			hasFurtherRange, err := this.applier.CalculateNextIterationRangeEndValues()
-			if err != nil {
+
+			// When hasFurtherRange is false, original table might be write locked and CalculateNextIterationRangeEndValues would hangs forever
+
+			hasFurtherRange := false
+			if err := this.retryOperation(func() (e error) {
+				hasFurtherRange, e = this.applier.CalculateNextIterationRangeEndValues()
+				return e
+			}); err != nil {
 				return terminateRowIteration(err)
 			}
 			if !hasFurtherRange {
+				atomic.StoreInt64(&hasNoFurtherRangeFlag, 1)
 				return terminateRowIteration(nil)
 			}
 			// Copy task:
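A standalone sketch (not part of this commit) of the closure pattern used above: capture the result in the enclosing scope and return only the error, so a generic retry helper can re-run the operation. The retry function here is a hypothetical stand-in for this.retryOperation.

package main

import (
	"errors"
	"fmt"
)

// retry re-runs operation up to attempts times, stopping at the first success.
func retry(attempts int, operation func() error) (err error) {
	for i := 0; i < attempts; i++ {
		if err = operation(); err == nil {
			return nil
		}
	}
	return err
}

func main() {
	calls := 0
	calculateNextRange := func() (bool, error) { // stand-in for CalculateNextIterationRangeEndValues
		calls++
		if calls < 3 {
			return false, errors.New("transient error")
		}
		return true, nil
	}

	hasFurtherRange := false
	if err := retry(5, func() (e error) {
		hasFurtherRange, e = calculateNextRange() // result escapes via the closure
		return e
	}); err != nil {
		fmt.Println("giving up:", err)
		return
	}
	fmt.Println("hasFurtherRange:", hasFurtherRange, "after", calls, "calls")
}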
@@ -1055,7 +1134,7 @@ func (this *Migrator) iterateChunks() error {
 			}
 			_, rowsAffected, _, err := this.applier.ApplyIterationInsertQuery()
 			if err != nil {
-				return terminateRowIteration(err)
+				return err // wrapping call will retry
 			}
 			atomic.AddInt64(&this.migrationContext.TotalRowsCopied, rowsAffected)
 			atomic.AddInt64(&this.migrationContext.Iteration, 1)
@@ -1132,6 +1211,10 @@ func (this *Migrator) executeWriteFuncs() error {
 		return nil
 	}
 	for {
+		if atomic.LoadInt64(&this.finishedMigrating) > 0 {
+			return nil
+		}
+
 		this.throttler.throttle(nil)
 
 		// We give higher priority to event processing, then secondary priority to
@@ -1211,3 +1294,27 @@ func (this *Migrator) finalCleanup() error {
 
 	return nil
 }
+
+func (this *Migrator) teardown() {
+	atomic.StoreInt64(&this.finishedMigrating, 1)
+
+	if this.inspector != nil {
+		log.Infof("Tearing down inspector")
+		this.inspector.Teardown()
+	}
+
+	if this.applier != nil {
+		log.Infof("Tearing down applier")
+		this.applier.Teardown()
+	}
+
+	if this.eventsStreamer != nil {
+		log.Infof("Tearing down streamer")
+		this.eventsStreamer.Teardown()
+	}
+
+	if this.throttler != nil {
+		log.Infof("Tearing down throttler")
+		this.throttler.Teardown()
+	}
+}
@@ -30,9 +30,9 @@ type Server struct {
 	printStatus printStatusFunc
 }
 
-func NewServer(hooksExecutor *HooksExecutor, printStatus printStatusFunc) *Server {
+func NewServer(migrationContext *base.MigrationContext, hooksExecutor *HooksExecutor, printStatus printStatusFunc) *Server {
 	return &Server{
-		migrationContext: base.GetMigrationContext(),
+		migrationContext: migrationContext,
 		hooksExecutor:    hooksExecutor,
 		printStatus:      printStatus,
 	}
@@ -130,6 +130,9 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
 	arg := ""
 	if len(tokens) > 1 {
 		arg = strings.TrimSpace(tokens[1])
+		if unquoted, err := strconv.Unquote(arg); err == nil {
+			arg = unquoted
+		}
 	}
 	argIsQuestion := (arg == "?")
 	throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged\n"
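A standalone sketch (not part of this commit) of what strconv.Unquote adds here: arguments sent through the interactive socket as quoted strings are unwrapped, while plain arguments pass through unchanged because Unquote errors and the original value is kept.

package main

import (
	"fmt"
	"strconv"
)

func normalizeArg(arg string) string {
	if unquoted, err := strconv.Unquote(arg); err == nil {
		arg = unquoted
	}
	return arg
}

func main() {
	fmt.Println(normalizeArg(`"mytable"`)) // quoted: becomes mytable
	fmt.Println(normalizeArg(`mytable`))   // unquoted: unchanged
}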
@@ -141,13 +144,13 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
 	switch command {
 	case "help":
 		{
-			fmt.Fprintln(writer, `available commands:
+			fmt.Fprint(writer, `available commands:
 status # Print a detailed status message
 sup # Print a short status message
 coordinates # Print the currently inspected coordinates
 chunk-size=<newsize> # Set a new chunk-size
 dml-batch-size=<newsize> # Set a new dml-batch-size
-nice-ratio=<ratio> # Set a new nice-ratio, immediate sleep after each row-copy operation, float (examples: 0 is agrressive, 0.7 adds 70% runtime, 1.0 doubles runtime, 2.0 triples runtime, ...)
+nice-ratio=<ratio> # Set a new nice-ratio, immediate sleep after each row-copy operation, float (examples: 0 is aggressive, 0.7 adds 70% runtime, 1.0 doubles runtime, 2.0 triples runtime, ...)
 critical-load=<load> # Set a new set of max-load thresholds
 max-lag-millis=<max-lag> # Set a new replication lag threshold
 replication-lag-query=<query> # Set a new query that determines replication lag (no quotes)
@@ -289,12 +292,22 @@ help # This message
 	}
 	case "throttle", "pause", "suspend":
 		{
+			if arg != "" && arg != this.migrationContext.OriginalTableName {
+				// User explicitly provided table name. This is a courtesy protection mechanism
+				err := fmt.Errorf("User commanded 'throttle' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
+				return NoPrintStatusRule, err
+			}
 			atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 1)
 			fmt.Fprintf(writer, throttleHint)
 			return ForcePrintStatusAndHintRule, nil
 		}
 	case "no-throttle", "unthrottle", "resume", "continue":
 		{
+			if arg != "" && arg != this.migrationContext.OriginalTableName {
+				// User explicitly provided table name. This is a courtesy protection mechanism
+				err := fmt.Errorf("User commanded 'no-throttle' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
+				return NoPrintStatusRule, err
+			}
 			atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 0)
 			return ForcePrintStatusAndHintRule, nil
 		}
@ -305,8 +318,8 @@ help # This message
|
|||||||
return NoPrintStatusRule, err
|
return NoPrintStatusRule, err
|
||||||
}
|
}
|
||||||
if arg != "" && arg != this.migrationContext.OriginalTableName {
|
if arg != "" && arg != this.migrationContext.OriginalTableName {
|
||||||
// User exlpicitly provided table name. This is a courtesy protection mechanism
|
// User explicitly provided table name. This is a courtesy protection mechanism
|
||||||
err := fmt.Errorf("User commanded 'unpostpone' on %s, but migrated table is %s; ingoring request.", arg, this.migrationContext.OriginalTableName)
|
err := fmt.Errorf("User commanded 'unpostpone' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
|
||||||
return NoPrintStatusRule, err
|
return NoPrintStatusRule, err
|
||||||
}
|
}
|
||||||
if atomic.LoadInt64(&this.migrationContext.IsPostponingCutOver) > 0 {
|
if atomic.LoadInt64(&this.migrationContext.IsPostponingCutOver) > 0 {
|
||||||
@ -319,7 +332,16 @@ help # This message
|
|||||||
}
|
}
|
||||||
case "panic":
|
case "panic":
|
||||||
{
|
{
|
||||||
err := fmt.Errorf("User commanded 'panic'. I will now panic, without cleanup. PANIC!")
|
if arg == "" && this.migrationContext.ForceNamedPanicCommand {
|
||||||
|
err := fmt.Errorf("User commanded 'panic' without specifying table name, but --force-named-panic is set")
|
||||||
|
return NoPrintStatusRule, err
|
||||||
|
}
|
||||||
|
if arg != "" && arg != this.migrationContext.OriginalTableName {
|
||||||
|
// User explicitly provided table name. This is a courtesy protection mechanism
|
||||||
|
err := fmt.Errorf("User commanded 'panic' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
|
||||||
|
return NoPrintStatusRule, err
|
||||||
|
}
|
||||||
|
err := fmt.Errorf("User commanded 'panic'. The migration will be aborted without cleanup. Please drop the gh-ost tables before trying again.")
|
||||||
this.migrationContext.PanicAbort <- err
|
this.migrationContext.PanicAbort <- err
|
||||||
return NoPrintStatusRule, err
|
return NoPrintStatusRule, err
|
||||||
}
|
}
|
||||||
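A note on the strconv.Unquote call introduced above: it only rewrites arg when the token is a well-formed quoted string, and leaves bare or malformed values untouched. A minimal standalone sketch (the sample values are hypothetical):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// `"100"` unquotes to `100`; `100` and a dangling quote are left as-is
	for _, arg := range []string{`"100"`, `100`, `"dangling`} {
		if unquoted, err := strconv.Unquote(arg); err == nil {
			arg = unquoted
		}
		fmt.Println(arg)
	}
}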
@ -45,10 +45,10 @@ type EventsStreamer struct {
  binlogReader *binlog.GoMySQLReader
}

-func NewEventsStreamer() *EventsStreamer {
+func NewEventsStreamer(migrationContext *base.MigrationContext) *EventsStreamer {
  return &EventsStreamer{
-   connectionConfig: base.GetMigrationContext().InspectorConnectionConfig,
-   migrationContext: base.GetMigrationContext(),
+   connectionConfig: migrationContext.InspectorConnectionConfig,
+   migrationContext: migrationContext,
    listeners: [](*BinlogEventListener){},
    listenersMutex: &sync.Mutex{},
    eventsChannel: make(chan *binlog.BinlogEntry, EventsChannelBufferSize),
@ -104,10 +104,10 @@ func (this *EventsStreamer) notifyListeners(binlogEvent *binlog.BinlogDMLEvent)

func (this *EventsStreamer) InitDBConnections() (err error) {
  EventsStreamerUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName)
- if this.db, _, err = sqlutils.GetDB(EventsStreamerUri); err != nil {
+ if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, EventsStreamerUri); err != nil {
    return err
  }
- if _, err := base.ValidateConnection(this.db, this.connectionConfig); err != nil {
+ if _, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext); err != nil {
    return err
  }
  if err := this.readCurrentBinlogCoordinates(); err != nil {
@ -122,7 +122,7 @@ func (this *EventsStreamer) InitDBConnections() (err error) {

// initBinlogReader creates and connects the reader: we hook up to a MySQL server as a replica
func (this *EventsStreamer) initBinlogReader(binlogCoordinates *mysql.BinlogCoordinates) error {
- goMySQLReader, err := binlog.NewGoMySQLReader(this.migrationContext.InspectorConnectionConfig)
+ goMySQLReader, err := binlog.NewGoMySQLReader(this.migrationContext)
  if err != nil {
    return err
  }
@ -178,7 +178,14 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error {
  var successiveFailures int64
  var lastAppliedRowsEventHint mysql.BinlogCoordinates
  for {
+   if canStopStreaming() {
+     return nil
+   }
    if err := this.binlogReader.StreamEvents(canStopStreaming, this.eventsChannel); err != nil {
+     if canStopStreaming() {
+       return nil
+     }
+
      log.Infof("StreamEvents encountered unexpected error: %+v", err)
      this.migrationContext.MarkPointOfInterest()
      time.Sleep(ReconnectStreamerSleepSeconds * time.Second)
@ -209,3 +216,8 @@ func (this *EventsStreamer) Close() (err error) {
  log.Infof("Closed streamer connection. err=%+v", err)
  return err
}
+
+func (this *EventsStreamer) Teardown() {
+  this.db.Close()
+  return
+}
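The StreamEvents loop above now consults canStopStreaming both before each attempt and again when an attempt fails, so a shutdown that races with a connection error is reported as a clean exit rather than a failure. A stripped-down sketch of that loop shape (all names here are illustrative, not gh-ost's):

package main

import "time"

// streamLoop retries a blocking stream function until the stop predicate fires.
func streamLoop(stream func() error, canStop func() bool) error {
	for {
		if canStop() {
			return nil // orderly shutdown: not an error
		}
		if err := stream(); err != nil {
			if canStop() {
				return nil // the failure raced with shutdown; swallow it
			}
			time.Sleep(time.Second) // back off, then reconnect and retry
		}
	}
}

func main() {
	calls := 0
	_ = streamLoop(
		func() error { calls++; return nil },
		func() bool { return calls > 2 },
	)
}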
@ -16,7 +16,6 @@ import (
  "github.com/github/gh-ost/go/mysql"
  "github.com/github/gh-ost/go/sql"
  "github.com/outbrain/golib/log"
- "github.com/outbrain/golib/sqlutils"
)

var (
@ -39,19 +38,21 @@ var (

const frenoMagicHint = "freno"

-// Throttler collects metrics related to throttling and makes informed decisison
+// Throttler collects metrics related to throttling and makes informed decision
// whether throttling should take place.
type Throttler struct {
  migrationContext *base.MigrationContext
  applier *Applier
  inspector *Inspector
+ finishedMigrating int64
}

-func NewThrottler(applier *Applier, inspector *Inspector) *Throttler {
+func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector) *Throttler {
  return &Throttler{
-   migrationContext: base.GetMigrationContext(),
+   migrationContext: migrationContext,
    applier: applier,
    inspector: inspector,
+   finishedMigrating: 0,
  }
}

@ -139,8 +140,8 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
  if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica {
    // when running on replica, the heartbeat injection is also done on the replica.
    // This means we will always get a good heartbeat value.
-   // When runnign on replica, we should instead check the `SHOW SLAVE STATUS` output.
-   if lag, err := mysql.GetReplicationLag(this.inspector.connectionConfig); err != nil {
+   // When running on replica, we should instead check the `SHOW SLAVE STATUS` output.
+   if lag, err := mysql.GetReplicationLagFromSlaveStatus(this.inspector.informationSchemaDb); err != nil {
      return log.Errore(err)
    } else {
      atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag))
@ -160,6 +161,9 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo

  ticker := time.Tick(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
  for range ticker {
+   if atomic.LoadInt64(&this.finishedMigrating) > 0 {
+     return
+   }
    go collectFunc()
  }
}
@ -182,11 +186,12 @@ func (this *Throttler) collectControlReplicasLag() {
  dbUri := connectionConfig.GetDBUri("information_schema")

  var heartbeatValue string
- if db, _, err := sqlutils.GetDB(dbUri); err != nil {
+ if db, _, err := mysql.GetDB(this.migrationContext.Uuid, dbUri); err != nil {
    return lag, err
  } else if err = db.QueryRow(replicationLagQuery).Scan(&heartbeatValue); err != nil {
    return lag, err
  }

  lag, err = parseChangelogHeartbeat(heartbeatValue)
  return lag, err
}
@ -233,6 +238,9 @@ func (this *Throttler) collectControlReplicasLag() {
  shouldReadLagAggressively := false

  for range aggressiveTicker {
+   if atomic.LoadInt64(&this.finishedMigrating) > 0 {
+     return
+   }
    if counter%relaxedFactor == 0 {
      // we only check if we wish to be aggressive once per second. The parameters for being aggressive
      // do not typically change at all throughout the migration, but nonetheless we check them.
@ -285,6 +293,10 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<-

  ticker := time.Tick(100 * time.Millisecond)
  for range ticker {
+   if atomic.LoadInt64(&this.finishedMigrating) > 0 {
+     return
+   }
+
    if sleep, _ := collectFunc(); sleep {
      time.Sleep(1 * time.Second)
    }
@ -393,6 +405,10 @@ func (this *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan

  throttlerMetricsTick := time.Tick(1 * time.Second)
  for range throttlerMetricsTick {
+   if atomic.LoadInt64(&this.finishedMigrating) > 0 {
+     return
+   }
+
    this.collectGeneralThrottleMetrics()
  }
}()
@ -419,6 +435,9 @@ func (this *Throttler) initiateThrottlerChecks() error {
  }
  throttlerFunction()
  for range throttlerTick {
+   if atomic.LoadInt64(&this.finishedMigrating) > 0 {
+     return nil
+   }
    throttlerFunction()
  }

@ -440,3 +459,8 @@ func (this *Throttler) throttle(onThrottled func()) {
    time.Sleep(250 * time.Millisecond)
  }
}
+
+func (this *Throttler) Teardown() {
+  log.Debugf("Tearing down...")
+  atomic.StoreInt64(&this.finishedMigrating, 1)
+}
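All of the throttler's ticker loops now poll the same finishedMigrating flag, which Teardown raises atomically; each goroutine then exits on its next tick. A self-contained sketch of the pattern (function names are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// tickUntilDone runs work on every tick until the atomic flag is raised.
func tickUntilDone(finished *int64, work func()) {
	ticker := time.Tick(100 * time.Millisecond)
	for range ticker {
		if atomic.LoadInt64(finished) > 0 {
			return // Teardown was called; stop cleanly
		}
		work()
	}
}

func main() {
	var finished int64
	go tickUntilDone(&finished, func() { fmt.Println("tick") })
	time.Sleep(350 * time.Millisecond)
	atomic.StoreInt64(&finished, 1) // what Teardown() does
	time.Sleep(200 * time.Millisecond)
}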
@ -57,7 +57,7 @@ func (this BinlogCoordinates) String() string {
  return this.DisplayString()
}

-// Equals tests equality of this corrdinate and another one.
+// Equals tests equality of this coordinate and another one.
func (this *BinlogCoordinates) Equals(other *BinlogCoordinates) bool {
  if other == nil {
    return false
@ -95,8 +95,8 @@ func (this *BinlogCoordinates) FileSmallerThan(other *BinlogCoordinates) bool {
  return this.LogFile < other.LogFile
}

-// FileNumberDistance returns the numeric distance between this corrdinate's file number and the other's.
-// Effectively it means "how many roatets/FLUSHes would make these coordinates's file reach the other's"
+// FileNumberDistance returns the numeric distance between this coordinate's file number and the other's.
+// Effectively it means "how many rotates/FLUSHes would make these coordinates's file reach the other's"
func (this *BinlogCoordinates) FileNumberDistance(other *BinlogCoordinates) int {
  thisNumber, _ := this.FileNumber()
  otherNumber, _ := other.FileNumber()
@ -6,8 +6,18 @@
package mysql

import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
  "fmt"
+ "io/ioutil"
  "net"
+
+ "github.com/go-sql-driver/mysql"
+)
+
+const (
+ TLS_CONFIG_KEY = "ghost"
)

// ConnectionConfig is the minimal configuration required to connect to a MySQL server
@ -16,6 +26,7 @@ type ConnectionConfig struct {
  User string
  Password string
  ImpliedKey *InstanceKey
+ tlsConfig *tls.Config
}

func NewConnectionConfig() *ConnectionConfig {
@ -29,9 +40,10 @@ func NewConnectionConfig() *ConnectionConfig {
// DuplicateCredentials creates a new connection config with given key and with same credentials as this config
func (this *ConnectionConfig) DuplicateCredentials(key InstanceKey) *ConnectionConfig {
  config := &ConnectionConfig{
    Key: key,
    User: this.User,
    Password: this.Password,
+   tlsConfig: this.tlsConfig,
  }
  config.ImpliedKey = &config.Key
  return config
@ -42,13 +54,54 @@ func (this *ConnectionConfig) Duplicate() *ConnectionConfig {
}

func (this *ConnectionConfig) String() string {
- return fmt.Sprintf("%s, user=%s", this.Key.DisplayString(), this.User)
+ return fmt.Sprintf("%s, user=%s, usingTLS=%t", this.Key.DisplayString(), this.User, this.tlsConfig != nil)
}

func (this *ConnectionConfig) Equals(other *ConnectionConfig) bool {
  return this.Key.Equals(&other.Key) || this.ImpliedKey.Equals(other.ImpliedKey)
}

+func (this *ConnectionConfig) UseTLS(caCertificatePath, clientCertificate, clientKey string, allowInsecure bool) error {
+  var rootCertPool *x509.CertPool
+  var certs []tls.Certificate
+  var err error
+
+  if caCertificatePath == "" {
+    rootCertPool, err = x509.SystemCertPool()
+    if err != nil {
+      return err
+    }
+  } else {
+    rootCertPool = x509.NewCertPool()
+    pem, err := ioutil.ReadFile(caCertificatePath)
+    if err != nil {
+      return err
+    }
+    if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+      return errors.New("could not add ca certificate to cert pool")
+    }
+  }
+  if clientCertificate != "" || clientKey != "" {
+    cert, err := tls.LoadX509KeyPair(clientCertificate, clientKey)
+    if err != nil {
+      return err
+    }
+    certs = []tls.Certificate{cert}
+  }
+
+  this.tlsConfig = &tls.Config{
+    Certificates: certs,
+    RootCAs: rootCertPool,
+    InsecureSkipVerify: allowInsecure,
+  }
+
+  return mysql.RegisterTLSConfig(TLS_CONFIG_KEY, this.tlsConfig)
+}
+
+func (this *ConnectionConfig) TLSConfig() *tls.Config {
+  return this.tlsConfig
+}
+
func (this *ConnectionConfig) GetDBUri(databaseName string) string {
  hostname := this.Key.Hostname
  var ip = net.ParseIP(hostname)
@ -56,5 +109,12 @@ func (this *ConnectionConfig) GetDBUri(databaseName string) string {
    // Wrap IPv6 literals in square brackets
    hostname = fmt.Sprintf("[%s]", hostname)
  }
- return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1", this.User, this.Password, hostname, this.Key.Port, databaseName)
+ interpolateParams := true
+ // go-mysql-driver defaults to false if tls param is not provided; explicitly setting here to
+ // simplify construction of the DSN below.
+ tlsOption := "false"
+ if this.tlsConfig != nil {
+   tlsOption = TLS_CONFIG_KEY
+ }
+ return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?interpolateParams=%t&autocommit=true&charset=utf8mb4,utf8,latin1&tls=%s", this.User, this.Password, hostname, this.Key.Port, databaseName, interpolateParams, tlsOption)
}
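Putting the new TLS pieces together: a caller invokes UseTLS before GetDBUri, so the DSN ends in tls=ghost and go-sql-driver resolves that key to the config registered via RegisterTLSConfig. A runnable sketch under those assumptions (hostname and credentials are hypothetical; an empty CA path selects the system cert pool):

package main

import (
	"log"

	"github.com/github/gh-ost/go/mysql"
)

func main() {
	config := mysql.NewConnectionConfig()
	config.Key = mysql.InstanceKey{Hostname: "db.example.com", Port: 3306} // hypothetical host
	config.User = "gh-ost"
	config.Password = "secret"
	// Empty CA path: verify the server against the system cert pool; no client certs
	if err := config.UseTLS("", "", "", false); err != nil {
		log.Fatal(err)
	}
	// The DSN now ends with ...&tls=ghost
	log.Println(config.GetDBUri("information_schema"))
}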
@ -6,6 +6,7 @@
package mysql

import (
+ "crypto/tls"
  "testing"

  "github.com/outbrain/golib/log"
@ -31,6 +32,10 @@ func TestDuplicateCredentials(t *testing.T) {
  c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
  c.User = "gromit"
  c.Password = "penguin"
+ c.tlsConfig = &tls.Config{
+   InsecureSkipVerify: true,
+   ServerName: "feathers",
+ }

  dup := c.DuplicateCredentials(InstanceKey{Hostname: "otherhost", Port: 3310})
  test.S(t).ExpectEquals(dup.Key.Hostname, "otherhost")
@ -39,6 +44,7 @@ func TestDuplicateCredentials(t *testing.T) {
  test.S(t).ExpectEquals(dup.ImpliedKey.Port, 3310)
  test.S(t).ExpectEquals(dup.User, "gromit")
  test.S(t).ExpectEquals(dup.Password, "penguin")
+ test.S(t).ExpectEquals(dup.tlsConfig, c.tlsConfig)
}

func TestDuplicate(t *testing.T) {
@ -55,3 +61,24 @@ func TestDuplicate(t *testing.T) {
  test.S(t).ExpectEquals(dup.User, "gromit")
  test.S(t).ExpectEquals(dup.Password, "penguin")
}
+
+func TestGetDBUri(t *testing.T) {
+  c := NewConnectionConfig()
+  c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
+  c.User = "gromit"
+  c.Password = "penguin"
+
+  uri := c.GetDBUri("test")
+  test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1&tls=false")
+}
+
+func TestGetDBUriWithTLSSetup(t *testing.T) {
+  c := NewConnectionConfig()
+  c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
+  c.User = "gromit"
+  c.Password = "penguin"
+  c.tlsConfig = &tls.Config{}
+
+  uri := c.GetDBUri("test")
+  test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1&tls=ghost")
+}
@ -15,7 +15,7 @@ const (
  DefaultInstancePort = 3306
)

-// InstanceKey is an instance indicator, identifued by hostname and port
+// InstanceKey is an instance indicator, identified by hostname and port
type InstanceKey struct {
  Hostname string
  Port int
@ -83,7 +83,7 @@ func (this *InstanceKey) IsValid() bool {
  return len(this.Hostname) > 0 && this.Port > 0
}

-// DetachedKey returns an instance key whose hostname is detahced: invalid, but recoverable
+// DetachedKey returns an instance key whose hostname is detached: invalid, but recoverable
func (this *InstanceKey) DetachedKey() *InstanceKey {
  if this.IsDetached() {
    return this
@ -91,7 +91,7 @@ func (this *InstanceKey) DetachedKey() *InstanceKey {
  return &InstanceKey{Hostname: fmt.Sprintf("%s%s", detachHint, this.Hostname), Port: this.Port}
}

-// ReattachedKey returns an instance key whose hostname is detahced: invalid, but recoverable
+// ReattachedKey returns an instance key whose hostname is detached: invalid, but recoverable
func (this *InstanceKey) ReattachedKey() *InstanceKey {
  if !this.IsDetached() {
    return this
@ -8,6 +8,8 @@ package mysql
import (
  gosql "database/sql"
  "fmt"
+ "strings"
+ "sync"
  "time"

  "github.com/github/gh-ost/go/sql"
@ -33,16 +35,32 @@ func (this *ReplicationLagResult) HasLag() bool {
  return this.Lag > 0
}

-// GetReplicationLag returns replication lag for a given connection config; either by explicit query
-// or via SHOW SLAVE STATUS
-func GetReplicationLag(connectionConfig *ConnectionConfig) (replicationLag time.Duration, err error) {
- dbUri := connectionConfig.GetDBUri("information_schema")
- var db *gosql.DB
- if db, _, err = sqlutils.GetDB(dbUri); err != nil {
-   return replicationLag, err
- }
+// knownDBs is a DB cache by uri
+var knownDBs map[string]*gosql.DB = make(map[string]*gosql.DB)
+var knownDBsMutex = &sync.Mutex{}

- err = sqlutils.QueryRowsMap(db, `show slave status`, func(m sqlutils.RowMap) error {
+func GetDB(migrationUuid string, mysql_uri string) (*gosql.DB, bool, error) {
+  cacheKey := migrationUuid + ":" + mysql_uri
+
+  knownDBsMutex.Lock()
+  defer func() {
+    knownDBsMutex.Unlock()
+  }()
+
+  var exists bool
+  if _, exists = knownDBs[cacheKey]; !exists {
+    if db, err := gosql.Open("mysql", mysql_uri); err == nil {
+      knownDBs[cacheKey] = db
+    } else {
+      return db, exists, err
+    }
+  }
+  return knownDBs[cacheKey], exists, nil
+}
+
+// GetReplicationLagFromSlaveStatus returns replication lag for a given db; via SHOW SLAVE STATUS
+func GetReplicationLagFromSlaveStatus(informationSchemaDb *gosql.DB) (replicationLag time.Duration, err error) {
+  err = sqlutils.QueryRowsMap(informationSchemaDb, `show slave status`, func(m sqlutils.RowMap) error {
    slaveIORunning := m.GetString("Slave_IO_Running")
    slaveSQLRunning := m.GetString("Slave_SQL_Running")
    secondsBehindMaster := m.GetNullInt64("Seconds_Behind_Master")
@ -52,15 +70,19 @@ func GetReplicationLag(connectionConfig *ConnectionConfig) (replicationLag time.
    replicationLag = time.Duration(secondsBehindMaster.Int64) * time.Second
    return nil
  })

  return replicationLag, err
}

func GetMasterKeyFromSlaveStatus(connectionConfig *ConnectionConfig) (masterKey *InstanceKey, err error) {
  currentUri := connectionConfig.GetDBUri("information_schema")
- db, _, err := sqlutils.GetDB(currentUri)
+ // This function is only called once, okay to not have a cached connection pool
+ db, err := gosql.Open("mysql", currentUri)
  if err != nil {
    return nil, err
  }
+ defer db.Close()

  err = sqlutils.QueryRowsMap(db, `show slave status`, func(rowMap sqlutils.RowMap) error {
    // We wish to recognize the case where the topology's master actually has replication configuration.
    // This can happen when a DBA issues a `RESET SLAVE` instead of `RESET SLAVE ALL`.
@ -73,7 +95,6 @@ func GetMasterKeyFromSlaveStatus(connectionConfig *ConnectionConfig) (masterKey
    slaveIORunning := rowMap.GetString("Slave_IO_Running")
    slaveSQLRunning := rowMap.GetString("Slave_SQL_Running")

-   //
    if slaveIORunning != "Yes" || slaveSQLRunning != "Yes" {
      return fmt.Errorf("Replication on %+v is broken: Slave_IO_Running: %s, Slave_SQL_Running: %s. Please make sure replication runs before using gh-ost.",
        connectionConfig.Key,
@ -153,7 +174,7 @@ func GetInstanceKey(db *gosql.DB) (instanceKey *InstanceKey, err error) {
}

// GetTableColumns reads column list from given table
-func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnList, error) {
+func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnList, *sql.ColumnList, error) {
  query := fmt.Sprintf(`
    show columns from %s.%s
    `,
@ -161,18 +182,24 @@ func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnL
    sql.EscapeName(tableName),
  )
  columnNames := []string{}
+ virtualColumnNames := []string{}
  err := sqlutils.QueryRowsMap(db, query, func(rowMap sqlutils.RowMap) error {
-   columnNames = append(columnNames, rowMap.GetString("Field"))
+   columnName := rowMap.GetString("Field")
+   columnNames = append(columnNames, columnName)
+   if strings.Contains(rowMap.GetString("Extra"), " GENERATED") {
+     log.Debugf("%s is a generated column", columnName)
+     virtualColumnNames = append(virtualColumnNames, columnName)
+   }
    return nil
  })
  if err != nil {
-   return nil, err
+   return nil, nil, err
  }
  if len(columnNames) == 0 {
-   return nil, log.Errorf("Found 0 columns on %s.%s. Bailing out",
+   return nil, nil, log.Errorf("Found 0 columns on %s.%s. Bailing out",
      sql.EscapeName(databaseName),
      sql.EscapeName(tableName),
    )
  }
- return sql.NewColumnList(columnNames), nil
+ return sql.NewColumnList(columnNames), sql.NewColumnList(virtualColumnNames), nil
}
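Because GetDB keys its cache on migration uuid plus DSN, repeated calls within one migration share a connection pool while separate migrations get their own. A sketch (the DSN is hypothetical; gosql.Open does not dial, so this runs without a live server):

package main

import (
	"fmt"

	"github.com/github/gh-ost/go/mysql"
	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver for database/sql
)

func main() {
	uri := "user:pass@tcp(localhost:3306)/information_schema" // hypothetical DSN
	db1, existed1, _ := mysql.GetDB("uuid-aaa", uri)          // existed1 == false: freshly opened
	db2, existed2, _ := mysql.GetDB("uuid-aaa", uri)          // existed2 == true: cache hit
	db3, _, _ := mysql.GetDB("uuid-bbb", uri)                 // distinct pool for another migration
	fmt.Println(existed1, existed2, db1 == db2, db1 == db3)   // false true true false
}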
@ -15,11 +15,11 @@ type ValueComparisonSign string

const (
  LessThanComparisonSign ValueComparisonSign = "<"
- LessThanOrEqualsComparisonSign = "<="
- EqualsComparisonSign = "="
- GreaterThanOrEqualsComparisonSign = ">="
- GreaterThanComparisonSign = ">"
- NotEqualsComparisonSign = "!="
+ LessThanOrEqualsComparisonSign ValueComparisonSign = "<="
+ EqualsComparisonSign ValueComparisonSign = "="
+ GreaterThanOrEqualsComparisonSign ValueComparisonSign = ">="
+ GreaterThanComparisonSign ValueComparisonSign = ">"
+ NotEqualsComparisonSign ValueComparisonSign = "!="
)

// EscapeName will escape a db/table/column/... name by wrapping with backticks.
@ -140,13 +140,12 @@ func BuildRangeComparison(columns []string, values []string, args []interface{},
  comparisons := []string{}

  for i, column := range columns {
-   //
    value := values[i]
    rangeComparison, err := BuildValueComparison(column, value, comparisonSign)
    if err != nil {
      return "", explodedArgs, err
    }
-   if len(columns[0:i]) > 0 {
+   if i > 0 {
      equalitiesComparison, err := BuildEqualsComparison(columns[0:i], values[0:i])
      if err != nil {
        return "", explodedArgs, err
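The ValueComparisonSign annotations above matter because, in a Go const block, a constant given an explicit value but no type is an untyped constant whose default type here is plain string; only lines that omit the expression entirely inherit the previous line. A small illustration:

package main

import "fmt"

type Sign string

const (
	Typed   Sign = "<"
	Untyped      = "<=" // explicit value, no type: an untyped string constant
)

func main() {
	a := Typed   // a is Sign
	b := Untyped // b defaults to string, not Sign
	fmt.Printf("%T %T\n", a, b) // main.Sign string
}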
@ -8,6 +8,7 @@ package sql
import (
  "golang.org/x/text/encoding"
  "golang.org/x/text/encoding/charmap"
+ "golang.org/x/text/encoding/simplifiedchinese"
)

type charsetEncoding map[string]encoding.Encoding
@ -18,4 +19,5 @@ func init() {
  charsetEncodingMap = make(map[string]encoding.Encoding)
  // Begin mappings
  charsetEncodingMap["latin1"] = charmap.Windows1252
+ charsetEncodingMap["gbk"] = simplifiedchinese.GBK
}
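With the gbk mapping registered, column bytes can be decoded through the same golang.org/x/text machinery used for latin1. A standalone sketch (the sample bytes are the GBK encoding of 中):

package main

import (
	"fmt"

	"golang.org/x/text/encoding/simplifiedchinese"
)

func main() {
	raw := []byte{0xd6, 0xd0} // GBK bytes as they would arrive from a gbk-charset column
	decoded, err := simplifiedchinese.GBK.NewDecoder().Bytes(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded)) // 中
}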
@ -15,11 +15,13 @@ var (
  sanitizeQuotesRegexp = regexp.MustCompile("('[^']*')")
  renameColumnRegexp = regexp.MustCompile(`(?i)\bchange\s+(column\s+|)([\S]+)\s+([\S]+)\s+`)
  dropColumnRegexp = regexp.MustCompile(`(?i)\bdrop\s+(column\s+|)([\S]+)$`)
+ renameTableRegexp = regexp.MustCompile(`(?i)\brename\s+(to|as)\s+`)
)

type Parser struct {
  columnRenameMap map[string]string
  droppedColumns map[string]bool
+ isRenameTable bool
}

func NewParser() *Parser {
@ -86,6 +88,12 @@ func (this *Parser) parseAlterToken(alterToken string) (err error) {
      this.droppedColumns[submatch[2]] = true
    }
  }
+ {
+   // rename table
+   if renameTableRegexp.MatchString(alterToken) {
+     this.isRenameTable = true
+   }
+ }
  return nil
}

@ -115,3 +123,7 @@ func (this *Parser) HasNonTrivialRenames() bool {
func (this *Parser) DroppedColumnsMap() map[string]bool {
  return this.droppedColumns
}
+
+func (this *Parser) IsRenameTable() bool {
+  return this.isRenameTable
+}
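The new renameTableRegexp matches both RENAME TO and RENAME AS forms, case-insensitively, while leaving column renames alone. A quick standalone check (sample tokens are illustrative):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	renameTableRegexp := regexp.MustCompile(`(?i)\brename\s+(to|as)\s+`)
	for _, token := range []string{
		"rename as something_else", // true
		"RENAME TO something_else", // true: case-insensitive
		"change a b int",           // false: not a table rename
		"engine=innodb",            // false
	} {
		fmt.Println(token, "=>", renameTableRegexp.MatchString(token))
	}
}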
@ -159,3 +159,42 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) {
    test.S(t).ExpectTrue(parser.droppedColumns["b"])
  }
}
+
+func TestParseAlterStatementRenameTable(t *testing.T) {
+
+  {
+    parser := NewParser()
+    statement := "drop column b"
+    err := parser.ParseAlterStatement(statement)
+    test.S(t).ExpectNil(err)
+    test.S(t).ExpectFalse(parser.isRenameTable)
+  }
+  {
+    parser := NewParser()
+    statement := "rename as something_else"
+    err := parser.ParseAlterStatement(statement)
+    test.S(t).ExpectNil(err)
+    test.S(t).ExpectTrue(parser.isRenameTable)
+  }
+  {
+    parser := NewParser()
+    statement := "drop column b, rename as something_else"
+    err := parser.ParseAlterStatement(statement)
+    test.S(t).ExpectNil(err)
+    test.S(t).ExpectTrue(parser.isRenameTable)
+  }
+  {
+    parser := NewParser()
+    statement := "engine=innodb rename as something_else"
+    err := parser.ParseAlterStatement(statement)
+    test.S(t).ExpectNil(err)
+    test.S(t).ExpectTrue(parser.isRenameTable)
+  }
+  {
+    parser := NewParser()
+    statement := "rename as something_else, engine=innodb"
+    err := parser.ParseAlterStatement(statement)
+    test.S(t).ExpectNil(err)
+    test.S(t).ExpectTrue(parser.isRenameTable)
+  }
+}
@ -15,18 +15,18 @@ import (
type ColumnType int

const (
  UnknownColumnType ColumnType = iota
- TimestampColumnType = iota
- DateTimeColumnType = iota
- EnumColumnType = iota
- MediumIntColumnType = iota
- JSONColumnType = iota
- FloatColumnType = iota
+ TimestampColumnType
+ DateTimeColumnType
+ EnumColumnType
+ MediumIntColumnType
+ JSONColumnType
+ FloatColumnType
)

const maxMediumintUnsigned int32 = 16777215

-type TimezoneConvertion struct {
+type TimezoneConversion struct {
  ToTimezone string
}

@ -35,7 +35,7 @@ type Column struct {
  IsUnsigned bool
  Charset string
  Type ColumnType
- timezoneConversion *TimezoneConvertion
+ timezoneConversion *TimezoneConversion
}

func (this *Column) convertArg(arg interface{}) interface{} {
@ -172,7 +172,7 @@ func (this *ColumnList) GetColumnType(columnName string) ColumnType {
}

func (this *ColumnList) SetConvertDatetimeToTimestamp(columnName string, toTimezone string) {
- this.GetColumn(columnName).timezoneConversion = &TimezoneConvertion{ToTimezone: toTimezone}
+ this.GetColumn(columnName).timezoneConversion = &TimezoneConversion{ToTimezone: toTimezone}
}

func (this *ColumnList) HasTimezoneConversion(columnName string) bool {
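The const-block cleanup above leans on a Go rule: when a line in a const block omits its expression, it repeats the previous line's expression and type, so `= iota` only needs to appear once. A small illustration:

package main

import "fmt"

type ColumnType int

const (
	UnknownColumnType ColumnType = iota // 0
	TimestampColumnType                 // 1: omitted expression repeats "ColumnType = iota"
	DateTimeColumnType                  // 2
)

func main() {
	fmt.Println(UnknownColumnType, TimestampColumnType, DateTimeColumnType) // 0 1 2
}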
|
9
localtests/autoinc-zero-value/create.sql
Normal file
9
localtests/autoinc-zero-value/create.sql
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
drop table if exists gh_ost_test;
|
||||||
|
create table gh_ost_test (
|
||||||
|
id int auto_increment,
|
||||||
|
i int not null,
|
||||||
|
primary key(id)
|
||||||
|
) auto_increment=1;
|
||||||
|
|
||||||
|
set session sql_mode='NO_AUTO_VALUE_ON_ZERO';
|
||||||
|
insert into gh_ost_test values (0, 23);
|
20
localtests/bit-add/create.sql
Normal file
20
localtests/bit-add/create.sql
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
drop table if exists gh_ost_test;
|
||||||
|
create table gh_ost_test (
|
||||||
|
id int auto_increment,
|
||||||
|
i int not null,
|
||||||
|
primary key(id)
|
||||||
|
) auto_increment=1;
|
||||||
|
|
||||||
|
drop event if exists gh_ost_test;
|
||||||
|
delimiter ;;
|
||||||
|
create event gh_ost_test
|
||||||
|
on schedule every 1 second
|
||||||
|
starts current_timestamp
|
||||||
|
ends current_timestamp + interval 60 second
|
||||||
|
on completion not preserve
|
||||||
|
enable
|
||||||
|
do
|
||||||
|
begin
|
||||||
|
insert into gh_ost_test values (null, 11);
|
||||||
|
insert into gh_ost_test values (null, 13);
|
||||||
|
end ;;
|
1
localtests/bit-add/extra_args
Normal file
1
localtests/bit-add/extra_args
Normal file
@ -0,0 +1 @@
|
|||||||
|
--alter="add column is_good bit null default 0"
|
1
localtests/bit-add/ghost_columns
Normal file
1
localtests/bit-add/ghost_columns
Normal file
@ -0,0 +1 @@
|
|||||||
|
id, i
|
1
localtests/bit-add/orig_columns
Normal file
1
localtests/bit-add/orig_columns
Normal file
@ -0,0 +1 @@
|
|||||||
|
id, i
|
24
localtests/bit-dml/create.sql
Normal file
24
localtests/bit-dml/create.sql
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
drop table if exists gh_ost_test;
|
||||||
|
create table gh_ost_test (
|
||||||
|
id int auto_increment,
|
||||||
|
i int not null,
|
||||||
|
is_good bit null default 0,
|
||||||
|
primary key(id)
|
||||||
|
) auto_increment=1;
|
||||||
|
|
||||||
|
drop event if exists gh_ost_test;
|
||||||
|
delimiter ;;
|
||||||
|
create event gh_ost_test
|
||||||
|
on schedule every 1 second
|
||||||
|
starts current_timestamp
|
||||||
|
ends current_timestamp + interval 60 second
|
||||||
|
on completion not preserve
|
||||||
|
enable
|
||||||
|
do
|
||||||
|
begin
|
||||||
|
insert into gh_ost_test values (null, 11, 0);
|
||||||
|
insert into gh_ost_test values (null, 13, 1);
|
||||||
|
insert into gh_ost_test values (null, 17, 1);
|
||||||
|
|
||||||
|
update gh_ost_test set is_good=0 where i=13 order by id desc limit 1;
|
||||||
|
end ;;
|
1
localtests/bit-dml/extra_args
Normal file
1
localtests/bit-dml/extra_args
Normal file
@ -0,0 +1 @@
|
|||||||
|
--alter="modify column is_good bit not null default 0" --approve-renamed-columns
|
31
localtests/convert-utf8mb4/create.sql
Normal file
31
localtests/convert-utf8mb4/create.sql
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
drop table if exists gh_ost_test;
|
||||||
|
create table gh_ost_test (
|
||||||
|
id int auto_increment,
|
||||||
|
t varchar(128) charset utf8 collate utf8_general_ci,
|
||||||
|
tl varchar(128) charset latin1 not null,
|
||||||
|
ta varchar(128) charset ascii not null,
|
||||||
|
primary key(id)
|
||||||
|
) auto_increment=1;
|
||||||
|
|
||||||
|
insert into gh_ost_test values (null, 'átesting');
|
||||||
|
|
||||||
|
|
||||||
|
insert into gh_ost_test values (null, 'Hello world, Καλημέρα κόσμε, コンニチハ', 'átesting0', 'initial');
|
||||||
|
|
||||||
|
drop event if exists gh_ost_test;
|
||||||
|
delimiter ;;
|
||||||
|
create event gh_ost_test
|
||||||
|
on schedule every 1 second
|
||||||
|
starts current_timestamp
|
||||||
|
ends current_timestamp + interval 60 second
|
||||||
|
on completion not preserve
|
||||||
|
enable
|
||||||
|
do
|
||||||
|
begin
|
||||||
|
insert into gh_ost_test values (null, md5(rand()), 'átesting-a', 'a');
|
||||||
|
insert into gh_ost_test values (null, 'novo proprietário', 'átesting-b', 'b');
|
||||||
|
insert into gh_ost_test values (null, '2H₂ + O₂ ⇌ 2H₂O, R = 4.7 kΩ, ⌀ 200 mm', 'átesting-c', 'c');
|
||||||
|
insert into gh_ost_test values (null, 'usuário', 'átesting-x', 'x');
|
||||||
|
|
||||||
|
delete from gh_ost_test where ta='x' order by id desc limit 1;
|
||||||
|
end ;;
|
1
localtests/convert-utf8mb4/extra_args
Normal file
1
localtests/convert-utf8mb4/extra_args
Normal file
@ -0,0 +1 @@
|
|||||||
|
--alter='convert to character set utf8mb4'
|
27
localtests/datetime-1970/create.sql
Normal file
27
localtests/datetime-1970/create.sql
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
set session time_zone='+00:00';
|
||||||
|
|
||||||
|
drop table if exists gh_ost_test;
|
||||||
|
create table gh_ost_test (
|
||||||
|
id int auto_increment,
|
||||||
|
create_time timestamp NULL DEFAULT '0000-00-00 00:00:00',
|
||||||
|
update_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
|
||||||
|
counter int(10) unsigned DEFAULT NULL,
|
||||||
|
primary key(id)
|
||||||
|
) auto_increment=1;
|
||||||
|
|
||||||
|
set session time_zone='+00:00';
|
||||||
|
insert into gh_ost_test values (1, '0000-00-00 00:00:00', now(), 0);
|
||||||
|
|
||||||
|
drop event if exists gh_ost_test;
|
||||||
|
delimiter ;;
|
||||||
|
create event gh_ost_test
|
||||||
|
on schedule every 1 second
|
||||||
|
starts current_timestamp
|
||||||
|
ends current_timestamp + interval 60 second
|
||||||
|
on completion not preserve
|
||||||
|
enable
|
||||||
|
do
|
||||||
|
begin
|
||||||
|
set session time_zone='+00:00';
|
||||||
|
update gh_ost_test set counter = counter + 1 where id = 1;
|
||||||
|
end ;;
|
1
localtests/datetime-1970/extra_args
Normal file
1
localtests/datetime-1970/extra_args
Normal file
@ -0,0 +1 @@
|
|||||||
|
--alter='add column name varchar(1)'
|
1
localtests/datetime-1970/ghost_columns
Normal file
1
localtests/datetime-1970/ghost_columns
Normal file
@ -0,0 +1 @@
|
|||||||
|
id, create_time, update_time, counter
|
1
localtests/datetime-1970/orig_columns
Normal file
1
localtests/datetime-1970/orig_columns
Normal file
@ -0,0 +1 @@
|
|||||||
|
id, create_time, update_time, counter
|
0
localtests/datetime-1970/sql_mode
Normal file
0
localtests/datetime-1970/sql_mode
Normal file
@ -0,0 +1 @@
|
|||||||
|
(5.5)
|
@ -17,7 +17,7 @@ create event gh_ost_test
|
|||||||
starts current_timestamp
|
starts current_timestamp
|
||||||
ends current_timestamp + interval 60 second
|
ends current_timestamp + interval 60 second
|
||||||
on completion not preserve
|
on completion not preserve
|
||||||
enable
|
disable on slave
|
||||||
do
|
do
|
||||||
begin
|
begin
|
||||||
insert into gh_ost_test values (null, 11, now(), now(), now(), 0);
|
insert into gh_ost_test values (null, 11, now(), now(), now(), 0);
|
||||||
|
1
localtests/datetime-submillis/ignore_versions
Normal file
1
localtests/datetime-submillis/ignore_versions
Normal file
@ -0,0 +1 @@
|
|||||||
|
(5.5)
|
1
localtests/datetime-to-timestamp-pk-fail/ignore_versions
Normal file
1
localtests/datetime-to-timestamp-pk-fail/ignore_versions
Normal file
@ -0,0 +1 @@
|
|||||||
|
(5.5)
|
1
localtests/datetime/ignore_versions
Normal file
1
localtests/datetime/ignore_versions
Normal file
@ -0,0 +1 @@
|
|||||||
|
(5.5)
|
23
localtests/decimal/create.sql
Normal file
23
localtests/decimal/create.sql
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
drop table if exists gh_ost_test;
|
||||||
|
create table gh_ost_test (
|
||||||
|
id int auto_increment,
|
||||||
|
dec0 decimal(65,30) unsigned NOT NULL DEFAULT '0.000000000000000000000000000000',
|
||||||
|
dec1 decimal(65,30) unsigned NOT NULL DEFAULT '1.000000000000000000000000000000',
|
||||||
|
primary key(id)
|
||||||
|
) auto_increment=1;
|
||||||
|
|
||||||
|
drop event if exists gh_ost_test;
|
||||||
|
delimiter ;;
|
||||||
|
create event gh_ost_test
|
||||||
|
on schedule every 1 second
|
||||||
|
starts current_timestamp
|
||||||
|
ends current_timestamp + interval 60 second
|
||||||
|
on completion not preserve
|
||||||
|
enable
|
||||||
|
do
|
||||||
|
begin
|
||||||
|
insert into gh_ost_test values (null, 0.0, 0.0);
|
||||||
|
insert into gh_ost_test values (null, 2.0, 4.0);
|
||||||
|
insert into gh_ost_test values (null, 99999999999999999999999999999999999.000, 6.0);
|
||||||
|
update gh_ost_test set dec1=4.5 where dec2=4.0 order by id desc limit 1;
|
||||||
|
end ;;
|
22
localtests/fail-rename-table/create.sql
Normal file
22
localtests/fail-rename-table/create.sql
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
drop table if exists gh_ost_test;
|
||||||
|
create table gh_ost_test (
|
||||||
|
id int auto_increment,
|
||||||
|
i int not null,
|
||||||
|
ts timestamp,
|
||||||
|
primary key(id)
|
||||||
|
) auto_increment=1;
|
||||||
|
|
||||||
|
drop event if exists gh_ost_test;
|
||||||
|
delimiter ;;
|
||||||
|
create event gh_ost_test
|
||||||
|
on schedule every 1 second
|
||||||
|
starts current_timestamp
|
||||||
|
ends current_timestamp + interval 60 second
|
||||||
|
on completion not preserve
|
||||||
|
enable
|
||||||
|
do
|
||||||
|
begin
|
||||||
|
insert into gh_ost_test values (null, 11, now());
|
||||||
|
insert into gh_ost_test values (null, 13, now());
|
||||||
|
insert into gh_ost_test values (null, 17, now());
|
||||||
|
end ;;
|
1
localtests/fail-rename-table/expect_failure
Normal file
1
localtests/fail-rename-table/expect_failure
Normal file
@ -0,0 +1 @@
|
|||||||
|
ALTER statement seems to RENAME the table
|
1
localtests/fail-rename-table/extra_args
Normal file
1
localtests/fail-rename-table/extra_args
Normal file
@ -0,0 +1 @@
|
|||||||
|
--alter="rename as something_else"
|
52
localtests/fail-update-pk-column/create.sql
Normal file
52
localtests/fail-update-pk-column/create.sql
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
drop table if exists gh_ost_test;
|
||||||
|
create table gh_ost_test (
|
||||||
|
id int auto_increment,
|
||||||
|
i int not null,
|
||||||
|
primary key(id)
|
||||||
|
) auto_increment=1;
|
||||||
|
|
||||||
|
insert into gh_ost_test values (null, 101);
|
||||||
|
insert into gh_ost_test values (null, 102);
|
||||||
|
insert into gh_ost_test values (null, 103);
|
||||||
|
insert into gh_ost_test values (null, 104);
|
||||||
|
insert into gh_ost_test values (null, 105);
|
||||||
|
insert into gh_ost_test values (null, 106);
|
||||||
|
insert into gh_ost_test values (null, 107);
|
||||||
|
insert into gh_ost_test values (null, 108);
|
||||||
|
insert into gh_ost_test values (null, 109);
|
||||||
|
insert into gh_ost_test values (null, 110);
|
||||||
|
insert into gh_ost_test values (null, 111);
|
||||||
|
insert into gh_ost_test values (null, 112);
|
||||||
|
insert into gh_ost_test values (null, 113);
|
||||||
|
insert into gh_ost_test values (null, 114);
|
||||||
|
insert into gh_ost_test values (null, 115);
|
||||||
|
insert into gh_ost_test values (null, 116);
|
||||||
|
insert into gh_ost_test values (null, 117);
|
||||||
|
insert into gh_ost_test values (null, 118);
|
||||||
|
insert into gh_ost_test values (null, 119);
|
||||||
|
insert into gh_ost_test values (null, 120);
|
||||||
|
insert into gh_ost_test values (null, 121);
|
||||||
|
insert into gh_ost_test values (null, 122);
|
||||||
|
insert into gh_ost_test values (null, 123);
|
||||||
|
insert into gh_ost_test values (null, 124);
|
||||||
|
insert into gh_ost_test values (null, 125);
|
||||||
|
insert into gh_ost_test values (null, 126);
|
||||||
|
insert into gh_ost_test values (null, 127);
|
||||||
|
insert into gh_ost_test values (null, 128);
|
||||||
|
insert into gh_ost_test values (null, 129);
|
||||||
|
|
||||||
|
drop event if exists gh_ost_test;
|
||||||
|
delimiter ;;
|
||||||
|
create event gh_ost_test
|
||||||
|
on schedule every 1 second
|
||||||
|
starts current_timestamp + interval 3 second
|
||||||
|
ends current_timestamp + interval 60 second
|
||||||
|
on completion not preserve
|
||||||
|
enable
|
||||||
|
do
|
||||||
|
begin
|
||||||
|
update gh_ost_test set id=-2 where id=21;
|
||||||
|
update gh_ost_test set id=55 where id=22;
|
||||||
|
update gh_ost_test set id=23 where id=23;
|
||||||
|
update gh_ost_test set i=5024 where id=24;
|
||||||
|
end ;;
|
25
localtests/gbk-charset/create.sql
Normal file
25
localtests/gbk-charset/create.sql
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
drop table if exists gh_ost_test;
|
||||||
|
create table gh_ost_test (
|
||||||
|
id int(11) NOT NULL AUTO_INCREMENT,
|
||||||
|
name varchar(512) DEFAULT NULL,
|
||||||
|
v varchar(255) DEFAULT NULL COMMENT '添加普通列测试',
|
||||||
|
PRIMARY KEY (id)
|
||||||
|
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=gbk;
|
||||||
|
|
||||||
|
insert into gh_ost_test values (null, 'gbk-test-initial', '添加普通列测试-添加普通列测试');
|
||||||
|
insert into gh_ost_test values (null, 'gbk-test-initial', '添加普通列测试-添加普通列测试');
|
||||||
|
|
||||||
|
drop event if exists gh_ost_test;
|
||||||
|
delimiter ;;
|
||||||
|
create event gh_ost_test
|
||||||
|
on schedule every 1 second
|
||||||
|
starts current_timestamp
|
||||||
|
ends current_timestamp + interval 60 second
|
||||||
|
on completion not preserve
|
||||||
|
enable
|
||||||
|
do
|
||||||
|
begin
|
||||||
|
insert into gh_ost_test (name) values ('gbk-test-default');
|
||||||
|
insert into gh_ost_test values (null, 'gbk-test', '添加普通列测试-添加普通列测试');
|
||||||
|
update gh_ost_test set v='添加普通列测试' where v='添加普通列测试-添加普通列测试' order by id desc limit 1;
|
||||||
|
end ;;
|
0
localtests/gbk-charset/extra_args
Normal file
0
localtests/gbk-charset/extra_args
Normal file
29
localtests/generated-columns-add57/create.sql
Normal file
29
localtests/generated-columns-add57/create.sql
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
drop table if exists gh_ost_test;
|
||||||
|
create table gh_ost_test (
|
||||||
|
id int auto_increment,
|
||||||
|
a int not null,
|
||||||
|
b int not null,
|
||||||
|
primary key(id)
|
||||||
|
) auto_increment=1;
|
||||||
|
|
||||||
|
drop event if exists gh_ost_test;
|
||||||
|
delimiter ;;
|
||||||
|
create event gh_ost_test
|
||||||
|
on schedule every 1 second
|
||||||
|
starts current_timestamp
|
||||||
|
ends current_timestamp + interval 60 second
|
||||||
|
on completion not preserve
|
||||||
|
enable
|
||||||
|
do
|
||||||
|
begin
|
||||||
|
insert into gh_ost_test (id, a, b) values (null, 2,3);
|
||||||
|
insert into gh_ost_test (id, a, b) values (null, 2,4);
|
||||||
|
insert into gh_ost_test (id, a, b) values (null, 2,5);
|
||||||
|
insert into gh_ost_test (id, a, b) values (null, 2,6);
|
||||||
|
insert into gh_ost_test (id, a, b) values (null, 2,7);
|
||||||
|
insert into gh_ost_test (id, a, b) values (null, 2,8);
|
||||||
|
insert into gh_ost_test (id, a, b) values (null, 2,9);
|
||||||
|
insert into gh_ost_test (id, a, b) values (null, 2,0);
|
||||||
|
insert into gh_ost_test (id, a, b) values (null, 2,1);
|
||||||
|
insert into gh_ost_test (id, a, b) values (null, 2,2);
|
||||||
|
end ;;
|
1
localtests/generated-columns-add57/extra_args
Normal file
1
localtests/generated-columns-add57/extra_args
Normal file
@ -0,0 +1 @@
|
|||||||
|
--alter="add column sum_ab int as (a + b) virtual not null"
|
1
localtests/generated-columns-add57/ghost_columns
Normal file
1
localtests/generated-columns-add57/ghost_columns
Normal file
@ -0,0 +1 @@
|
|||||||
|
id, a, b
|
1
localtests/generated-columns-add57/ignore_versions
Normal file
1
localtests/generated-columns-add57/ignore_versions
Normal file
@ -0,0 +1 @@
|
|||||||
|
(5.5|5.6)
|
1
localtests/generated-columns-add57/order_by
Normal file
1
localtests/generated-columns-add57/order_by
Normal file
@ -0,0 +1 @@
|
|||||||
|
id
|
1
localtests/generated-columns-add57/orig_columns
Normal file
1
localtests/generated-columns-add57/orig_columns
Normal file
@ -0,0 +1 @@
|
|||||||
|
id, a, b
|
30
localtests/generated-columns-rename57/create.sql
Normal file
30
localtests/generated-columns-rename57/create.sql
Normal file
@ -0,0 +1,30 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  a int not null,
  b int not null,
  sum_ab int as (a + b) virtual not null,
  primary key(id)
) auto_increment=1;

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
  on schedule every 1 second
  starts current_timestamp
  ends current_timestamp + interval 60 second
  on completion not preserve
  enable
  do
begin
  insert into gh_ost_test (id, a, b) values (null, 2,3);
  insert into gh_ost_test (id, a, b) values (null, 2,4);
  insert into gh_ost_test (id, a, b) values (null, 2,5);
  insert into gh_ost_test (id, a, b) values (null, 2,6);
  insert into gh_ost_test (id, a, b) values (null, 2,7);
  insert into gh_ost_test (id, a, b) values (null, 2,8);
  insert into gh_ost_test (id, a, b) values (null, 2,9);
  insert into gh_ost_test (id, a, b) values (null, 2,0);
  insert into gh_ost_test (id, a, b) values (null, 2,1);
  insert into gh_ost_test (id, a, b) values (null, 2,2);
end ;;
1
localtests/generated-columns-rename57/extra_args
Normal file
@ -0,0 +1 @@
--alter="change sum_ab total_ab int as (a + b) virtual not null" --approve-renamed-columns
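For reference, a minimal sketch (outside the test files) of the direct DDL this exercises. gh-ost detects a CHANGE clause as a possible column rename and refuses to proceed unless --approve-renamed-columns confirms the rename is intentional:

alter table gh_ost_test change sum_ab total_ab int as (a + b) virtual not null;
select total_ab from gh_ost_test;
-- same derived values as before, now under the new column name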
1
localtests/generated-columns-rename57/ignore_versions
Normal file
@ -0,0 +1 @@
(5.5|5.6)
30
localtests/generated-columns57/create.sql
Normal file
@ -0,0 +1,30 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  a int not null,
  b int not null,
  sum_ab int as (a + b) virtual not null,
  primary key(id)
) auto_increment=1;

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
  on schedule every 1 second
  starts current_timestamp
  ends current_timestamp + interval 60 second
  on completion not preserve
  enable
  do
begin
  insert into gh_ost_test (id, a, b) values (null, 2,3);
  insert into gh_ost_test (id, a, b) values (null, 2,4);
  insert into gh_ost_test (id, a, b) values (null, 2,5);
  insert into gh_ost_test (id, a, b) values (null, 2,6);
  insert into gh_ost_test (id, a, b) values (null, 2,7);
  insert into gh_ost_test (id, a, b) values (null, 2,8);
  insert into gh_ost_test (id, a, b) values (null, 2,9);
  insert into gh_ost_test (id, a, b) values (null, 2,0);
  insert into gh_ost_test (id, a, b) values (null, 2,1);
  insert into gh_ost_test (id, a, b) values (null, 2,2);
end ;;
1
localtests/generated-columns57/ignore_versions
Normal file
@ -0,0 +1 @@
(5.5|5.6)
21
localtests/geometry57/create.sql
Normal file
@ -0,0 +1,21 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  g geometry,
  primary key(id)
) auto_increment=1;

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
  on schedule every 1 second
  starts current_timestamp
  ends current_timestamp + interval 60 second
  on completion not preserve
  enable
  do
begin
  insert into gh_ost_test values (null, ST_GeomFromText('POINT(1 1)'));
  insert into gh_ost_test values (null, ST_GeomFromText('POINT(2 2)'));
  insert into gh_ost_test values (null, ST_GeomFromText('POINT(3 3)'));
end ;;
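As a quick sanity check one might run by hand (not part of the fixture), the inserted geometries can be inspected in readable form:

select id, ST_AsText(g) from gh_ost_test;
-- 1  POINT(1 1)
-- 2  POINT(2 2)
-- 3  POINT(3 3)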
1
localtests/geometry57/ignore_versions
Normal file
@ -0,0 +1 @@
(5.5|5.6)
1
localtests/json57/ignore_versions
Normal file
@ -0,0 +1 @@
(5.5|5.6)
@ -20,6 +20,7 @@ begin
 insert into gh_ost_test (id, i, j) values (null, 11, '"sometext"');
 insert into gh_ost_test (id, i, j) values (null, 13, '{"key":"val"}');
 insert into gh_ost_test (id, i, j) values (null, 17, '{"is-it": true, "count": 3, "elements": []}');
+insert into gh_ost_test (id, i, j) values (null, 19, '{"text":"Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu. In enim justo, rhoncus ut, imperdiet a, venenatis vitae, justo. Nullam dictum felis eu pede mollis pretium. Integer tincidunt. Cras dapibus. Vivamus elementum semper nisi. Aenean vulputate eleifend tellus. Aenean leo ligula, porttitor eu, consequat vitae, eleifend ac, enim. Aliquam lorem ante, dapibus in, viverra quis, feugiat a, tellus. Phasellus viverra nulla ut metus varius laoreet. Quisque rutrum. Aenean imperdiet. Etiam ultricies nisi vel augue. Curabitur ullamcorper ultricies nisi. Nam eget dui. Etiam rhoncus. Maecenas tempus, tellus eget condimentum rhoncus, sem quam semper libero, sit amet adipiscing sem neque sed ipsum. Nam quam nunc, blandit vel, luctus pulvinar, hendrerit id, lorem. Maecenas nec odio et ante tincidunt tempus. Donec vitae sapien ut libero venenatis faucibus. Nullam quis ante. Etiam sit amet orci eget eros faucibus tincidunt. Duis leo. Sed fringilla mauris sit amet nibh. Donec sodales sagittis magna. Sed consequat, leo eget bibendum sodales, augue velit cursus nunc, quis gravida magna mi a libero. Fusce vulputate eleifend sapien. Vestibulum purus quam, scelerisque ut, mollis sed, nonummy id, metus. Nullam accumsan lorem in dui. Cras ultricies mi eu turpis hendrerit fringilla. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; In ac dui quis mi consectetuer lacinia. Nam pretium turpis et arcu. Duis arcu tortor, suscipit eget, imperdiet nec, imperdiet iaculis, ipsum. Sed aliquam ultrices mauris. Integer ante arcu, accumsan a, consectetuer eget, posuere ut, mauris. Praesent adipiscing. Phasellus ullamcorper ipsum rutrum nunc. Nunc nonummy metus. Vestibulum volutpat pretium libero. Cras id dui. Aenean ut eros et nisl sagittis vestibulum. Nullam nulla eros, ultricies sit amet, nonummy id, imperdiet feugiat, pede. Sed lectus. Donec mollis hendrerit risus. Phasellus nec sem in justo pellentesque facilisis. Etiam imperdiet imperdiet orci. Nunc nec neque. Phasellus leo dolor, tempus non, auctor et, hendrerit quis, nisi. Curabitur ligula sapien, tincidunt non, euismod vitae, posuere imperdiet, leo. Maecenas malesuada. Praesent congue erat at massa. Sed cursus turpis vitae tortor. Donec posuere vulputate arcu. Phasellus accumsan cursus velit. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Sed aliquam, nisi quis porttitor congue, elit erat euismod orci, ac"}');

 update gh_ost_test set j = '{"updated": 11}', updated = 1 where i = 11 and updated = 0;
 update gh_ost_test set j = json_set(j, '$.count', 13, '$.id', id), updated = 1 where i = 13 and updated = 0;
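The json_set call in the update above both replaces existing paths and creates missing ones. A standalone sketch of that semantic:

select json_set('{"key":"val"}', '$.count', 13, '$.id', 7);
-- {"key": "val", "count": 13, "id": 7}
-- '$.count' and '$.id' are created; a path that already exists would be overwritten.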
1
localtests/json57dml/ignore_versions
Normal file
@ -0,0 +1 @@
(5.5|5.6)
13
localtests/keyword-column/create.sql
Normal file
@ -0,0 +1,13 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  i int not null,
  color varchar(32),
  primary key(id)
) auto_increment=1;

drop event if exists gh_ost_test;

insert into gh_ost_test values (null, 11, 'red');
insert into gh_ost_test values (null, 13, 'green');
insert into gh_ost_test values (null, 17, 'blue');
1
localtests/keyword-column/extra_args
Normal file
@ -0,0 +1 @@
--alter='add column `index` int unsigned' \
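index is a reserved word in MySQL, which is the point of this test: the column name must be backtick-quoted in every statement gh-ost builds. A minimal illustration:

alter table gh_ost_test add column `index` int unsigned;   -- valid: identifier is quoted
-- alter table gh_ost_test add column index int unsigned;  -- syntax error: reserved word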
1
localtests/keyword-column/ghost_columns
Normal file
@ -0,0 +1 @@
id, i, color
1
localtests/keyword-column/orig_columns
Normal file
@ -0,0 +1 @@
id, i, color
22
localtests/spatial57/create.sql
Normal file
@ -0,0 +1,22 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  g geometry,
  pt point,
  primary key(id)
) auto_increment=1;

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
  on schedule every 1 second
  starts current_timestamp
  ends current_timestamp + interval 60 second
  on completion not preserve
  enable
  do
begin
  insert into gh_ost_test values (null, ST_GeomFromText('POINT(1 1)'), POINT(10,10));
  insert into gh_ost_test values (null, ST_GeomFromText('POINT(2 2)'), POINT(20,20));
  insert into gh_ost_test values (null, ST_GeomFromText('POINT(3 3)'), POINT(30,30));
end ;;
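A hand-run sketch, not part of the fixture, of how the point column could be inspected; ST_X and ST_Y extract coordinates in MySQL 5.7:

select id, ST_AsText(pt), ST_X(pt), ST_Y(pt) from gh_ost_test;
-- e.g. 1  POINT(10 10)  10  10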
1
localtests/spatial57/ignore_versions
Normal file
@ -0,0 +1 @@
(5.5|5.6)
1
localtests/swap-pk-uk/ignore_versions
Normal file
@ -0,0 +1 @@
(5.5)
@ -1,8 +1,8 @@
 drop table if exists gh_ost_test;
 create table gh_ost_test (
-  id bigint,
+  id bigint not null,
   i int not null,
-  ts timestamp(6),
+  ts timestamp(6) not null,
   unique key id_uidx(id),
   unique key its_uidx(i, ts)
 ) ;
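The not null additions matter because MySQL unique indexes do not treat NULLs as duplicates, so a nullable unique key cannot serve as a strict row identifier when gh-ost iterates the table in chunks. An illustrative snippet (hypothetical table t, not part of the tests):

create table t (id bigint, unique key id_uidx(id));
insert into t values (null), (null);
-- both rows are accepted: NULL never collides in a unique index,
-- so (id) only identifies rows uniquely once it is declared not null.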
1
localtests/swap-uk-uk/ignore_versions
Normal file
@ -0,0 +1 @@
(5.5)
@ -9,15 +9,31 @@
 tests_path=$(dirname $0)
 test_logfile=/tmp/gh-ost-test.log
-ghost_binary=/tmp/gh-ost-test
+default_ghost_binary=/tmp/gh-ost-test
+ghost_binary=""
 exec_command_file=/tmp/gh-ost-test.bash
-test_pattern="${1:-.}"
+orig_content_output_file=/tmp/gh-ost-test.orig.content.csv
+ghost_content_output_file=/tmp/gh-ost-test.ghost.content.csv
+throttle_flag_file=/tmp/gh-ost-test.ghost.throttle.flag

 master_host=
 master_port=
 replica_host=
 replica_port=
+original_sql_mode=
+
+OPTIND=1
+while getopts "b:" OPTION
+do
+  case $OPTION in
+  b)
+    ghost_binary="$OPTARG"
+    ;;
+  esac
+done
+shift $((OPTIND-1))
+
+test_pattern="${1:-.}"

 verify_master_and_replica() {
   if [ "$(gh-ost-test-mysql-master -e "select 1" -ss)" != "1" ] ; then
@ -25,6 +41,18 @@ verify_master_and_replica() {
     exit 1
   fi
   read master_host master_port <<< $(gh-ost-test-mysql-master -e "select @@hostname, @@port" -ss)
+  [ "$master_host" == "$(hostname)" ] && master_host="127.0.0.1"
+  echo "# master verified at $master_host:$master_port"
+  if ! gh-ost-test-mysql-master -e "set global event_scheduler := 1" ; then
+    echo "Cannot enable event_scheduler on master"
+    exit 1
+  fi
+  original_sql_mode="$(gh-ost-test-mysql-master -e "select @@global.sql_mode" -s -s)"
+  echo "sql_mode on master is ${original_sql_mode}"
+
+  echo "Gracefully sleeping for 3 seconds while replica is setting up..."
+  sleep 3
+
   if [ "$(gh-ost-test-mysql-replica -e "select 1" -ss)" != "1" ] ; then
     echo "Cannot verify gh-ost-test-mysql-replica"
     exit 1
@ -34,6 +62,8 @@ verify_master_and_replica() {
     exit 1
   fi
   read replica_host replica_port <<< $(gh-ost-test-mysql-replica -e "select @@hostname, @@port" -ss)
+  [ "$replica_host" == "$(hostname)" ] && replica_host="127.0.0.1"
+  echo "# replica verified at $replica_host:$replica_port"
 }

 exec_cmd() {
@ -65,11 +95,26 @@ test_single() {
   local test_name
   test_name="$1"
+
+  if [ -f $tests_path/$test_name/ignore_versions ] ; then
+    ignore_versions=$(cat $tests_path/$test_name/ignore_versions)
+    mysql_version=$(gh-ost-test-mysql-master -s -s -e "select @@version")
+    if echo "$mysql_version" | egrep -q "^${ignore_versions}" ; then
+      echo -n "Skipping: $test_name"
+      return 0
+    fi
+  fi
+
   echo -n "Testing: $test_name"
   echo_dot
   start_replication
   echo_dot
+
+  if [ -f $tests_path/$test_name/sql_mode ] ; then
+    gh-ost-test-mysql-master --default-character-set=utf8mb4 test -e "set @@global.sql_mode='$(cat $tests_path/$test_name/sql_mode)'"
+    gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "set @@global.sql_mode='$(cat $tests_path/$test_name/sql_mode)'"
+  fi
+
   gh-ost-test-mysql-master --default-character-set=utf8mb4 test < $tests_path/$test_name/create.sql

   extra_args=""
@ -97,6 +142,7 @@ test_single() {
     --password=gh-ost \
     --host=$replica_host \
     --port=$replica_port \
+    --assume-master-host=${master_host}:${master_port} \
     --database=test \
     --table=gh_ost_test \
     --alter='engine=innodb' \
@ -105,11 +151,11 @@ test_single() {
     --initially-drop-old-table \
     --initially-drop-ghost-table \
     --throttle-query='select timestampdiff(second, min(last_update), now()) < 5 from _gh_ost_test_ghc' \
+    --throttle-flag-file=$throttle_flag_file \
     --serve-socket-file=/tmp/gh-ost.test.sock \
     --initially-drop-socket-file \
-    --postpone-cut-over-flag-file=/tmp/gh-ost.test.postpone.flag \
     --test-on-replica \
-    --default-retries=1 \
+    --default-retries=3 \
     --chunk-size=10 \
     --verbose \
     --debug \
@ -122,6 +168,11 @@ test_single() {
   execution_result=$?
+
+  if [ -f $tests_path/$test_name/sql_mode ] ; then
+    gh-ost-test-mysql-master --default-character-set=utf8mb4 test -e "set @@global.sql_mode='${original_sql_mode}'"
+    gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "set @@global.sql_mode='${original_sql_mode}'"
+  fi
+
   if [ -f $tests_path/$test_name/destroy.sql ] ; then
     gh-ost-test-mysql-master --default-character-set=utf8mb4 test < $tests_path/$test_name/destroy.sql
   fi
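The snapshot/restore pair keeps one test's sql_mode override from leaking into the next. The sequence, sketched as plain SQL with illustrative values:

select @@global.sql_mode;                       -- snapshot, e.g. 'ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES'
set @@global.sql_mode = 'ALLOW_INVALID_DATES';  -- per-test override from the sql_mode file
set @@global.sql_mode = 'ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES';  -- restored after the test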
@ -148,27 +199,41 @@ test_single() {
   if [ $execution_result -ne 0 ] ; then
     echo
-    echo "ERROR $test_name execution failure. cat $test_logfile"
+    echo "ERROR $test_name execution failure. cat $test_logfile:"
+    cat $test_logfile
     return 1
   fi

   echo_dot
-  orig_checksum=$(gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${orig_columns} from gh_ost_test ${order_by}" -ss | md5sum)
-  ghost_checksum=$(gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${ghost_columns} from _gh_ost_test_gho ${order_by}" -ss | md5sum)
+  gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${orig_columns} from gh_ost_test ${order_by}" -ss > $orig_content_output_file
+  gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${ghost_columns} from _gh_ost_test_gho ${order_by}" -ss > $ghost_content_output_file
+  orig_checksum=$(cat $orig_content_output_file | md5sum)
+  ghost_checksum=$(cat $ghost_content_output_file | md5sum)

   if [ "$orig_checksum" != "$ghost_checksum" ] ; then
     echo "ERROR $test_name: checksum mismatch"
     echo "---"
-    gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${orig_columns} from gh_ost_test" -ss
-    echo "---"
-    gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${ghost_columns} from _gh_ost_test_gho" -ss
+    diff $orig_content_output_file $ghost_content_output_file
+    echo "diff $orig_content_output_file $ghost_content_output_file"
     return 1
   fi
 }

 build_binary() {
   echo "Building"
+  rm -f $default_ghost_binary
+  [ "$ghost_binary" == "" ] && ghost_binary="$default_ghost_binary"
+  if [ -f "$ghost_binary" ] ; then
+    echo "Using binary: $ghost_binary"
+    return 0
+  fi
   go build -o $ghost_binary go/cmd/gh-ost/main.go
+  if [ $? -ne 0 ] ; then
+    echo "Build failure"
+    exit 1
+  fi
 }

 test_all() {
1
localtests/timestamp-to-datetime/ignore_versions
Normal file
@ -0,0 +1 @@
(5.5)
1
localtests/timestamp/ignore_versions
Normal file
@ -0,0 +1 @@
(5.5)
1
localtests/tz-datetime-ts/ignore_versions
Normal file
@ -0,0 +1 @@
(5.5)
1
localtests/tz/ignore_versions
Normal file
@ -0,0 +1 @@
(5.5)
Some files were not shown because too many files have changed in this diff