Merge branch 'master' into patch-1

Tim Vaillancourt 2022-08-18 22:40:48 +02:00 committed by GitHub
commit 89ae978fba
1028 changed files with 197058 additions and 186165 deletions

View File

@ -10,10 +10,10 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Set up Go 1.14
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.14
go-version: 1.17
- name: Build
run: script/cibuild

.github/workflows/codeql.yml vendored Normal file (+25 lines)
View File

@ -0,0 +1,25 @@
name: "CodeQL analysis"
on:
push:
pull_request:
schedule:
- cron: '0 0 * * 0'
jobs:
codeql:
strategy:
fail-fast: false
runs-on: ubuntu-latest # windows-latest and ubuntu-latest are supported. macos-latest is not supported at this time.
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1

.github/workflows/golangci-lint.yml vendored Normal file (+23 lines)
View File

@ -0,0 +1,23 @@
name: golangci-lint
on:
push:
branches:
- master
pull_request:
permissions:
contents: read
# Optional: allow read access to pull request. Use with `only-new-issues` option.
# pull-requests: read
jobs:
golangci:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v3
with:
go-version: 1.17
- uses: actions/checkout@v3
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.46.2

View File

@ -6,14 +6,19 @@ jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
version: [mysql-5.7.25,mysql-8.0.16]
steps:
- uses: actions/checkout@v2
- name: Set up Go 1.14
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.14
go-version: 1.17
- name: migration tests
env:
TEST_MYSQL_VERSION: ${{ matrix.version }}
run: script/cibuild-gh-ost-replica-tests

.golangci.yml Normal file (+24 lines)
View File

@ -0,0 +1,24 @@
run:
timeout: 5m
linters:
disable:
- errcheck
enable:
- contextcheck
- durationcheck
- errname
- execinquery
- gofmt
- ifshort
- misspell
- nilerr
- noctx
- nolintlint
- nosprintfhostport
- prealloc
- rowserrcheck
- sqlclosecheck
- unconvert
- unused
- wastedassign
- whitespace

View File

@ -1,6 +1,4 @@
#
FROM golang:1.14.7
FROM golang:1.17
RUN apt-get update
RUN apt-get install -y ruby ruby-dev rubygems build-essential

View File

@ -1,4 +1,4 @@
FROM golang:1.14.7
FROM golang:1.17
LABEL maintainer="github@github.com"
RUN apt-get update

View File

@ -1,6 +1,6 @@
# gh-ost
[![build status](https://travis-ci.org/github/gh-ost.svg)](https://travis-ci.org/github/gh-ost) [![downloads](https://img.shields.io/github/downloads/github/gh-ost/total.svg)](https://github.com/github/gh-ost/releases) [![release](https://img.shields.io/github/release/github/gh-ost.svg)](https://github.com/github/gh-ost/releases)
[![ci](https://github.com/github/gh-ost/actions/workflows/ci.yml/badge.svg)](https://github.com/github/gh-ost/actions/workflows/ci.yml) [![replica-tests](https://github.com/github/gh-ost/actions/workflows/replica-tests.yml/badge.svg)](https://github.com/github/gh-ost/actions/workflows/replica-tests.yml) [![downloads](https://img.shields.io/github/downloads/github/gh-ost/total.svg)](https://github.com/github/gh-ost/releases) [![release](https://img.shields.io/github/release/github/gh-ost.svg)](https://github.com/github/gh-ost/releases)
#### GitHub's online schema migration for MySQL <img src="doc/images/gh-ost-logo-light-160.png" align="right">
@ -65,6 +65,7 @@ Also see:
- [the fine print](doc/the-fine-print.md)
- [Community questions](https://github.com/github/gh-ost/issues?q=label%3Aquestion)
- [Using `gh-ost` on AWS RDS](doc/rds.md)
- [Using `gh-ost` on Azure Database for MySQL](doc/azure.md)
## What's in a name?
@ -94,7 +95,7 @@ Please see [Coding gh-ost](doc/coding-ghost.md) for a guide to getting started d
[Download latest release here](https://github.com/github/gh-ost/releases/latest)
`gh-ost` is a Go project; it is built with Go `1.14` and above. To build on your own, use either:
`gh-ost` is a Go project; it is built with Go `1.15` and above. To build on your own, use either:
- [script/build](https://github.com/github/gh-ost/blob/master/script/build) - this is the same build script used by CI and hence the authoritative one; the artifact is the `./bin/gh-ost` binary.
- [build.sh](https://github.com/github/gh-ost/blob/master/build.sh) for building `tar.gz` artifacts in `/tmp/gh-ost`

View File

@ -1 +1 @@
1.1.0
1.1.2

View File

@ -18,30 +18,32 @@ function build {
GOOS=$3
GOARCH=$4
if ! go version | egrep -q 'go(1\.1[456])' ; then
echo "go version must be 1.14 or above"
if ! go version | egrep -q 'go1\.(1[5-9]|[2-9][0-9]{1})' ; then
echo "go version must be 1.15 or above"
exit 1
fi
echo "Building ${osname} binary"
echo "Building ${osname}-${GOARCH} binary"
export GOOS
export GOARCH
go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
if [ $? -ne 0 ]; then
echo "Build failed for ${osname}"
echo "Build failed for ${osname} ${GOARCH}."
exit 1
fi
(cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${timestamp}.tar.gz $target)
(cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${GOARCH}-${timestamp}.tar.gz $target)
if [ "$GOOS" == "linux" ] ; then
# build RPM and deb for Linux, x86-64 only
if [ "$GOOS" == "linux" ] && [ "$GOARCH" == "amd64" ] ; then
echo "Creating Distro full packages"
builddir=$(setuptree)
cp $buildpath/$target $builddir/gh-ost/usr/bin
cd $buildpath
fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm .
fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm --rpm-rpmbuild-define "_build_id_links none" --rpm-os linux .
fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
cd -
fi
}
@ -62,10 +64,15 @@ main() {
mkdir -p ${buildpath}
rm -rf ${buildpath:?}/*
build GNU/Linux linux linux amd64
# build macOS osx darwin amd64
build GNU/Linux linux linux arm64
build macOS osx darwin amd64
build macOS osx darwin arm64
echo "Binaries found in:"
find $buildpath/gh-ost* -type f -maxdepth 1
echo "Checksums:"
(cd $buildpath && shasum -a256 gh-ost* 2>/dev/null)
}
main "$@"

doc/azure.md Normal file (+26 lines)
View File

@ -0,0 +1,26 @@
`gh-ost` has been updated to work with Azure Database for MySQL. However, since GitHub does not use it, this documentation is community-driven, so if you find a bug please [open an issue][new_issue]!
# Azure Database for MySQL
## Limitations
- `gh-ost` runs should be set up to use [`--assume-rbr`][assume_rbr_docs] and `binlog_row_image=FULL`.
- Azure Database for MySQL does not use the same user name suffix for master and replica, so the master host, user and password need to be specified explicitly.
## Steps
1. Change the replica server's `binlog_row_image` from `MINIMAL` to `FULL`. See the [guide](https://docs.microsoft.com/en-us/azure/mysql/howto-server-parameters) in the Azure documentation.
2. Always run `gh-ost` with these five additional parameters:
```{bash}
gh-ost \
--azure \
--assume-master-host=master-server-dns-name \
--master-user="master-user-name" \
--master-password="master-password" \
--assume-rbr \
[-- other parameters you need]
```
[new_issue]: https://github.com/github/gh-ost/issues/new
[assume_rbr_docs]: https://github.com/github/gh-ost/blob/master/doc/command-line-flags.md#assume-rbr
[migrate_test_on_replica_docs]: https://github.com/github/gh-ost/blob/master/doc/cheatsheet.md#c-migratetest-on-replica

View File

@ -5,7 +5,7 @@
Getting started with gh-ost development is simple!
- First obtain the repository with `git clone` or `go get`.
- From inside of the repository run `script/cibuild`
- From inside of the repository run `script/cibuild`.
- This will bootstrap the environment if needed, format the code, build the code, and then run the unit test.
## CI build workflow
@ -14,6 +14,12 @@ Getting started with gh-ost development is simple!
If additional steps are needed, please add them into this workflow so that the workflow remains simple.
## `golang-ci` linter
To enforce best practices, Pull Requests are automatically linted by [`golang-ci`](https://golangci-lint.run/). The linter config is located at [`.golangci.yml`](https://github.com/github/gh-ost/blob/master/.golangci.yml) and the `golangci-lint` GitHub Action is located at [`.github/workflows/golangci-lint.yml`](https://github.com/github/gh-ost/blob/master/.github/workflows/golangci-lint.yml).
To run the `golang-ci` linters locally _(recommended before push)_, use `script/lint`.
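As a rough local-run sketch (`script/lint` is the canonical entrypoint; invoking the pinned `golangci-lint` binary directly is an assumption about your local setup):

```bash
# Preferred: the repo's wrapper script, running the same checks as CI
script/lint

# Alternative sketch: run golangci-lint directly against the repo config
golangci-lint run --config=.golangci.yml ./...
```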
## Notes:
Currently, `script/ensure-go-installed` will install `go` for Mac OS X and Linux. We welcome PRs to add other platforms.

View File

@ -6,6 +6,14 @@ A more in-depth discussion of various `gh-ost` command line flags: implementatio
Add this flag when executing on Aliyun RDS.
### allow-zero-in-date
Allows the user to make schema changes that include a zero date or zero in date (e.g. adding a `datetime default '0000-00-00 00:00:00'` column), even if global `sql_mode` on MySQL has `NO_ZERO_IN_DATE,NO_ZERO_DATE`.
### azure
Add this flag when executing on Azure Database for MySQL.
### allow-master-master
See [`--assume-master-host`](#assume-master-host).
@ -18,7 +26,7 @@ If, for some reason, you do not wish `gh-ost` to connect to a replica, you may c
### approve-renamed-columns
When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise the new structure may also look like some column was dropped and another was added.
When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise, the new structure may also look like some column was dropped and another was added.
`gh-ost` will print out what it thinks the _rename_ implied, but will not issue the migration unless you provide with `--approve-renamed-columns`.
@ -28,7 +36,7 @@ If you think `gh-ost` is mistaken and that there's actually no _rename_ involved
`gh-ost` infers the identity of the master server by crawling up the replication topology. You may explicitly tell `gh-ost` the identity of the master host via `--assume-master-host=the.master.com`. This is useful in:
- _master-master_ topologies (together with [`--allow-master-master`](#allow-master-master)), where `gh-ost` can arbitrarily pick one of the co-masters and you prefer that it picks a specific one
- _master-master_ topologies (together with [`--allow-master-master`](#allow-master-master)), where `gh-ost` can arbitrarily pick one of the co-masters, and you prefer that it picks a specific one
- _tungsten replicator_ topologies (together with [`--tungsten`](#tungsten)), where `gh-ost` is unable to crawl and detect the master
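For example, a minimal sketch of pinning `gh-ost` to a chosen co-master (the hostname is illustrative):

```bash
# Master-master topology: explicitly pick the co-master gh-ost should treat as master
gh-ost \
  --allow-master-master \
  --assume-master-host=master1.example.com \
  [.. other options ..]
```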
### assume-rbr
@ -57,7 +65,13 @@ Comma delimited status-name=threshold, same format as [`--max-load`](#max-load).
`--critical-load` defines a threshold that, when met, causes `gh-ost` to panic and bail out. The default behavior is to bail out immediately when meeting this threshold.
This may sometimes lead to migrations bailing out on a very short spike, that, while in itself is impacting production and is worth investigating, isn't reason enough to kill a 10 hour migration.
This may sometimes lead to migrations bailing out on a very short spike that, while impacting production and worth investigating in itself, isn't reason enough to kill a 10-hour migration.
### critical-load-hibernate-seconds
When `--critical-load-hibernate-seconds` is non-zero (e.g. `--critical-load-hibernate-seconds=300`), `critical-load` does not panic and bail out; instead, `gh-ost` goes into hibernation for the specified duration. It will not read/write anything from/to any server during this time. Execution then continues upon waking from hibernation.
If `critical-load` is met again, `gh-ost` will repeat this cycle, and never panic and bail out.
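A minimal sketch combining the two flags (threshold and duration are illustrative):

```bash
# Panic threshold of 1000 running threads, but hibernate for 5 minutes
# instead of bailing out whenever the threshold is met
gh-ost \
  --critical-load=Threads_running=1000 \
  --critical-load-hibernate-seconds=300 \
  [.. other options ..]
```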
### critical-load-interval-millis
@ -94,7 +108,7 @@ Noteworthy is that setting `--dml-batch-size` to higher value _does not_ mean `g
### exact-rowcount
A `gh-ost` execution need to copy whatever rows you have in your existing table onto the ghost table. This can, and often be, a large number. Exactly what that number is?
A `gh-ost` execution needs to copy whatever rows you have in your existing table onto the ghost table. This can, and often will be, a large number. What exactly is that number?
`gh-ost` initially estimates the number of rows in your table by issuing an `explain select * from your_table`. This will use statistics on your table and return with a rough estimate. How rough? It might go as low as half or as high as double the actual number of rows in your table. This is the same method as used in [`pt-online-schema-change`](https://www.percona.com/doc/percona-toolkit/2.2/pt-online-schema-change.html).
`gh-ost` also supports the `--exact-rowcount` flag. When this flag is given, two things happen:
@ -131,6 +145,10 @@ Add this flag when executing on a 1st generation Google Cloud Platform (GCP).
Default 100. See [`subsecond-lag`](subsecond-lag.md) for details.
### hooks-status-interval
Defaults to 60 seconds. Configures how often the `gh-ost-on-status` hook is called; see [`hooks`](hooks.md) for full details on how to use hooks.
### initially-drop-ghost-table
`gh-ost` maintains two tables while migrating: the _ghost_ table (which is synced from your original table and finally replaces it) and a changelog table, which is used internally for bookkeeping. By default, it panics and aborts if it sees those tables upon startup. Provide `--initially-drop-ghost-table` and `--initially-drop-old-table` to let `gh-ost` know it's OK to drop them beforehand.
@ -177,6 +195,9 @@ Optionally involve the process ID, for example: `--replica-server-id=$((10000000
It's on you to choose a number that does not collide with another `gh-ost` or another running replica.
See also: [`concurrent-migrations`](cheatsheet.md#concurrent-migrations) on the cheatsheet.
### serve-socket-file
Defaults to an auto-determined file whose name is advertised upon startup. Defines the Unix socket file to serve on.
### skip-foreign-key-checks
By default `gh-ost` verifies no foreign keys exist on the migrated table. On servers with a large number of tables this check can take a long time. If you're absolutely certain no foreign keys exist (the table neither references other tables nor is referenced by them) and wish to save the check time, provide `--skip-foreign-key-checks`.
@ -223,7 +244,15 @@ Provide a comma-delimited list of replicas; `gh-ost` will throttle when any of
### throttle-http
Provide a HTTP endpoint; `gh-ost` will issue `HEAD` requests on given URL and throttle whenever response status code is not `200`. The URL can be queried and updated dynamically via [interactive commands](interactive-commands.md). Empty URL disables the HTTP check.
Provide an HTTP endpoint; `gh-ost` will issue `HEAD` requests on the given URL and throttle whenever the response status code is not `200`. The URL can be queried and updated dynamically via [interactive commands](interactive-commands.md). An empty URL disables the HTTP check.
### throttle-http-interval-millis
Defaults to 100. Configures the HTTP throttle check interval in milliseconds.
### throttle-http-timeout-millis
Defaults to 1000 (1 second). Configures the HTTP throttler check timeout in milliseconds.
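A sketch combining the three HTTP throttle flags (the endpoint URL is illustrative):

```bash
# Throttle whenever the endpoint returns a non-200 status;
# check every 500ms, timing out each check after 2 seconds
gh-ost \
  --throttle-http="http://throttler.example.com/check" \
  --throttle-http-interval-millis=500 \
  --throttle-http-timeout-millis=2000 \
  [.. other options ..]
```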
### timestamp-old-table

View File

@ -66,7 +66,9 @@ The following variables are available on all hooks:
- `GH_OST_ESTIMATED_ROWS` - estimated total rows in table
- `GH_OST_COPIED_ROWS` - number of rows copied by `gh-ost`
- `GH_OST_INSPECTED_LAG` - lag in seconds (floating point) of inspected server
- `GH_OST_HEARTBEAT_LAG` - lag in seconds (floating point) of heartbeat
- `GH_OST_PROGRESS` - progress pct ([0..100], floating point) of migration
- `GH_OST_ETA_SECONDS` - estimated duration until migration finishes in seconds
- `GH_OST_MIGRATED_HOST`
- `GH_OST_INSPECTED_HOST`
- `GH_OST_EXECUTING_HOST`
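As an illustration, a minimal `gh-ost-on-status` hook using some of the variables above (the log path is illustrative):

```bash
#!/bin/bash
# Hypothetical gh-ost-on-status hook: append progress, ETA and heartbeat lag to a log
echo "$(date +%FT%T) host=${GH_OST_MIGRATED_HOST} progress=${GH_OST_PROGRESS}% eta=${GH_OST_ETA_SECONDS}s heartbeat_lag=${GH_OST_HEARTBEAT_LAG}s" >> /tmp/gh-ost-status.log
```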

View File

@ -18,6 +18,8 @@ Both interfaces may serve at the same time. Both respond to simple text commands,
- `status`: returns a detailed status summary of migration progress and configuration
- `sup`: returns a brief status summary of migration progress
- `coordinates`: returns recent (though not exactly up to date) binary log coordinates of the inspected server
- `applier`: returns the hostname of the applier
- `inspector`: returns the hostname of the inspector
- `chunk-size=<newsize>`: modify the `chunk-size`; applies on next running copy-iteration
- `dml-batch-size=<newsize>`: modify the `dml-batch-size`; applies on next applying of binary log events
- `max-lag-millis=<max-lag>`: modify the maximum replication lag threshold (milliseconds, minimum value is `100`, i.e. `0.1` second)
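For example, using the Unix socket interface (the socket path is illustrative):

```bash
# Fetch a brief status summary from a running migration
echo sup | nc -U /tmp/gh-ost.test.sample.sock

# Raise the copy chunk size; applies on the next copy-iteration
echo "chunk-size=2000" | nc -U /tmp/gh-ost.test.sample.sock
```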

View File

@ -2,6 +2,8 @@
### Requirements
- `gh-ost` currently requires MySQL versions 5.7 and greater.
- You will need to have one server serving Row Based Replication (RBR) format binary logs. Right now `FULL` row image is supported. `MINIMAL` is to be supported in the near future. `gh-ost` prefers to work with replicas. You may [still have your master configured with Statement Based Replication](migrating-with-sbr.md) (SBR).
- If you are using a replica, the table must have an identical schema between the master and replica.
@ -18,6 +20,8 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
- Switching your `binlog_format` to `ROW`, in the case where it is _not_ `ROW` and you explicitly specified `--switch-to-rbr`
- If your replication is already in RBR (`binlog_format=ROW`) you can specify `--assume-rbr` to avoid the `STOP SLAVE/START SLAVE` operations, hence no need for `SUPER`.
- `gh-ost` uses the `REPEATABLE_READ` transaction isolation level for all MySQL connections, regardless of the server default.
- Running `--test-on-replica`: before the cut-over phase, `gh-ost` stops replication so that you can compare the two tables and satisfy yourself that the migration is sound.
### Limitations
@ -41,6 +45,7 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
- Amazon RDS works, but has its own [limitations](rds.md).
- Google Cloud SQL works, `--gcp` flag required.
- Aliyun RDS works, `--aliyun-rds` flag required.
- Azure Database for MySQL works, `--azure` flag required, and a detailed document about it is available: [azure.md](azure.md)
- Multisource is not supported when migrating via replica. It _should_ work (but was never tested) when connecting directly to the master (`--allow-on-master`)

View File

@ -38,7 +38,7 @@ Note that you may dynamically change both `--max-lag-millis` and the `throttle-c
`--max-load='Threads_running=100,Threads_connected=500'`
Metrics must be valid, numeric [status variables](http://dev.mysql.com/doc/refman/5.6/en/server-status-variables.html)
Metrics must be valid, numeric [status variables](https://dev.mysql.com/doc/refman/5.7/en/server-status-variables.html)
#### Throttle query
@ -97,7 +97,7 @@ Copy: 0/2915 0.0%; Applied: 0; Backlog: 0/100; Elapsed: 42s(copy), 42s(total); s
Throttling time is limited by the availability of the binary logs. When throttling begins, `gh-ost` suspends reading the binary logs, and expects to resume reading from the same binary log where it paused.
Your availability of binary logs is typically determined by the [expire_logs_days](https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_expire_logs_days) variable. If you have `expire_logs_days = 10` (or check `select @@global.expire_logs_days`), then you should be able to throttle for up to `10` days.
Your availability of binary logs is typically determined by the [expire_logs_days](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_expire_logs_days) variable. If you have `expire_logs_days = 10` (or check `select @@global.expire_logs_days`), then you should be able to throttle for up to `10` days.
Having said that, throttling for so long is far-fetched, in that the `gh-ost` process itself must be kept alive during that time; and the amount of binary logs to process once it resumes will potentially take days to replay.

View File

@ -112,7 +112,7 @@ It is also interesting to observe that `gh-ost` is the only application writing
When `gh-ost` pauses (throttles), it issues no writes on the ghost table. Because there are no triggers, write workload is decoupled from the `gh-ost` write workload. And because we're using an asynchronous approach, the algorithm already handles a time difference between a master write time and the ghost apply time. A difference of a few microseconds is no different from a difference of minutes or hours.
When `gh-ost` [throttles](throttle.md), either by replication lag, `max-load` setting or and explicit [interactive user command](interactive-commands.md), the master is back to normal. It sees no more writes on the ghost table.
When `gh-ost` [throttles](throttle.md), either by replication lag, `max-load` setting or an explicit [interactive user command](interactive-commands.md), the master is back to normal. It sees no more writes on the ghost table.
An exception is the ongoing heartbeat writes onto the changelog table, which we consider to be negligible.
#### Testability

View File

@ -7,7 +7,7 @@ Existing MySQL schema migration tools:
- [LHM](https://github.com/soundcloud/lhm)
- [oak-online-alter-table](https://github.com/shlomi-noach/openarkkit)
are all using [triggers](http://dev.mysql.com/doc/refman/5.6/en/triggers.html) to propagate live changes on your table onto a ghost/shadow table that is slowly being synchronized. The tools not all work the same: while most use a synchronous approach (all changes applied on the ghost table), the Facebook tool uses an asynchronous approach (changes are appended to a changelog table, later reviewed and applied on ghost table).
are all using [triggers](https://dev.mysql.com/doc/refman/5.7/en/triggers.html) to propagate live changes on your table onto a ghost/shadow table that is slowly being synchronized. The tools do not all work the same: while most use a synchronous approach (all changes applied on the ghost table), the Facebook tool uses an asynchronous approach (changes are appended to a changelog table, later reviewed and applied on the ghost table).
Use of triggers simplifies a lot of the flow in doing a live table migration, but also poses some limitations or difficulties. Here are reasons why we choose to [design a triggerless solution](triggerless-design.md) to schema migrations.

go.mod Normal file (+27 lines)
View File

@ -0,0 +1,27 @@
module github.com/github/gh-ost
go 1.17
require (
github.com/go-ini/ini v1.62.0
github.com/go-mysql-org/go-mysql v1.3.0
github.com/go-sql-driver/mysql v1.6.0
github.com/openark/golib v0.0.0-20210531070646-355f37940af8
github.com/satori/go.uuid v1.2.0
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7
golang.org/x/text v0.3.6
)
require (
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 // indirect
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect
github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 // indirect
github.com/smartystreets/goconvey v1.6.4 // indirect
go.uber.org/atomic v1.7.0 // indirect
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/ini.v1 v1.62.0 // indirect
)

go.sum Normal file (+136 lines)
View File

@ -0,0 +1,136 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
github.com/cznic/parser v0.0.0-20160622100904-31edd927e5b1/go.mod h1:2B43mz36vGZNZEwkWi8ayRSSUXLfjL8OkbzwW4NcPMM=
github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
github.com/cznic/y v0.0.0-20170802143616-045f81c6662a/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-ini/ini v1.62.0 h1:7VJT/ZXjzqSrvtraFp4ONq80hTcRQth1c9ZnQ3uNQvU=
github.com/go-ini/ini v1.62.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-mysql-org/go-mysql v1.3.0 h1:lpNqkwdPzIrYSZGdqt8HIgAXZaK6VxBNfr8f7Z4FgGg=
github.com/go-mysql-org/go-mysql v1.3.0/go.mod h1:3lFZKf7l95Qo70+3XB2WpiSf9wu2s3na3geLMaIIrqQ=
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/openark/golib v0.0.0-20210531070646-355f37940af8 h1:9ciIHNuyFqRWi9NpMNw9sVLB6z1ItpP5ZhTY9Q1xVu4=
github.com/openark/golib v0.0.0-20210531070646-355f37940af8/go.mod h1:1jj8x1eDVZxgc/Z4VyamX4qTbAdHPUQA6NeVtCd8Sl8=
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pingcap/errors v0.11.5-0.20201029093017-5a7df2af2ac7/go.mod h1:G7x87le1poQzLB/TqvTJI2ILrSgobnq4Ut7luOwvfvI=
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 h1:LllgC9eGfqzkfubMgjKIDyZYaa609nNWAyNZtpy2B3M=
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3/go.mod h1:G7x87le1poQzLB/TqvTJI2ILrSgobnq4Ut7luOwvfvI=
github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
github.com/pingcap/parser v0.0.0-20210415081931-48e7f467fd74/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 h1:oI+RNwuC9jF2g2lP0u0cVEEZrc/AYBCuFdvwrLWM/6Q=
github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07/go.mod h1:yFdBgwXP24JziuRl2NMUahT7nGLNOKi1SIiFxMttVD4=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201125231158-b5590deeca9b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -15,14 +15,13 @@ import (
"sync/atomic"
"time"
"github.com/satori/go.uuid"
uuid "github.com/satori/go.uuid"
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
"github.com/openark/golib/log"
"gopkg.in/gcfg.v1"
gcfgscanner "gopkg.in/gcfg.v1/scanner"
"github.com/go-ini/ini"
)
// RowsEstimateMethod is the type of row number estimation
@ -52,6 +51,7 @@ const (
const (
HTTPStatusOK = 200
MaxEventsBatchSize = 1000
ETAUnknown = math.MinInt64
)
var (
@ -82,6 +82,8 @@ type MigrationContext struct {
AlterStatement string
AlterStatementOptions string // anything following the 'ALTER TABLE [schema.]table' from AlterStatement
countMutex sync.Mutex
countTableRowsCancelFunc func()
CountTableRows bool
ConcurrentCountTableRows bool
AllowedRunningOnMaster bool
@ -90,6 +92,7 @@ type MigrationContext struct {
AssumeRBR bool
SkipForeignKeyChecks bool
SkipStrictMode bool
AllowZeroInDate bool
NullableUniqueKeyAllowed bool
ApproveRenamedColumns bool
SkipRenamedColumns bool
@ -97,6 +100,7 @@ type MigrationContext struct {
DiscardForeignKeys bool
AliyunRDS bool
GoogleCloudPlatform bool
AzureMySQL bool
config ContextConfig
configMutex *sync.Mutex
@ -139,6 +143,7 @@ type MigrationContext struct {
HooksHintMessage string
HooksHintOwner string
HooksHintToken string
HooksStatusIntervalSec int64
DropServeSocket bool
ServeSocketFile string
@ -177,9 +182,14 @@ type MigrationContext struct {
RenameTablesEndTime time.Time
pointOfInterestTime time.Time
pointOfInterestTimeMutex *sync.Mutex
lastHeartbeatOnChangelogTime time.Time
lastHeartbeatOnChangelogMutex *sync.Mutex
CurrentLag int64
currentProgress uint64
etaNanoseonds int64
ThrottleHTTPIntervalMillis int64
ThrottleHTTPStatusCode int64
ThrottleHTTPTimeoutMillis int64
controlReplicasLagResult mysql.ReplicationLagResult
TotalRowsCopied int64
TotalDMLEventsApplied int64
@ -203,6 +213,7 @@ type MigrationContext struct {
OriginalTableColumns *sql.ColumnList
OriginalTableVirtualColumns *sql.ColumnList
OriginalTableUniqueKeys [](*sql.UniqueKey)
OriginalTableAutoIncrement uint64
GhostTableColumns *sql.ColumnList
GhostTableVirtualColumns *sql.ColumnList
GhostTableUniqueKeys [](*sql.UniqueKey)
@ -263,6 +274,7 @@ func NewMigrationContext() *MigrationContext {
MaxLagMillisecondsThrottleThreshold: 1500,
CutOverLockTimeoutSeconds: 3,
DMLBatchSize: 10,
etaNanoseonds: ETAUnknown,
maxLoad: NewLoadMap(),
criticalLoad: NewLoadMap(),
throttleMutex: &sync.Mutex{},
@ -270,6 +282,7 @@ func NewMigrationContext() *MigrationContext {
throttleControlReplicaKeys: mysql.NewInstanceKeyMap(),
configMutex: &sync.Mutex{},
pointOfInterestTimeMutex: &sync.Mutex{},
lastHeartbeatOnChangelogMutex: &sync.Mutex{},
ColumnRenameMap: make(map[string]string),
PanicAbort: make(chan error),
Log: NewDefaultLogger(),
@ -418,6 +431,36 @@ func (this *MigrationContext) IsTransactionalTable() bool {
return false
}
// SetCountTableRowsCancelFunc sets the cancel function for the CountTableRows query context
func (this *MigrationContext) SetCountTableRowsCancelFunc(f func()) {
this.countMutex.Lock()
defer this.countMutex.Unlock()
this.countTableRowsCancelFunc = f
}
// IsCountingTableRows returns true if the migration has a table count query running
func (this *MigrationContext) IsCountingTableRows() bool {
this.countMutex.Lock()
defer this.countMutex.Unlock()
return this.countTableRowsCancelFunc != nil
}
// CancelTableRowsCount cancels the CountTableRows query context. It is safe to
// call function even when IsCountingTableRows is false.
func (this *MigrationContext) CancelTableRowsCount() {
this.countMutex.Lock()
defer this.countMutex.Unlock()
if this.countTableRowsCancelFunc == nil {
return
}
this.countTableRowsCancelFunc()
this.countTableRowsCancelFunc = nil
}
// ElapsedTime returns time since very beginning of the process
func (this *MigrationContext) ElapsedTime() time.Duration {
return time.Since(this.StartTime)
@ -453,6 +496,10 @@ func (this *MigrationContext) MarkRowCopyEndTime() {
this.RowCopyEndTime = time.Now()
}
func (this *MigrationContext) TimeSinceLastHeartbeatOnChangelog() time.Duration {
return time.Since(this.GetLastHeartbeatOnChangelogTime())
}
func (this *MigrationContext) GetCurrentLagDuration() time.Duration {
return time.Duration(atomic.LoadInt64(&this.CurrentLag))
}
@ -465,6 +512,22 @@ func (this *MigrationContext) SetProgressPct(progressPct float64) {
atomic.StoreUint64(&this.currentProgress, math.Float64bits(progressPct))
}
func (this *MigrationContext) GetETADuration() time.Duration {
return time.Duration(atomic.LoadInt64(&this.etaNanoseonds))
}
func (this *MigrationContext) SetETADuration(etaDuration time.Duration) {
atomic.StoreInt64(&this.etaNanoseonds, etaDuration.Nanoseconds())
}
func (this *MigrationContext) GetETASeconds() int64 {
nano := atomic.LoadInt64(&this.etaNanoseonds)
if nano < 0 {
return ETAUnknown
}
return nano / int64(time.Second)
}
// math.Float64bits([f=0..100])
// GetTotalRowsCopied returns the accurate number of rows being copied (affected)
@ -492,6 +555,20 @@ func (this *MigrationContext) TimeSincePointOfInterest() time.Duration {
return time.Since(this.pointOfInterestTime)
}
func (this *MigrationContext) SetLastHeartbeatOnChangelogTime(t time.Time) {
this.lastHeartbeatOnChangelogMutex.Lock()
defer this.lastHeartbeatOnChangelogMutex.Unlock()
this.lastHeartbeatOnChangelogTime = t
}
func (this *MigrationContext) GetLastHeartbeatOnChangelogTime() time.Time {
this.lastHeartbeatOnChangelogMutex.Lock()
defer this.lastHeartbeatOnChangelogMutex.Unlock()
return this.lastHeartbeatOnChangelogTime
}
func (this *MigrationContext) SetHeartbeatIntervalMilliseconds(heartbeatIntervalMilliseconds int64) {
if heartbeatIntervalMilliseconds < 100 {
heartbeatIntervalMilliseconds = 100
@ -510,8 +587,8 @@ func (this *MigrationContext) SetMaxLagMillisecondsThrottleThreshold(maxLagMilli
}
func (this *MigrationContext) SetChunkSize(chunkSize int64) {
if chunkSize < 100 {
chunkSize = 100
if chunkSize < 10 {
chunkSize = 10
}
if chunkSize > 100000 {
chunkSize = 100000
@ -765,10 +842,39 @@ func (this *MigrationContext) ReadConfigFile() error {
if this.ConfigFile == "" {
return nil
}
gcfg.RelaxedParserMode = true
gcfgscanner.RelaxedScannerMode = true
if err := gcfg.ReadFileInto(&this.config, this.ConfigFile); err != nil {
return fmt.Errorf("Error reading config file %s. Details: %s", this.ConfigFile, err.Error())
cfg, err := ini.Load(this.ConfigFile)
if err != nil {
return err
}
if cfg.Section("client").HasKey("user") {
this.config.Client.User = cfg.Section("client").Key("user").String()
}
if cfg.Section("client").HasKey("password") {
this.config.Client.Password = cfg.Section("client").Key("password").String()
}
if cfg.Section("osc").HasKey("chunk_size") {
this.config.Osc.Chunk_Size, err = cfg.Section("osc").Key("chunk_size").Int64()
if err != nil {
return fmt.Errorf("Unable to read osc chunk size: %s", err.Error())
}
}
if cfg.Section("osc").HasKey("max_load") {
this.config.Osc.Max_Load = cfg.Section("osc").Key("max_load").String()
}
if cfg.Section("osc").HasKey("replication_lag_query") {
this.config.Osc.Replication_Lag_Query = cfg.Section("osc").Key("replication_lag_query").String()
}
if cfg.Section("osc").HasKey("max_lag_millis") {
this.config.Osc.Max_Lag_Millis, err = cfg.Section("osc").Key("max_lag_millis").Int64()
if err != nil {
return fmt.Errorf("Unable to read max lag millis: %s", err.Error())
}
}
// We accept user & password in the form "${SOME_ENV_VARIABLE}" in which case we pull
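For reference, a sketch of a config file matching the sections and keys the `ini`-based reader above parses (values are illustrative; the file is passed via `--conf`):

```bash
# Write a config file with the [client] and [osc] keys read above
cat > /tmp/gh-ost.cnf <<EOF
[client]
user=gh-ost
password=123456

[osc]
chunk_size=2000
max_load=Threads_running=80
max_lag_millis=2000
EOF

gh-ost --conf=/tmp/gh-ost.cnf [.. other options ..]
```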

View File

@ -1,16 +1,18 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package base
import (
"io/ioutil"
"os"
"testing"
"time"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {
@ -56,3 +58,65 @@ func TestGetTableNames(t *testing.T) {
test.S(t).ExpectEquals(context.GetChangelogTableName(), "_tmp_ghc")
}
}
func TestReadConfigFile(t *testing.T) {
{
context := NewMigrationContext()
context.ConfigFile = "/does/not/exist"
if err := context.ReadConfigFile(); err == nil {
t.Fatal("Expected .ReadConfigFile() to return an error, got nil")
}
}
{
f, err := ioutil.TempFile("", t.Name())
if err != nil {
t.Fatalf("Failed to create tmp file: %v", err)
}
defer os.Remove(f.Name())
f.Write([]byte("[client]"))
context := NewMigrationContext()
context.ConfigFile = f.Name()
if err := context.ReadConfigFile(); err != nil {
t.Fatalf(".ReadConfigFile() failed: %v", err)
}
}
{
f, err := ioutil.TempFile("", t.Name())
if err != nil {
t.Fatalf("Failed to create tmp file: %v", err)
}
defer os.Remove(f.Name())
f.Write([]byte("[client]\nuser=test\npassword=123456"))
context := NewMigrationContext()
context.ConfigFile = f.Name()
if err := context.ReadConfigFile(); err != nil {
t.Fatalf(".ReadConfigFile() failed: %v", err)
}
if context.config.Client.User != "test" {
t.Fatalf("Expected client user %q, got %q", "test", context.config.Client.User)
} else if context.config.Client.Password != "123456" {
t.Fatalf("Expected client password %q, got %q", "123456", context.config.Client.Password)
}
}
{
f, err := ioutil.TempFile("", t.Name())
if err != nil {
t.Fatalf("Failed to create tmp file: %v", err)
}
defer os.Remove(f.Name())
f.Write([]byte("[osc]\nmax_load=10"))
context := NewMigrationContext()
context.ConfigFile = f.Name()
if err := context.ReadConfigFile(); err != nil {
t.Fatalf(".ReadConfigFile() failed: %v", err)
}
if context.config.Osc.Max_Load != "10" {
t.Fatalf("Expected osc 'max_load' %q, got %q", "10", context.config.Osc.Max_Load)
}
}
}

View File

@ -1,7 +1,12 @@
/*
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package base
import (
"github.com/outbrain/golib/log"
"github.com/openark/golib/log"
)
type simpleLogger struct{}
@ -12,22 +17,18 @@ func NewDefaultLogger() *simpleLogger {
func (*simpleLogger) Debug(args ...interface{}) {
log.Debug(args[0].(string), args[1:])
return
}
func (*simpleLogger) Debugf(format string, args ...interface{}) {
log.Debugf(format, args...)
return
}
func (*simpleLogger) Info(args ...interface{}) {
log.Info(args[0].(string), args[1:])
return
}
func (*simpleLogger) Infof(format string, args ...interface{}) {
log.Infof(format, args...)
return
}
func (*simpleLogger) Warning(args ...interface{}) error {
@ -64,10 +65,8 @@ func (*simpleLogger) Fatale(err error) error {
func (*simpleLogger) SetLevel(level log.LogLevel) {
log.SetLevel(level)
return
}
func (*simpleLogger) SetPrintStackTrace(printStackTraceFlag bool) {
log.SetPrintStackTrace(printStackTraceFlag)
return
}

View File

@ -8,8 +8,8 @@ package base
import (
"testing"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -13,6 +13,7 @@ import (
"time"
gosql "database/sql"
"github.com/github/gh-ost/go/mysql"
)
@ -24,9 +25,7 @@ func PrettifyDurationOutput(d time.Duration) string {
if d < time.Second {
return "0s"
}
result := fmt.Sprintf("%s", d)
result = prettifyDurationRegexp.ReplaceAllString(result, "")
return result
return prettifyDurationRegexp.ReplaceAllString(d.String(), "")
}
func FileExists(fileName string) bool {
@ -62,7 +61,7 @@ func StringContainsAll(s string, substrings ...string) bool {
return nonEmptyStringsFound
}
func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext *MigrationContext) (string, error) {
func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext *MigrationContext, name string) (string, error) {
versionQuery := `select @@global.version`
var port, extraPort int
var version string
@ -70,12 +69,13 @@ func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig,
return "", err
}
extraPortQuery := `select @@global.extra_port`
if err := db.QueryRow(extraPortQuery).Scan(&extraPort); err != nil {
if err := db.QueryRow(extraPortQuery).Scan(&extraPort); err != nil { // nolint:staticcheck
// swallow this error. not all servers support extra_port
}
// AliyunRDS set users port to "NULL", replace it by gh-ost param
// GCP set users port to "NULL", replace it by gh-ost param
if migrationContext.AliyunRDS || migrationContext.GoogleCloudPlatform {
// Azure MySQL set users port to a different value by design, replace it by gh-ost param
if migrationContext.AliyunRDS || migrationContext.GoogleCloudPlatform || migrationContext.AzureMySQL {
port = connectionConfig.Key.Port
} else {
portQuery := `select @@global.port`
@ -85,7 +85,7 @@ func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig,
}
if connectionConfig.Key.Port == port || (extraPort > 0 && connectionConfig.Key.Port == extraPort) {
migrationContext.Log.Infof("connection validated on %+v", connectionConfig.Key)
migrationContext.Log.Infof("%s connection validated on %+v", name, connectionConfig.Key)
return version, nil
} else if extraPort == 0 {
return "", fmt.Errorf("Unexpected database port reported: %+v", port)

View File

@ -8,8 +8,8 @@ package base
import (
"testing"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -13,8 +13,8 @@ import (
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
gomysql "github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go-mysql/replication"
gomysql "github.com/go-mysql-org/go-mysql/mysql"
"github.com/go-mysql-org/go-mysql/replication"
"golang.org/x/net/context"
)
@ -28,31 +28,24 @@ type GoMySQLReader struct {
LastAppliedRowsEventHint mysql.BinlogCoordinates
}
func NewGoMySQLReader(migrationContext *base.MigrationContext) (binlogReader *GoMySQLReader, err error) {
binlogReader = &GoMySQLReader{
func NewGoMySQLReader(migrationContext *base.MigrationContext) *GoMySQLReader {
connectionConfig := migrationContext.InspectorConnectionConfig
return &GoMySQLReader{
migrationContext: migrationContext,
connectionConfig: migrationContext.InspectorConnectionConfig,
connectionConfig: connectionConfig,
currentCoordinates: mysql.BinlogCoordinates{},
currentCoordinatesMutex: &sync.Mutex{},
binlogSyncer: nil,
binlogStreamer: nil,
}
serverId := uint32(migrationContext.ReplicaServerId)
binlogSyncerConfig := replication.BinlogSyncerConfig{
ServerID: serverId,
Flavor: "mysql",
Host: binlogReader.connectionConfig.Key.Hostname,
Port: uint16(binlogReader.connectionConfig.Key.Port),
User: binlogReader.connectionConfig.User,
Password: binlogReader.connectionConfig.Password,
TLSConfig: binlogReader.connectionConfig.TLSConfig(),
binlogSyncer: replication.NewBinlogSyncer(replication.BinlogSyncerConfig{
ServerID: uint32(migrationContext.ReplicaServerId),
Flavor: gomysql.MySQLFlavor,
Host: connectionConfig.Key.Hostname,
Port: uint16(connectionConfig.Key.Port),
User: connectionConfig.User,
Password: connectionConfig.Password,
TLSConfig: connectionConfig.TLSConfig(),
UseDecimal: true,
}),
}
binlogReader.binlogSyncer = replication.NewBinlogSyncer(binlogSyncerConfig)
return binlogReader, err
}
// ConnectBinlogStreamer
@ -64,7 +57,10 @@ func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordin
this.currentCoordinates = coordinates
this.migrationContext.Log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates)
// Start sync with specified binlog file and position
this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{this.currentCoordinates.LogFile, uint32(this.currentCoordinates.LogPos)})
this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{
Name: this.currentCoordinates.LogFile,
Pos: uint32(this.currentCoordinates.LogPos),
})
return err
}
@ -142,15 +138,17 @@ func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesCha
defer this.currentCoordinatesMutex.Unlock()
this.currentCoordinates.LogPos = int64(ev.Header.LogPos)
}()
if rotateEvent, ok := ev.Event.(*replication.RotateEvent); ok {
switch binlogEvent := ev.Event.(type) {
case *replication.RotateEvent:
func() {
this.currentCoordinatesMutex.Lock()
defer this.currentCoordinatesMutex.Unlock()
this.currentCoordinates.LogFile = string(rotateEvent.NextLogName)
this.currentCoordinates.LogFile = string(binlogEvent.NextLogName)
}()
this.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), rotateEvent.NextLogName)
} else if rowsEvent, ok := ev.Event.(*replication.RowsEvent); ok {
if err := this.handleRowsEvent(ev, rowsEvent, entriesChannel); err != nil {
this.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), binlogEvent.NextLogName)
case *replication.RowsEvent:
if err := this.handleRowsEvent(ev, binlogEvent, entriesChannel); err != nil {
return err
}
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -8,6 +8,7 @@ package main
import (
"flag"
"fmt"
"net/url"
"os"
"os/signal"
"syscall"
@ -16,9 +17,9 @@ import (
"github.com/github/gh-ost/go/logic"
"github.com/github/gh-ost/go/sql"
_ "github.com/go-sql-driver/mysql"
"github.com/outbrain/golib/log"
"github.com/openark/golib/log"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/term"
)
var AppVersion string
@ -77,8 +78,10 @@ func main() {
flag.BoolVar(&migrationContext.DiscardForeignKeys, "discard-foreign-keys", false, "DANGER! This flag will migrate a table that has foreign keys and will NOT create foreign keys on the ghost table, thus your altered table will have NO foreign keys. This is useful for intentional dropping of foreign keys")
flag.BoolVar(&migrationContext.SkipForeignKeyChecks, "skip-foreign-key-checks", false, "set to 'true' when you know for certain there are no foreign keys on your table, and wish to skip the time it takes for gh-ost to verify that")
flag.BoolVar(&migrationContext.SkipStrictMode, "skip-strict-mode", false, "explicitly tell gh-ost binlog applier not to enforce strict sql mode")
flag.BoolVar(&migrationContext.AllowZeroInDate, "allow-zero-in-date", false, "explicitly tell gh-ost binlog applier to ignore NO_ZERO_IN_DATE,NO_ZERO_DATE in sql_mode")
flag.BoolVar(&migrationContext.AliyunRDS, "aliyun-rds", false, "set to 'true' when you execute on Aliyun RDS.")
flag.BoolVar(&migrationContext.GoogleCloudPlatform, "gcp", false, "set to 'true' when you execute on a 1st generation Google Cloud Platform (GCP).")
flag.BoolVar(&migrationContext.AzureMySQL, "azure", false, "set to 'true' when you execute on Azure Database on MySQL.")
executeFlag := flag.Bool("execute", false, "actually execute the alter & migrate the table. Default is noop: do some tests and exit")
flag.BoolVar(&migrationContext.TestOnReplica, "test-on-replica", false, "Have the migration run on a replica, not on the master. At the end of migration replication is stopped, and tables are swapped and immediately swap-revert. Replication remains stopped and you can compare the two tables for building trust")
@ -97,7 +100,7 @@ func main() {
flag.BoolVar(&migrationContext.AssumeRBR, "assume-rbr", false, "set to 'true' when you know for certain your server uses 'ROW' binlog_format. gh-ost is unable to tell, even after reading binlog_format, whether the replication process does indeed use 'ROW', and restarts replication to be certain RBR setting is applied. Such operation requires SUPER privileges which you might not have. Setting this flag avoids restarting replication and you can proceed to use gh-ost without SUPER privileges")
flag.BoolVar(&migrationContext.CutOverExponentialBackoff, "cut-over-exponential-backoff", false, "Wait exponentially longer intervals between failed cut-over attempts. Wait intervals obey a maximum configurable with 'exponential-backoff-max-interval').")
exponentialBackoffMaxInterval := flag.Int64("exponential-backoff-max-interval", 64, "Maximum number of seconds to wait between attempts when performing various operations with exponential backoff.")
chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 100-100,000)")
chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 10-100,000)")
dmlBatchSize := flag.Int64("dml-batch-size", 10, "batch size for DML events to apply in a single transaction (range 1-100)")
defaultRetries := flag.Int64("default-retries", 60, "Default number of retries for various operations before panicking")
cutOverLockTimeoutSeconds := flag.Int64("cut-over-lock-timeout-seconds", 3, "Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout)")
@ -108,6 +111,8 @@ func main() {
throttleControlReplicas := flag.String("throttle-control-replicas", "", "List of replicas on which to check for lag; comma delimited. Example: myhost1.com:3306,myhost2.com,myhost3.com:3307")
throttleQuery := flag.String("throttle-query", "", "when given, issued (every second) to check if operation should throttle. Expecting to return zero for no-throttle, >0 for throttle. Query is issued on the migrated server. Make sure this query is lightweight")
throttleHTTP := flag.String("throttle-http", "", "when given, gh-ost checks given URL via HEAD request; any response code other than 200 (OK) causes throttling; make sure it has low latency response")
flag.Int64Var(&migrationContext.ThrottleHTTPIntervalMillis, "throttle-http-interval-millis", 100, "Number of milliseconds to wait before triggering another HTTP throttle check")
flag.Int64Var(&migrationContext.ThrottleHTTPTimeoutMillis, "throttle-http-timeout-millis", 1000, "Number of milliseconds to use as an HTTP throttle check timeout")
ignoreHTTPErrors := flag.Bool("ignore-http-errors", false, "ignore HTTP connection errors during throttle check")
heartbeatIntervalMillis := flag.Int64("heartbeat-interval-millis", 100, "how frequently would gh-ost inject a heartbeat value")
flag.StringVar(&migrationContext.ThrottleFlagFile, "throttle-flag-file", "", "operation pauses when this file exists; hint: use a file that is specific to the table being altered")
@ -123,13 +128,14 @@ func main() {
flag.StringVar(&migrationContext.HooksHintMessage, "hooks-hint", "", "arbitrary message to be injected to hooks via GH_OST_HOOKS_HINT, for your convenience")
flag.StringVar(&migrationContext.HooksHintOwner, "hooks-hint-owner", "", "arbitrary name of owner to be injected to hooks via GH_OST_HOOKS_HINT_OWNER, for your convenience")
flag.StringVar(&migrationContext.HooksHintToken, "hooks-hint-token", "", "arbitrary token to be injected to hooks via GH_OST_HOOKS_HINT_TOKEN, for your convenience")
flag.Int64Var(&migrationContext.HooksStatusIntervalSec, "hooks-status-interval", 60, "how many seconds to wait between calling onStatus hook")
flag.UintVar(&migrationContext.ReplicaServerId, "replica-server-id", 99999, "server id used by gh-ost process. Default: 99999")
maxLoad := flag.String("max-load", "", "Comma delimited status-name=threshold. e.g: 'Threads_running=100,Threads_connected=500'. When status exceeds threshold, app throttles writes")
criticalLoad := flag.String("critical-load", "", "Comma delimited status-name=threshold, same format as --max-load. When status exceeds threshold, app panics and quits")
flag.Int64Var(&migrationContext.CriticalLoadIntervalMilliseconds, "critical-load-interval-millis", 0, "When 0, migration immediately bails out upon meeting critical-load. When non-zero, a second check is done after given interval, and migration only bails out if 2nd check still meets critical load")
flag.Int64Var(&migrationContext.CriticalLoadHibernateSeconds, "critical-load-hibernate-seconds", 0, "When nonzero, critical-load does not panic and bail out; instead, gh-ost goes into hibernate for the specified duration. It will not read/write anything to from/to any server")
flag.Int64Var(&migrationContext.CriticalLoadHibernateSeconds, "critical-load-hibernate-seconds", 0, "When non-zero, critical-load does not panic and bail out; instead, gh-ost goes into hibernation for the specified duration. It will not read/write anything from/to any server")
quiet := flag.Bool("quiet", false, "quiet")
verbose := flag.Bool("verbose", false, "verbose")
debug := flag.Bool("debug", false, "debug mode (very verbose)")
@ -175,7 +181,7 @@ func main() {
}
if migrationContext.AlterStatement == "" {
log.Fatalf("--alter must be provided and statement must not be empty")
log.Fatal("--alter must be provided and statement must not be empty")
}
parser := sql.NewParserFromAlterStatement(migrationContext.AlterStatement)
migrationContext.AlterStatementOptions = parser.GetAlterStatementOptions()
@ -184,55 +190,60 @@ func main() {
if parser.HasExplicitSchema() {
migrationContext.DatabaseName = parser.GetExplicitSchema()
} else {
log.Fatalf("--database must be provided and database name must not be empty, or --alter must specify database name")
log.Fatal("--database must be provided and database name must not be empty, or --alter must specify database name")
}
}
if err := flag.Set("database", url.QueryEscape(migrationContext.DatabaseName)); err != nil {
migrationContext.Log.Fatale(err)
}
if migrationContext.OriginalTableName == "" {
if parser.HasExplicitTable() {
migrationContext.OriginalTableName = parser.GetExplicitTable()
} else {
log.Fatalf("--table must be provided and table name must not be empty, or --alter must specify table name")
log.Fatal("--table must be provided and table name must not be empty, or --alter must specify table name")
}
}
migrationContext.Noop = !(*executeFlag)
if migrationContext.AllowedRunningOnMaster && migrationContext.TestOnReplica {
migrationContext.Log.Fatalf("--allow-on-master and --test-on-replica are mutually exclusive")
migrationContext.Log.Fatal("--allow-on-master and --test-on-replica are mutually exclusive")
}
if migrationContext.AllowedRunningOnMaster && migrationContext.MigrateOnReplica {
migrationContext.Log.Fatalf("--allow-on-master and --migrate-on-replica are mutually exclusive")
migrationContext.Log.Fatal("--allow-on-master and --migrate-on-replica are mutually exclusive")
}
if migrationContext.MigrateOnReplica && migrationContext.TestOnReplica {
migrationContext.Log.Fatalf("--migrate-on-replica and --test-on-replica are mutually exclusive")
migrationContext.Log.Fatal("--migrate-on-replica and --test-on-replica are mutually exclusive")
}
if migrationContext.SwitchToRowBinlogFormat && migrationContext.AssumeRBR {
migrationContext.Log.Fatalf("--switch-to-rbr and --assume-rbr are mutually exclusive")
migrationContext.Log.Fatal("--switch-to-rbr and --assume-rbr are mutually exclusive")
}
if migrationContext.TestOnReplicaSkipReplicaStop {
if !migrationContext.TestOnReplica {
migrationContext.Log.Fatalf("--test-on-replica-skip-replica-stop requires --test-on-replica to be enabled")
migrationContext.Log.Fatal("--test-on-replica-skip-replica-stop requires --test-on-replica to be enabled")
}
migrationContext.Log.Warning("--test-on-replica-skip-replica-stop enabled. We will not stop replication before cut-over. Ensure you have a plugin that does this.")
}
if migrationContext.CliMasterUser != "" && migrationContext.AssumeMasterHostname == "" {
migrationContext.Log.Fatalf("--master-user requires --assume-master-host")
migrationContext.Log.Fatal("--master-user requires --assume-master-host")
}
if migrationContext.CliMasterPassword != "" && migrationContext.AssumeMasterHostname == "" {
migrationContext.Log.Fatalf("--master-password requires --assume-master-host")
migrationContext.Log.Fatal("--master-password requires --assume-master-host")
}
if migrationContext.TLSCACertificate != "" && !migrationContext.UseTLS {
migrationContext.Log.Fatalf("--ssl-ca requires --ssl")
migrationContext.Log.Fatal("--ssl-ca requires --ssl")
}
if migrationContext.TLSCertificate != "" && !migrationContext.UseTLS {
migrationContext.Log.Fatalf("--ssl-cert requires --ssl")
migrationContext.Log.Fatal("--ssl-cert requires --ssl")
}
if migrationContext.TLSKey != "" && !migrationContext.UseTLS {
migrationContext.Log.Fatalf("--ssl-key requires --ssl")
migrationContext.Log.Fatal("--ssl-key requires --ssl")
}
if migrationContext.TLSAllowInsecure && !migrationContext.UseTLS {
migrationContext.Log.Fatalf("--ssl-allow-insecure requires --ssl")
migrationContext.Log.Fatal("--ssl-allow-insecure requires --ssl")
}
if *replicationLagQuery != "" {
migrationContext.Log.Warningf("--replication-lag-query is deprecated")
migrationContext.Log.Warning("--replication-lag-query is deprecated")
}
switch *cutOver {
@ -260,7 +271,7 @@ func main() {
}
if *askPass {
fmt.Println("Password:")
bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
bytePassword, err := term.ReadPassword(syscall.Stdin)
if err != nil {
migrationContext.Log.Fatale(err)
}
@ -289,11 +300,10 @@ func main() {
log.Infof("starting gh-ost %+v", AppVersion)
acceptSignals(migrationContext)
migrator := logic.NewMigrator(migrationContext)
err := migrator.Migrate()
if err != nil {
migrator := logic.NewMigrator(migrationContext, AppVersion)
if err := migrator.Migrate(); err != nil {
migrator.ExecOnFailureHook()
migrationContext.Log.Fatale(err)
}
fmt.Fprintf(os.Stdout, "# Done\n")
fmt.Fprintln(os.Stdout, "# Done")
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -8,6 +8,7 @@ package logic
import (
gosql "database/sql"
"fmt"
"sync"
"sync/atomic"
"time"
@ -16,11 +17,12 @@ import (
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/sqlutils"
"sync"
"github.com/openark/golib/log"
"github.com/openark/golib/sqlutils"
)
const (
GhostChangelogTableComment = "gh-ost changelog"
atomicCutOverMagicHint = "ghost-cut-over-sentry"
)
@ -57,6 +59,7 @@ type Applier struct {
singletonDB *gosql.DB
migrationContext *base.MigrationContext
finishedMigrating int64
name string
}
func NewApplier(migrationContext *base.MigrationContext) *Applier {
@ -64,11 +67,11 @@ func NewApplier(migrationContext *base.MigrationContext) *Applier {
connectionConfig: migrationContext.ApplierConnectionConfig,
migrationContext: migrationContext,
finishedMigrating: 0,
name: "applier",
}
}
func (this *Applier) InitDBConnections() (err error) {
applierUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName)
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, applierUri); err != nil {
return err
@ -78,18 +81,18 @@ func (this *Applier) InitDBConnections() (err error) {
return err
}
this.singletonDB.SetMaxOpenConns(1)
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name)
if err != nil {
return err
}
if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig, this.migrationContext); err != nil {
if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig, this.migrationContext, this.name); err != nil {
return err
}
this.migrationContext.ApplierMySQLVersion = version
if err := this.validateAndReadTimeZone(); err != nil {
return err
}
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform && !this.migrationContext.AzureMySQL {
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
@ -114,6 +117,24 @@ func (this *Applier) validateAndReadTimeZone() error {
return nil
}
// generateSqlModeQuery returns a `sql_mode = ...` query, to be wrapped with a `set session` or `set global`,
// based on gh-ost configuration:
// - User may skip strict mode
// - User may allow zero dates or zero values in dates
func (this *Applier) generateSqlModeQuery() string {
sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
if !this.migrationContext.SkipStrictMode {
sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
}
sqlModeQuery := fmt.Sprintf("CONCAT(@@session.sql_mode, ',%s')", sqlModeAddendum)
if this.migrationContext.AllowZeroInDate {
sqlModeQuery = fmt.Sprintf("REPLACE(REPLACE(%s, 'NO_ZERO_IN_DATE', ''), 'NO_ZERO_DATE', '')", sqlModeQuery)
}
sqlModeQuery = fmt.Sprintf("sql_mode = %s", sqlModeQuery)
return sqlModeQuery
}
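For context, a sketch of the session assignment this helper yields under default settings (strict mode enforced, zero dates disallowed); the string below is assembled by reading the code above, not quoted from gh-ost documentation:

    sql_mode = CONCAT(@@session.sql_mode, ',,NO_AUTO_VALUE_ON_ZERO,STRICT_ALL_TABLES')

The doubled comma comes from the leading comma in sqlModeAddendum; the pre-existing inline versions of this logic (see ApplyIterationInsertQuery and ApplyDMLEventQueries below) built the same string, and MySQL accepts the empty list element. With --allow-zero-in-date, the CONCAT is additionally wrapped in the two REPLACE calls so NO_ZERO_IN_DATE and NO_ZERO_DATE are stripped from the inherited mode.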
// readTableColumns reads table columns on applier
func (this *Applier) readTableColumns() (err error) {
this.migrationContext.Log.Infof("Examining table structure on applier")
@ -179,11 +200,33 @@ func (this *Applier) CreateGhostTable() error {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
err := func() error {
tx, err := this.db.Begin()
if err != nil {
return err
}
defer tx.Rollback()
sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
if _, err := tx.Exec(sessionQuery); err != nil {
return err
}
if _, err := tx.Exec(query); err != nil {
return err
}
this.migrationContext.Log.Infof("Ghost table created")
if err := tx.Commit(); err != nil {
// Neither SET SESSION nor ALTER are really transactional, so strictly speaking
// there's no need to commit; but let's do this the legit way anyway.
return err
}
return nil
}()
return err
}
// AlterGhost applies `alter` statement on ghost table
@ -198,10 +241,51 @@ func (this *Applier) AlterGhost() error {
sql.EscapeName(this.migrationContext.GetGhostTableName()),
)
this.migrationContext.Log.Debugf("ALTER statement: %s", query)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
err := func() error {
tx, err := this.db.Begin()
if err != nil {
return err
}
defer tx.Rollback()
sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
if _, err := tx.Exec(sessionQuery); err != nil {
return err
}
if _, err := tx.Exec(query); err != nil {
return err
}
this.migrationContext.Log.Infof("Ghost table altered")
if err := tx.Commit(); err != nil {
// Neither SET SESSION nor ALTER are really transactional, so strictly speaking
// there's no need to commit; but let's do this the legit way anyway.
return err
}
return nil
}()
return err
}
// AlterGhostAutoIncrement applies an `alter` statement to set the ghost table's AUTO_INCREMENT value
func (this *Applier) AlterGhostAutoIncrement() error {
query := fmt.Sprintf(`alter /* gh-ost */ table %s.%s AUTO_INCREMENT=%d`,
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
this.migrationContext.OriginalTableAutoIncrement,
)
this.migrationContext.Log.Infof("Altering ghost table AUTO_INCREMENT value %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
)
this.migrationContext.Log.Debugf("AUTO_INCREMENT ALTER statement: %s", query)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
this.migrationContext.Log.Infof("Ghost table AUTO_INCREMENT altered")
return nil
}
@ -211,16 +295,16 @@ func (this *Applier) CreateChangelogTable() error {
return err
}
query := fmt.Sprintf(`create /* gh-ost */ table %s.%s (
id bigint auto_increment,
id bigint unsigned auto_increment,
last_update timestamp not null DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
hint varchar(64) charset ascii not null,
value varchar(4096) charset ascii not null,
primary key(id),
unique key hint_uidx(hint)
) auto_increment=256
`,
) auto_increment=256 comment='%s'`,
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetChangelogTableName()),
GhostChangelogTableComment,
)
this.migrationContext.Log.Infof("Creating changelog table %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
@ -322,8 +406,9 @@ func (this *Applier) InitiateHeartbeat() {
}
injectHeartbeat()
heartbeatTick := time.Tick(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
for range heartbeatTick {
ticker := time.NewTicker(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@ -360,10 +445,13 @@ func (this *Applier) ReadMigrationMinValues(uniqueKey *sql.UniqueKey) error {
if err != nil {
return err
}
rows, err := this.db.Query(query)
if err != nil {
return err
}
defer rows.Close()
for rows.Next() {
this.migrationContext.MigrationRangeMinValues = sql.NewColumnValues(uniqueKey.Len())
if err = rows.Scan(this.migrationContext.MigrationRangeMinValues.ValuesPointers...); err != nil {
@ -372,8 +460,7 @@ func (this *Applier) ReadMigrationMinValues(uniqueKey *sql.UniqueKey) error {
}
this.migrationContext.Log.Infof("Migration min values: [%s]", this.migrationContext.MigrationRangeMinValues)
err = rows.Err()
return err
return rows.Err()
}
// ReadMigrationMaxValues returns the maximum values to be iterated on rowcopy
@ -383,10 +470,13 @@ func (this *Applier) ReadMigrationMaxValues(uniqueKey *sql.UniqueKey) error {
if err != nil {
return err
}
rows, err := this.db.Query(query)
if err != nil {
return err
}
defer rows.Close()
for rows.Next() {
this.migrationContext.MigrationRangeMaxValues = sql.NewColumnValues(uniqueKey.Len())
if err = rows.Scan(this.migrationContext.MigrationRangeMaxValues.ValuesPointers...); err != nil {
@ -395,12 +485,31 @@ func (this *Applier) ReadMigrationMaxValues(uniqueKey *sql.UniqueKey) error {
}
this.migrationContext.Log.Infof("Migration max values: [%s]", this.migrationContext.MigrationRangeMaxValues)
err = rows.Err()
return rows.Err()
}
// ReadMigrationRangeValues reads min/max values that will be used for rowcopy.
// Before reading min/max, write a changelog state into the ghc table to avoid losing data in MySQL's two-phase commit.
/*
Detailed description of the two-phase-commit data loss issue, by @Fanduzi:
When using semi-sync with rpl_semi_sync_master_wait_point=AFTER_SYNC,
an INSERT statement may be mid-commit but blocked because the ack count is not yet met.
The row inserted by that transaction is not yet visible to ReadMigrationRangeValues,
so the copy of the existing table data does not include it.
However, the transaction's binlog event has already been written to the binlog,
while the listener registered by addDMLEventsListener only captures events from after this point,
so the transaction's binlog event is never captured, resulting in data loss.

If we write a changelog row into the ghc table before ReadMigrationRangeValues, and a transaction's commit
is blocked because its ack is not met, then the changelog write blocks too, and ReadMigrationRangeValues
does not run. Once the changelog write succeeds, ReadMigrationRangeValues can see the
newly inserted data, thus avoiding the data loss described above.
*/
func (this *Applier) ReadMigrationRangeValues() error {
if _, err := this.WriteChangelogState(string(ReadMigrationRangeValues)); err != nil {
return err
}
// ReadMigrationRangeValues reads min/max values that will be used for rowcopy
func (this *Applier) ReadMigrationRangeValues() error {
if err := this.ReadMigrationMinValues(this.migrationContext.UniqueKey); err != nil {
return err
}
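An illustrative timeline of the race this reordering guards against, as I read the comment above (hypothetical scenario): (1) a client INSERTs row X; the binlog event is written, but the commit blocks waiting for the semi-sync ack. (2) Without the guard, ReadMigrationRangeValues runs now: X is not yet visible to its SELECT, and the DML listener only sees events from after this point, so X is lost entirely. (3) With the guard, the changelog INSERT enters the same blocked commit pipeline; by the time it returns, X's commit has also completed, so the min/max read includes X.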
@ -437,10 +546,13 @@ func (this *Applier) CalculateNextIterationRangeEndValues() (hasFurtherRange boo
if err != nil {
return hasFurtherRange, err
}
rows, err := this.db.Query(query, explodedArgs...)
if err != nil {
return hasFurtherRange, err
}
defer rows.Close()
iterationRangeMaxValues := sql.NewColumnValues(this.migrationContext.UniqueKey.Len())
for rows.Next() {
if err = rows.Scan(iterationRangeMaxValues.ValuesPointers...); err != nil {
@ -489,12 +601,9 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected
return nil, err
}
defer tx.Rollback()
sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
if !this.migrationContext.SkipStrictMode {
sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
}
sessionQuery = fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)
sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
if _, err := tx.Exec(sessionQuery); err != nil {
return nil, err
@ -992,7 +1101,6 @@ func (this *Applier) buildDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) (result
// ApplyDMLEventQueries applies multiple DML queries onto the _ghost_ table
func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) error {
var totalDelta int64
err := func() error {
@ -1007,12 +1115,7 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
}
sessionQuery := "SET SESSION time_zone = '+00:00'"
sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
if !this.migrationContext.SkipStrictMode {
sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
}
sessionQuery = fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)
sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
if _, err := tx.Exec(sessionQuery); err != nil {
return rollback(err)
@ -1022,11 +1125,20 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
if buildResult.err != nil {
return rollback(buildResult.err)
}
if _, err := tx.Exec(buildResult.query, buildResult.args...); err != nil {
result, err := tx.Exec(buildResult.query, buildResult.args...)
if err != nil {
err = fmt.Errorf("%s; query=%s; args=%+v", err.Error(), buildResult.query, buildResult.args)
return rollback(err)
}
totalDelta += buildResult.rowsDelta
rowsAffected, err := result.RowsAffected()
if err != nil {
log.Warningf("error getting rows affected from DML event query: %s. i'm going to assume that the DML affected a single row, but this may result in inaccurate statistics", err)
rowsAffected = 1
}
// each DML is either a single insert (delta +1), update (delta +0) or delete (delta -1).
// multiplying by the rows actually affected (either 0 or 1) will give an accurate row delta for this DML event
totalDelta += buildResult.rowsDelta * rowsAffected
}
}
if err := tx.Commit(); err != nil {
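A quick worked example of this delta arithmetic (illustrative): each DML event carries a nominal delta (+1 for insert, 0 for update, -1 for delete), so a DELETE whose target row was already gone reports 0 rows affected and contributes -1 × 0 = 0, rather than wrongly shrinking the running rows estimate the way the unconditional totalDelta += rowsDelta did before.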

View File

@ -1,6 +1,5 @@
/*
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -14,7 +13,7 @@ import (
"sync/atomic"
"github.com/github/gh-ost/go/base"
"github.com/outbrain/golib/log"
"github.com/openark/golib/log"
)
const (
@ -64,15 +63,15 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [
env = append(env, fmt.Sprintf("GH_OST_INSPECTED_HOST=%s", this.migrationContext.GetInspectorHostname()))
env = append(env, fmt.Sprintf("GH_OST_EXECUTING_HOST=%s", this.migrationContext.Hostname))
env = append(env, fmt.Sprintf("GH_OST_INSPECTED_LAG=%f", this.migrationContext.GetCurrentLagDuration().Seconds()))
env = append(env, fmt.Sprintf("GH_OST_HEARTBEAT_LAG=%f", this.migrationContext.TimeSinceLastHeartbeatOnChangelog().Seconds()))
env = append(env, fmt.Sprintf("GH_OST_PROGRESS=%f", this.migrationContext.GetProgressPct()))
env = append(env, fmt.Sprintf("GH_OST_ETA_SECONDS=%d", this.migrationContext.GetETASeconds()))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT=%s", this.migrationContext.HooksHintMessage))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_OWNER=%s", this.migrationContext.HooksHintOwner))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_TOKEN=%s", this.migrationContext.HooksHintToken))
env = append(env, fmt.Sprintf("GH_OST_DRY_RUN=%t", this.migrationContext.Noop))
for _, variable := range extraVariables {
env = append(env, variable)
}
env = append(env, extraVariables...)
return env
}

View File

@ -1,11 +1,12 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package logic
import (
"context"
gosql "database/sql"
"fmt"
"reflect"
@ -17,7 +18,7 @@ import (
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/sqlutils"
"github.com/openark/golib/sqlutils"
)
const startSlavePostWaitMilliseconds = 500 * time.Millisecond
@ -29,12 +30,14 @@ type Inspector struct {
db *gosql.DB
informationSchemaDb *gosql.DB
migrationContext *base.MigrationContext
name string
}
func NewInspector(migrationContext *base.MigrationContext) *Inspector {
return &Inspector{
connectionConfig: migrationContext.InspectorConnectionConfig,
migrationContext: migrationContext,
name: "inspector",
}
}
@ -52,7 +55,7 @@ func (this *Inspector) InitDBConnections() (err error) {
if err := this.validateConnection(); err != nil {
return err
}
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform && !this.migrationContext.AzureMySQL {
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
@ -109,6 +112,10 @@ func (this *Inspector) InspectOriginalTable() (err error) {
if err != nil {
return err
}
this.migrationContext.OriginalTableAutoIncrement, err = this.getAutoIncrementValue(this.migrationContext.OriginalTableName)
if err != nil {
return err
}
return nil
}
@ -181,9 +188,17 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
if column.Name == mappedColumn.Name && column.Type == sql.DateTimeColumnType && mappedColumn.Type == sql.TimestampColumnType {
this.migrationContext.MappedSharedColumns.SetConvertDatetimeToTimestamp(column.Name, this.migrationContext.ApplierTimeZone)
}
if column.Name == mappedColumn.Name && column.Type == sql.EnumColumnType && mappedColumn.Charset != "" {
this.migrationContext.MappedSharedColumns.SetEnumToTextConversion(column.Name)
this.migrationContext.MappedSharedColumns.SetEnumValues(column.Name, column.EnumValues)
}
}
for _, column := range this.migrationContext.UniqueKey.Columns.Columns() {
if this.migrationContext.GhostTableVirtualColumns.GetColumn(column.Name) != nil {
// this is a virtual column
continue
}
if this.migrationContext.MappedSharedColumns.HasTimezoneConversion(column.Name) {
return fmt.Errorf("No support at this time for converting a column from DATETIME to TIMESTAMP that is also part of the chosen unique key. Column: %s, key: %s", column.Name, this.migrationContext.UniqueKey.Name)
}
@ -198,7 +213,7 @@ func (this *Inspector) validateConnection() error {
return fmt.Errorf("MySQL replication length limited to 32 characters. See https://dev.mysql.com/doc/refman/5.7/en/assigning-passwords.html")
}
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name)
this.migrationContext.InspectorMySQLVersion = version
return err
}
@ -269,7 +284,7 @@ func (this *Inspector) validateGrants() error {
// It is entirely possible, for example, that the replication is using 'STATEMENT'
// binlog format even as the variable says 'ROW'
func (this *Inspector) restartReplication() error {
this.migrationContext.Log.Infof("Restarting replication on %s:%d to make sure binlog settings apply to replication thread", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Infof("Restarting replication on %s to make sure binlog settings apply to replication thread", this.connectionConfig.Key.String())
masterKey, _ := mysql.GetMasterKeyFromSlaveStatus(this.connectionConfig)
if masterKey == nil {
@ -328,13 +343,13 @@ func (this *Inspector) validateBinlogs() error {
return err
}
if !hasBinaryLogs {
return fmt.Errorf("%s:%d must have binary logs enabled", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
return fmt.Errorf("%s must have binary logs enabled", this.connectionConfig.Key.String())
}
if this.migrationContext.RequiresBinlogFormatChange() {
if !this.migrationContext.SwitchToRowBinlogFormat {
return fmt.Errorf("You must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s:%d doesn't have replicas", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
return fmt.Errorf("You must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s doesn't have replicas", this.connectionConfig.Key.String())
}
query := fmt.Sprintf(`show /* gh-ost */ slave hosts`)
query := `show /* gh-ost */ slave hosts`
countReplicas := 0
err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error {
countReplicas++
@ -344,21 +359,20 @@ func (this *Inspector) validateBinlogs() error {
return err
}
if countReplicas > 0 {
return fmt.Errorf("%s:%d has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat)
return fmt.Errorf("%s has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat)
}
this.migrationContext.Log.Infof("%s:%d has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat)
this.migrationContext.Log.Infof("%s has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat)
}
query = `select @@global.binlog_row_image`
if err := this.db.QueryRow(query).Scan(&this.migrationContext.OriginalBinlogRowImage); err != nil {
// Only as of 5.6. We wish to support 5.5 as well
this.migrationContext.OriginalBinlogRowImage = "FULL"
return err
}
this.migrationContext.OriginalBinlogRowImage = strings.ToUpper(this.migrationContext.OriginalBinlogRowImage)
if this.migrationContext.OriginalBinlogRowImage != "FULL" {
return fmt.Errorf("%s:%d has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogRowImage)
return fmt.Errorf("%s has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogRowImage)
}
this.migrationContext.Log.Infof("binary logs validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Infof("binary logs validated on %s", this.connectionConfig.Key.String())
return nil
}
@ -371,25 +385,25 @@ func (this *Inspector) validateLogSlaveUpdates() error {
}
if logSlaveUpdates {
this.migrationContext.Log.Infof("log_slave_updates validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Infof("log_slave_updates validated on %s", this.connectionConfig.Key.String())
return nil
}
if this.migrationContext.IsTungsten {
this.migrationContext.Log.Warningf("log_slave_updates not found on %s:%d, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.String())
return nil
}
if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica {
return fmt.Errorf("%s:%d must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
return fmt.Errorf("%s must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.String())
}
if this.migrationContext.InspectorIsAlsoApplier() {
this.migrationContext.Log.Warningf("log_slave_updates not found on %s:%d, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.String())
return nil
}
return fmt.Errorf("%s:%d must have log_slave_updates enabled for executing migration", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
return fmt.Errorf("%s must have log_slave_updates enabled for executing migration", this.connectionConfig.Key.String())
}
// validateTable makes sure the table we need to operate on actually exists
@ -520,17 +534,39 @@ func (this *Inspector) estimateTableRowsViaExplain() error {
}
// CountTableRows counts exact number of rows on the original table
func (this *Inspector) CountTableRows() error {
func (this *Inspector) CountTableRows(ctx context.Context) error {
atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 1)
defer atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 0)
this.migrationContext.Log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while")
query := fmt.Sprintf(`select /* gh-ost */ count(*) as rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
var rowsEstimate int64
if err := this.db.QueryRow(query).Scan(&rowsEstimate); err != nil {
conn, err := this.db.Conn(ctx)
if err != nil {
return err
}
defer conn.Close()
var connectionID string
if err := conn.QueryRowContext(ctx, `SELECT /* gh-ost */ CONNECTION_ID()`).Scan(&connectionID); err != nil {
return err
}
query := fmt.Sprintf(`select /* gh-ost */ count(*) as count_rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
var rowsEstimate int64
if err := conn.QueryRowContext(ctx, query).Scan(&rowsEstimate); err != nil {
switch err {
case context.Canceled, context.DeadlineExceeded:
this.migrationContext.Log.Infof("exact row count cancelled (%s), likely because I'm about to cut over. I'm going to kill that query.", ctx.Err())
return mysql.Kill(this.db, connectionID)
default:
return err
}
}
// row count query finished. nil out the cancel func, so the main migration thread
// doesn't bother calling it after row copy is done.
this.migrationContext.SetCountTableRowsCancelFunc(nil)
atomic.StoreInt64(&this.migrationContext.RowsEstimate, rowsEstimate)
this.migrationContext.UsedRowsEstimateMethod = base.CountRowsEstimate
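Why the explicit KILL is needed (my reading of the pattern above): cancelling the context tears down the client side of the connection, but the server may go on executing the long COUNT(*), and the metadata lock it holds can block the cut-over's table rename. Capturing CONNECTION_ID() up front allows the query to be terminated server-side from a second connection. A minimal, hypothetical sketch of the pattern, not the diff's actual mysql.Kill implementation:

	var connectionID string
	conn.QueryRowContext(ctx, "SELECT CONNECTION_ID()").Scan(&connectionID)
	// ... the long-running COUNT(*) executes on conn; if ctx is cancelled meanwhile:
	db.Exec(fmt.Sprintf("KILL QUERY %s", connectionID)) // issued over a different connection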
@ -553,6 +589,7 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
columnName := m.GetString("COLUMN_NAME")
columnType := m.GetString("COLUMN_TYPE")
columnOctetLength := m.GetUint("CHARACTER_OCTET_LENGTH")
for _, columnsList := range columnsLists {
column := columnsList.GetColumn(columnName)
if column == nil {
@ -579,6 +616,11 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
}
if strings.HasPrefix(columnType, "enum") {
column.Type = sql.EnumColumnType
column.EnumValues = sql.ParseEnumValues(m.GetString("COLUMN_TYPE"))
}
if strings.HasPrefix(columnType, "binary") {
column.Type = sql.BinaryColumnType
column.BinaryOctetLength = columnOctetLength
}
if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
column.Charset = charset
@ -589,6 +631,24 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
return err
}
// getAutoIncrementValue gets the original table's AUTO_INCREMENT value, if it exists (0 if it does not)
func (this *Inspector) getAutoIncrementValue(tableName string) (autoIncrement uint64, err error) {
query := `
SELECT
AUTO_INCREMENT
FROM INFORMATION_SCHEMA.TABLES
WHERE
TABLES.TABLE_SCHEMA = ?
AND TABLES.TABLE_NAME = ?
AND AUTO_INCREMENT IS NOT NULL
`
err = sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
autoIncrement = m.GetUint64("AUTO_INCREMENT")
return nil
}, this.migrationContext.DatabaseName, tableName)
return autoIncrement, err
}
// getCandidateUniqueKeys investigates a table and returns the list of unique keys
// candidate for chunking
func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*sql.UniqueKey), err error) {
@ -767,5 +827,4 @@ func (this *Inspector) getReplicationLag() (replicationLag time.Duration, err er
func (this *Inspector) Teardown() {
this.db.Close()
this.informationSchemaDb.Close()
return
}

View File

@ -1,11 +1,12 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package logic
import (
"context"
"fmt"
"io"
"math"
@ -24,8 +25,10 @@ import (
type ChangelogState string
const (
AllEventsUpToLockProcessed ChangelogState = "AllEventsUpToLockProcessed"
GhostTableMigrated ChangelogState = "GhostTableMigrated"
AllEventsUpToLockProcessed = "AllEventsUpToLockProcessed"
Migrated ChangelogState = "Migrated"
ReadMigrationRangeValues ChangelogState = "ReadMigrationRangeValues"
)
func ReadChangelogState(s string) ChangelogState {
@ -61,6 +64,7 @@ const (
// Migrator is the main schema migration flow manager.
type Migrator struct {
appVersion string
parser *sql.AlterTableParser
inspector *Inspector
applier *Applier
@ -86,8 +90,9 @@ type Migrator struct {
finishedMigrating int64
}
func NewMigrator(context *base.MigrationContext) *Migrator {
func NewMigrator(context *base.MigrationContext, appVersion string) *Migrator {
migrator := &Migrator{
appVersion: appVersion,
migrationContext: context,
parser: sql.NewAlterTableParser(),
ghostTableMigrated: make(chan bool),
@ -176,16 +181,6 @@ func (this *Migrator) retryOperationWithExponentialBackoff(operation func() erro
return err
}
// executeAndThrottleOnError executes a given function. If it errors, it
// throttles.
func (this *Migrator) executeAndThrottleOnError(operation func() error) (err error) {
if err := operation(); err != nil {
this.throttler.throttle(nil)
return err
}
return nil
}
// consumeRowCopyComplete blocks on the rowCopyComplete channel once, and then
// consumes and drops any further incoming events that may be left hanging.
func (this *Migrator) consumeRowCopyComplete() {
@ -207,16 +202,26 @@ func (this *Migrator) canStopStreaming() bool {
return atomic.LoadInt64(&this.migrationContext.CutOverCompleteFlag) != 0
}
// onChangelogStateEvent is called when a binlog event operation on the changelog table is intercepted.
func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (err error) {
// onChangelogEvent is called when a binlog event operation on the changelog table is intercepted.
func (this *Migrator) onChangelogEvent(dmlEvent *binlog.BinlogDMLEvent) (err error) {
// Hey, I created the changelog table, I know the type of columns it has!
if hint := dmlEvent.NewColumnValues.StringColumn(2); hint != "state" {
switch hint := dmlEvent.NewColumnValues.StringColumn(2); hint {
case "state":
return this.onChangelogStateEvent(dmlEvent)
case "heartbeat":
return this.onChangelogHeartbeatEvent(dmlEvent)
default:
return nil
}
}
func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (err error) {
changelogStateString := dmlEvent.NewColumnValues.StringColumn(3)
changelogState := ReadChangelogState(changelogStateString)
this.migrationContext.Log.Infof("Intercepted changelog state %s", changelogState)
switch changelogState {
case Migrated, ReadMigrationRangeValues:
// no-op event
case GhostTableMigrated:
{
this.ghostTableMigrated <- true
@ -245,6 +250,18 @@ func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (er
return nil
}
func (this *Migrator) onChangelogHeartbeatEvent(dmlEvent *binlog.BinlogDMLEvent) (err error) {
changelogHeartbeatString := dmlEvent.NewColumnValues.StringColumn(3)
heartbeatTime, err := time.Parse(time.RFC3339Nano, changelogHeartbeatString)
if err != nil {
return this.migrationContext.Log.Errore(err)
} else {
this.migrationContext.SetLastHeartbeatOnChangelogTime(heartbeatTime)
return nil
}
}
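The heartbeat value parsed here is a timestamp, presumably written by the applier's InitiateHeartbeat in time.RFC3339Nano format (the format this parse implies); replaying it through the binlog gives an end-to-end lag measurement. A minimal sketch pairing the two halves (illustrative, not code from this diff):

	// applier side: serialize the current time into the changelog 'heartbeat' row
	value := time.Now().Format(time.RFC3339Nano)
	// migrator side: upon intercepting the event, derive the replication lag
	heartbeatTime, _ := time.Parse(time.RFC3339Nano, value)
	lag := time.Since(heartbeatTime) // what TimeSinceLastHeartbeatOnChangelog() reports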
// listenOnPanicAbort aborts on abort request
func (this *Migrator) listenOnPanicAbort() {
err := <-this.migrationContext.PanicAbort
@ -280,8 +297,8 @@ func (this *Migrator) countTableRows() (err error) {
return nil
}
countRowsFunc := func() error {
if err := this.inspector.CountTableRows(); err != nil {
countRowsFunc := func(ctx context.Context) error {
if err := this.inspector.CountTableRows(ctx); err != nil {
return err
}
if err := this.hooksExecutor.onRowCountComplete(); err != nil {
@ -291,12 +308,17 @@ func (this *Migrator) countTableRows() (err error) {
}
if this.migrationContext.ConcurrentCountTableRows {
// store a cancel func so we can stop this query before a cut over
rowCountContext, rowCountCancel := context.WithCancel(context.Background())
this.migrationContext.SetCountTableRowsCancelFunc(rowCountCancel)
this.migrationContext.Log.Infof("As instructed, counting rows in the background; meanwhile I will use an estimated count, and will update it later on")
go countRowsFunc()
go countRowsFunc(rowCountContext)
// and we ignore errors, because this turns to be a background job
return nil
}
return countRowsFunc()
return countRowsFunc(context.Background())
}
func (this *Migrator) createFlagFiles() (err error) {
@ -400,6 +422,10 @@ func (this *Migrator) Migrate() (err error) {
}
this.printStatus(ForcePrintStatusRule)
if this.migrationContext.IsCountingTableRows() {
this.migrationContext.Log.Info("stopping query for exact row count, because that can accidentally lock out the cut over")
this.migrationContext.CancelTableRowsCount()
}
if err := this.hooksExecutor.onBeforeCutOver(); err != nil {
return err
}
@ -476,6 +502,13 @@ func (this *Migrator) cutOver() (err error) {
this.migrationContext.Log.Debugf("checking for cut-over postpone")
this.sleepWhileTrue(
func() (bool, error) {
heartbeatLag := this.migrationContext.TimeSinceLastHeartbeatOnChangelog()
maxLagMillisecondsThrottle := time.Duration(atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold)) * time.Millisecond
cutOverLockTimeout := time.Duration(this.migrationContext.CutOverLockTimeoutSeconds) * time.Second
if heartbeatLag > maxLagMillisecondsThrottle || heartbeatLag > cutOverLockTimeout {
this.migrationContext.Log.Debugf("current HeartbeatLag (%.2fs) is too high, it needs to be less than both --max-lag-millis (%.2fs) and --cut-over-lock-timeout-seconds (%.2fs) to continue", heartbeatLag.Seconds(), maxLagMillisecondsThrottle.Seconds(), cutOverLockTimeout.Seconds())
return true, nil
}
if this.migrationContext.PostponeCutOverFlagFile == "" {
return false, nil
}
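A worked illustration of the heartbeat-lag gate added above (hypothetical numbers): with --max-lag-millis=1500 and --cut-over-lock-timeout-seconds=3, a changelog heartbeat lag of 2.1s exceeds the 1.5s threshold, so the loop keeps sleeping; once lag drops to, say, 0.8s (below both 1.5s and 3s), control falls through to the postpone-flag-file checks and cut-over may proceed.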
@ -517,20 +550,20 @@ func (this *Migrator) cutOver() (err error) {
}
}
}
if this.migrationContext.CutOverType == base.CutOverAtomic {
switch this.migrationContext.CutOverType {
case base.CutOverAtomic:
// Atomic solution: we use low timeout and multiple attempts. But for
// each failed attempt, we throttle until replication lag is back to normal
err := this.atomicCutOver()
this.handleCutOverResult(err)
return err
}
if this.migrationContext.CutOverType == base.CutOverTwoStep {
err := this.cutOverTwoStep()
this.handleCutOverResult(err)
return err
}
err = this.atomicCutOver()
case base.CutOverTwoStep:
err = this.cutOverTwoStep()
default:
return this.migrationContext.Log.Fatalf("Unknown cut-over type: %d; should never get here!", this.migrationContext.CutOverType)
}
this.handleCutOverResult(err)
return err
}
// Inject the "AllEventsUpToLockProcessed" state hint, wait for it to appear in the binary logs,
// make sure the queue is drained.
@ -777,17 +810,16 @@ func (this *Migrator) initiateInspector() (err error) {
}
// initiateStatus sets and activates the printStatus() ticker
func (this *Migrator) initiateStatus() error {
func (this *Migrator) initiateStatus() {
this.printStatus(ForcePrintStatusAndHintRule)
statusTick := time.Tick(1 * time.Second)
for range statusTick {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return nil
return
}
go this.printStatus(HeuristicPrintStatusRule)
}
return nil
}
// printMigrationStatusHint prints a detailed configuration dump, that is useful
@ -796,57 +828,57 @@ func (this *Migrator) initiateStatus() error {
// migration, and as response to the "status" interactive command.
func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) {
w := io.MultiWriter(writers...)
fmt.Fprintln(w, fmt.Sprintf("# Migrating %s.%s; Ghost table is %s.%s",
fmt.Fprintf(w, "# Migrating %s.%s; Ghost table is %s.%s\n",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
))
fmt.Fprintln(w, fmt.Sprintf("# Migrating %+v; inspecting %+v; executing on %+v",
)
fmt.Fprintf(w, "# Migrating %+v; inspecting %+v; executing on %+v\n",
*this.applier.connectionConfig.ImpliedKey,
*this.inspector.connectionConfig.ImpliedKey,
this.migrationContext.Hostname,
))
fmt.Fprintln(w, fmt.Sprintf("# Migration started at %+v",
)
fmt.Fprintf(w, "# Migration started at %+v\n",
this.migrationContext.StartTime.Format(time.RubyDate),
))
)
maxLoad := this.migrationContext.GetMaxLoad()
criticalLoad := this.migrationContext.GetCriticalLoad()
fmt.Fprintln(w, fmt.Sprintf("# chunk-size: %+v; max-lag-millis: %+vms; dml-batch-size: %+v; max-load: %s; critical-load: %s; nice-ratio: %f",
fmt.Fprintf(w, "# chunk-size: %+v; max-lag-millis: %+vms; dml-batch-size: %+v; max-load: %s; critical-load: %s; nice-ratio: %f\n",
atomic.LoadInt64(&this.migrationContext.ChunkSize),
atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold),
atomic.LoadInt64(&this.migrationContext.DMLBatchSize),
maxLoad.String(),
criticalLoad.String(),
this.migrationContext.GetNiceRatio(),
))
)
if this.migrationContext.ThrottleFlagFile != "" {
setIndicator := ""
if base.FileExists(this.migrationContext.ThrottleFlagFile) {
setIndicator = "[set]"
}
fmt.Fprintln(w, fmt.Sprintf("# throttle-flag-file: %+v %+v",
fmt.Fprintf(w, "# throttle-flag-file: %+v %+v\n",
this.migrationContext.ThrottleFlagFile, setIndicator,
))
)
}
if this.migrationContext.ThrottleAdditionalFlagFile != "" {
setIndicator := ""
if base.FileExists(this.migrationContext.ThrottleAdditionalFlagFile) {
setIndicator = "[set]"
}
fmt.Fprintln(w, fmt.Sprintf("# throttle-additional-flag-file: %+v %+v",
fmt.Fprintf(w, "# throttle-additional-flag-file: %+v %+v\n",
this.migrationContext.ThrottleAdditionalFlagFile, setIndicator,
))
)
}
if throttleQuery := this.migrationContext.GetThrottleQuery(); throttleQuery != "" {
fmt.Fprintln(w, fmt.Sprintf("# throttle-query: %+v",
fmt.Fprintf(w, "# throttle-query: %+v\n",
throttleQuery,
))
)
}
if throttleControlReplicaKeys := this.migrationContext.GetThrottleControlReplicaKeys(); throttleControlReplicaKeys.Len() > 0 {
fmt.Fprintln(w, fmt.Sprintf("# throttle-control-replicas count: %+v",
fmt.Fprintf(w, "# throttle-control-replicas count: %+v\n",
throttleControlReplicaKeys.Len(),
))
)
}
if this.migrationContext.PostponeCutOverFlagFile != "" {
@ -854,20 +886,20 @@ func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) {
if base.FileExists(this.migrationContext.PostponeCutOverFlagFile) {
setIndicator = "[set]"
}
fmt.Fprintln(w, fmt.Sprintf("# postpone-cut-over-flag-file: %+v %+v",
fmt.Fprintf(w, "# postpone-cut-over-flag-file: %+v %+v\n",
this.migrationContext.PostponeCutOverFlagFile, setIndicator,
))
)
}
if this.migrationContext.PanicFlagFile != "" {
fmt.Fprintln(w, fmt.Sprintf("# panic-flag-file: %+v",
fmt.Fprintf(w, "# panic-flag-file: %+v\n",
this.migrationContext.PanicFlagFile,
))
)
}
fmt.Fprintln(w, fmt.Sprintf("# Serving on unix socket: %+v",
fmt.Fprintf(w, "# Serving on unix socket: %+v\n",
this.migrationContext.ServeSocketFile,
))
)
if this.migrationContext.ServeTCPPort != 0 {
fmt.Fprintln(w, fmt.Sprintf("# Serving on TCP port: %+v", this.migrationContext.ServeTCPPort))
fmt.Fprintf(w, "# Serving on TCP port: %+v\n", this.migrationContext.ServeTCPPort)
}
}
@ -912,20 +944,29 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
}
var etaSeconds float64 = math.MaxFloat64
eta := "N/A"
var etaDuration = time.Duration(base.ETAUnknown)
if progressPct >= 100.0 {
eta = "due"
etaDuration = 0
} else if progressPct >= 0.1 {
elapsedRowCopySeconds := this.migrationContext.ElapsedRowCopyTime().Seconds()
totalExpectedSeconds := elapsedRowCopySeconds * float64(rowsEstimate) / float64(totalRowsCopied)
etaSeconds = totalExpectedSeconds - elapsedRowCopySeconds
if etaSeconds >= 0 {
etaDuration := time.Duration(etaSeconds) * time.Second
eta = base.PrettifyDurationOutput(etaDuration)
etaDuration = time.Duration(etaSeconds) * time.Second
} else {
eta = "due"
etaDuration = 0
}
}
this.migrationContext.SetETADuration(etaDuration)
var eta string
switch etaDuration {
case 0:
eta = "due"
case time.Duration(base.ETAUnknown):
eta = "N/A"
default:
eta = base.PrettifyDurationOutput(etaDuration)
}
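To make the ETA arithmetic concrete (hypothetical numbers): after 600s of row copy with 250,000 of an estimated 1,000,000 rows copied, totalExpectedSeconds = 600 * 1,000,000 / 250,000 = 2,400, hence etaSeconds = 1,800 and the status line reports roughly 30 minutes; a negative result collapses to "due", and a migration below 0.1% progress stays at "N/A" via the base.ETAUnknown sentinel.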
state := "migrating"
if atomic.LoadInt64(&this.migrationContext.CountingRowsFlag) > 0 && !this.migrationContext.ConcurrentCountTableRows {
@ -937,7 +978,7 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
state = fmt.Sprintf("throttled, %s", throttleReason)
}
shouldPrintStatus := false
var shouldPrintStatus bool
if rule == HeuristicPrintStatusRule {
if elapsedSeconds <= 60 {
shouldPrintStatus = true
@ -962,13 +1003,14 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
currentBinlogCoordinates := *this.eventsStreamer.GetCurrentBinlogCoordinates()
status := fmt.Sprintf("Copy: %d/%d %.1f%%; Applied: %d; Backlog: %d/%d; Time: %+v(total), %+v(copy); streamer: %+v; Lag: %.2fs, State: %s; ETA: %s",
status := fmt.Sprintf("Copy: %d/%d %.1f%%; Applied: %d; Backlog: %d/%d; Time: %+v(total), %+v(copy); streamer: %+v; Lag: %.2fs, HeartbeatLag: %.2fs, State: %s; ETA: %s",
totalRowsCopied, rowsEstimate, progressPct,
atomic.LoadInt64(&this.migrationContext.TotalDMLEventsApplied),
len(this.applyEventsQueue), cap(this.applyEventsQueue),
base.PrettifyDurationOutput(elapsedTime), base.PrettifyDurationOutput(this.migrationContext.ElapsedRowCopyTime()),
currentBinlogCoordinates,
this.migrationContext.GetCurrentLagDuration().Seconds(),
this.migrationContext.TimeSinceLastHeartbeatOnChangelog().Seconds(),
state,
eta,
)
@ -979,7 +1021,8 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
w := io.MultiWriter(writers...)
fmt.Fprintln(w, status)
if elapsedSeconds%60 == 0 {
hooksStatusIntervalSec := this.migrationContext.HooksStatusIntervalSec
if hooksStatusIntervalSec > 0 && elapsedSeconds%hooksStatusIntervalSec == 0 {
this.hooksExecutor.onStatus(status)
}
}
@ -995,7 +1038,7 @@ func (this *Migrator) initiateStreaming() error {
this.migrationContext.DatabaseName,
this.migrationContext.GetChangelogTableName(),
func(dmlEvent *binlog.BinlogDMLEvent) error {
return this.onChangelogStateEvent(dmlEvent)
return this.onChangelogEvent(dmlEvent)
},
)
@ -1009,8 +1052,9 @@ func (this *Migrator) initiateStreaming() error {
}()
go func() {
ticker := time.Tick(1 * time.Second)
for range ticker {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@ -1037,7 +1081,7 @@ func (this *Migrator) addDMLEventsListener() error {
// initiateThrottler kicks in the throttling collection and the throttling checks.
func (this *Migrator) initiateThrottler() error {
this.throttler = NewThrottler(this.migrationContext, this.applier, this.inspector)
this.throttler = NewThrottler(this.migrationContext, this.applier, this.inspector, this.appVersion)
go this.throttler.initiateThrottlerCollection(this.firstThrottlingCollected)
this.migrationContext.Log.Infof("Waiting for first throttle metrics to be collected")
@ -1072,6 +1116,14 @@ func (this *Migrator) initiateApplier() error {
return err
}
if this.migrationContext.OriginalTableAutoIncrement > 0 && !this.parser.IsAutoIncrementDefined() {
// Original table has AUTO_INCREMENT value and the -alter statement does not indicate any override,
// so we should copy AUTO_INCREMENT value onto our ghost table.
if err := this.applier.AlterGhostAutoIncrement(); err != nil {
this.migrationContext.Log.Errorf("Unable to ALTER ghost table AUTO_INCREMENT value, see further error details. Bailing out")
return err
}
}
this.applier.WriteChangelogState(string(GhostTableMigrated))
go this.applier.InitiateHeartbeat()
return nil
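Background on why this carry-over matters (my reading, not stated in the diff): the ghost table starts empty, so after row copy its AUTO_INCREMENT counter would be derived only from the rows actually present; if the original table had rows deleted at the high end, inserts right after cut-over could re-issue ids the application had already seen. Copying the original's counter, unless the --alter statement overrides AUTO_INCREMENT explicitly, preserves id continuity across the migration.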
@ -1150,7 +1202,6 @@ func (this *Migrator) iterateChunks() error {
// Enqueue copy operation; to be executed by executeWriteFuncs()
this.copyRowsQueue <- copyRowsFunc
}
return nil
}
func (this *Migrator) onApplyEventStruct(eventStruct *applyEventStruct) error {
@ -1241,7 +1292,7 @@ func (this *Migrator) executeWriteFuncs() error {
if niceRatio := this.migrationContext.GetNiceRatio(); niceRatio > 0 {
copyRowsDuration := time.Since(copyRowsStartTime)
sleepTimeNanosecondFloat64 := niceRatio * float64(copyRowsDuration.Nanoseconds())
sleepTime := time.Duration(time.Duration(int64(sleepTimeNanosecondFloat64)) * time.Nanosecond)
sleepTime := time.Duration(int64(sleepTimeNanosecondFloat64)) * time.Nanosecond
time.Sleep(sleepTime)
}
}
@ -1256,13 +1307,17 @@ func (this *Migrator) executeWriteFuncs() error {
}
}
}
return nil
}
// finalCleanup takes actions at very end of migration, dropping tables etc.
func (this *Migrator) finalCleanup() error {
atomic.StoreInt64(&this.migrationContext.CleanupImminentFlag, 1)
this.migrationContext.Log.Infof("Writing changelog state: %+v", Migrated)
if _, err := this.applier.WriteChangelogState(string(Migrated)); err != nil {
return err
}
if this.migrationContext.Noop {
if createTableStatement, err := this.inspector.showCreateTable(this.migrationContext.GetGhostTableName()); err == nil {
this.migrationContext.Log.Infof("New table structure follows")

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -122,8 +122,6 @@ func (this *Server) onServerCommand(command string, writer *bufio.Writer) (err e
// applyServerCommand parses and executes commands by user
func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (printStatusRule PrintStatusRule, err error) {
printStatusRule = NoPrintStatusRule
tokens := strings.SplitN(command, "=", 2)
command = strings.TrimSpace(tokens[0])
arg := ""
@ -134,7 +132,7 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
}
}
argIsQuestion := (arg == "?")
throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged\n"
throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged"
if err := this.hooksExecutor.onInteractiveCommand(command); err != nil {
return NoPrintStatusRule, err
@ -147,6 +145,8 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
status # Print a detailed status message
sup # Print a short status message
coordinates # Print the currently inspected coordinates
applier # Print the hostname of the applier
inspector # Print the hostname of the inspector
chunk-size=<newsize> # Set a new chunk-size
dml-batch-size=<newsize> # Set a new dml-batch-size
nice-ratio=<ratio> # Set a new nice-ratio, immediate sleep after each row-copy operation, float (examples: 0 is aggressive, 0.7 adds 70% runtime, 1.0 doubles runtime, 2.0 triples runtime, ...)
@ -177,6 +177,22 @@ help # This message
}
return NoPrintStatusRule, fmt.Errorf("coordinates are read-only")
}
case "applier":
if this.migrationContext.ApplierConnectionConfig != nil && this.migrationContext.ApplierConnectionConfig.ImpliedKey != nil {
fmt.Fprintf(writer, "Host: %s, Version: %s\n",
this.migrationContext.ApplierConnectionConfig.ImpliedKey.String(),
this.migrationContext.ApplierMySQLVersion,
)
}
return NoPrintStatusRule, nil
case "inspector":
if this.migrationContext.InspectorConnectionConfig != nil && this.migrationContext.InspectorConnectionConfig.ImpliedKey != nil {
fmt.Fprintf(writer, "Host: %s, Version: %s\n",
this.migrationContext.InspectorConnectionConfig.ImpliedKey.String(),
this.migrationContext.InspectorMySQLVersion,
)
}
return NoPrintStatusRule, nil
case "chunk-size":
{
if argIsQuestion {
@ -264,7 +280,7 @@ help # This message
return NoPrintStatusRule, nil
}
this.migrationContext.SetThrottleQuery(arg)
fmt.Fprintf(writer, throttleHint)
fmt.Fprintln(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "throttle-http":
@ -274,7 +290,7 @@ help # This message
return NoPrintStatusRule, nil
}
this.migrationContext.SetThrottleHTTP(arg)
fmt.Fprintf(writer, throttleHint)
fmt.Fprintln(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "throttle-control-replicas":
@ -297,7 +313,7 @@ help # This message
return NoPrintStatusRule, err
}
atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 1)
fmt.Fprintf(writer, throttleHint)
fmt.Fprintln(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "no-throttle", "unthrottle", "resume", "continue":

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -16,7 +16,7 @@ import (
"github.com/github/gh-ost/go/binlog"
"github.com/github/gh-ost/go/mysql"
"github.com/outbrain/golib/sqlutils"
"github.com/openark/golib/sqlutils"
)
type BinlogEventListener struct {
@ -42,6 +42,7 @@ type EventsStreamer struct {
listenersMutex *sync.Mutex
eventsChannel chan *binlog.BinlogEntry
binlogReader *binlog.GoMySQLReader
name string
}
func NewEventsStreamer(migrationContext *base.MigrationContext) *EventsStreamer {
@ -51,13 +52,13 @@ func NewEventsStreamer(migrationContext *base.MigrationContext) *EventsStreamer
listeners: [](*BinlogEventListener){},
listenersMutex: &sync.Mutex{},
eventsChannel: make(chan *binlog.BinlogEntry, EventsChannelBufferSize),
name: "streamer",
}
}
// AddListener registers a new listener for binlog events, on a per-table basis
func (this *EventsStreamer) AddListener(
async bool, databaseName string, tableName string, onDmlEvent func(event *binlog.BinlogDMLEvent) error) (err error) {
this.listenersMutex.Lock()
defer this.listenersMutex.Unlock()
@ -85,10 +86,10 @@ func (this *EventsStreamer) notifyListeners(binlogEvent *binlog.BinlogDMLEvent)
for _, listener := range this.listeners {
listener := listener
if strings.ToLower(listener.databaseName) != strings.ToLower(binlogEvent.DatabaseName) {
if !strings.EqualFold(listener.databaseName, binlogEvent.DatabaseName) {
continue
}
if strings.ToLower(listener.tableName) != strings.ToLower(binlogEvent.TableName) {
if !strings.EqualFold(listener.tableName, binlogEvent.TableName) {
continue
}
if listener.async {
@ -106,7 +107,7 @@ func (this *EventsStreamer) InitDBConnections() (err error) {
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, EventsStreamerUri); err != nil {
return err
}
if _, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext); err != nil {
if _, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name); err != nil {
return err
}
if err := this.readCurrentBinlogCoordinates(); err != nil {
@ -121,10 +122,7 @@ func (this *EventsStreamer) InitDBConnections() (err error) {
// initBinlogReader creates and connects the reader: we hook up to a MySQL server as a replica
func (this *EventsStreamer) initBinlogReader(binlogCoordinates *mysql.BinlogCoordinates) error {
goMySQLReader, err := binlog.NewGoMySQLReader(this.migrationContext)
if err != nil {
return err
}
goMySQLReader := binlog.NewGoMySQLReader(this.migrationContext)
if err := goMySQLReader.ConnectBinlogStreamer(*binlogCoordinates); err != nil {
return err
}
@ -218,5 +216,4 @@ func (this *EventsStreamer) Close() (err error) {
func (this *EventsStreamer) Teardown() {
this.db.Close()
return
}
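
For orientation, a hedged sketch of how a component registers with the streamer shown above for row events (the database, table, and callback are illustrative):

    // Illustrative only: register a synchronous per-table DML listener.
    streamer := NewEventsStreamer(migrationContext)
    if err := streamer.InitDBConnections(); err != nil {
        return err
    }
    err := streamer.AddListener(false, "test", "gh_ost_test",
        func(dmlEvent *binlog.BinlogDMLEvent) error {
            // Invoked for each INSERT/UPDATE/DELETE row image on test.gh_ost_test;
            // name matching is now case-insensitive via strings.EqualFold.
            return nil
        },
    )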

View File

@ -1,11 +1,12 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package logic
import (
"context"
"fmt"
"net/http"
"strings"
@ -42,16 +43,22 @@ const frenoMagicHint = "freno"
// Throttler collects metrics related to throttling and makes an informed decision
// on whether throttling should take place.
type Throttler struct {
appVersion string
migrationContext *base.MigrationContext
applier *Applier
httpClient *http.Client
httpClientTimeout time.Duration
inspector *Inspector
finishedMigrating int64
}
func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector) *Throttler {
func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector, appVersion string) *Throttler {
return &Throttler{
appVersion: appVersion,
migrationContext: migrationContext,
applier: applier,
httpClient: &http.Client{},
httpClientTimeout: time.Duration(migrationContext.ThrottleHTTPTimeoutMillis) * time.Millisecond,
inspector: inspector,
finishedMigrating: 0,
}
@ -161,8 +168,9 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
collectFunc()
firstThrottlingCollected <- true
ticker := time.Tick(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
for range ticker {
ticker := time.NewTicker(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@ -172,7 +180,6 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
// collectControlReplicasLag polls all the control replicas to get maximum lag value
func (this *Throttler) collectControlReplicasLag() {
if atomic.LoadInt64(&this.migrationContext.HibernateUntil) > 0 {
return
}
@ -188,9 +195,12 @@ func (this *Throttler) collectControlReplicasLag() {
dbUri := connectionConfig.GetDBUri("information_schema")
var heartbeatValue string
if db, _, err := mysql.GetDB(this.migrationContext.Uuid, dbUri); err != nil {
db, _, err := mysql.GetDB(this.migrationContext.Uuid, dbUri)
if err != nil {
return lag, err
} else if err = db.QueryRow(replicationLagQuery).Scan(&heartbeatValue); err != nil {
}
if err := db.QueryRow(replicationLagQuery).Scan(&heartbeatValue); err != nil {
return lag, err
}
@ -234,12 +244,14 @@ func (this *Throttler) collectControlReplicasLag() {
}
this.migrationContext.SetControlReplicasLagResult(readControlReplicasLag())
}
aggressiveTicker := time.Tick(100 * time.Millisecond)
relaxedFactor := 10
counter := 0
shouldReadLagAggressively := false
for range aggressiveTicker {
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@ -282,7 +294,17 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<-
if url == "" {
return true, nil
}
resp, err := http.Head(url)
ctx, cancel := context.WithTimeout(context.Background(), this.httpClientTimeout)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
if err != nil {
return false, err
}
req.Header.Set("User-Agent", fmt.Sprintf("gh-ost/%s", this.appVersion))
resp, err := this.httpClient.Do(req)
if err != nil {
return false, err
}
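
The same request pattern, extracted as a standalone sketch (imports: context, fmt, net/http, time; the one-second timeout is illustrative, where gh-ost uses ThrottleHTTPTimeoutMillis):

    // Standalone sketch of the bounded HEAD check introduced above.
    func checkThrottleHTTP(client *http.Client, url, appVersion string) (int, error) {
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()
        req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
        if err != nil {
            return 0, err
        }
        req.Header.Set("User-Agent", fmt.Sprintf("gh-ost/%s", appVersion))
        resp, err := client.Do(req)
        if err != nil {
            return 0, err
        }
        defer resp.Body.Close()
        return resp.StatusCode, nil // a non-200 status is treated as a throttle signal
    }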
@ -300,8 +322,10 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<-
firstThrottlingCollected <- true
ticker := time.Tick(100 * time.Millisecond)
for range ticker {
collectInterval := time.Duration(this.migrationContext.ThrottleHTTPIntervalMillis) * time.Millisecond
ticker := time.NewTicker(collectInterval)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@ -420,8 +444,9 @@ func (this *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan
this.collectGeneralThrottleMetrics()
firstThrottlingCollected <- true
throttlerMetricsTick := time.Tick(1 * time.Second)
for range throttlerMetricsTick {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@ -432,9 +457,7 @@ func (this *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan
}
// initiateThrottlerChecks initiates the throttle ticker and sets the basic behavior of throttling.
func (this *Throttler) initiateThrottlerChecks() error {
throttlerTick := time.Tick(100 * time.Millisecond)
func (this *Throttler) initiateThrottlerChecks() {
throttlerFunction := func() {
alreadyThrottling, currentReason, _ := this.migrationContext.IsThrottled()
shouldThrottle, throttleReason, throttleReasonHint := this.shouldThrottle()
@ -451,14 +474,15 @@ func (this *Throttler) initiateThrottlerChecks() error {
this.migrationContext.SetThrottled(shouldThrottle, throttleReason, throttleReasonHint)
}
throttlerFunction()
for range throttlerTick {
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return nil
return
}
throttlerFunction()
}
return nil
}
// throttle sees if throttling needs to take place, and if so, continuously sleeps (blocks)
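
The recurring edit in this file replaces time.Tick, whose underlying ticker can never be stopped, with time.NewTicker plus a deferred Stop so the ticker is released when the loop exits. The pattern as a standalone sketch (imports: sync/atomic, time):

    // Standalone sketch of the stoppable-ticker pattern adopted above.
    func pollUntilFinished(finished *int64, interval time.Duration, collect func()) {
        ticker := time.NewTicker(interval)
        defer ticker.Stop() // time.Tick offers no equivalent; its ticker leaks
        for range ticker.C {
            if atomic.LoadInt64(finished) > 0 {
                return
            }
            collect()
        }
    }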

View File

@ -1,36 +1,21 @@
/*
Copyright 2015 Shlomi Noach, courtesy Booking.com
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package mysql
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
)
var detachPattern *regexp.Regexp
func init() {
detachPattern, _ = regexp.Compile(`//([^/:]+):([\d]+)`) // e.g. `//binlog.01234:567890`
}
type BinlogType int
const (
BinaryLog BinlogType = iota
RelayLog
)
// BinlogCoordinates described binary log coordinates in the form of log file & log position.
type BinlogCoordinates struct {
LogFile string
LogPos int64
Type BinlogType
}
// ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306
@ -62,7 +47,7 @@ func (this *BinlogCoordinates) Equals(other *BinlogCoordinates) bool {
if other == nil {
return false
}
return this.LogFile == other.LogFile && this.LogPos == other.LogPos && this.Type == other.Type
return this.LogFile == other.LogFile && this.LogPos == other.LogPos
}
// IsEmpty returns true if the log file is empty, unnamed
@ -87,76 +72,5 @@ func (this *BinlogCoordinates) SmallerThanOrEquals(other *BinlogCoordinates) boo
if this.SmallerThan(other) {
return true
}
return this.LogFile == other.LogFile && this.LogPos == other.LogPos // No Type comparison
}
// FileSmallerThan returns true if this coordinate's file is strictly smaller than the other's.
func (this *BinlogCoordinates) FileSmallerThan(other *BinlogCoordinates) bool {
return this.LogFile < other.LogFile
}
// FileNumberDistance returns the numeric distance between this coordinate's file number and the other's.
// Effectively it means "how many rotates/FLUSHes would make these coordinates' file reach the other's"
func (this *BinlogCoordinates) FileNumberDistance(other *BinlogCoordinates) int {
thisNumber, _ := this.FileNumber()
otherNumber, _ := other.FileNumber()
return otherNumber - thisNumber
}
// FileNumber returns the numeric value of the file, and the length in characters representing the number in the filename.
// Example: FileNumber() of mysqld.log.000789 is (789, 6)
func (this *BinlogCoordinates) FileNumber() (int, int) {
tokens := strings.Split(this.LogFile, ".")
numPart := tokens[len(tokens)-1]
numLen := len(numPart)
fileNum, err := strconv.Atoi(numPart)
if err != nil {
return 0, 0
}
return fileNum, numLen
}
// PreviousFileCoordinatesBy guesses the filename of the previous binlog/relaylog, by given offset (number of files back)
func (this *BinlogCoordinates) PreviousFileCoordinatesBy(offset int) (BinlogCoordinates, error) {
result := BinlogCoordinates{LogPos: 0, Type: this.Type}
fileNum, numLen := this.FileNumber()
if fileNum == 0 {
return result, errors.New("Log file number is zero, cannot detect previous file")
}
newNumStr := fmt.Sprintf("%d", (fileNum - offset))
newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr
tokens := strings.Split(this.LogFile, ".")
tokens[len(tokens)-1] = newNumStr
result.LogFile = strings.Join(tokens, ".")
return result, nil
}
// PreviousFileCoordinates guesses the filename of the previous binlog/relaylog
func (this *BinlogCoordinates) PreviousFileCoordinates() (BinlogCoordinates, error) {
return this.PreviousFileCoordinatesBy(1)
}
// NextFileCoordinates guesses the filename of the next binlog/relaylog
func (this *BinlogCoordinates) NextFileCoordinates() (BinlogCoordinates, error) {
result := BinlogCoordinates{LogPos: 0, Type: this.Type}
fileNum, numLen := this.FileNumber()
newNumStr := fmt.Sprintf("%d", (fileNum + 1))
newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr
tokens := strings.Split(this.LogFile, ".")
tokens[len(tokens)-1] = newNumStr
result.LogFile = strings.Join(tokens, ".")
return result, nil
}
// DetachedCoordinates returns whether these coordinates are detached and, if so, the detached log file and log position
func (this *BinlogCoordinates) DetachedCoordinates() (isDetached bool, detachedLogFile string, detachedLogPos string) {
detachedCoordinatesSubmatch := detachPattern.FindStringSubmatch(this.LogFile)
if len(detachedCoordinatesSubmatch) == 0 {
return false, "", ""
}
return true, detachedCoordinatesSubmatch[1], detachedCoordinatesSubmatch[2]
return this.LogFile == other.LogFile && this.LogPos == other.LogPos
}
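
With the Type field gone from comparisons, coordinate ordering depends only on log file name and position. An illustrative example:

    c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
    c2 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 500}
    c3 := BinlogCoordinates{LogFile: "mysql-bin.00018", LogPos: 104}
    c1.SmallerThan(&c2)         // true: same file, smaller position
    c2.SmallerThan(&c3)         // true: earlier file wins regardless of position
    c1.SmallerThanOrEquals(&c1) // true
    c1.Equals(&c2)              // false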

View File

@ -8,8 +8,8 @@ package mysql
import (
"testing"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {
@ -37,57 +37,6 @@ func TestBinlogCoordinates(t *testing.T) {
test.S(t).ExpectTrue(c1.SmallerThanOrEquals(&c3))
}
func TestBinlogNext(t *testing.T) {
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
cres, err := c1.NextFileCoordinates()
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00018")
c2 := BinlogCoordinates{LogFile: "mysql-bin.00099", LogPos: 104}
cres, err = c2.NextFileCoordinates()
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00100")
c3 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00099", LogPos: 104}
cres, err = c3.NextFileCoordinates()
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql.00.prod.com.00100")
}
func TestBinlogPrevious(t *testing.T) {
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
cres, err := c1.PreviousFileCoordinates()
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00016")
c2 := BinlogCoordinates{LogFile: "mysql-bin.00100", LogPos: 104}
cres, err = c2.PreviousFileCoordinates()
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00099")
c3 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00100", LogPos: 104}
cres, err = c3.PreviousFileCoordinates()
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql.00.prod.com.00099")
c4 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00000", LogPos: 104}
_, err = c4.PreviousFileCoordinates()
test.S(t).ExpectNotNil(err)
}
func TestBinlogCoordinatesAsKey(t *testing.T) {
m := make(map[BinlogCoordinates]bool)
@ -103,20 +52,3 @@ func TestBinlogCoordinatesAsKey(t *testing.T) {
test.S(t).ExpectEquals(len(m), 3)
}
func TestBinlogFileNumber(t *testing.T) {
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
c2 := BinlogCoordinates{LogFile: "mysql-bin.00022", LogPos: 104}
test.S(t).ExpectEquals(c1.FileNumberDistance(&c1), 0)
test.S(t).ExpectEquals(c1.FileNumberDistance(&c2), 5)
test.S(t).ExpectEquals(c2.FileNumberDistance(&c1), -5)
}
func TestBinlogFileNumberDistance(t *testing.T) {
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
fileNum, numLen := c1.FileNumber()
test.S(t).ExpectEquals(fileNum, 17)
test.S(t).ExpectEquals(numLen, 5)
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -12,11 +12,13 @@ import (
"fmt"
"io/ioutil"
"net"
"strings"
"github.com/go-sql-driver/mysql"
)
const (
transactionIsolation = "REPEATABLE-READ"
TLS_CONFIG_KEY = "ghost"
)
@ -92,6 +94,7 @@ func (this *ConnectionConfig) UseTLS(caCertificatePath, clientCertificate, clien
}
this.tlsConfig = &tls.Config{
ServerName: this.Key.Hostname,
Certificates: certs,
RootCAs: rootCertPool,
InsecureSkipVerify: allowInsecure,
@ -111,12 +114,23 @@ func (this *ConnectionConfig) GetDBUri(databaseName string) string {
// Wrap IPv6 literals in square brackets
hostname = fmt.Sprintf("[%s]", hostname)
}
interpolateParams := true
// go-mysql-driver defaults to false if tls param is not provided; explicitly setting here to
// simplify construction of the DSN below.
tlsOption := "false"
if this.tlsConfig != nil {
tlsOption = TLS_CONFIG_KEY
}
return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?timeout=%fs&readTimeout=%fs&writeTimeout=%fs&interpolateParams=%t&autocommit=true&charset=utf8mb4,utf8,latin1&tls=%s", this.User, this.Password, hostname, this.Key.Port, databaseName, this.Timeout, this.Timeout, this.Timeout, interpolateParams, tlsOption)
connectionParams := []string{
"autocommit=true",
"charset=utf8mb4,utf8,latin1",
"interpolateParams=true",
fmt.Sprintf("tls=%s", tlsOption),
fmt.Sprintf("transaction_isolation=%q", transactionIsolation),
fmt.Sprintf("timeout=%fs", this.Timeout),
fmt.Sprintf("readTimeout=%fs", this.Timeout),
fmt.Sprintf("writeTimeout=%fs", this.Timeout),
}
return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?%s", this.User, this.Password, hostname, this.Key.Port, databaseName, strings.Join(connectionParams, "&"))
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -9,8 +9,8 @@ import (
"crypto/tls"
"testing"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {
@ -67,9 +67,10 @@ func TestGetDBUri(t *testing.T) {
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
c.Timeout = 1.2345
uri := c.GetDBUri("test")
test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?timeout=0.000000s&readTimeout=0.000000s&writeTimeout=0.000000s&interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1&tls=false")
test.S(t).ExpectEquals(uri, `gromit:penguin@tcp(myhost:3306)/test?autocommit=true&charset=utf8mb4,utf8,latin1&interpolateParams=true&tls=false&transaction_isolation="REPEATABLE-READ"&timeout=1.234500s&readTimeout=1.234500s&writeTimeout=1.234500s`)
}
func TestGetDBUriWithTLSSetup(t *testing.T) {
@ -77,8 +78,9 @@ func TestGetDBUriWithTLSSetup(t *testing.T) {
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
c.Timeout = 1.2345
c.tlsConfig = &tls.Config{}
uri := c.GetDBUri("test")
test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?timeout=0.000000s&readTimeout=0.000000s&writeTimeout=0.000000s&interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1&tls=ghost")
test.S(t).ExpectEquals(uri, `gromit:penguin@tcp(myhost:3306)/test?autocommit=true&charset=utf8mb4,utf8,latin1&interpolateParams=true&tls=ghost&transaction_isolation="REPEATABLE-READ"&timeout=1.234500s&readTimeout=1.234500s&writeTimeout=1.234500s`)
}

View File

@ -1,5 +1,6 @@
/*
Copyright 2015 Shlomi Noach, courtesy Booking.com
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -12,15 +13,16 @@ import (
"strings"
)
const (
DefaultInstancePort = 3306
)
const DefaultInstancePort = 3306
var (
ipv4HostPortRegexp = regexp.MustCompile("^([^:]+):([0-9]+)$")
ipv4HostRegexp = regexp.MustCompile("^([^:]+)$")
ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") // e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308
ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$") // e.g. 2001:db8:1f70::999:de8:7648:6e8
// e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308
ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") //nolint:gosimple
// e.g. 2001:db8:1f70::999:de8:7648:6e8
ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$")
)
// InstanceKey is an instance indicator, identified by hostname and port
@ -33,8 +35,7 @@ const detachHint = "//"
// NewRawInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306
func NewRawInstanceKey(hostPort string) (*InstanceKey, error) {
hostname := ""
port := ""
var hostname, port string
if submatch := ipv4HostPortRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
hostname = submatch[1]
port = submatch[2]

View File

@ -8,8 +8,8 @@ package mysql
import (
"testing"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {

View File

@ -14,12 +14,15 @@ import (
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
"github.com/outbrain/golib/sqlutils"
"github.com/openark/golib/log"
"github.com/openark/golib/sqlutils"
)
const MaxTableNameLength = 64
const MaxReplicationPasswordLength = 32
const (
MaxTableNameLength = 64
MaxReplicationPasswordLength = 32
MaxDBPoolConnections = 3
)
type ReplicationLagResult struct {
Key InstanceKey
@ -39,23 +42,22 @@ func (this *ReplicationLagResult) HasLag() bool {
var knownDBs map[string]*gosql.DB = make(map[string]*gosql.DB)
var knownDBsMutex = &sync.Mutex{}
func GetDB(migrationUuid string, mysql_uri string) (*gosql.DB, bool, error) {
func GetDB(migrationUuid string, mysql_uri string) (db *gosql.DB, exists bool, err error) {
cacheKey := migrationUuid + ":" + mysql_uri
knownDBsMutex.Lock()
defer func() {
knownDBsMutex.Unlock()
}()
defer knownDBsMutex.Unlock()
var exists bool
if _, exists = knownDBs[cacheKey]; !exists {
if db, err := gosql.Open("mysql", mysql_uri); err == nil {
if db, exists = knownDBs[cacheKey]; !exists {
db, err = gosql.Open("mysql", mysql_uri)
if err != nil {
return nil, false, err
}
db.SetMaxOpenConns(MaxDBPoolConnections)
db.SetMaxIdleConns(MaxDBPoolConnections)
knownDBs[cacheKey] = db
} else {
return db, exists, err
}
}
return knownDBs[cacheKey], exists, nil
return db, exists, nil
}
// GetReplicationLagFromSlaveStatus returns replication lag for a given db; via SHOW SLAVE STATUS
@ -203,3 +205,9 @@ func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnL
}
return sql.NewColumnList(columnNames), sql.NewColumnList(virtualColumnNames), nil
}
// Kill executes a KILL QUERY by connection id
func Kill(db *gosql.DB, connectionID string) error {
// KILL QUERY does not accept `?` placeholders, so the connection id is
// interpolated into the statement rather than passed as an Exec argument.
_, err := db.Exec(fmt.Sprintf(`KILL QUERY %s`, connectionID))
return err
}
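
A hedged usage sketch of the pooled GetDB and the new Kill helper (the DSN and connection id are hypothetical):

    // Pools are cached per (migration uuid, DSN) and capped at
    // MaxDBPoolConnections (3) open/idle connections.
    db, cached, err := GetDB("demo-uuid", "gromit:penguin@tcp(127.0.0.1:3306)/test")
    if err != nil {
        return err
    }
    _ = cached // false on the first call for this key, true on subsequent calls
    // Terminate a runaway statement by its processlist connection id:
    if err := Kill(db, "12345"); err != nil {
        return err
    }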

View File

@ -1,65 +0,0 @@
/*
Copyright 2014 Outbrain Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package os
import (
"github.com/outbrain/golib/log"
"io/ioutil"
"os"
"os/exec"
)
func execCmd(commandText string, arguments ...string) (*exec.Cmd, string, error) {
commandBytes := []byte(commandText)
tmpFile, err := ioutil.TempFile("", "gh-ost-process-cmd-")
if err != nil {
return nil, "", log.Errore(err)
}
ioutil.WriteFile(tmpFile.Name(), commandBytes, 0644)
log.Debugf("execCmd: %s", commandText)
shellArguments := append([]string{}, tmpFile.Name())
shellArguments = append(shellArguments, arguments...)
log.Debugf("%+v", shellArguments)
return exec.Command("bash", shellArguments...), tmpFile.Name(), nil
}
// CommandRun executes a command
func CommandRun(commandText string, arguments ...string) error {
cmd, tmpFileName, err := execCmd(commandText, arguments...)
defer os.Remove(tmpFileName)
if err != nil {
return log.Errore(err)
}
err = cmd.Run()
return log.Errore(err)
}
// RunCommandWithOutput executes a command and return output bytes
func RunCommandWithOutput(commandText string) ([]byte, error) {
cmd, tmpFileName, err := execCmd(commandText)
defer os.Remove(tmpFileName)
if err != nil {
return nil, log.Errore(err)
}
outputBytes, err := cmd.Output()
if err != nil {
return nil, log.Errore(err)
}
return outputBytes, nil
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -33,11 +33,13 @@ func EscapeName(name string) string {
}
func buildColumnsPreparedValues(columns *ColumnList) []string {
values := make([]string, columns.Len(), columns.Len())
values := make([]string, columns.Len())
for i, column := range columns.Columns() {
var token string
if column.timezoneConversion != nil {
token = fmt.Sprintf("convert_tz(?, '%s', '%s')", column.timezoneConversion.ToTimezone, "+00:00")
} else if column.enumToTextConversion {
token = fmt.Sprintf("ELT(?, %s)", column.EnumValues)
} else if column.Type == JSONColumnType {
token = "convert(? using utf8mb4)"
} else {
@ -49,7 +51,7 @@ func buildColumnsPreparedValues(columns *ColumnList) []string {
}
func buildPreparedValues(length int) []string {
values := make([]string, length, length)
values := make([]string, length)
for i := 0; i < length; i++ {
values[i] = "?"
}
@ -57,7 +59,7 @@ func buildPreparedValues(length int) []string {
}
func duplicateNames(names []string) []string {
duplicate := make([]string, len(names), len(names))
duplicate := make([]string, len(names))
copy(duplicate, names)
return duplicate
}
@ -108,6 +110,8 @@ func BuildSetPreparedClause(columns *ColumnList) (result string, err error) {
var setToken string
if column.timezoneConversion != nil {
setToken = fmt.Sprintf("%s=convert_tz(?, '%s', '%s')", EscapeName(column.Name), column.timezoneConversion.ToTimezone, "+00:00")
} else if column.enumToTextConversion {
setToken = fmt.Sprintf("%s=ELT(?, %s)", EscapeName(column.Name), column.EnumValues)
} else if column.Type == JSONColumnType {
setToken = fmt.Sprintf("%s=convert(? using utf8mb4)", EscapeName(column.Name))
} else {
@ -163,7 +167,7 @@ func BuildRangeComparison(columns []string, values []string, args []interface{},
if includeEquals {
comparison, err := BuildEqualsComparison(columns, values)
if err != nil {
return "", explodedArgs, nil
return "", explodedArgs, err
}
comparisons = append(comparisons, comparison)
explodedArgs = append(explodedArgs, args...)
@ -257,8 +261,8 @@ func BuildUniqueKeyRangeEndPreparedQueryViaOffset(databaseName, tableName string
explodedArgs = append(explodedArgs, rangeExplodedArgs...)
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames))
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames))
for i, column := range uniqueKeyColumns.Columns() {
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
if column.Type == EnumColumnType {
@ -312,8 +316,8 @@ func BuildUniqueKeyRangeEndPreparedQueryViaTemptable(databaseName, tableName str
explodedArgs = append(explodedArgs, rangeExplodedArgs...)
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames))
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames))
for i, column := range uniqueKeyColumns.Columns() {
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
if column.Type == EnumColumnType {
@ -364,7 +368,7 @@ func buildUniqueKeyMinMaxValuesPreparedQuery(databaseName, tableName string, uni
tableName = EscapeName(tableName)
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames))
for i, column := range uniqueKeyColumns.Columns() {
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
if column.Type == EnumColumnType {
@ -396,7 +400,7 @@ func BuildDMLDeleteQuery(databaseName, tableName string, tableColumns, uniqueKey
}
for _, column := range uniqueKeyColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
arg := column.convertArg(args[tableOrdinal])
arg := column.convertArg(args[tableOrdinal], true)
uniqueKeyArgs = append(uniqueKeyArgs, arg)
}
databaseName = EscapeName(databaseName)
@ -433,7 +437,7 @@ func BuildDMLInsertQuery(databaseName, tableName string, tableColumns, sharedCol
for _, column := range sharedColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
arg := column.convertArg(args[tableOrdinal])
arg := column.convertArg(args[tableOrdinal], false)
sharedArgs = append(sharedArgs, arg)
}
@ -481,13 +485,13 @@ func BuildDMLUpdateQuery(databaseName, tableName string, tableColumns, sharedCol
for _, column := range sharedColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
arg := column.convertArg(valueArgs[tableOrdinal])
arg := column.convertArg(valueArgs[tableOrdinal], false)
sharedArgs = append(sharedArgs, arg)
}
for _, column := range uniqueKeyColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
arg := column.convertArg(whereArgs[tableOrdinal])
arg := column.convertArg(whereArgs[tableOrdinal], true)
uniqueKeyArgs = append(uniqueKeyArgs, arg)
}
@ -497,6 +501,9 @@ func BuildDMLUpdateQuery(databaseName, tableName string, tableColumns, sharedCol
}
equalsComparison, err := BuildEqualsPreparedComparison(uniqueKeyColumns.Names())
if err != nil {
return "", sharedArgs, uniqueKeyArgs, err
}
result = fmt.Sprintf(`
update /* gh-ost %s.%s */
%s.%s

View File

@ -12,8 +12,8 @@ import (
"regexp"
"strings"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
var (

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -16,6 +16,7 @@ var (
renameColumnRegexp = regexp.MustCompile(`(?i)\bchange\s+(column\s+|)([\S]+)\s+([\S]+)\s+`)
dropColumnRegexp = regexp.MustCompile(`(?i)\bdrop\s+(column\s+|)([\S]+)$`)
renameTableRegexp = regexp.MustCompile(`(?i)\brename\s+(to|as)\s+`)
autoIncrementRegexp = regexp.MustCompile(`(?i)\bauto_increment[\s]*=[\s]*([0-9]+)`)
alterTableExplicitSchemaTableRegexps = []*regexp.Regexp{
// ALTER TABLE `scm`.`tbl` something
regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `[.]` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
@ -32,12 +33,14 @@ var (
// ALTER TABLE tbl something
regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)\s+(.*$)`),
}
enumValuesRegexp = regexp.MustCompile("^enum[(](.*)[)]$")
)
type AlterTableParser struct {
columnRenameMap map[string]string
droppedColumns map[string]bool
isRenameTable bool
isAutoIncrementDefined bool
alterStatementOptions string
alterTokens []string
@ -122,11 +125,16 @@ func (this *AlterTableParser) parseAlterToken(alterToken string) (err error) {
this.isRenameTable = true
}
}
{
// auto_increment
if autoIncrementRegexp.MatchString(alterToken) {
this.isAutoIncrementDefined = true
}
}
return nil
}
func (this *AlterTableParser) ParseAlterStatement(alterStatement string) (err error) {
this.alterStatementOptions = alterStatement
for _, alterTableRegexp := range alterTableExplicitSchemaTableRegexps {
if submatch := alterTableRegexp.FindStringSubmatch(this.alterStatementOptions); len(submatch) > 0 {
@ -173,6 +181,11 @@ func (this *AlterTableParser) DroppedColumnsMap() map[string]bool {
func (this *AlterTableParser) IsRenameTable() bool {
return this.isRenameTable
}
func (this *AlterTableParser) IsAutoIncrementDefined() bool {
return this.isAutoIncrementDefined
}
func (this *AlterTableParser) GetExplicitSchema() string {
return this.explicitSchema
}
@ -192,3 +205,10 @@ func (this *AlterTableParser) HasExplicitTable() bool {
func (this *AlterTableParser) GetAlterStatementOptions() string {
return this.alterStatementOptions
}
func ParseEnumValues(enumColumnType string) string {
if submatch := enumValuesRegexp.FindStringSubmatch(enumColumnType); len(submatch) > 0 {
return submatch[1]
}
return enumColumnType
}
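
ParseEnumValues feeds the ELT(?, ...) conversion used by the DML builders; a hedged sketch of how the pieces combine (the column is hypothetical):

    // An enum column `e` enum('red','green','blue') being converted to text:
    enumValues := ParseEnumValues("enum('red','green','blue')") // "'red','green','blue'"
    setToken := fmt.Sprintf("%s=ELT(?, %s)", EscapeName("e"), enumValues)
    // setToken == "`e`=ELT(?, 'red','green','blue')"; the bound argument is the
    // 1-based enum index from the binlog row image, which ELT maps back to text.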

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -9,8 +9,8 @@ import (
"reflect"
"testing"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {
@ -24,6 +24,7 @@ func TestParseAlterStatement(t *testing.T) {
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
}
func TestParseAlterStatementTrivialRename(t *testing.T) {
@ -33,10 +34,30 @@ func TestParseAlterStatementTrivialRename(t *testing.T) {
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
test.S(t).ExpectEquals(len(parser.columnRenameMap), 1)
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
}
func TestParseAlterStatementWithAutoIncrement(t *testing.T) {
statements := []string{
"auto_increment=7",
"auto_increment = 7",
"AUTO_INCREMENT = 71",
"add column t int, change ts ts timestamp, auto_increment=7 engine=innodb",
"add column t int, change ts ts timestamp, auto_increment =7 engine=innodb",
"add column t int, change ts ts timestamp, AUTO_INCREMENT = 7 engine=innodb",
"add column t int, change ts ts timestamp, engine=innodb auto_increment=73425",
}
for _, statement := range statements {
parser := NewAlterTableParser()
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectTrue(parser.IsAutoIncrementDefined())
}
}
func TestParseAlterStatementTrivialRenames(t *testing.T) {
statement := "add column t int, change ts ts timestamp, CHANGE f `f` float, engine=innodb"
parser := NewAlterTableParser()
@ -44,6 +65,7 @@ func TestParseAlterStatementTrivialRenames(t *testing.T) {
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
test.S(t).ExpectEquals(len(parser.columnRenameMap), 2)
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
test.S(t).ExpectEquals(parser.columnRenameMap["f"], "f")
@ -64,6 +86,7 @@ func TestParseAlterStatementNonTrivial(t *testing.T) {
parser := NewAlterTableParser()
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
renames := parser.GetNonTrivialRenames()
test.S(t).ExpectEquals(len(renames), 2)
@ -126,7 +149,6 @@ func TestSanitizeQuotesFromAlterStatement(t *testing.T) {
}
func TestParseAlterStatementDroppedColumns(t *testing.T) {
{
parser := NewAlterTableParser()
statement := "drop column b"
@ -166,7 +188,6 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) {
}
func TestParseAlterStatementRenameTable(t *testing.T) {
{
parser := NewAlterTableParser()
statement := "drop column b"
@ -206,7 +227,6 @@ func TestParseAlterStatementRenameTable(t *testing.T) {
}
func TestParseAlterStatementExplicitTable(t *testing.T) {
{
parser := NewAlterTableParser()
statement := "drop column b"
@ -298,3 +318,21 @@ func TestParseAlterStatementExplicitTable(t *testing.T) {
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b", "add index idx(i)"}))
}
}
func TestParseEnumValues(t *testing.T) {
{
s := "enum('red','green','blue','orange')"
values := ParseEnumValues(s)
test.S(t).ExpectEquals(values, "'red','green','blue','orange'")
}
{
s := "('red','green','blue','orange')"
values := ParseEnumValues(s)
test.S(t).ExpectEquals(values, "('red','green','blue','orange')")
}
{
s := "zzz"
values := ParseEnumValues(s)
test.S(t).ExpectEquals(values, "zzz")
}
}

View File

@ -6,6 +6,7 @@
package sql
import (
"bytes"
"fmt"
"reflect"
"strconv"
@ -22,6 +23,7 @@ const (
MediumIntColumnType
JSONColumnType
FloatColumnType
BinaryColumnType
)
const maxMediumintUnsigned int32 = 16777215
@ -35,15 +37,33 @@ type Column struct {
IsUnsigned bool
Charset string
Type ColumnType
EnumValues string
timezoneConversion *TimezoneConversion
enumToTextConversion bool
// BinaryOctetLength is the declared octet length of a binary-type column; it is
// used to restore trailing 0x00 bytes that MySQL binlog row images clip from
// fixed-width BINARY values.
// https://github.com/github/gh-ost/issues/909
BinaryOctetLength uint
}
func (this *Column) convertArg(arg interface{}) interface{} {
func (this *Column) convertArg(arg interface{}, isUniqueKeyColumn bool) interface{} {
if s, ok := arg.(string); ok {
// string, charset conversion
if encoding, ok := charsetEncodingMap[this.Charset]; ok {
arg, _ = encoding.NewDecoder().String(s)
}
if this.Type == BinaryColumnType && isUniqueKeyColumn {
arg2Bytes := []byte(arg.(string))
size := len(arg2Bytes)
if uint(size) < this.BinaryOctetLength {
buf := bytes.NewBuffer(arg2Bytes)
for i := uint(0); i < (this.BinaryOctetLength - uint(size)); i++ {
buf.Write([]byte{0})
}
arg = buf.String()
}
}
return arg
}
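
A worked example of the padding above, assuming a BINARY(4) unique-key column:

    // Illustrative: a BINARY(4) key value whose trailing zero bytes were
    // clipped by the binlog row image.
    col := &Column{Type: BinaryColumnType, BinaryOctetLength: 4}
    v := col.convertArg("ab", true) // isUniqueKeyColumn=true enables padding
    // v == "ab\x00\x00": restored to full width so the unique-key WHERE clause
    // matches the row in the original table.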
@ -179,6 +199,18 @@ func (this *ColumnList) HasTimezoneConversion(columnName string) bool {
return this.GetColumn(columnName).timezoneConversion != nil
}
func (this *ColumnList) SetEnumToTextConversion(columnName string) {
this.GetColumn(columnName).enumToTextConversion = true
}
func (this *ColumnList) IsEnumToTextConversion(columnName string) bool {
return this.GetColumn(columnName).enumToTextConversion
}
func (this *ColumnList) SetEnumValues(columnName string, enumValues string) {
this.GetColumn(columnName).EnumValues = enumValues
}
func (this *ColumnList) String() string {
return strings.Join(this.Names(), ",")
}

View File

@ -10,8 +10,8 @@ import (
"reflect"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {

View File

@ -0,0 +1,17 @@
drop event if exists gh_ost_test;
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
primary key(id)
) auto_increment=1;
insert into gh_ost_test values (NULL, 11);
insert into gh_ost_test values (NULL, 13);
insert into gh_ost_test values (NULL, 17);
insert into gh_ost_test values (NULL, 23);
insert into gh_ost_test values (NULL, 29);
insert into gh_ost_test values (NULL, 31);
insert into gh_ost_test values (NULL, 37);
delete from gh_ost_test where id>=5;

View File

@ -0,0 +1 @@
AUTO_INCREMENT=7

View File

@ -0,0 +1 @@
--alter='AUTO_INCREMENT=7'

View File

@ -0,0 +1,17 @@
drop event if exists gh_ost_test;
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
primary key(id)
) auto_increment=1;
insert into gh_ost_test values (NULL, 11);
insert into gh_ost_test values (NULL, 13);
insert into gh_ost_test values (NULL, 17);
insert into gh_ost_test values (NULL, 23);
insert into gh_ost_test values (NULL, 29);
insert into gh_ost_test values (NULL, 31);
insert into gh_ost_test values (NULL, 37);
delete from gh_ost_test where id>=5;

View File

@ -0,0 +1 @@
AUTO_INCREMENT=8

View File

@ -0,0 +1,13 @@
drop event if exists gh_ost_test;
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
primary key(id)
) auto_increment=1;
insert into gh_ost_test values (NULL, 11);
insert into gh_ost_test values (NULL, 13);
insert into gh_ost_test values (NULL, 17);
insert into gh_ost_test values (NULL, 23);

View File

@ -0,0 +1 @@
AUTO_INCREMENT=5

View File

@ -0,0 +1,40 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
ts0 timestamp(6) default current_timestamp(6),
updated tinyint unsigned default 0,
primary key(id, ts0)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 11, sysdate(6), 0);
update gh_ost_test set updated = 1 where i = 11 order by id desc limit 1;
insert into gh_ost_test values (null, 13, sysdate(6), 0);
update gh_ost_test set updated = 1 where i = 13 order by id desc limit 1;
insert into gh_ost_test values (null, 17, sysdate(6), 0);
update gh_ost_test set updated = 1 where i = 17 order by id desc limit 1;
insert into gh_ost_test values (null, 19, sysdate(6), 0);
update gh_ost_test set updated = 1 where i = 19 order by id desc limit 1;
insert into gh_ost_test values (null, 23, sysdate(6), 0);
update gh_ost_test set updated = 1 where i = 23 order by id desc limit 1;
insert into gh_ost_test values (null, 29, sysdate(6), 0);
insert into gh_ost_test values (null, 31, sysdate(6), 0);
insert into gh_ost_test values (null, 37, sysdate(6), 0);
insert into gh_ost_test values (null, 41, sysdate(6), 0);
delete from gh_ost_test where i = 31 order by id desc limit 1;
end ;;

View File

@ -0,0 +1,40 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
v varchar(128),
updated tinyint unsigned default 0,
primary key(id, v)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 11, 'eleven', 0);
update gh_ost_test set updated = 1 where i = 11 order by id desc limit 1;
insert into gh_ost_test values (null, 13, 'thirteen', 0);
update gh_ost_test set updated = 1 where i = 13 order by id desc limit 1;
insert into gh_ost_test values (null, 17, 'seventeen', 0);
update gh_ost_test set updated = 1 where i = 17 order by id desc limit 1;
insert into gh_ost_test values (null, 19, 'nineteen', 0);
update gh_ost_test set updated = 1 where i = 19 order by id desc limit 1;
insert into gh_ost_test values (null, 23, 'twenty three', 0);
update gh_ost_test set updated = 1 where i = 23 order by id desc limit 1;
insert into gh_ost_test values (null, 29, 'twenty nine', 0);
insert into gh_ost_test values (null, 31, 'thirty one', 0);
insert into gh_ost_test values (null, 37, 'thirty seven', 0);
insert into gh_ost_test values (null, 41, 'forty one', 0);
delete from gh_ost_test where i = 31 order by id desc limit 1;
end ;;

View File

@ -1 +0,0 @@
(5.5)

View File

@ -0,0 +1,20 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int unsigned auto_increment,
i int not null,
dt datetime,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
end ;;

View File

@ -0,0 +1 @@
--allow-zero-in-date --alter="change column dt dt datetime not null default '1970-00-00 00:00:00'"

View File

@ -1 +0,0 @@
(5.5)

View File

@ -0,0 +1,26 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
e enum('red', 'green', 'blue', 'orange') null default null collate 'utf8_bin',
primary key(id)
) auto_increment=1;
insert into gh_ost_test values (null, 7, 'red');
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 11, 'red');
insert into gh_ost_test values (null, 13, 'green');
insert into gh_ost_test values (null, 17, 'blue');
set @last_insert_id := last_insert_id();
update gh_ost_test set e='orange' where id = @last_insert_id;
end ;;

View File

@ -0,0 +1 @@
--alter="change e e varchar(32) not null default ''"

View File

@ -0,0 +1,21 @@
set session sql_mode='';
drop table if exists gh_ost_test;
create table gh_ost_test (
id int unsigned auto_increment,
i int not null,
dt datetime not null default '1970-00-00 00:00:00',
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
end ;;

View File

@ -0,0 +1 @@
--allow-zero-in-date --alter="engine=innodb"

View File

@ -0,0 +1,20 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int unsigned auto_increment,
i int not null,
dt datetime,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
end ;;

View File

@ -0,0 +1 @@
Invalid default value for 'dt'

View File

@ -0,0 +1 @@
--alter="change column dt dt datetime not null default '1970-00-00 00:00:00'"

View File

@ -0,0 +1,21 @@
set session sql_mode='';
drop table if exists gh_ost_test;
create table gh_ost_test (
id int unsigned auto_increment,
i int not null,
dt datetime not null default '1970-00-00 00:00:00',
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
end ;;

View File

@ -0,0 +1 @@
Invalid default value for 'dt'

View File

@ -0,0 +1 @@
--alter="engine=innodb"

View File

@ -1 +0,0 @@
(5.5|5.6)

View File

@ -1 +0,0 @@
(5.5|5.6)

View File

@ -0,0 +1,30 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
`idb` varchar(36) CHARACTER SET utf8mb4 GENERATED ALWAYS AS (json_unquote(json_extract(`jsonobj`,_utf8mb4'$._id'))) STORED NOT NULL,
`jsonobj` json NOT NULL,
PRIMARY KEY (`id`,`idb`)
) auto_increment=1;
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":2}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":3}');
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":5}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":7}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":11}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":13}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":17}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":19}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":23}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":27}');
end ;;

View File

@ -1 +0,0 @@
(5.5|5.6)

View File

@ -1 +0,0 @@
(5.5|5.6)

View File

@ -1 +0,0 @@
(5.5|5.6)

View File

@ -1 +0,0 @@
(5.5|5.6)

View File

@ -1 +0,0 @@
(5.5|5.6)

View File

@ -1 +0,0 @@
(5.5)

View File

@ -1 +0,0 @@
(5.5)

View File

@ -12,6 +12,7 @@ test_logfile=/tmp/gh-ost-test.log
default_ghost_binary=/tmp/gh-ost-test
ghost_binary=""
exec_command_file=/tmp/gh-ost-test.bash
ghost_structure_output_file=/tmp/gh-ost-test.ghost.structure.sql
orig_content_output_file=/tmp/gh-ost-test.orig.content.csv
ghost_content_output_file=/tmp/gh-ost-test.ghost.content.csv
throttle_flag_file=/tmp/gh-ost-test.ghost.throttle.flag
@ -204,6 +205,18 @@ test_single() {
return 1
fi
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "show create table _gh_ost_test_gho\G" -ss > $ghost_structure_output_file
if [ -f $tests_path/$test_name/expect_table_structure ] ; then
expected_table_structure="$(cat $tests_path/$test_name/expect_table_structure)"
if ! grep -q "$expected_table_structure" $ghost_structure_output_file ; then
echo
echo "ERROR $test_name: table structure was expected to include ${expected_table_structure} but did not. cat $ghost_structure_output_file:"
cat $ghost_structure_output_file
return 1
fi
fi
echo_dot
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${orig_columns} from gh_ost_test ${order_by}" -ss > $orig_content_output_file
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${ghost_columns} from _gh_ost_test_gho ${order_by}" -ss > $ghost_content_output_file
@ -211,6 +224,8 @@ test_single() {
ghost_checksum=$(cat $ghost_content_output_file | md5sum)
if [ "$orig_checksum" != "$ghost_checksum" ] ; then
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${orig_columns} from gh_ost_test" -ss > $orig_content_output_file
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${ghost_columns} from _gh_ost_test_gho" -ss > $ghost_content_output_file
echo "ERROR $test_name: checksum mismatch"
echo "---"
diff $orig_content_output_file $ghost_content_output_file
@ -229,7 +244,9 @@ build_binary() {
echo "Using binary: $ghost_binary"
return 0
fi
go build -o $ghost_binary go/cmd/gh-ost/main.go
if [ $? -ne 0 ] ; then
echo "Build failure"
exit 1

View File

@ -1 +0,0 @@
(5.5)

View File

@ -1 +0,0 @@
(5.5)

Some files were not shown because too many files have changed in this diff.