Compare commits: master...abort-miss

6 commits:
b3502ebc59
0986970fc7
9aef2707ea
1a45b5fffc
e2482d4e2e
21a251b620
.github/workflows/ci.yml (vendored): 15 changes

@@ -5,21 +5,16 @@ on: [pull_request]
 jobs:
   build:
 
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
 
     steps:
-    - uses: actions/checkout@v2
+    - uses: actions/checkout@master
 
-    - name: Set up Go
+    - name: Set up Go 1.12
       uses: actions/setup-go@v1
       with:
-        go-version: 1.17
+        version: 1.12
+      id: go
 
     - name: Build
       run: script/cibuild
-
-    - name: Upload gh-ost binary artifact
-      uses: actions/upload-artifact@v1
-      with:
-        name: gh-ost
-        path: bin/gh-ost
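Both sides of this workflow delegate to the repository's own build entry point; a minimal sketch of the local equivalent, assuming a Go toolchain is already installed:

```bash
# Clone and run the same entry point the workflow invokes; per
# doc/coding-ghost.md (further down in this compare), script/cibuild
# bootstraps the environment, formats, builds, and runs the unit tests.
git clone https://github.com/github/gh-ost.git
cd gh-ost
script/cibuild
```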
.github/workflows/codeql.yml (vendored): 25 changes

@@ -1,25 +0,0 @@
-name: "CodeQL analysis"
-
-on:
-  push:
-  pull_request:
-  schedule:
-    - cron: '0 0 * * 0'
-
-jobs:
-  codeql:
-
-    strategy:
-      fail-fast: false
-
-    runs-on: ubuntu-latest # windows-latest and ubuntu-latest are supported. macos-latest is not supported at this time.
-
-    steps:
-    - name: Checkout repository
-      uses: actions/checkout@v2
-
-    - name: Initialize CodeQL
-      uses: github/codeql-action/init@v1
-
-    - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v1
.github/workflows/golangci-lint.yml (vendored): 23 changes

@@ -1,23 +0,0 @@
-name: golangci-lint
-on:
-  push:
-    branches:
-      - master
-  pull_request:
-permissions:
-  contents: read
-  # Optional: allow read access to pull request. Use with `only-new-issues` option.
-  # pull-requests: read
-jobs:
-  golangci:
-    name: lint
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/setup-go@v3
-        with:
-          go-version: 1.17
-      - uses: actions/checkout@v3
-      - name: golangci-lint
-        uses: golangci/golangci-lint-action@v3
-        with:
-          version: v1.46.2
.github/workflows/replica-tests.yml (vendored): 14 changes

@@ -5,20 +5,16 @@ on: [pull_request]
 jobs:
   build:
 
-    runs-on: ubuntu-20.04
-    strategy:
-      matrix:
-        version: [mysql-5.7.25,mysql-8.0.16,PerconaServer-8.0.21]
+    runs-on: ubuntu-latest
 
     steps:
-    - uses: actions/checkout@v2
+    - uses: actions/checkout@master
 
-    - name: Set up Go
+    - name: Set up Go 1.12
       uses: actions/setup-go@v1
       with:
-        go-version: 1.17
+        version: 1.12
+      id: go
 
     - name: migration tests
-      env:
-        TEST_MYSQL_VERSION: ${{ matrix.version }}
       run: script/cibuild-gh-ost-replica-tests
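On the master side each matrix cell passes `TEST_MYSQL_VERSION` into the test script; a sketch of reproducing a single cell locally (the version value is one entry from the matrix above):

```bash
# Run the replica test suite against one MySQL version from the matrix;
# TEST_MYSQL_VERSION is the same variable the workflow exports via env.
# On the abort-miss side the script runs without this variable.
TEST_MYSQL_VERSION=mysql-8.0.16 script/cibuild-gh-ost-replica-tests
```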
.gitignore (vendored): 1 change

@@ -2,4 +2,3 @@
 /bin/
 /libexec/
 /.vendor/
-.idea/
@@ -1,30 +0,0 @@
-run:
-  timeout: 5m
-linters:
-  disable:
-    - errcheck
-  enable:
-    - bodyclose
-    - containedctx
-    - contextcheck
-    - dogsled
-    - durationcheck
-    - errname
-    - errorlint
-    - execinquery
-    - gofmt
-    - ifshort
-    - misspell
-    - nilerr
-    - nilnil
-    - noctx
-    - nolintlint
-    - nosprintfhostport
-    - prealloc
-    - rowserrcheck
-    - sqlclosecheck
-    - unconvert
-    - unparam
-    - unused
-    - wastedassign
-    - whitespace
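The deleted hunk above is the `golangci-lint` configuration; the `doc/coding-ghost.md` hunk later in this compare identifies it as `.golangci.yml` and documents a local entry point, roughly:

```bash
# Per the coding guide removed in this same compare: lint locally before push.
# script/lint wraps golangci-lint with the repository's linter config above.
script/lint
```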
.travis.yml (new file): 33 changes

@@ -0,0 +1,33 @@
+# http://docs.travis-ci.com/user/languages/go/
+language: go
+
+go:
+  - "1.12.x"
+
+os:
+  - linux
+
+services:
+  - mysql
+
+env:
+  - MYSQL_USER=root
+  - CURRENT_CI_ENV=travis
+
+addons:
+  apt:
+    packages:
+    - git
+    - numactl
+    - libaio1
+
+before_install:
+  - mysql -e 'CREATE DATABASE IF NOT EXISTS test;'
+
+install: true
+
+script:
+  - script/cibuild
+
+notifications:
+  email: false
@@ -1,20 +0,0 @@
-FROM golang:1.17
-
-RUN apt-get update
-RUN apt-get install -y ruby ruby-dev rubygems build-essential
-RUN gem install --no-ri --no-rdoc fpm
-ENV GOPATH=/tmp/go
-
-RUN apt-get install -y curl
-RUN apt-get install -y rsync
-RUN apt-get install -y gcc
-RUN apt-get install -y g++
-RUN apt-get install -y bash
-RUN apt-get install -y git
-RUN apt-get install -y tar
-RUN apt-get install -y rpm
-
-RUN mkdir -p $GOPATH/src/github.com/github/gh-ost
-WORKDIR $GOPATH/src/github.com/github/gh-ost
-COPY . .
-RUN bash build.sh
@@ -1,4 +1,4 @@
-FROM golang:1.17
+FROM golang:1.12.1
 LABEL maintainer="github@github.com"
 
 RUN apt-get update
@@ -1,6 +1,6 @@
 # gh-ost
 
-[![ci](https://github.com/github/gh-ost/actions/workflows/ci.yml/badge.svg)](https://github.com/github/gh-ost/actions/workflows/ci.yml) [![replica-tests](https://github.com/github/gh-ost/actions/workflows/replica-tests.yml/badge.svg)](https://github.com/github/gh-ost/actions/workflows/replica-tests.yml) [![downloads](https://img.shields.io/github/downloads/github/gh-ost/total.svg)](https://github.com/github/gh-ost/releases) [![release](https://img.shields.io/github/release/github/gh-ost.svg)](https://github.com/github/gh-ost/releases)
+[![build status](https://travis-ci.org/github/gh-ost.svg)](https://travis-ci.org/github/gh-ost) [![downloads](https://img.shields.io/github/downloads/github/gh-ost/total.svg)](https://github.com/github/gh-ost/releases) [![release](https://img.shields.io/github/release/github/gh-ost.svg)](https://github.com/github/gh-ost/releases)
 
 #### GitHub's online schema migration for MySQL <img src="doc/images/gh-ost-logo-light-160.png" align="right">
 

@@ -65,7 +65,6 @@ Also see:
 - [the fine print](doc/the-fine-print.md)
 - [Community questions](https://github.com/github/gh-ost/issues?q=label%3Aquestion)
 - [Using `gh-ost` on AWS RDS](doc/rds.md)
-- [Using `gh-ost` on Azure Database for MySQL](doc/azure.md)
 
 ## What's in a name?
 

@@ -95,7 +94,7 @@ Please see [Coding gh-ost](doc/coding-ghost.md) for a guide to getting started d
 
 [Download latest release here](https://github.com/github/gh-ost/releases/latest)
 
-`gh-ost` is a Go project; it is built with Go `1.15` and above. To build on your own, use either:
+`gh-ost` is a Go project; it is built with Go `1.12` and above. To build on your own, use either:
 - [script/build](https://github.com/github/gh-ost/blob/master/script/build) - this is the same build script used by CI hence the authoritative; artifact is `./bin/gh-ost` binary.
 - [build.sh](https://github.com/github/gh-ost/blob/master/build.sh) for building `tar.gz` artifacts in `/tmp/gh-ost`
 

@@ -110,4 +109,3 @@ Generally speaking, `master` branch is stable, but only [releases](https://githu
 - [@shlomi-noach](https://github.com/shlomi-noach)
 - [@jessbreckenridge](https://github.com/jessbreckenridge)
 - [@gtowey](https://github.com/gtowey)
-- [@timvaillancourt](https://github.com/timvaillancourt)

@@ -1 +1 @@
-1.1.2
+1.0.48
build.sh: 27 changes

@@ -18,32 +18,30 @@ function build {
   GOOS=$3
   GOARCH=$4
 
-  if ! go version | egrep -q 'go1\.(1[5-9]|[2-9][0-9]{1})' ; then
-    echo "go version must be 1.15 or above"
+  if ! go version | egrep -q 'go(1\.1[234])' ; then
+    echo "go version must be 1.12 or above"
     exit 1
   fi
 
-  echo "Building ${osname}-${GOARCH} binary"
+  echo "Building ${osname} binary"
   export GOOS
   export GOARCH
   go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
 
   if [ $? -ne 0 ]; then
-    echo "Build failed for ${osname} ${GOARCH}."
+    echo "Build failed for ${osname}"
     exit 1
   fi
 
-  (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${GOARCH}-${timestamp}.tar.gz $target)
+  (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${timestamp}.tar.gz $target)
 
-  # build RPM and deb for Linux, x86-64 only
-  if [ "$GOOS" == "linux" ] && [ "$GOARCH" == "amd64" ] ; then
+  if [ "$GOOS" == "linux" ] ; then
     echo "Creating Distro full packages"
     builddir=$(setuptree)
    cp $buildpath/$target $builddir/gh-ost/usr/bin
     cd $buildpath
-    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm --rpm-rpmbuild-define "_build_id_links none" --rpm-os linux .
-    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
-    cd -
+    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm .
+    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
   fi
 }
 

@@ -63,16 +61,11 @@ main() {
 
   mkdir -p ${buildpath}
   rm -rf ${buildpath:?}/*
-  build GNU/Linux linux linux amd64
-  build GNU/Linux linux linux arm64
   build macOS osx darwin amd64
-  build macOS osx darwin arm64
+  build GNU/Linux linux linux amd64
 
   echo "Binaries found in:"
-  find $buildpath/gh-ost* -type f -maxdepth 1
-
-  echo "Checksums:"
-  (cd $buildpath && shasum -a256 gh-ost* 2>/dev/null)
+  ls -1 $buildpath/gh-ost-binary*${timestamp}.tar.gz
 }
 
 main "$@"
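Both sides of `build.sh` drive the same `build` function; a sketch of invoking it (artifact location per the README hunk above, which names `/tmp/gh-ost` as where `build.sh` writes its `tar.gz` artifacts):

```bash
# Build all configured platform binaries and, on Linux, rpm/deb packages.
# Tarball naming follows the tar cfz line above:
#   master:     gh-ost-binary-${osshort}-${GOARCH}-${timestamp}.tar.gz
#   abort-miss: gh-ost-binary-${osshort}-${timestamp}.tar.gz
bash build.sh
ls -1 /tmp/gh-ost/
```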
doc/azure.md: 26 changes

@@ -1,26 +0,0 @@
-`gh-ost` has been updated to work with Azure Database for MySQL however due to GitHub does not use it, this documentation is community driven so if you find a bug please [open an issue][new_issue]!
-
-# Azure Database for MySQL
-
-## Limitations
-
-- `gh-ost` runs should be setup use [`--assume-rbr`][assume_rbr_docs] and use `binlog_row_image=FULL`.
-- Azure Database for MySQL does not use same user name suffix for master and replica, so master host, user and password need to be pointed out.
-
-## Step
-1. Change the replica server's `binlog_row_image` from `MINIMAL` to `FULL`. See [guide](https://docs.microsoft.com/en-us/azure/mysql/howto-server-parameters) on Azure document.
-2. Use your `gh-ost` always with additional 5 parameter
-```{bash}
-gh-ost \
---azure \
---assume-master-host=master-server-dns-name \
---master-user="master-user-name" \
---master-password="master-password" \
---assume-rbr \
-[-- other paramters you need]
-```
-
-
-[new_issue]: https://github.com/github/gh-ost/issues/new
-[assume_rbr_docs]: https://github.com/github/gh-ost/blob/master/doc/command-line-flags.md#assume-rbr
-[migrate_test_on_replica_docs]: https://github.com/github/gh-ost/blob/master/doc/cheatsheet.md#c-migratetest-on-replica
@@ -5,7 +5,7 @@
 Getting started with gh-ost development is simple!
 
 - First obtain the repository with `git clone` or `go get`.
-- From inside of the repository run `script/cibuild`.
+- From inside of the repository run `script/cibuild`
 - This will bootstrap the environment if needed, format the code, build the code, and then run the unit test.
 
 ## CI build workflow

@@ -14,12 +14,6 @@ Getting started with gh-ost development is simple!
 
 If additional steps are needed, please add them into this workflow so that the workflow remains simple.
 
-## `golang-ci` linter
-
-To enfore best-practices, Pull Requests are automatically linted by [`golang-ci`](https://golangci-lint.run/). The linter config is located at [`.golangci.yml`](https://github.com/github/gh-ost/blob/master/.golangci.yml) and the `golangci-lint` GitHub Action is located at [`.github/workflows/golangci-lint.yml`](https://github.com/github/gh-ost/blob/master/.github/workflows/golangci-lint.yml).
-
-To run the `golang-ci` linters locally _(recommended before push)_, use `script/lint`.
-
 ## Notes:
 
 Currently, `script/ensure-go-installed` will install `go` for Mac OS X and Linux. We welcome PR's to add other platforms.
@@ -6,14 +6,6 @@ A more in-depth discussion of various `gh-ost` command line flags: implementatio
 
 Add this flag when executing on Aliyun RDS.
 
-### allow-zero-in-date
-
-Allows the user to make schema changes that include a zero date or zero in date (e.g. adding a `datetime default '0000-00-00 00:00:00'` column), even if global `sql_mode` on MySQL has `NO_ZERO_IN_DATE,NO_ZERO_DATE`.
-
-### azure
-
-Add this flag when executing on Azure Database for MySQL.
-
 ### allow-master-master
 
 See [`--assume-master-host`](#assume-master-host).

@@ -26,7 +18,7 @@ If, for some reason, you do not wish `gh-ost` to connect to a replica, you may c
 
 ### approve-renamed-columns
 
-When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise, the new structure may also look like some column was dropped and another was added.
+When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise the new structure may also look like some column was dropped and another was added.
 
 `gh-ost` will print out what it thinks the _rename_ implied, but will not issue the migration unless you provide with `--approve-renamed-columns`.
 

@@ -36,7 +28,7 @@ If you think `gh-ost` is mistaken and that there's actually no _rename_ involved
 
 `gh-ost` infers the identity of the master server by crawling up the replication topology. You may explicitly tell `gh-ost` the identity of the master host via `--assume-master-host=the.master.com`. This is useful in:
 
-- _master-master_ topologies (together with [`--allow-master-master`](#allow-master-master)), where `gh-ost` can arbitrarily pick one of the co-masters, and you prefer that it picks a specific one
+- _master-master_ topologies (together with [`--allow-master-master`](#allow-master-master)), where `gh-ost` can arbitrarily pick one of the co-masters and you prefer that it picks a specific one
 - _tungsten replicator_ topologies (together with [`--tungsten`](#tungsten)), where `gh-ost` is unable to crawl and detect the master
 
 ### assume-rbr

@@ -45,25 +37,6 @@ If you happen to _know_ your servers use RBR (Row Based Replication, i.e. `binlo
 Skipping this step means `gh-ost` would not need the `SUPER` privilege in order to operate.
 You may want to use this on Amazon RDS.
 
-### attempt-instant-ddl
-
-MySQL 8.0 supports "instant DDL" for some operations. If an alter statement can be completed with instant DDL, only a metadata change is required internally. Instant operations include:
-
-- Adding a column
-- Dropping a column
-- Dropping an index
-- Extending a varchar column
-- Adding a virtual generated column
-
-It is not reliable to parse the `ALTER` statement to determine if it is instant or not. This is because the table might be in an older row format, or have some other incompatibility that is difficult to identify.
-
-`--attempt-instant-ddl` is disabled by default, but the risks of enabling it are relatively minor: `gh-ost` may need to acquire a metadata lock at the start of the operation. This is not a problem for most scenarios, but it could be a problem for users that start the DDL during a period with long running transactions.
-
-`gh-ost` will automatically fallback to the normal DDL process if the attempt to use instant DDL is unsuccessful.
-
-### binlogsyncer-max-reconnect-attempts
-`--binlogsyncer-max-reconnect-attempts=0`, the maximum number of attempts to re-establish a broken inspector connection for sync binlog. `0` or `negative number` means infinite retry, default `0`
-
 ### conf
 
 `--conf=/path/to/my.cnf`: file where credentials are specified. Should be in (or contain) the following format:
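The hunk cuts off before the format itself; a hedged sketch of what a `--conf` credentials file looks like, assuming the standard my.cnf `[client]` layout (user and password values are placeholders):

```bash
# Write a minimal credentials file for --conf=/path/to/my.cnf.
cat > /path/to/my.cnf <<'EOF'
[client]
user=gromit
password=123456
EOF
```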
@@ -84,13 +57,7 @@ Comma delimited status-name=threshold, same format as [`--max-load`](#max-load).
 
 `--critical-load` defines a threshold that, when met, `gh-ost` panics and bails out. The default behavior is to bail out immediately when meeting this threshold.
 
-This may sometimes lead to migrations bailing out on a very short spike, that, while in itself is impacting production and is worth investigating, isn't reason enough to kill a 10-hour migration.
+This may sometimes lead to migrations bailing out on a very short spike, that, while in itself is impacting production and is worth investigating, isn't reason enough to kill a 10 hour migration.
 
-### critical-load-hibernate-seconds
-
-When `--critical-load-hibernate-seconds` is non-zero (e.g. `--critical-load-hibernate-seconds=300`), `critical-load` does not panic and bail out; instead, `gh-ost` goes into hibernation for the specified duration. It will not read/write anything from/to any server during this time. Execution then continues upon waking from hibernation.
-
-If `critical-load` is met again, `gh-ost` will repeat this cycle, and never panic and bail out.
-
 ### critical-load-interval-millis
 
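A hedged illustration of the `status-name=threshold` format described above; the threshold values and database/table names are placeholders, not defaults from this diff:

```bash
# Throttle at Threads_running=100; panic and bail out if it ever hits 1000.
gh-ost --critical-load='Threads_running=1000' --max-load='Threads_running=100' \
  --database=mydb --table=mytbl --alter='engine=innodb' --execute
```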
@@ -127,7 +94,7 @@ Noteworthy is that setting `--dml-batch-size` to higher value _does not_ mean `g
 
 ### exact-rowcount
 
-A `gh-ost` execution need to copy whatever rows you have in your existing table onto the ghost table. This can and often will be, a large number. Exactly what that number is?
+A `gh-ost` execution need to copy whatever rows you have in your existing table onto the ghost table. This can, and often be, a large number. Exactly what that number is?
 `gh-ost` initially estimates the number of rows in your table by issuing an `explain select * from your_table`. This will use statistics on your table and return with a rough estimate. How rough? It might go as low as half or as high as double the actual number of rows in your table. This is the same method as used in [`pt-online-schema-change`](https://www.percona.com/doc/percona-toolkit/2.2/pt-online-schema-change.html).
 
 `gh-ost` also supports the `--exact-rowcount` flag. When this flag is given, two things happen:

@@ -164,10 +131,6 @@ Add this flag when executing on a 1st generation Google Cloud Platform (GCP).
 
 Default 100. See [`subsecond-lag`](subsecond-lag.md) for details.
 
-### hooks-status-interval
-
-Defaults to 60 seconds. Configures how often the `gh-ost-on-status` hook is called, see [`hooks`](hooks.md) for full details on how to use hooks.
-
 ### initially-drop-ghost-table
 
 `gh-ost` maintains two tables while migrating: the _ghost_ table (which is synced from your original table and finally replaces it) and a changelog table, which is used internally for bookkeeping. By default, it panics and aborts if it sees those tables upon startup. Provide `--initially-drop-ghost-table` and `--initially-drop-old-table` to let `gh-ost` know it's OK to drop them beforehand.

@@ -214,9 +177,6 @@ Optionally involve the process ID, for example: `--replica-server-id=$((10000000
 It's on you to choose a number that does not collide with another `gh-ost` or another running replica.
 See also: [`concurrent-migrations`](cheatsheet.md#concurrent-migrations) on the cheatsheet.
 
-### serve-socket-file
-
-Defaults to an auto-determined and advertised upon startup file. Defines Unix socket file to serve on.
 ### skip-foreign-key-checks
 
 By default `gh-ost` verifies no foreign keys exist on the migrated table. On servers with large number of tables this check can take a long time. If you're absolutely certain no foreign keys exist (table does not reference other table nor is referenced by other tables) and wish to save the check time, provide with `--skip-foreign-key-checks`.

@@ -249,18 +209,6 @@ Allows `gh-ost` to connect to the MySQL servers using encrypted connections, but
 
 `--ssl-key=/path/to/ssl-key.key`: SSL private key file (in PEM format).
 
-### storage-engine
-Default is `innodb`, and `rocksdb` support is currently experimental. InnoDB and RocksDB are both transactional engines, supporting both shared and exclusive row locks.
-
-But RocksDB currently lacks a few features support compared to InnoDB:
-- Gap Locks
-- Foreign Key
-- Generated Columns
-- Spatial
-- Geometry
-
-When `--storage-engine=rocksdb`, `gh-ost` will make some changes necessary (e.g. sets isolation level to `READ_COMMITTED`) to support RocksDB.
-
 ### test-on-replica
 
 Issue the migration on a replica; do not modify data on master. Useful for validating, testing and benchmarking. See [`testing-on-replica`](testing-on-replica.md)

@@ -275,15 +223,7 @@ Provide a command delimited list of replicas; `gh-ost` will throttle when any of
 
 ### throttle-http
 
-Provide an HTTP endpoint; `gh-ost` will issue `HEAD` requests on given URL and throttle whenever response status code is not `200`. The URL can be queried and updated dynamically via [interactive commands](interactive-commands.md). Empty URL disables the HTTP check.
+Provide a HTTP endpoint; `gh-ost` will issue `HEAD` requests on given URL and throttle whenever response status code is not `200`. The URL can be queried and updated dynamically via [interactive commands](interactive-commands.md). Empty URL disables the HTTP check.
 
-### throttle-http-interval-millis
-
-Defaults to 100. Configures the HTTP throttle check interval in milliseconds.
-
-### throttle-http-timeout-millis
-
-Defaults to 1000 (1 second). Configures the HTTP throttler check timeout in milliseconds.
-
 ### timestamp-old-table
 
@@ -66,9 +66,7 @@ The following variables are available on all hooks:
 - `GH_OST_ESTIMATED_ROWS` - estimated total rows in table
 - `GH_OST_COPIED_ROWS` - number of rows copied by `gh-ost`
 - `GH_OST_INSPECTED_LAG` - lag in seconds (floating point) of inspected server
-- `GH_OST_HEARTBEAT_LAG` - lag in seconds (floating point) of heartbeat
 - `GH_OST_PROGRESS` - progress pct ([0..100], floating point) of migration
-- `GH_OST_ETA_SECONDS` - estimated duration until migration finishes in seconds
 - `GH_OST_MIGRATED_HOST`
 - `GH_OST_INSPECTED_HOST`
 - `GH_OST_EXECUTING_HOST`
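A minimal sketch of a hook consuming the variables listed above; gh-ost hooks are plain executables invoked with these variables in the environment, and the file name follows the `gh-ost-on-status` convention mentioned in the deleted `hooks-status-interval` flag. The log path is a placeholder:

```bash
#!/bin/bash
# gh-ost-on-status hook sketch: append one progress line per invocation.
echo "$(date +%FT%T) host=${GH_OST_MIGRATED_HOST} copied=${GH_OST_COPIED_ROWS}/${GH_OST_ESTIMATED_ROWS} progress=${GH_OST_PROGRESS}%" >> /tmp/gh-ost-status.log
```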
@@ -18,8 +18,6 @@ Both interfaces may serve at the same time. Both respond to simple text command,
 - `status`: returns a detailed status summary of migration progress and configuration
 - `sup`: returns a brief status summary of migration progress
 - `coordinates`: returns recent (though not exactly up to date) binary log coordinates of the inspected server
-- `applier`: returns the hostname of the applier
-- `inspector`: returns the hostname of the inspector
 - `chunk-size=<newsize>`: modify the `chunk-size`; applies on next running copy-iteration
 - `dml-batch-size=<newsize>`: modify the `dml-batch-size`; applies on next applying of binary log events
 - `max-lag-millis=<max-lag>`: modify the maximum replication lag threshold (milliseconds, minimum value is `100`, i.e. `0.1` second)
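A hedged sketch of issuing one of the text commands above to a running migration over its Unix socket interface (the socket path is a placeholder):

```bash
# Raise the replication lag threshold on the fly; takes effect immediately.
echo "max-lag-millis=1500" | nc -U /tmp/gh-ost.mydb.mytbl.sock
```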
@@ -2,8 +2,6 @@
 
 ### Requirements
 
-- `gh-ost` currently requires MySQL versions 5.7 and greater.
-
 - You will need to have one server serving Row Based Replication (RBR) format binary logs. Right now `FULL` row image is supported. `MINIMAL` to be supported in the near future. `gh-ost` prefers to work with replicas. You may [still have your master configured with Statement Based Replication](migrating-with-sbr.md) (SBR).
 
 - If you are using a replica, the table must have an identical schema between the master and replica.

@@ -20,8 +18,6 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
 - Switching your `binlog_format` to `ROW`, in the case where it is _not_ `ROW` and you explicitly specified `--switch-to-rbr`
 - If your replication is already in RBR (`binlog_format=ROW`) you can specify `--assume-rbr` to avoid the `STOP SLAVE/START SLAVE` operations, hence no need for `SUPER`.
 
-- `gh-ost` uses the `REPEATABLE_READ` transaction isolation level for all MySQL connections, regardless of the server default.
-
 - Running `--test-on-replica`: before the cut-over phase, `gh-ost` stops replication so that you can compare the two tables and satisfy that the migration is sound.
 
 ### Limitations

@@ -45,7 +41,6 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
 - Amazon RDS works, but has its own [limitations](rds.md).
 - Google Cloud SQL works, `--gcp` flag required.
 - Aliyun RDS works, `--aliyun-rds` flag required.
-- Azure Database for MySQL works, `--azure` flag required, and have detailed document about it. (azure.md)
 
 - Multisource is not supported when migrating via replica. It _should_ work (but never tested) when connecting directly to master (`--allow-on-master`)
 

@@ -55,5 +50,4 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
 
 - Migrating a `FEDERATED` table is unsupported and is irrelevant to the problem `gh-ost` tackles.
 
-- [Encrypted binary logs](https://www.percona.com/blog/2018/03/08/binlog-encryption-percona-server-mysql/) are not supported.
 - `ALTER TABLE ... RENAME TO some_other_name` is not supported (and you shouldn't use `gh-ost` for such a trivial operation).
@@ -29,7 +29,7 @@ CREATE TABLE tbl (
 
 (This is also the definition of the _ghost_ table, except that that table would be called `_tbl_gho`).
 
-In this migration, the _before_ and _after_ versions contain the same unique not-null key (the PRIMARY KEY). To run this migration, `gh-ost` would iterate through the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` in primary key order, while also applying the binlog event writes from `tbl` onto `_tbl_gho`.
+In this migration, the _before_ and _after_ versions contain the same unique not-null key (the PRIMARY KEY). To run this migration, `gh-ost` would iterate through the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` in primary key order, while also applying the binlog event writes from `tble` onto `_tbl_gho`.
 
 The applying of the binlog events is what requires the shared unique key. For example, an `UPDATE` statement to `tbl` translates to a `REPLACE` statement which `gh-ost` applies to `_tbl_gho`. A `REPLACE` statement expects to insert or replace an existing row based on its row's values and the table's unique key constraints. In particular, if inserting that row would result in a unique key violation (e.g., a row with that primary key already exists), it would _replace_ that existing row with the new values.
 
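A hedged illustration of the UPDATE-to-REPLACE mapping described in that hunk (placeholder schema; the column names are assumptions):

```bash
# A binlog UPDATE event on the original table...
mysql -e "UPDATE tbl SET data = 'x' WHERE id = 7"
# ...is applied to the ghost table as a REPLACE keyed on the shared unique key,
# overwriting the already-copied row for id=7 if one exists:
mysql -e "REPLACE INTO _tbl_gho (id, data) VALUES (7, 'x')"
```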
@@ -38,7 +38,7 @@ Note that you may dynamically change both `--max-lag-millis` and the `throttle-c
 
 `--max-load='Threads_running=100,Threads_connected=500'`
 
-Metrics must be valid, numeric [status variables](https://dev.mysql.com/doc/refman/5.7/en/server-status-variables.html)
+Metrics must be valid, numeric [status variables](http://dev.mysql.com/doc/refman/5.6/en/server-status-variables.html)
 
 #### Throttle query
 

@@ -97,7 +97,7 @@ Copy: 0/2915 0.0%; Applied: 0; Backlog: 0/100; Elapsed: 42s(copy), 42s(total); s
 
 Throttling time is limited by the availability of the binary logs. When throttling begins, `gh-ost` suspends reading the binary logs, and expects to resume reading from same binary log where it paused.
 
-Your availability of binary logs is typically determined by the [expire_logs_days](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_expire_logs_days) variable. If you have `expire_logs_days = 10` (or check `select @@global.expire_logs_days`), then you should be able to throttle for up to `10` days.
+Your availability of binary logs is typically determined by the [expire_logs_days](https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_expire_logs_days) variable. If you have `expire_logs_days = 10` (or check `select @@global.expire_logs_days`), then you should be able to throttle for up to `10` days.
 
 Having said that, throttling for so long is far fetching, in that the `gh-ost` process itself must be kept alive during that time; and the amount of binary logs to process once it resumes will potentially take days to replay.
 

@@ -112,7 +112,7 @@ It is also interesting to observe that `gh-ost` is the only application writing
 
 When `gh-ost` pauses (throttles), it issues no writes on the ghost table. Because there are no triggers, write workload is decoupled from the `gh-ost` write workload. And because we're using an asynchronous approach, the algorithm already handles a time difference between a master write time and the ghost apply time. A difference of a few microseconds is no different from a difference of minutes or hours.
 
-When `gh-ost` [throttles](throttle.md), either by replication lag, `max-load` setting or an explicit [interactive user command](interactive-commands.md), the master is back to normal. It sees no more writes on the ghost table.
+When `gh-ost` [throttles](throttle.md), either by replication lag, `max-load` setting or and explicit [interactive user command](interactive-commands.md), the master is back to normal. It sees no more writes on the ghost table.
 An exception is the ongoing heartbeat writes onto the changelog table, which we consider to be negligible.
 
 #### Testability
@@ -7,7 +7,7 @@ Existing MySQL schema migration tools:
 - [LHM](https://github.com/soundcloud/lhm)
 - [oak-online-alter-table](https://github.com/shlomi-noach/openarkkit)
 
-are all using [triggers](https://dev.mysql.com/doc/refman/5.7/en/triggers.html) to propagate live changes on your table onto a ghost/shadow table that is slowly being synchronized. The tools not all work the same: while most use a synchronous approach (all changes applied on the ghost table), the Facebook tool uses an asynchronous approach (changes are appended to a changelog table, later reviewed and applied on ghost table).
+are all using [triggers](http://dev.mysql.com/doc/refman/5.6/en/triggers.html) to propagate live changes on your table onto a ghost/shadow table that is slowly being synchronized. The tools not all work the same: while most use a synchronous approach (all changes applied on the ghost table), the Facebook tool uses an asynchronous approach (changes are appended to a changelog table, later reviewed and applied on ghost table).
 
 Use of triggers simplifies a lot of the flow in doing a live table migration, but also poses some limitations or difficulties. Here are reasons why we choose to [design a triggerless solution](triggerless-design.md) to schema migrations.
 
go.mod: 27 changes

@@ -1,27 +0,0 @@
-module github.com/github/gh-ost
-
-go 1.17
-
-require (
-	github.com/go-ini/ini v1.62.0
-	github.com/go-mysql-org/go-mysql v1.3.0
-	github.com/go-sql-driver/mysql v1.6.0
-	github.com/openark/golib v0.0.0-20210531070646-355f37940af8
-	github.com/satori/go.uuid v1.2.0
-	golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
-	golang.org/x/net v0.0.0-20210224082022-3d97a244fca7
-	golang.org/x/text v0.3.6
-)
-
-require (
-	github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 // indirect
-	github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
-	github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect
-	github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 // indirect
-	github.com/smartystreets/goconvey v1.6.4 // indirect
-	go.uber.org/atomic v1.7.0 // indirect
-	golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
-	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
-	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
-	gopkg.in/ini.v1 v1.62.0 // indirect
-)
go.sum: 136 changes

@@ -1,136 +0,0 @@
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
-github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
-github.com/cznic/parser v0.0.0-20160622100904-31edd927e5b1/go.mod h1:2B43mz36vGZNZEwkWi8ayRSSUXLfjL8OkbzwW4NcPMM=
-github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
-github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
-github.com/cznic/y v0.0.0-20170802143616-045f81c6662a/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/go-ini/ini v1.62.0 h1:7VJT/ZXjzqSrvtraFp4ONq80hTcRQth1c9ZnQ3uNQvU=
-github.com/go-ini/ini v1.62.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-mysql-org/go-mysql v1.3.0 h1:lpNqkwdPzIrYSZGdqt8HIgAXZaK6VxBNfr8f7Z4FgGg=
-github.com/go-mysql-org/go-mysql v1.3.0/go.mod h1:3lFZKf7l95Qo70+3XB2WpiSf9wu2s3na3geLMaIIrqQ=
-github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
-github.com/openark/golib v0.0.0-20210531070646-355f37940af8 h1:9ciIHNuyFqRWi9NpMNw9sVLB6z1ItpP5ZhTY9Q1xVu4=
-github.com/openark/golib v0.0.0-20210531070646-355f37940af8/go.mod h1:1jj8x1eDVZxgc/Z4VyamX4qTbAdHPUQA6NeVtCd8Sl8=
-github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=
-github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
-github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
-github.com/pingcap/errors v0.11.5-0.20201029093017-5a7df2af2ac7/go.mod h1:G7x87le1poQzLB/TqvTJI2ILrSgobnq4Ut7luOwvfvI=
-github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 h1:LllgC9eGfqzkfubMgjKIDyZYaa609nNWAyNZtpy2B3M=
-github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3/go.mod h1:G7x87le1poQzLB/TqvTJI2ILrSgobnq4Ut7luOwvfvI=
-github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
-github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
-github.com/pingcap/parser v0.0.0-20210415081931-48e7f467fd74/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
-github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
-github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=
-github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
-github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 h1:oI+RNwuC9jF2g2lP0u0cVEEZrc/AYBCuFdvwrLWM/6Q=
-github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07/go.mod h1:yFdBgwXP24JziuRl2NMUahT7nGLNOKi1SIiFxMttVD4=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
-go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
-golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM=
-golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
-golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20201125231158-b5590deeca9b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
-gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
|
||||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
Copyright 2022 GitHub Inc.
|
Copyright 2016 GitHub Inc.
|
||||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@ -15,13 +15,13 @@ import (
|
|||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
uuid "github.com/satori/go.uuid"
|
"github.com/satori/go.uuid"
|
||||||
|
|
||||||
"github.com/github/gh-ost/go/mysql"
|
"github.com/github/gh-ost/go/mysql"
|
||||||
"github.com/github/gh-ost/go/sql"
|
"github.com/github/gh-ost/go/sql"
|
||||||
"github.com/openark/golib/log"
|
|
||||||
|
|
||||||
"github.com/go-ini/ini"
|
"gopkg.in/gcfg.v1"
|
||||||
|
gcfgscanner "gopkg.in/gcfg.v1/scanner"
|
||||||
)
|
)
|
||||||
|
|
||||||
// RowsEstimateMethod is the type of row number estimation
|
// RowsEstimateMethod is the type of row number estimation
|
||||||
@ -51,7 +51,6 @@ const (
|
|||||||
const (
|
const (
|
||||||
HTTPStatusOK = 200
|
HTTPStatusOK = 200
|
||||||
MaxEventsBatchSize = 1000
|
MaxEventsBatchSize = 1000
|
||||||
ETAUnknown = math.MinInt64
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -80,10 +79,7 @@ type MigrationContext struct {
|
|||||||
DatabaseName string
|
DatabaseName string
|
||||||
OriginalTableName string
|
OriginalTableName string
|
||||||
AlterStatement string
|
AlterStatement string
|
||||||
AlterStatementOptions string // anything following the 'ALTER TABLE [schema.]table' from AlterStatement
|
|
||||||
|
|
||||||
countMutex sync.Mutex
|
|
||||||
countTableRowsCancelFunc func()
|
|
||||||
CountTableRows bool
|
CountTableRows bool
|
||||||
ConcurrentCountTableRows bool
|
ConcurrentCountTableRows bool
|
||||||
AllowedRunningOnMaster bool
|
AllowedRunningOnMaster bool
|
||||||
@ -92,7 +88,6 @@ type MigrationContext struct {
|
|||||||
AssumeRBR bool
|
AssumeRBR bool
|
||||||
SkipForeignKeyChecks bool
|
SkipForeignKeyChecks bool
|
||||||
SkipStrictMode bool
|
SkipStrictMode bool
|
||||||
AllowZeroInDate bool
|
|
||||||
NullableUniqueKeyAllowed bool
|
NullableUniqueKeyAllowed bool
|
||||||
ApproveRenamedColumns bool
|
ApproveRenamedColumns bool
|
||||||
SkipRenamedColumns bool
|
SkipRenamedColumns bool
|
||||||
@ -100,8 +95,6 @@ type MigrationContext struct {
|
|||||||
DiscardForeignKeys bool
|
DiscardForeignKeys bool
|
||||||
AliyunRDS bool
|
AliyunRDS bool
|
||||||
GoogleCloudPlatform bool
|
GoogleCloudPlatform bool
|
||||||
AzureMySQL bool
|
|
||||||
AttemptInstantDDL bool
|
|
||||||
|
|
||||||
config ContextConfig
|
config ContextConfig
|
||||||
configMutex *sync.Mutex
|
configMutex *sync.Mutex
|
||||||
@ -126,7 +119,6 @@ type MigrationContext struct {
|
|||||||
ThrottleAdditionalFlagFile string
|
ThrottleAdditionalFlagFile string
|
||||||
throttleQuery string
|
throttleQuery string
|
||||||
throttleHTTP string
|
throttleHTTP string
|
||||||
IgnoreHTTPErrors bool
|
|
||||||
ThrottleCommandedByUser int64
|
ThrottleCommandedByUser int64
|
||||||
HibernateUntil int64
|
HibernateUntil int64
|
||||||
maxLoad LoadMap
|
maxLoad LoadMap
|
||||||
@ -144,7 +136,6 @@ type MigrationContext struct {
|
|||||||
HooksHintMessage string
|
HooksHintMessage string
|
||||||
HooksHintOwner string
|
HooksHintOwner string
|
||||||
HooksHintToken string
|
HooksHintToken string
|
||||||
HooksStatusIntervalSec int64
|
|
||||||
|
|
||||||
DropServeSocket bool
|
DropServeSocket bool
|
||||||
ServeSocketFile string
|
ServeSocketFile string
|
||||||
@ -183,14 +174,9 @@ type MigrationContext struct {
|
|||||||
RenameTablesEndTime time.Time
|
RenameTablesEndTime time.Time
|
||||||
pointOfInterestTime time.Time
|
pointOfInterestTime time.Time
|
||||||
pointOfInterestTimeMutex *sync.Mutex
|
pointOfInterestTimeMutex *sync.Mutex
|
||||||
lastHeartbeatOnChangelogTime time.Time
|
|
||||||
lastHeartbeatOnChangelogMutex *sync.Mutex
|
|
||||||
CurrentLag int64
|
CurrentLag int64
|
||||||
currentProgress uint64
|
currentProgress uint64
|
||||||
etaNanoseonds int64
|
|
||||||
ThrottleHTTPIntervalMillis int64
|
|
||||||
ThrottleHTTPStatusCode int64
|
ThrottleHTTPStatusCode int64
|
||||||
ThrottleHTTPTimeoutMillis int64
|
|
||||||
controlReplicasLagResult mysql.ReplicationLagResult
|
controlReplicasLagResult mysql.ReplicationLagResult
|
||||||
TotalRowsCopied int64
|
TotalRowsCopied int64
|
||||||
TotalDMLEventsApplied int64
|
TotalDMLEventsApplied int64
|
||||||
@ -214,7 +200,6 @@ type MigrationContext struct {
|
|||||||
OriginalTableColumns *sql.ColumnList
|
OriginalTableColumns *sql.ColumnList
|
||||||
OriginalTableVirtualColumns *sql.ColumnList
|
OriginalTableVirtualColumns *sql.ColumnList
|
||||||
OriginalTableUniqueKeys [](*sql.UniqueKey)
|
OriginalTableUniqueKeys [](*sql.UniqueKey)
|
||||||
OriginalTableAutoIncrement uint64
|
|
||||||
GhostTableColumns *sql.ColumnList
|
GhostTableColumns *sql.ColumnList
|
||||||
GhostTableVirtualColumns *sql.ColumnList
|
GhostTableVirtualColumns *sql.ColumnList
|
||||||
GhostTableUniqueKeys [](*sql.UniqueKey)
|
GhostTableUniqueKeys [](*sql.UniqueKey)
|
||||||
@ -231,27 +216,6 @@ type MigrationContext struct {
|
|||||||
ForceTmpTableName string
|
ForceTmpTableName string
|
||||||
|
|
||||||
recentBinlogCoordinates mysql.BinlogCoordinates
|
recentBinlogCoordinates mysql.BinlogCoordinates
|
||||||
|
|
||||||
BinlogSyncerMaxReconnectAttempts int
|
|
||||||
|
|
||||||
Log Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
type Logger interface {
|
|
||||||
Debug(args ...interface{})
|
|
||||||
Debugf(format string, args ...interface{})
|
|
||||||
Info(args ...interface{})
|
|
||||||
Infof(format string, args ...interface{})
|
|
||||||
Warning(args ...interface{}) error
|
|
||||||
Warningf(format string, args ...interface{}) error
|
|
||||||
Error(args ...interface{}) error
|
|
||||||
Errorf(format string, args ...interface{}) error
|
|
||||||
Errore(err error) error
|
|
||||||
Fatal(args ...interface{}) error
|
|
||||||
Fatalf(format string, args ...interface{}) error
|
|
||||||
Fatale(err error) error
|
|
||||||
SetLevel(level log.LogLevel)
|
|
||||||
SetPrintStackTrace(printStackTraceFlag bool)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type ContextConfig struct {
|
type ContextConfig struct {
|
||||||
@ -277,7 +241,6 @@ func NewMigrationContext() *MigrationContext {
|
|||||||
MaxLagMillisecondsThrottleThreshold: 1500,
|
MaxLagMillisecondsThrottleThreshold: 1500,
|
||||||
CutOverLockTimeoutSeconds: 3,
|
CutOverLockTimeoutSeconds: 3,
|
||||||
DMLBatchSize: 10,
|
DMLBatchSize: 10,
|
||||||
etaNanoseonds: ETAUnknown,
|
|
||||||
maxLoad: NewLoadMap(),
|
maxLoad: NewLoadMap(),
|
||||||
criticalLoad: NewLoadMap(),
|
criticalLoad: NewLoadMap(),
|
||||||
throttleMutex: &sync.Mutex{},
|
throttleMutex: &sync.Mutex{},
|
||||||
@ -285,26 +248,11 @@ func NewMigrationContext() *MigrationContext {
|
|||||||
throttleControlReplicaKeys: mysql.NewInstanceKeyMap(),
|
throttleControlReplicaKeys: mysql.NewInstanceKeyMap(),
|
||||||
configMutex: &sync.Mutex{},
|
configMutex: &sync.Mutex{},
|
||||||
pointOfInterestTimeMutex: &sync.Mutex{},
|
pointOfInterestTimeMutex: &sync.Mutex{},
|
||||||
lastHeartbeatOnChangelogMutex: &sync.Mutex{},
|
|
||||||
ColumnRenameMap: make(map[string]string),
|
ColumnRenameMap: make(map[string]string),
|
||||||
PanicAbort: make(chan error),
|
PanicAbort: make(chan error),
|
||||||
Log: NewDefaultLogger(),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (this *MigrationContext) SetConnectionConfig(storageEngine string) error {
|
|
||||||
var transactionIsolation string
|
|
||||||
switch storageEngine {
|
|
||||||
case "rocksdb":
|
|
||||||
transactionIsolation = "READ-COMMITTED"
|
|
||||||
default:
|
|
||||||
transactionIsolation = "REPEATABLE-READ"
|
|
||||||
}
|
|
||||||
this.InspectorConnectionConfig.TransactionIsolation = transactionIsolation
|
|
||||||
this.ApplierConnectionConfig.TransactionIsolation = transactionIsolation
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getSafeTableName(baseName string, suffix string) string {
|
func getSafeTableName(baseName string, suffix string) string {
|
||||||
name := fmt.Sprintf("_%s_%s", baseName, suffix)
|
name := fmt.Sprintf("_%s_%s", baseName, suffix)
|
||||||
if len(name) <= mysql.MaxTableNameLength {
|
if len(name) <= mysql.MaxTableNameLength {
|
||||||
@ -443,44 +391,10 @@ func (this *MigrationContext) IsTransactionalTable() bool {
|
|||||||
{
|
{
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
case "rocksdb":
|
|
||||||
{
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetCountTableRowsCancelFunc sets the cancel function for the CountTableRows query context
|
|
||||||
func (this *MigrationContext) SetCountTableRowsCancelFunc(f func()) {
|
|
||||||
this.countMutex.Lock()
|
|
||||||
defer this.countMutex.Unlock()
|
|
||||||
|
|
||||||
this.countTableRowsCancelFunc = f
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsCountingTableRows returns true if the migration has a table count query running
|
|
||||||
func (this *MigrationContext) IsCountingTableRows() bool {
|
|
||||||
this.countMutex.Lock()
|
|
||||||
defer this.countMutex.Unlock()
|
|
||||||
|
|
||||||
return this.countTableRowsCancelFunc != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CancelTableRowsCount cancels the CountTableRows query context. It is safe to
|
|
||||||
// call function even when IsCountingTableRows is false.
|
|
||||||
func (this *MigrationContext) CancelTableRowsCount() {
|
|
||||||
this.countMutex.Lock()
|
|
||||||
defer this.countMutex.Unlock()
|
|
||||||
|
|
||||||
if this.countTableRowsCancelFunc == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
this.countTableRowsCancelFunc()
|
|
||||||
this.countTableRowsCancelFunc = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ElapsedTime returns time since very beginning of the process
|
// ElapsedTime returns time since very beginning of the process
|
||||||
func (this *MigrationContext) ElapsedTime() time.Duration {
|
func (this *MigrationContext) ElapsedTime() time.Duration {
|
||||||
return time.Since(this.StartTime)
|
return time.Since(this.StartTime)
|
||||||
@ -516,10 +430,6 @@ func (this *MigrationContext) MarkRowCopyEndTime() {
|
|||||||
this.RowCopyEndTime = time.Now()
|
this.RowCopyEndTime = time.Now()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (this *MigrationContext) TimeSinceLastHeartbeatOnChangelog() time.Duration {
|
|
||||||
return time.Since(this.GetLastHeartbeatOnChangelogTime())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (this *MigrationContext) GetCurrentLagDuration() time.Duration {
|
func (this *MigrationContext) GetCurrentLagDuration() time.Duration {
|
||||||
return time.Duration(atomic.LoadInt64(&this.CurrentLag))
|
return time.Duration(atomic.LoadInt64(&this.CurrentLag))
|
||||||
}
|
}
|
||||||
@ -532,22 +442,6 @@ func (this *MigrationContext) SetProgressPct(progressPct float64) {
|
|||||||
atomic.StoreUint64(&this.currentProgress, math.Float64bits(progressPct))
|
atomic.StoreUint64(&this.currentProgress, math.Float64bits(progressPct))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (this *MigrationContext) GetETADuration() time.Duration {
|
|
||||||
return time.Duration(atomic.LoadInt64(&this.etaNanoseonds))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (this *MigrationContext) SetETADuration(etaDuration time.Duration) {
|
|
||||||
atomic.StoreInt64(&this.etaNanoseonds, etaDuration.Nanoseconds())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (this *MigrationContext) GetETASeconds() int64 {
|
|
||||||
nano := atomic.LoadInt64(&this.etaNanoseonds)
|
|
||||||
if nano < 0 {
|
|
||||||
return ETAUnknown
|
|
||||||
}
|
|
||||||
return nano / int64(time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
// math.Float64bits([f=0..100])
|
// math.Float64bits([f=0..100])
|
||||||
|
|
||||||
// GetTotalRowsCopied returns the accurate number of rows being copied (affected)
|
// GetTotalRowsCopied returns the accurate number of rows being copied (affected)
|
||||||
@ -575,20 +469,6 @@ func (this *MigrationContext) TimeSincePointOfInterest() time.Duration {
|
|||||||
return time.Since(this.pointOfInterestTime)
|
return time.Since(this.pointOfInterestTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (this *MigrationContext) SetLastHeartbeatOnChangelogTime(t time.Time) {
|
|
||||||
this.lastHeartbeatOnChangelogMutex.Lock()
|
|
||||||
defer this.lastHeartbeatOnChangelogMutex.Unlock()
|
|
||||||
|
|
||||||
this.lastHeartbeatOnChangelogTime = t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (this *MigrationContext) GetLastHeartbeatOnChangelogTime() time.Time {
|
|
||||||
this.lastHeartbeatOnChangelogMutex.Lock()
|
|
||||||
defer this.lastHeartbeatOnChangelogMutex.Unlock()
|
|
||||||
|
|
||||||
return this.lastHeartbeatOnChangelogTime
|
|
||||||
}
|
|
||||||
|
|
||||||
func (this *MigrationContext) SetHeartbeatIntervalMilliseconds(heartbeatIntervalMilliseconds int64) {
|
func (this *MigrationContext) SetHeartbeatIntervalMilliseconds(heartbeatIntervalMilliseconds int64) {
|
||||||
if heartbeatIntervalMilliseconds < 100 {
|
if heartbeatIntervalMilliseconds < 100 {
|
||||||
heartbeatIntervalMilliseconds = 100
|
heartbeatIntervalMilliseconds = 100
|
||||||
@ -607,8 +487,8 @@ func (this *MigrationContext) SetMaxLagMillisecondsThrottleThreshold(maxLagMilli
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (this *MigrationContext) SetChunkSize(chunkSize int64) {
|
func (this *MigrationContext) SetChunkSize(chunkSize int64) {
|
||||||
if chunkSize < 10 {
|
if chunkSize < 100 {
|
||||||
chunkSize = 10
|
chunkSize = 100
|
||||||
}
|
}
|
||||||
if chunkSize > 100000 {
|
if chunkSize > 100000 {
|
||||||
chunkSize = 100000
|
chunkSize = 100000
|
||||||
@ -694,13 +574,6 @@ func (this *MigrationContext) SetThrottleHTTP(throttleHTTP string) {
|
|||||||
this.throttleHTTP = throttleHTTP
|
this.throttleHTTP = throttleHTTP
|
||||||
}
|
}
|
||||||
|
|
||||||
func (this *MigrationContext) SetIgnoreHTTPErrors(ignoreHTTPErrors bool) {
|
|
||||||
this.throttleHTTPMutex.Lock()
|
|
||||||
defer this.throttleHTTPMutex.Unlock()
|
|
||||||
|
|
||||||
this.IgnoreHTTPErrors = ignoreHTTPErrors
|
|
||||||
}
|
|
||||||
|
|
||||||
func (this *MigrationContext) GetMaxLoad() LoadMap {
|
func (this *MigrationContext) GetMaxLoad() LoadMap {
|
||||||
this.throttleMutex.Lock()
|
this.throttleMutex.Lock()
|
||||||
defer this.throttleMutex.Unlock()
|
defer this.throttleMutex.Unlock()
|
||||||
@ -862,39 +735,10 @@ func (this *MigrationContext) ReadConfigFile() error {
|
|||||||
if this.ConfigFile == "" {
|
if this.ConfigFile == "" {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
cfg, err := ini.Load(this.ConfigFile)
|
gcfg.RelaxedParserMode = true
|
||||||
if err != nil {
|
gcfgscanner.RelaxedScannerMode = true
|
||||||
return err
|
if err := gcfg.ReadFileInto(&this.config, this.ConfigFile); err != nil {
|
||||||
}
|
return fmt.Errorf("Error reading config file %s. Details: %s", this.ConfigFile, err.Error())
|
||||||
|
|
||||||
if cfg.Section("client").HasKey("user") {
|
|
||||||
this.config.Client.User = cfg.Section("client").Key("user").String()
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Section("client").HasKey("password") {
|
|
||||||
this.config.Client.Password = cfg.Section("client").Key("password").String()
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Section("osc").HasKey("chunk_size") {
|
|
||||||
this.config.Osc.Chunk_Size, err = cfg.Section("osc").Key("chunk_size").Int64()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Unable to read osc chunk size: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Section("osc").HasKey("max_load") {
|
|
||||||
this.config.Osc.Max_Load = cfg.Section("osc").Key("max_load").String()
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Section("osc").HasKey("replication_lag_query") {
|
|
||||||
this.config.Osc.Replication_Lag_Query = cfg.Section("osc").Key("replication_lag_query").String()
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Section("osc").HasKey("max_lag_millis") {
|
|
||||||
this.config.Osc.Max_Lag_Millis, err = cfg.Section("osc").Key("max_lag_millis").Int64()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Unable to read max lag millis: %w", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// We accept user & password in the form "${SOME_ENV_VARIABLE}" in which case we pull
|
// We accept user & password in the form "${SOME_ENV_VARIABLE}" in which case we pull
|
||||||
@ -908,3 +752,20 @@ func (this *MigrationContext) ReadConfigFile() error {
|
|||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (this *MigrationContext) PanicAbortIfTableError(err error) {
|
||||||
|
if err == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if strings.Contains(err.Error(), mysql.Error1146TableDoesntExist) || strings.Contains(err.Error(), mysql.Error1017CantFindFile) {
|
||||||
|
this.PanicAbortOnError(err)
|
||||||
|
}
|
||||||
|
// otherwise irrelevant error and we do not panic
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *MigrationContext) PanicAbortOnError(err error) {
|
||||||
|
if err == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
this.PanicAbort <- err
|
||||||
|
}
|
||||||
|
@ -1,18 +1,16 @@
|
|||||||
/*
|
/*
|
||||||
Copyright 2022 GitHub Inc.
|
Copyright 2016 GitHub Inc.
|
||||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package base
|
package base
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/openark/golib/log"
|
"github.com/outbrain/golib/log"
|
||||||
test "github.com/openark/golib/tests"
|
test "github.com/outbrain/golib/tests"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@ -58,65 +56,3 @@ func TestGetTableNames(t *testing.T) {
|
|||||||
test.S(t).ExpectEquals(context.GetChangelogTableName(), "_tmp_ghc")
|
test.S(t).ExpectEquals(context.GetChangelogTableName(), "_tmp_ghc")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReadConfigFile(t *testing.T) {
|
|
||||||
{
|
|
||||||
context := NewMigrationContext()
|
|
||||||
context.ConfigFile = "/does/not/exist"
|
|
||||||
if err := context.ReadConfigFile(); err == nil {
|
|
||||||
t.Fatal("Expected .ReadConfigFile() to return an error, got nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
{
|
|
||||||
f, err := ioutil.TempFile("", t.Name())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create tmp file: %v", err)
|
|
||||||
}
|
|
||||||
defer os.Remove(f.Name())
|
|
||||||
|
|
||||||
f.Write([]byte("[client]"))
|
|
||||||
context := NewMigrationContext()
|
|
||||||
context.ConfigFile = f.Name()
|
|
||||||
if err := context.ReadConfigFile(); err != nil {
|
|
||||||
t.Fatalf(".ReadConfigFile() failed: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
{
|
|
||||||
f, err := ioutil.TempFile("", t.Name())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create tmp file: %v", err)
|
|
||||||
}
|
|
||||||
defer os.Remove(f.Name())
|
|
||||||
|
|
||||||
f.Write([]byte("[client]\nuser=test\npassword=123456"))
|
|
||||||
context := NewMigrationContext()
|
|
||||||
context.ConfigFile = f.Name()
|
|
||||||
if err := context.ReadConfigFile(); err != nil {
|
|
||||||
t.Fatalf(".ReadConfigFile() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if context.config.Client.User != "test" {
|
|
||||||
t.Fatalf("Expected client user %q, got %q", "test", context.config.Client.User)
|
|
||||||
} else if context.config.Client.Password != "123456" {
|
|
||||||
t.Fatalf("Expected client password %q, got %q", "123456", context.config.Client.Password)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
{
|
|
||||||
f, err := ioutil.TempFile("", t.Name())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create tmp file: %v", err)
|
|
||||||
}
|
|
||||||
defer os.Remove(f.Name())
|
|
||||||
|
|
||||||
f.Write([]byte("[osc]\nmax_load=10"))
|
|
||||||
context := NewMigrationContext()
|
|
||||||
context.ConfigFile = f.Name()
|
|
||||||
if err := context.ReadConfigFile(); err != nil {
|
|
||||||
t.Fatalf(".ReadConfigFile() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if context.config.Osc.Max_Load != "10" {
|
|
||||||
t.Fatalf("Expected osc 'max_load' %q, got %q", "10", context.config.Osc.Max_Load)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
@ -1,72 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2022 GitHub Inc.
|
|
||||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
|
||||||
*/
|
|
||||||
|
|
||||||
package base
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/openark/golib/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
type simpleLogger struct{}
|
|
||||||
|
|
||||||
func NewDefaultLogger() *simpleLogger {
|
|
||||||
return &simpleLogger{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*simpleLogger) Debug(args ...interface{}) {
|
|
||||||
log.Debug(args[0].(string), args[1:])
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*simpleLogger) Debugf(format string, args ...interface{}) {
|
|
||||||
log.Debugf(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*simpleLogger) Info(args ...interface{}) {
|
|
||||||
log.Info(args[0].(string), args[1:])
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*simpleLogger) Infof(format string, args ...interface{}) {
|
|
||||||
log.Infof(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*simpleLogger) Warning(args ...interface{}) error {
|
|
||||||
return log.Warning(args[0].(string), args[1:])
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*simpleLogger) Warningf(format string, args ...interface{}) error {
|
|
||||||
return log.Warningf(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*simpleLogger) Error(args ...interface{}) error {
|
|
||||||
return log.Error(args[0].(string), args[1:])
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*simpleLogger) Errorf(format string, args ...interface{}) error {
|
|
||||||
return log.Errorf(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*simpleLogger) Errore(err error) error {
|
|
||||||
return log.Errore(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*simpleLogger) Fatal(args ...interface{}) error {
|
|
||||||
return log.Fatal(args[0].(string), args[1:])
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*simpleLogger) Fatalf(format string, args ...interface{}) error {
|
|
||||||
return log.Fatalf(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*simpleLogger) Fatale(err error) error {
|
|
||||||
return log.Fatale(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*simpleLogger) SetLevel(level log.LogLevel) {
|
|
||||||
log.SetLevel(level)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*simpleLogger) SetPrintStackTrace(printStackTraceFlag bool) {
|
|
||||||
log.SetPrintStackTrace(printStackTraceFlag)
|
|
||||||
}
|
|
@ -8,8 +8,8 @@ package base
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/openark/golib/log"
|
"github.com/outbrain/golib/log"
|
||||||
test "github.com/openark/golib/tests"
|
test "github.com/outbrain/golib/tests"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
Copyright 2022 GitHub Inc.
|
Copyright 2016 GitHub Inc.
|
||||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@ -13,8 +13,8 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
gosql "database/sql"
|
gosql "database/sql"
|
||||||
|
|
||||||
"github.com/github/gh-ost/go/mysql"
|
"github.com/github/gh-ost/go/mysql"
|
||||||
|
"github.com/outbrain/golib/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -25,7 +25,9 @@ func PrettifyDurationOutput(d time.Duration) string {
|
|||||||
if d < time.Second {
|
if d < time.Second {
|
||||||
return "0s"
|
return "0s"
|
||||||
}
|
}
|
||||||
return prettifyDurationRegexp.ReplaceAllString(d.String(), "")
|
result := fmt.Sprintf("%s", d)
|
||||||
|
result = prettifyDurationRegexp.ReplaceAllString(result, "")
|
||||||
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func FileExists(fileName string) bool {
|
func FileExists(fileName string) bool {
|
||||||
@ -61,7 +63,7 @@ func StringContainsAll(s string, substrings ...string) bool {
|
|||||||
return nonEmptyStringsFound
|
return nonEmptyStringsFound
|
||||||
}
|
}
|
||||||
|
|
||||||
func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext *MigrationContext, name string) (string, error) {
|
func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext *MigrationContext) (string, error) {
|
||||||
versionQuery := `select @@global.version`
|
versionQuery := `select @@global.version`
|
||||||
var port, extraPort int
|
var port, extraPort int
|
||||||
var version string
|
var version string
|
||||||
@ -69,13 +71,12 @@ func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig,
|
|||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
extraPortQuery := `select @@global.extra_port`
|
extraPortQuery := `select @@global.extra_port`
|
||||||
if err := db.QueryRow(extraPortQuery).Scan(&extraPort); err != nil { // nolint:staticcheck
|
if err := db.QueryRow(extraPortQuery).Scan(&extraPort); err != nil {
|
||||||
// swallow this error. not all servers support extra_port
|
// swallow this error. not all servers support extra_port
|
||||||
}
|
}
|
||||||
// AliyunRDS set users port to "NULL", replace it by gh-ost param
|
// AliyunRDS set users port to "NULL", replace it by gh-ost param
|
||||||
// GCP set users port to "NULL", replace it by gh-ost param
|
// GCP set users port to "NULL", replace it by gh-ost param
|
||||||
// Azure MySQL set users port to a different value by design, replace it by gh-ost para
|
if migrationContext.AliyunRDS || migrationContext.GoogleCloudPlatform {
|
||||||
if migrationContext.AliyunRDS || migrationContext.GoogleCloudPlatform || migrationContext.AzureMySQL {
|
|
||||||
port = connectionConfig.Key.Port
|
port = connectionConfig.Key.Port
|
||||||
} else {
|
} else {
|
||||||
portQuery := `select @@global.port`
|
portQuery := `select @@global.port`
|
||||||
@ -85,7 +86,7 @@ func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if connectionConfig.Key.Port == port || (extraPort > 0 && connectionConfig.Key.Port == extraPort) {
|
if connectionConfig.Key.Port == port || (extraPort > 0 && connectionConfig.Key.Port == extraPort) {
|
||||||
migrationContext.Log.Infof("%s connection validated on %+v", name, connectionConfig.Key)
|
log.Infof("connection validated on %+v", connectionConfig.Key)
|
||||||
return version, nil
|
return version, nil
|
||||||
} else if extraPort == 0 {
|
} else if extraPort == 0 {
|
||||||
return "", fmt.Errorf("Unexpected database port reported: %+v", port)
|
return "", fmt.Errorf("Unexpected database port reported: %+v", port)
|
||||||
|
@ -8,8 +8,8 @@ package base
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/openark/golib/log"
|
"github.com/outbrain/golib/log"
|
||||||
test "github.com/openark/golib/tests"
|
test "github.com/outbrain/golib/tests"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
Copyright 2022 GitHub Inc.
|
Copyright 2016 GitHub Inc.
|
||||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@ -13,13 +13,13 @@ import (
|
|||||||
"github.com/github/gh-ost/go/mysql"
|
"github.com/github/gh-ost/go/mysql"
|
||||||
"github.com/github/gh-ost/go/sql"
|
"github.com/github/gh-ost/go/sql"
|
||||||
|
|
||||||
gomysql "github.com/go-mysql-org/go-mysql/mysql"
|
"github.com/outbrain/golib/log"
|
||||||
"github.com/go-mysql-org/go-mysql/replication"
|
gomysql "github.com/siddontang/go-mysql/mysql"
|
||||||
|
"github.com/siddontang/go-mysql/replication"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
type GoMySQLReader struct {
|
type GoMySQLReader struct {
|
||||||
migrationContext *base.MigrationContext
|
|
||||||
connectionConfig *mysql.ConnectionConfig
|
connectionConfig *mysql.ConnectionConfig
|
||||||
binlogSyncer *replication.BinlogSyncer
|
binlogSyncer *replication.BinlogSyncer
|
||||||
binlogStreamer *replication.BinlogStreamer
|
binlogStreamer *replication.BinlogStreamer
|
||||||
@ -28,40 +28,42 @@ type GoMySQLReader struct {
|
|||||||
LastAppliedRowsEventHint mysql.BinlogCoordinates
|
LastAppliedRowsEventHint mysql.BinlogCoordinates
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewGoMySQLReader(migrationContext *base.MigrationContext) *GoMySQLReader {
|
func NewGoMySQLReader(migrationContext *base.MigrationContext) (binlogReader *GoMySQLReader, err error) {
|
||||||
connectionConfig := migrationContext.InspectorConnectionConfig
|
binlogReader = &GoMySQLReader{
|
||||||
return &GoMySQLReader{
|
connectionConfig: migrationContext.InspectorConnectionConfig,
|
||||||
migrationContext: migrationContext,
|
|
||||||
connectionConfig: connectionConfig,
|
|
||||||
currentCoordinates: mysql.BinlogCoordinates{},
|
currentCoordinates: mysql.BinlogCoordinates{},
|
||||||
currentCoordinatesMutex: &sync.Mutex{},
|
currentCoordinatesMutex: &sync.Mutex{},
|
||||||
binlogSyncer: replication.NewBinlogSyncer(replication.BinlogSyncerConfig{
|
binlogSyncer: nil,
|
||||||
ServerID: uint32(migrationContext.ReplicaServerId),
|
binlogStreamer: nil,
|
||||||
Flavor: gomysql.MySQLFlavor,
|
|
||||||
Host: connectionConfig.Key.Hostname,
|
|
||||||
Port: uint16(connectionConfig.Key.Port),
|
|
||||||
User: connectionConfig.User,
|
|
||||||
Password: connectionConfig.Password,
|
|
||||||
TLSConfig: connectionConfig.TLSConfig(),
|
|
||||||
UseDecimal: true,
|
|
||||||
MaxReconnectAttempts: migrationContext.BinlogSyncerMaxReconnectAttempts,
|
|
||||||
}),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
serverId := uint32(migrationContext.ReplicaServerId)
|
||||||
|
|
||||||
|
binlogSyncerConfig := replication.BinlogSyncerConfig{
|
||||||
|
ServerID: serverId,
|
||||||
|
Flavor: "mysql",
|
||||||
|
Host: binlogReader.connectionConfig.Key.Hostname,
|
||||||
|
Port: uint16(binlogReader.connectionConfig.Key.Port),
|
||||||
|
User: binlogReader.connectionConfig.User,
|
||||||
|
Password: binlogReader.connectionConfig.Password,
|
||||||
|
TLSConfig: binlogReader.connectionConfig.TLSConfig(),
|
||||||
|
UseDecimal: true,
|
||||||
|
}
|
||||||
|
binlogReader.binlogSyncer = replication.NewBinlogSyncer(binlogSyncerConfig)
|
||||||
|
|
||||||
|
return binlogReader, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// ConnectBinlogStreamer
|
// ConnectBinlogStreamer
|
||||||
func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordinates) (err error) {
|
func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordinates) (err error) {
|
||||||
if coordinates.IsEmpty() {
|
if coordinates.IsEmpty() {
|
||||||
return this.migrationContext.Log.Errorf("Empty coordinates at ConnectBinlogStreamer()")
|
return log.Errorf("Empty coordinates at ConnectBinlogStreamer()")
|
||||||
}
|
}
|
||||||
|
|
||||||
this.currentCoordinates = coordinates
|
this.currentCoordinates = coordinates
|
||||||
this.migrationContext.Log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates)
|
log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates)
|
||||||
// Start sync with specified binlog file and position
|
// Start sync with specified binlog file and position
|
||||||
this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{
|
this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{this.currentCoordinates.LogFile, uint32(this.currentCoordinates.LogPos)})
|
||||||
Name: this.currentCoordinates.LogFile,
|
|
||||||
Pos: uint32(this.currentCoordinates.LogPos),
|
|
||||||
})
|
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -76,7 +78,7 @@ func (this *GoMySQLReader) GetCurrentBinlogCoordinates() *mysql.BinlogCoordinate
|
|||||||
// StreamEvents
|
// StreamEvents
|
||||||
func (this *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEvent *replication.RowsEvent, entriesChannel chan<- *BinlogEntry) error {
|
func (this *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEvent *replication.RowsEvent, entriesChannel chan<- *BinlogEntry) error {
|
||||||
if this.currentCoordinates.SmallerThanOrEquals(&this.LastAppliedRowsEventHint) {
|
if this.currentCoordinates.SmallerThanOrEquals(&this.LastAppliedRowsEventHint) {
|
||||||
this.migrationContext.Log.Debugf("Skipping handled query at %+v", this.currentCoordinates)
|
log.Debugf("Skipping handled query at %+v", this.currentCoordinates)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -139,22 +141,20 @@ func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesCha
|
|||||||
defer this.currentCoordinatesMutex.Unlock()
|
defer this.currentCoordinatesMutex.Unlock()
|
||||||
this.currentCoordinates.LogPos = int64(ev.Header.LogPos)
|
this.currentCoordinates.LogPos = int64(ev.Header.LogPos)
|
||||||
}()
|
}()
|
||||||
|
if rotateEvent, ok := ev.Event.(*replication.RotateEvent); ok {
|
||||||
switch binlogEvent := ev.Event.(type) {
|
|
||||||
case *replication.RotateEvent:
|
|
||||||
func() {
|
func() {
|
||||||
this.currentCoordinatesMutex.Lock()
|
this.currentCoordinatesMutex.Lock()
|
||||||
defer this.currentCoordinatesMutex.Unlock()
|
defer this.currentCoordinatesMutex.Unlock()
|
||||||
this.currentCoordinates.LogFile = string(binlogEvent.NextLogName)
|
this.currentCoordinates.LogFile = string(rotateEvent.NextLogName)
|
||||||
}()
|
}()
|
||||||
this.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), binlogEvent.NextLogName)
|
log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), rotateEvent.NextLogName)
|
||||||
case *replication.RowsEvent:
|
} else if rowsEvent, ok := ev.Event.(*replication.RowsEvent); ok {
|
||||||
if err := this.handleRowsEvent(ev, binlogEvent, entriesChannel); err != nil {
|
if err := this.handleRowsEvent(ev, rowsEvent, entriesChannel); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
this.migrationContext.Log.Debugf("done streaming events")
|
log.Debugf("done streaming events")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
Copyright 2022 GitHub Inc.
|
Copyright 2016 GitHub Inc.
|
||||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@ -8,18 +8,16 @@ package main
|
|||||||
import (
|
import (
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
"github.com/github/gh-ost/go/base"
|
"github.com/github/gh-ost/go/base"
|
||||||
"github.com/github/gh-ost/go/logic"
|
"github.com/github/gh-ost/go/logic"
|
||||||
"github.com/github/gh-ost/go/sql"
|
|
||||||
_ "github.com/go-sql-driver/mysql"
|
_ "github.com/go-sql-driver/mysql"
|
||||||
"github.com/openark/golib/log"
|
"github.com/outbrain/golib/log"
|
||||||
|
|
||||||
"golang.org/x/term"
|
"golang.org/x/crypto/ssh/terminal"
|
||||||
)
|
)
|
||||||
|
|
||||||
var AppVersion string
|
var AppVersion string
|
||||||
@ -33,7 +31,7 @@ func acceptSignals(migrationContext *base.MigrationContext) {
|
|||||||
for sig := range c {
|
for sig := range c {
|
||||||
switch sig {
|
switch sig {
|
||||||
case syscall.SIGHUP:
|
case syscall.SIGHUP:
|
||||||
migrationContext.Log.Infof("Received SIGHUP. Reloading configuration")
|
log.Infof("Received SIGHUP. Reloading configuration")
|
||||||
if err := migrationContext.ReadConfigFile(); err != nil {
|
if err := migrationContext.ReadConfigFile(); err != nil {
|
||||||
log.Errore(err)
|
log.Errore(err)
|
||||||
} else {
|
} else {
|
||||||
@ -50,7 +48,6 @@ func main() {
|
|||||||
flag.StringVar(&migrationContext.InspectorConnectionConfig.Key.Hostname, "host", "127.0.0.1", "MySQL hostname (preferably a replica, not the master)")
|
flag.StringVar(&migrationContext.InspectorConnectionConfig.Key.Hostname, "host", "127.0.0.1", "MySQL hostname (preferably a replica, not the master)")
|
||||||
flag.StringVar(&migrationContext.AssumeMasterHostname, "assume-master-host", "", "(optional) explicitly tell gh-ost the identity of the master. Format: some.host.com[:port] This is useful in master-master setups where you wish to pick an explicit master, or in a tungsten-replicator where gh-ost is unable to determine the master")
|
flag.StringVar(&migrationContext.AssumeMasterHostname, "assume-master-host", "", "(optional) explicitly tell gh-ost the identity of the master. Format: some.host.com[:port] This is useful in master-master setups where you wish to pick an explicit master, or in a tungsten-replicator where gh-ost is unable to determine the master")
|
||||||
flag.IntVar(&migrationContext.InspectorConnectionConfig.Key.Port, "port", 3306, "MySQL port (preferably a replica, not the master)")
|
flag.IntVar(&migrationContext.InspectorConnectionConfig.Key.Port, "port", 3306, "MySQL port (preferably a replica, not the master)")
|
||||||
flag.Float64Var(&migrationContext.InspectorConnectionConfig.Timeout, "mysql-timeout", 0.0, "Connect, read and write timeout for MySQL")
|
|
||||||
flag.StringVar(&migrationContext.CliUser, "user", "", "MySQL user")
|
flag.StringVar(&migrationContext.CliUser, "user", "", "MySQL user")
|
||||||
flag.StringVar(&migrationContext.CliPassword, "password", "", "MySQL password")
|
flag.StringVar(&migrationContext.CliPassword, "password", "", "MySQL password")
|
||||||
flag.StringVar(&migrationContext.CliMasterUser, "master-user", "", "MySQL user on master, if different from that on replica. Requires --assume-master-host")
|
flag.StringVar(&migrationContext.CliMasterUser, "master-user", "", "MySQL user on master, if different from that on replica. Requires --assume-master-host")
|
||||||
@ -67,9 +64,6 @@ func main() {
|
|||||||
flag.StringVar(&migrationContext.DatabaseName, "database", "", "database name (mandatory)")
|
flag.StringVar(&migrationContext.DatabaseName, "database", "", "database name (mandatory)")
|
||||||
flag.StringVar(&migrationContext.OriginalTableName, "table", "", "table name (mandatory)")
|
flag.StringVar(&migrationContext.OriginalTableName, "table", "", "table name (mandatory)")
|
||||||
flag.StringVar(&migrationContext.AlterStatement, "alter", "", "alter statement (mandatory)")
|
flag.StringVar(&migrationContext.AlterStatement, "alter", "", "alter statement (mandatory)")
|
||||||
flag.BoolVar(&migrationContext.AttemptInstantDDL, "attempt-instant-ddl", false, "Attempt to use instant DDL for this migration first")
|
|
||||||
storageEngine := flag.String("storage-engine", "innodb", "Specify table storage engine (default: 'innodb'). When 'rocksdb': the session transaction isolation level is changed from REPEATABLE_READ to READ_COMMITTED.")
|
|
||||||
|
|
||||||
flag.BoolVar(&migrationContext.CountTableRows, "exact-rowcount", false, "actually count table rows as opposed to estimate them (results in more accurate progress estimation)")
|
flag.BoolVar(&migrationContext.CountTableRows, "exact-rowcount", false, "actually count table rows as opposed to estimate them (results in more accurate progress estimation)")
|
||||||
flag.BoolVar(&migrationContext.ConcurrentCountTableRows, "concurrent-rowcount", true, "(with --exact-rowcount), when true (default): count rows after row-copy begins, concurrently, and adjust row estimate later on; when false: first count rows, then start row copy")
|
flag.BoolVar(&migrationContext.ConcurrentCountTableRows, "concurrent-rowcount", true, "(with --exact-rowcount), when true (default): count rows after row-copy begins, concurrently, and adjust row estimate later on; when false: first count rows, then start row copy")
|
||||||
flag.BoolVar(&migrationContext.AllowedRunningOnMaster, "allow-on-master", false, "allow this migration to run directly on master. Preferably it would run on a replica")
|
flag.BoolVar(&migrationContext.AllowedRunningOnMaster, "allow-on-master", false, "allow this migration to run directly on master. Preferably it would run on a replica")
|
||||||
@ -81,10 +75,8 @@ func main() {
|
|||||||
flag.BoolVar(&migrationContext.DiscardForeignKeys, "discard-foreign-keys", false, "DANGER! This flag will migrate a table that has foreign keys and will NOT create foreign keys on the ghost table, thus your altered table will have NO foreign keys. This is useful for intentional dropping of foreign keys")
|
flag.BoolVar(&migrationContext.DiscardForeignKeys, "discard-foreign-keys", false, "DANGER! This flag will migrate a table that has foreign keys and will NOT create foreign keys on the ghost table, thus your altered table will have NO foreign keys. This is useful for intentional dropping of foreign keys")
|
||||||
flag.BoolVar(&migrationContext.SkipForeignKeyChecks, "skip-foreign-key-checks", false, "set to 'true' when you know for certain there are no foreign keys on your table, and wish to skip the time it takes for gh-ost to verify that")
|
flag.BoolVar(&migrationContext.SkipForeignKeyChecks, "skip-foreign-key-checks", false, "set to 'true' when you know for certain there are no foreign keys on your table, and wish to skip the time it takes for gh-ost to verify that")
|
||||||
flag.BoolVar(&migrationContext.SkipStrictMode, "skip-strict-mode", false, "explicitly tell gh-ost binlog applier not to enforce strict sql mode")
|
flag.BoolVar(&migrationContext.SkipStrictMode, "skip-strict-mode", false, "explicitly tell gh-ost binlog applier not to enforce strict sql mode")
|
||||||
flag.BoolVar(&migrationContext.AllowZeroInDate, "allow-zero-in-date", false, "explicitly tell gh-ost binlog applier to ignore NO_ZERO_IN_DATE,NO_ZERO_DATE in sql_mode")
|
|
||||||
flag.BoolVar(&migrationContext.AliyunRDS, "aliyun-rds", false, "set to 'true' when you execute on Aliyun RDS.")
|
flag.BoolVar(&migrationContext.AliyunRDS, "aliyun-rds", false, "set to 'true' when you execute on Aliyun RDS.")
|
||||||
flag.BoolVar(&migrationContext.GoogleCloudPlatform, "gcp", false, "set to 'true' when you execute on a 1st generation Google Cloud Platform (GCP).")
|
flag.BoolVar(&migrationContext.GoogleCloudPlatform, "gcp", false, "set to 'true' when you execute on a 1st generation Google Cloud Platform (GCP).")
|
||||||
flag.BoolVar(&migrationContext.AzureMySQL, "azure", false, "set to 'true' when you execute on Azure Database on MySQL.")
|
|
||||||
|
|
||||||
executeFlag := flag.Bool("execute", false, "actually execute the alter & migrate the table. Default is noop: do some tests and exit")
|
executeFlag := flag.Bool("execute", false, "actually execute the alter & migrate the table. Default is noop: do some tests and exit")
|
||||||
flag.BoolVar(&migrationContext.TestOnReplica, "test-on-replica", false, "Have the migration run on a replica, not on the master. At the end of migration replication is stopped, and tables are swapped and immediately swap-revert. Replication remains stopped and you can compare the two tables for building trust")
|
flag.BoolVar(&migrationContext.TestOnReplica, "test-on-replica", false, "Have the migration run on a replica, not on the master. At the end of migration replication is stopped, and tables are swapped and immediately swap-revert. Replication remains stopped and you can compare the two tables for building trust")
|
||||||
@ -103,7 +95,7 @@ func main() {
|
|||||||
flag.BoolVar(&migrationContext.AssumeRBR, "assume-rbr", false, "set to 'true' when you know for certain your server uses 'ROW' binlog_format. gh-ost is unable to tell, event after reading binlog_format, whether the replication process does indeed use 'ROW', and restarts replication to be certain RBR setting is applied. Such operation requires SUPER privileges which you might not have. Setting this flag avoids restarting replication and you can proceed to use gh-ost without SUPER privileges")
|
flag.BoolVar(&migrationContext.AssumeRBR, "assume-rbr", false, "set to 'true' when you know for certain your server uses 'ROW' binlog_format. gh-ost is unable to tell, event after reading binlog_format, whether the replication process does indeed use 'ROW', and restarts replication to be certain RBR setting is applied. Such operation requires SUPER privileges which you might not have. Setting this flag avoids restarting replication and you can proceed to use gh-ost without SUPER privileges")
|
||||||
flag.BoolVar(&migrationContext.CutOverExponentialBackoff, "cut-over-exponential-backoff", false, "Wait exponentially longer intervals between failed cut-over attempts. Wait intervals obey a maximum configurable with 'exponential-backoff-max-interval').")
|
flag.BoolVar(&migrationContext.CutOverExponentialBackoff, "cut-over-exponential-backoff", false, "Wait exponentially longer intervals between failed cut-over attempts. Wait intervals obey a maximum configurable with 'exponential-backoff-max-interval').")
|
||||||
exponentialBackoffMaxInterval := flag.Int64("exponential-backoff-max-interval", 64, "Maximum number of seconds to wait between attempts when performing various operations with exponential backoff.")
|
exponentialBackoffMaxInterval := flag.Int64("exponential-backoff-max-interval", 64, "Maximum number of seconds to wait between attempts when performing various operations with exponential backoff.")
|
||||||
chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 10-100,000)")
|
chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 100-100,000)")
|
||||||
dmlBatchSize := flag.Int64("dml-batch-size", 10, "batch size for DML events to apply in a single transaction (range 1-100)")
|
dmlBatchSize := flag.Int64("dml-batch-size", 10, "batch size for DML events to apply in a single transaction (range 1-100)")
|
||||||
defaultRetries := flag.Int64("default-retries", 60, "Default number of retries for various operations before panicking")
|
defaultRetries := flag.Int64("default-retries", 60, "Default number of retries for various operations before panicking")
|
||||||
cutOverLockTimeoutSeconds := flag.Int64("cut-over-lock-timeout-seconds", 3, "Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout)")
|
cutOverLockTimeoutSeconds := flag.Int64("cut-over-lock-timeout-seconds", 3, "Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout)")
|
||||||
@ -114,9 +106,6 @@ func main() {
|
|||||||
throttleControlReplicas := flag.String("throttle-control-replicas", "", "List of replicas on which to check for lag; comma delimited. Example: myhost1.com:3306,myhost2.com,myhost3.com:3307")
|
throttleControlReplicas := flag.String("throttle-control-replicas", "", "List of replicas on which to check for lag; comma delimited. Example: myhost1.com:3306,myhost2.com,myhost3.com:3307")
|
||||||
throttleQuery := flag.String("throttle-query", "", "when given, issued (every second) to check if operation should throttle. Expecting to return zero for no-throttle, >0 for throttle. Query is issued on the migrated server. Make sure this query is lightweight")
|
throttleQuery := flag.String("throttle-query", "", "when given, issued (every second) to check if operation should throttle. Expecting to return zero for no-throttle, >0 for throttle. Query is issued on the migrated server. Make sure this query is lightweight")
|
||||||
throttleHTTP := flag.String("throttle-http", "", "when given, gh-ost checks given URL via HEAD request; any response code other than 200 (OK) causes throttling; make sure it has low latency response")
|
throttleHTTP := flag.String("throttle-http", "", "when given, gh-ost checks given URL via HEAD request; any response code other than 200 (OK) causes throttling; make sure it has low latency response")
|
||||||
flag.Int64Var(&migrationContext.ThrottleHTTPIntervalMillis, "throttle-http-interval-millis", 100, "Number of milliseconds to wait before triggering another HTTP throttle check")
|
|
||||||
flag.Int64Var(&migrationContext.ThrottleHTTPTimeoutMillis, "throttle-http-timeout-millis", 1000, "Number of milliseconds to use as an HTTP throttle check timeout")
|
|
||||||
ignoreHTTPErrors := flag.Bool("ignore-http-errors", false, "ignore HTTP connection errors during throttle check")
|
|
||||||
heartbeatIntervalMillis := flag.Int64("heartbeat-interval-millis", 100, "how frequently would gh-ost inject a heartbeat value")
|
heartbeatIntervalMillis := flag.Int64("heartbeat-interval-millis", 100, "how frequently would gh-ost inject a heartbeat value")
|
||||||
flag.StringVar(&migrationContext.ThrottleFlagFile, "throttle-flag-file", "", "operation pauses when this file exists; hint: use a file that is specific to the table being altered")
|
flag.StringVar(&migrationContext.ThrottleFlagFile, "throttle-flag-file", "", "operation pauses when this file exists; hint: use a file that is specific to the table being altered")
|
||||||
flag.StringVar(&migrationContext.ThrottleAdditionalFlagFile, "throttle-additional-flag-file", "/tmp/gh-ost.throttle", "operation pauses when this file exists; hint: keep default, use for throttling multiple gh-ost operations")
|
 	flag.StringVar(&migrationContext.ThrottleAdditionalFlagFile, "throttle-additional-flag-file", "/tmp/gh-ost.throttle", "operation pauses when this file exists; hint: keep default, use for throttling multiple gh-ost operations")
@@ -131,15 +120,13 @@ func main() {
 	flag.StringVar(&migrationContext.HooksHintMessage, "hooks-hint", "", "arbitrary message to be injected to hooks via GH_OST_HOOKS_HINT, for your convenience")
 	flag.StringVar(&migrationContext.HooksHintOwner, "hooks-hint-owner", "", "arbitrary name of owner to be injected to hooks via GH_OST_HOOKS_HINT_OWNER, for your convenience")
 	flag.StringVar(&migrationContext.HooksHintToken, "hooks-hint-token", "", "arbitrary token to be injected to hooks via GH_OST_HOOKS_HINT_TOKEN, for your convenience")
-	flag.Int64Var(&migrationContext.HooksStatusIntervalSec, "hooks-status-interval", 60, "how many seconds to wait between calling onStatus hook")

 	flag.UintVar(&migrationContext.ReplicaServerId, "replica-server-id", 99999, "server id used by gh-ost process. Default: 99999")
-	flag.IntVar(&migrationContext.BinlogSyncerMaxReconnectAttempts, "binlogsyncer-max-reconnect-attempts", 0, "when master node fails, the maximum number of binlog synchronization attempts to reconnect. 0 is unlimited")

 	maxLoad := flag.String("max-load", "", "Comma delimited status-name=threshold. e.g: 'Threads_running=100,Threads_connected=500'. When status exceeds threshold, app throttles writes")
 	criticalLoad := flag.String("critical-load", "", "Comma delimited status-name=threshold, same format as --max-load. When status exceeds threshold, app panics and quits")
 	flag.Int64Var(&migrationContext.CriticalLoadIntervalMilliseconds, "critical-load-interval-millis", 0, "When 0, migration immediately bails out upon meeting critical-load. When non-zero, a second check is done after given interval, and migration only bails out if 2nd check still meets critical load")
-	flag.Int64Var(&migrationContext.CriticalLoadHibernateSeconds, "critical-load-hibernate-seconds", 0, "When non-zero, critical-load does not panic and bail out; instead, gh-ost goes into hibernation for the specified duration. It will not read/write anything from/to any server")
+	flag.Int64Var(&migrationContext.CriticalLoadHibernateSeconds, "critical-load-hibernate-seconds", 0, "When nonzero, critical-load does not panic and bail out; instead, gh-ost goes into hibernate for the specified duration. It will not read/write anything to from/to any server")
 	quiet := flag.Bool("quiet", false, "quiet")
 	verbose := flag.Bool("verbose", false, "verbose")
 	debug := flag.Bool("debug", false, "debug mode (very verbose)")
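
The `--max-load` and `--critical-load` flags above share a comma-delimited `status-name=threshold` format. A minimal, self-contained sketch of parsing that format — `parseLoadList` is a hypothetical helper for illustration, not gh-ost's actual `ReadMaxLoad`/`ReadCriticalLoad`:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseLoadList parses "Threads_running=100,Threads_connected=500" into a
// map of status variable name -> threshold. Illustrative sketch only.
func parseLoadList(s string) (map[string]int64, error) {
	thresholds := make(map[string]int64)
	if s == "" {
		return thresholds, nil
	}
	for _, pair := range strings.Split(s, ",") {
		tokens := strings.SplitN(pair, "=", 2)
		if len(tokens) != 2 {
			return nil, fmt.Errorf("expected status-name=threshold, got %q", pair)
		}
		value, err := strconv.ParseInt(tokens[1], 10, 64)
		if err != nil {
			return nil, err
		}
		thresholds[tokens[0]] = value
	}
	return thresholds, nil
}

func main() {
	m, err := parseLoadList("Threads_running=100,Threads_connected=500")
	fmt.Println(m, err)
}
```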
@@ -169,92 +156,69 @@ func main() {
 		return
 	}

-	migrationContext.Log.SetLevel(log.ERROR)
+	log.SetLevel(log.ERROR)
 	if *verbose {
-		migrationContext.Log.SetLevel(log.INFO)
+		log.SetLevel(log.INFO)
 	}
 	if *debug {
-		migrationContext.Log.SetLevel(log.DEBUG)
+		log.SetLevel(log.DEBUG)
 	}
 	if *stack {
-		migrationContext.Log.SetPrintStackTrace(*stack)
+		log.SetPrintStackTrace(*stack)
 	}
 	if *quiet {
 		// Override!!
-		migrationContext.Log.SetLevel(log.ERROR)
+		log.SetLevel(log.ERROR)
 	}
-
-	if err := migrationContext.SetConnectionConfig(*storageEngine); err != nil {
-		migrationContext.Log.Fatale(err)
-	}
-
-	if migrationContext.AlterStatement == "" {
-		log.Fatal("--alter must be provided and statement must not be empty")
-	}
-	parser := sql.NewParserFromAlterStatement(migrationContext.AlterStatement)
-	migrationContext.AlterStatementOptions = parser.GetAlterStatementOptions()

 	if migrationContext.DatabaseName == "" {
-		if parser.HasExplicitSchema() {
-			migrationContext.DatabaseName = parser.GetExplicitSchema()
-		} else {
-			log.Fatal("--database must be provided and database name must not be empty, or --alter must specify database name")
-		}
+		log.Fatalf("--database must be provided and database name must not be empty")
 	}
-	}
-
-	if err := flag.Set("database", url.QueryEscape(migrationContext.DatabaseName)); err != nil {
-		migrationContext.Log.Fatale(err)
-	}
-
 	if migrationContext.OriginalTableName == "" {
-		if parser.HasExplicitTable() {
-			migrationContext.OriginalTableName = parser.GetExplicitTable()
-		} else {
-			log.Fatal("--table must be provided and table name must not be empty, or --alter must specify table name")
-		}
+		log.Fatalf("--table must be provided and table name must not be empty")
 	}
+	if migrationContext.AlterStatement == "" {
+		log.Fatalf("--alter must be provided and statement must not be empty")
 	}
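
On the removed (master) side above, `--database` and `--table` may be inferred from a schema-qualified `ALTER` statement via `sql.NewParserFromAlterStatement`. A rough sketch of that idea — `extractSchemaTable` is hypothetical and ignores backticks and comments, which the real parser in `go/sql` handles:

```go
package main

import (
	"fmt"
	"strings"
)

// extractSchemaTable naively pulls the schema and table out of a statement
// like "ALTER TABLE db.tbl ENGINE=InnoDB". Hypothetical helper only.
func extractSchemaTable(alter string) (schema, table string, ok bool) {
	fields := strings.Fields(alter)
	if len(fields) < 3 || !strings.EqualFold(fields[0], "alter") || !strings.EqualFold(fields[1], "table") {
		return "", "", false
	}
	parts := strings.SplitN(fields[2], ".", 2)
	if len(parts) != 2 {
		return "", "", false
	}
	return parts[0], parts[1], true
}

func main() {
	fmt.Println(extractSchemaTable("ALTER TABLE test.sample ENGINE=InnoDB"))
	// prints: test sample true
}
```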
 	migrationContext.Noop = !(*executeFlag)
 	if migrationContext.AllowedRunningOnMaster && migrationContext.TestOnReplica {
-		migrationContext.Log.Fatal("--allow-on-master and --test-on-replica are mutually exclusive")
+		log.Fatalf("--allow-on-master and --test-on-replica are mutually exclusive")
 	}
 	if migrationContext.AllowedRunningOnMaster && migrationContext.MigrateOnReplica {
-		migrationContext.Log.Fatal("--allow-on-master and --migrate-on-replica are mutually exclusive")
+		log.Fatalf("--allow-on-master and --migrate-on-replica are mutually exclusive")
 	}
 	if migrationContext.MigrateOnReplica && migrationContext.TestOnReplica {
-		migrationContext.Log.Fatal("--migrate-on-replica and --test-on-replica are mutually exclusive")
+		log.Fatalf("--migrate-on-replica and --test-on-replica are mutually exclusive")
 	}
 	if migrationContext.SwitchToRowBinlogFormat && migrationContext.AssumeRBR {
-		migrationContext.Log.Fatal("--switch-to-rbr and --assume-rbr are mutually exclusive")
+		log.Fatalf("--switch-to-rbr and --assume-rbr are mutually exclusive")
 	}
 	if migrationContext.TestOnReplicaSkipReplicaStop {
 		if !migrationContext.TestOnReplica {
-			migrationContext.Log.Fatal("--test-on-replica-skip-replica-stop requires --test-on-replica to be enabled")
+			log.Fatalf("--test-on-replica-skip-replica-stop requires --test-on-replica to be enabled")
 		}
-		migrationContext.Log.Warning("--test-on-replica-skip-replica-stop enabled. We will not stop replication before cut-over. Ensure you have a plugin that does this.")
+		log.Warning("--test-on-replica-skip-replica-stop enabled. We will not stop replication before cut-over. Ensure you have a plugin that does this.")
 	}
 	if migrationContext.CliMasterUser != "" && migrationContext.AssumeMasterHostname == "" {
-		migrationContext.Log.Fatal("--master-user requires --assume-master-host")
+		log.Fatalf("--master-user requires --assume-master-host")
 	}
 	if migrationContext.CliMasterPassword != "" && migrationContext.AssumeMasterHostname == "" {
-		migrationContext.Log.Fatal("--master-password requires --assume-master-host")
+		log.Fatalf("--master-password requires --assume-master-host")
 	}
 	if migrationContext.TLSCACertificate != "" && !migrationContext.UseTLS {
-		migrationContext.Log.Fatal("--ssl-ca requires --ssl")
+		log.Fatalf("--ssl-ca requires --ssl")
 	}
 	if migrationContext.TLSCertificate != "" && !migrationContext.UseTLS {
-		migrationContext.Log.Fatal("--ssl-cert requires --ssl")
+		log.Fatalf("--ssl-cert requires --ssl")
 	}
 	if migrationContext.TLSKey != "" && !migrationContext.UseTLS {
-		migrationContext.Log.Fatal("--ssl-key requires --ssl")
+		log.Fatalf("--ssl-key requires --ssl")
 	}
 	if migrationContext.TLSAllowInsecure && !migrationContext.UseTLS {
-		migrationContext.Log.Fatal("--ssl-allow-insecure requires --ssl")
+		log.Fatalf("--ssl-allow-insecure requires --ssl")
 	}
 	if *replicationLagQuery != "" {
-		migrationContext.Log.Warning("--replication-lag-query is deprecated")
-	}
-	if *storageEngine == "rocksdb" {
-		migrationContext.Log.Warning("RocksDB storage engine support is experimental")
+		log.Warningf("--replication-lag-query is deprecated")
 	}

 	switch *cutOver {
@@ -263,28 +227,28 @@ func main() {
 	case "two-step":
 		migrationContext.CutOverType = base.CutOverTwoStep
 	default:
-		migrationContext.Log.Fatalf("Unknown cut-over: %s", *cutOver)
+		log.Fatalf("Unknown cut-over: %s", *cutOver)
 	}
 	if err := migrationContext.ReadConfigFile(); err != nil {
-		migrationContext.Log.Fatale(err)
+		log.Fatale(err)
 	}
 	if err := migrationContext.ReadThrottleControlReplicaKeys(*throttleControlReplicas); err != nil {
-		migrationContext.Log.Fatale(err)
+		log.Fatale(err)
 	}
 	if err := migrationContext.ReadMaxLoad(*maxLoad); err != nil {
-		migrationContext.Log.Fatale(err)
+		log.Fatale(err)
 	}
 	if err := migrationContext.ReadCriticalLoad(*criticalLoad); err != nil {
-		migrationContext.Log.Fatale(err)
+		log.Fatale(err)
 	}
 	if migrationContext.ServeSocketFile == "" {
 		migrationContext.ServeSocketFile = fmt.Sprintf("/tmp/gh-ost.%s.%s.sock", migrationContext.DatabaseName, migrationContext.OriginalTableName)
 	}
 	if *askPass {
 		fmt.Println("Password:")
-		bytePassword, err := term.ReadPassword(syscall.Stdin)
+		bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
 		if err != nil {
-			migrationContext.Log.Fatale(err)
+			log.Fatale(err)
 		}
 		migrationContext.CliPassword = string(bytePassword)
 	}
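
The `askPass` hunk above documents the move from the deprecated `golang.org/x/crypto/ssh/terminal` package to `golang.org/x/term` on the master side. A standalone usage sketch, assuming the `golang.org/x/term` dependency is available:

```go
package main

import (
	"fmt"
	"os"
	"syscall"

	"golang.org/x/term"
)

func main() {
	fmt.Println("Password:")
	// term.ReadPassword takes an int file descriptor; on some platforms
	// syscall.Stdin is not already an int, hence the explicit conversion.
	bytePassword, err := term.ReadPassword(int(syscall.Stdin))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	_ = string(bytePassword) // use the password
}
```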
@@ -295,26 +259,26 @@ func main() {
 	migrationContext.SetMaxLagMillisecondsThrottleThreshold(*maxLagMillis)
 	migrationContext.SetThrottleQuery(*throttleQuery)
 	migrationContext.SetThrottleHTTP(*throttleHTTP)
-	migrationContext.SetIgnoreHTTPErrors(*ignoreHTTPErrors)
 	migrationContext.SetDefaultNumRetries(*defaultRetries)
 	migrationContext.ApplyCredentials()
 	if err := migrationContext.SetupTLS(); err != nil {
-		migrationContext.Log.Fatale(err)
+		log.Fatale(err)
 	}
 	if err := migrationContext.SetCutOverLockTimeoutSeconds(*cutOverLockTimeoutSeconds); err != nil {
-		migrationContext.Log.Errore(err)
+		log.Errore(err)
 	}
 	if err := migrationContext.SetExponentialBackoffMaxInterval(*exponentialBackoffMaxInterval); err != nil {
-		migrationContext.Log.Errore(err)
+		log.Errore(err)
 	}

 	log.Infof("starting gh-ost %+v", AppVersion)
 	acceptSignals(migrationContext)

-	migrator := logic.NewMigrator(migrationContext, AppVersion)
-	if err := migrator.Migrate(); err != nil {
+	migrator := logic.NewMigrator(migrationContext)
+	err := migrator.Migrate()
+	if err != nil {
 		migrator.ExecOnFailureHook()
-		migrationContext.Log.Fatale(err)
+		log.Fatale(err)
 	}
-	fmt.Fprintln(os.Stdout, "# Done")
+	fmt.Fprintf(os.Stdout, "# Done\n")
 }
go/logic/applier.go
@@ -1,5 +1,5 @@
 /*
-   Copyright 2022 GitHub Inc.
+   Copyright 2016 GitHub Inc.
 	 See https://github.com/github/gh-ost/blob/master/LICENSE
 */

@@ -8,7 +8,6 @@ package logic
 import (
 	gosql "database/sql"
 	"fmt"
-	"strings"
 	"sync/atomic"
 	"time"

@@ -17,12 +16,11 @@ import (
 	"github.com/github/gh-ost/go/mysql"
 	"github.com/github/gh-ost/go/sql"

-	"github.com/openark/golib/log"
-	"github.com/openark/golib/sqlutils"
+	"github.com/outbrain/golib/log"
+	"github.com/outbrain/golib/sqlutils"
 )

 const (
-	GhostChangelogTableComment = "gh-ost changelog"
 	atomicCutOverMagicHint = "ghost-cut-over-sentry"
 )

@@ -48,7 +46,7 @@ func newDmlBuildResultError(err error) *dmlBuildResult {
 	}
 }

-// Applier connects and writes the applier-server, which is the server where migration
+// Applier connects and writes the the applier-server, which is the server where migration
 // happens. This is typically the master, but could be a replica when `--test-on-replica` or
 // `--execute-on-replica` are given.
 // Applier is the one to actually write row data and apply binlog events onto the ghost table.
@@ -59,7 +57,6 @@ type Applier struct {
 	singletonDB *gosql.DB
 	migrationContext *base.MigrationContext
 	finishedMigrating int64
-	name string
 }

 func NewApplier(migrationContext *base.MigrationContext) *Applier {
@@ -67,11 +64,11 @@ func NewApplier(migrationContext *base.MigrationContext) *Applier {
 		connectionConfig: migrationContext.ApplierConnectionConfig,
 		migrationContext: migrationContext,
 		finishedMigrating: 0,
-		name: "applier",
 	}
 }

 func (this *Applier) InitDBConnections() (err error) {

 	applierUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName)
 	if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, applierUri); err != nil {
 		return err
@@ -81,18 +78,18 @@ func (this *Applier) InitDBConnections() (err error) {
 		return err
 	}
 	this.singletonDB.SetMaxOpenConns(1)
-	version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name)
+	version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
 	if err != nil {
 		return err
 	}
-	if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig, this.migrationContext, this.name); err != nil {
+	if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig, this.migrationContext); err != nil {
 		return err
 	}
 	this.migrationContext.ApplierMySQLVersion = version
 	if err := this.validateAndReadTimeZone(); err != nil {
 		return err
 	}
-	if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform && !this.migrationContext.AzureMySQL {
+	if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
 		if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
 			return err
 		} else {
@@ -102,7 +99,7 @@ func (this *Applier) InitDBConnections() (err error) {
 	if err := this.readTableColumns(); err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("Applier initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.ApplierMySQLVersion)
+	log.Infof("Applier initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.ApplierMySQLVersion)
 	return nil
 }

@@ -113,40 +110,13 @@ func (this *Applier) validateAndReadTimeZone() error {
 		return err
 	}

-	this.migrationContext.Log.Infof("will use time_zone='%s' on applier", this.migrationContext.ApplierTimeZone)
+	log.Infof("will use time_zone='%s' on applier", this.migrationContext.ApplierTimeZone)
 	return nil
 }

-// generateSqlModeQuery return a `sql_mode = ...` query, to be wrapped with a `set session` or `set global`,
-// based on gh-ost configuration:
-// - User may skip strict mode
-// - User may allow zero dats or zero in dates
-func (this *Applier) generateSqlModeQuery() string {
-	sqlModeAddendum := []string{`NO_AUTO_VALUE_ON_ZERO`}
-	if !this.migrationContext.SkipStrictMode {
-		sqlModeAddendum = append(sqlModeAddendum, `STRICT_ALL_TABLES`)
-	}
-	sqlModeQuery := fmt.Sprintf("CONCAT(@@session.sql_mode, ',%s')", strings.Join(sqlModeAddendum, ","))
-	if this.migrationContext.AllowZeroInDate {
-		sqlModeQuery = fmt.Sprintf("REPLACE(REPLACE(%s, 'NO_ZERO_IN_DATE', ''), 'NO_ZERO_DATE', '')", sqlModeQuery)
-	}
-
-	return fmt.Sprintf("sql_mode = %s", sqlModeQuery)
-}
-
-// generateInstantDDLQuery returns the SQL for this ALTER operation
-// with an INSTANT assertion (requires MySQL 8.0+)
-func (this *Applier) generateInstantDDLQuery() string {
-	return fmt.Sprintf(`ALTER /* gh-ost */ TABLE %s.%s %s, ALGORITHM=INSTANT`,
-		sql.EscapeName(this.migrationContext.DatabaseName),
-		sql.EscapeName(this.migrationContext.OriginalTableName),
-		this.migrationContext.AlterStatementOptions,
-	)
-}
-
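
For reference, the removed `generateSqlModeQuery` can be traced standalone. This sketch re-runs its logic verbatim, with plain booleans standing in for the migration-context fields of the same names:

```go
package main

import (
	"fmt"
	"strings"
)

// Standalone re-run of the removed generateSqlModeQuery logic.
func generateSqlModeQuery(skipStrictMode, allowZeroInDate bool) string {
	sqlModeAddendum := []string{`NO_AUTO_VALUE_ON_ZERO`}
	if !skipStrictMode {
		sqlModeAddendum = append(sqlModeAddendum, `STRICT_ALL_TABLES`)
	}
	sqlModeQuery := fmt.Sprintf("CONCAT(@@session.sql_mode, ',%s')", strings.Join(sqlModeAddendum, ","))
	if allowZeroInDate {
		sqlModeQuery = fmt.Sprintf("REPLACE(REPLACE(%s, 'NO_ZERO_IN_DATE', ''), 'NO_ZERO_DATE', '')", sqlModeQuery)
	}
	return fmt.Sprintf("sql_mode = %s", sqlModeQuery)
}

func main() {
	// With defaults (strict mode on, zero dates disallowed) this prints:
	// sql_mode = CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO,STRICT_ALL_TABLES')
	fmt.Println(generateSqlModeQuery(false, false))
}
```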
 // readTableColumns reads table columns on applier
 func (this *Applier) readTableColumns() (err error) {
-	this.migrationContext.Log.Infof("Examining table structure on applier")
+	log.Infof("Examining table structure on applier")
 	this.migrationContext.OriginalTableColumnsOnApplier, _, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName)
 	if err != nil {
 		return err
@@ -187,7 +157,7 @@ func (this *Applier) ValidateOrDropExistingTables() error {
 		}
 	}
 	if len(this.migrationContext.GetOldTableName()) > mysql.MaxTableNameLength {
-		this.migrationContext.Log.Fatalf("--timestamp-old-table defined, but resulting table name (%s) is too long (only %d characters allowed)", this.migrationContext.GetOldTableName(), mysql.MaxTableNameLength)
+		log.Fatalf("--timestamp-old-table defined, but resulting table name (%s) is too long (only %d characters allowed)", this.migrationContext.GetOldTableName(), mysql.MaxTableNameLength)
 	}

 	if this.tableExists(this.migrationContext.GetOldTableName()) {
@@ -197,27 +167,6 @@ func (this *Applier) ValidateOrDropExistingTables() error {
 	return nil
 }

-// AttemptInstantDDL attempts to use instant DDL (from MySQL 8.0, and earlier in Aurora and some others).
-// If successful, the operation is only a meta-data change so a lot of time is saved!
-// The risk of attempting to instant DDL when not supported is that a metadata lock may be acquired.
-// This is minor, since gh-ost will eventually require a metadata lock anyway, but at the cut-over stage.
-// Instant operations include:
-// - Adding a column
-// - Dropping a column
-// - Dropping an index
-// - Extending a VARCHAR column
-// - Adding a virtual generated column
-// It is not reliable to parse the `alter` statement to determine if it is instant or not.
-// This is because the table might be in an older row format, or have some other incompatibility
-// that is difficult to identify.
-func (this *Applier) AttemptInstantDDL() error {
-	query := this.generateInstantDDLQuery()
-	this.migrationContext.Log.Infof("INSTANT DDL query is: %s", query)
-	// We don't need a trx, because for instant DDL the SQL mode doesn't matter.
-	_, err := this.db.Exec(query)
-	return err
-}
-
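
The removed `AttemptInstantDDL` above is deliberately fire-and-forget: a refusal costs little, since gh-ost needs a metadata lock at cut-over anyway. A hypothetical caller shape, for illustration only — the real call site lives in the migrator, which this diff does not show, and `attemptInstantDDL` here mirrors the similarly named gh-ost flag:

```go
// executeInstantDDLIfPossible is a hypothetical wrapper, not from this diff:
// try the INSTANT path first and report whether it sufficed.
func executeInstantDDLIfPossible(applier *Applier, attemptInstantDDL bool) (done bool) {
	if !attemptInstantDDL {
		return false
	}
	// generateInstantDDLQuery renders e.g.:
	//   ALTER /* gh-ost */ TABLE `test`.`sample` add column c int, ALGORITHM=INSTANT
	// MySQL < 8.0, or an incompatible table, simply returns an error here,
	// which the caller swallows before falling back to the regular copy flow.
	return applier.AttemptInstantDDL() == nil
}
```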
 // CreateGhostTable creates the ghost table on the applier host
 func (this *Applier) CreateGhostTable() error {
 	query := fmt.Sprintf(`create /* gh-ost */ table %s.%s like %s.%s`,
@@ -226,37 +175,15 @@ func (this *Applier) CreateGhostTable() error {
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.OriginalTableName),
 	)
-	this.migrationContext.Log.Infof("Creating ghost table %s.%s",
+	log.Infof("Creating ghost table %s.%s",
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.GetGhostTableName()),
 	)
-	err := func() error {
-		tx, err := this.db.Begin()
-		if err != nil {
-			return err
-		}
-		defer tx.Rollback()
-
-		sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
-		sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
-
-		if _, err := tx.Exec(sessionQuery); err != nil {
-			return err
-		}
-		if _, err := tx.Exec(query); err != nil {
-			return err
-		}
-		this.migrationContext.Log.Infof("Ghost table created")
-		if err := tx.Commit(); err != nil {
-			// Neither SET SESSION nor ALTER are really transactional, so strictly speaking
-			// there's no need to commit; but let's do this the legit way anyway.
+	if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
 		return err
 	}
+	log.Infof("Ghost table created")
 	return nil
-	}()
-
-	return err
 }

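
The transaction on the master side of `CreateGhostTable` exists only to pin a connection: `database/sql` pools connections, so two separate `Exec` calls could land on different sessions and the `SET SESSION` would not apply to the DDL. A condensed drop-in sketch of the pattern (imports `database/sql` as `gosql`, matching this file's alias):

```go
// execWithSession pins one pooled connection via a transaction so that a
// session-scoped SET and the DDL run on the same connection.
func execWithSession(db *gosql.DB, sessionQuery, ddl string) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()
	if _, err := tx.Exec(sessionQuery); err != nil {
		return err
	}
	if _, err := tx.Exec(ddl); err != nil {
		return err
	}
	// DDL is not transactional in MySQL; Commit just finishes cleanly.
	return tx.Commit()
}
```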
 // AlterGhost applies `alter` statement on ghost table
@@ -264,58 +191,17 @@ func (this *Applier) AlterGhost() error {
 	query := fmt.Sprintf(`alter /* gh-ost */ table %s.%s %s`,
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.GetGhostTableName()),
-		this.migrationContext.AlterStatementOptions,
+		this.migrationContext.AlterStatement,
 	)
-	this.migrationContext.Log.Infof("Altering ghost table %s.%s",
+	log.Infof("Altering ghost table %s.%s",
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.GetGhostTableName()),
 	)
-	this.migrationContext.Log.Debugf("ALTER statement: %s", query)
+	log.Debugf("ALTER statement: %s", query)
-
-	err := func() error {
-		tx, err := this.db.Begin()
-		if err != nil {
-			return err
-		}
-		defer tx.Rollback()
-
-		sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
-		sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
-
-		if _, err := tx.Exec(sessionQuery); err != nil {
-			return err
-		}
-		if _, err := tx.Exec(query); err != nil {
-			return err
-		}
-		this.migrationContext.Log.Infof("Ghost table altered")
-		if err := tx.Commit(); err != nil {
-			// Neither SET SESSION nor ALTER are really transactional, so strictly speaking
-			// there's no need to commit; but let's do this the legit way anyway.
-			return err
-		}
-		return nil
-	}()
-
-	return err
-}
-
-// AlterGhost applies `alter` statement on ghost table
-func (this *Applier) AlterGhostAutoIncrement() error {
-	query := fmt.Sprintf(`alter /* gh-ost */ table %s.%s AUTO_INCREMENT=%d`,
-		sql.EscapeName(this.migrationContext.DatabaseName),
-		sql.EscapeName(this.migrationContext.GetGhostTableName()),
-		this.migrationContext.OriginalTableAutoIncrement,
-	)
-	this.migrationContext.Log.Infof("Altering ghost table AUTO_INCREMENT value %s.%s",
-		sql.EscapeName(this.migrationContext.DatabaseName),
-		sql.EscapeName(this.migrationContext.GetGhostTableName()),
-	)
-	this.migrationContext.Log.Debugf("AUTO_INCREMENT ALTER statement: %s", query)
 	if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("Ghost table AUTO_INCREMENT altered")
+	log.Infof("Ghost table altered")
 	return nil
 }

@@ -325,25 +211,25 @@ func (this *Applier) CreateChangelogTable() error {
 		return err
 	}
 	query := fmt.Sprintf(`create /* gh-ost */ table %s.%s (
-		id bigint unsigned auto_increment,
+		id bigint auto_increment,
 		last_update timestamp not null DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
 		hint varchar(64) charset ascii not null,
 		value varchar(4096) charset ascii not null,
 		primary key(id),
 		unique key hint_uidx(hint)
-	) auto_increment=256 comment='%s'`,
+	) auto_increment=256
+		`,
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.GetChangelogTableName()),
-		GhostChangelogTableComment,
 	)
-	this.migrationContext.Log.Infof("Creating changelog table %s.%s",
+	log.Infof("Creating changelog table %s.%s",
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.GetChangelogTableName()),
 	)
 	if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("Changelog table created")
+	log.Infof("Changelog table created")
 	return nil
 }

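
Tracing the master-side template above: with an illustrative database `test` and changelog table `_sample_ghc` (both names made up here), the rendered DDL can be printed by this standalone snippet:

```go
package main

import "fmt"

// Illustrative rendering of the master-side changelog DDL; the names
// "test" and "_sample_ghc" are invented for the example.
func main() {
	query := fmt.Sprintf(`create /* gh-ost */ table %s.%s (
		id bigint unsigned auto_increment,
		last_update timestamp not null DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
		hint varchar(64) charset ascii not null,
		value varchar(4096) charset ascii not null,
		primary key(id),
		unique key hint_uidx(hint)
	) auto_increment=256 comment='%s'`,
		"`test`", "`_sample_ghc`", "gh-ost changelog",
	)
	fmt.Println(query)
}
```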
@@ -353,14 +239,14 @@ func (this *Applier) dropTable(tableName string) error {
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(tableName),
 	)
-	this.migrationContext.Log.Infof("Dropping table %s.%s",
+	log.Infof("Dropping table %s.%s",
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(tableName),
 	)
 	if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("Table dropped")
+	log.Infof("Table dropped")
 	return nil
 }

@@ -404,6 +290,7 @@ func (this *Applier) WriteChangelog(hint, value string) (string, error) {
 		sql.EscapeName(this.migrationContext.GetChangelogTableName()),
 	)
 	_, err := sqlutils.ExecNoPrepare(this.db, query, explicitId, hint, value)
+	this.migrationContext.PanicAbortIfTableError(err)
 	return hint, err
 }

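
This added `PanicAbortIfTableError` call is the namesake change of the `abort-miss` branch: if the changelog table has gone missing, writing to it should abort the migration rather than retry forever. The function's body is not shown anywhere in this diff, so the following is a speculative sketch only — it assumes go-sql-driver's error type (imported under a distinct alias, since `mysql` already names a gh-ost package in this file) and a panic-abort channel on the migration context:

```go
// Speculative sketch of PanicAbortIfTableError; not taken from the branch.
// Assumes: driver "github.com/go-sql-driver/mysql" and a PanicAbort channel.
func (this *MigrationContext) PanicAbortIfTableError(err error) {
	if err == nil {
		return
	}
	if driverErr, ok := err.(*driver.MySQLError); ok && driverErr.Number == 1146 { // ER_NO_SUCH_TABLE
		this.PanicAbort <- err // abort the migration instead of retrying
	}
}
```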
@@ -427,7 +314,7 @@ func (this *Applier) InitiateHeartbeat() {
 		if _, err := this.WriteChangelog("heartbeat", time.Now().Format(time.RFC3339Nano)); err != nil {
 			numSuccessiveFailures++
 			if numSuccessiveFailures > this.migrationContext.MaxRetries() {
-				return this.migrationContext.Log.Errore(err)
+				return log.Errore(err)
 			}
 		} else {
 			numSuccessiveFailures = 0
@@ -436,9 +323,8 @@ func (this *Applier) InitiateHeartbeat() {
 	}
 	injectHeartbeat()

-	ticker := time.NewTicker(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
-	defer ticker.Stop()
-	for range ticker.C {
+	heartbeatTick := time.Tick(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
+	for range heartbeatTick {
 		if atomic.LoadInt64(&this.finishedMigrating) > 0 {
 			return
 		}
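
The heartbeat hunk above documents the master-side move from `time.Tick` to `time.NewTicker`: a ticker created by `time.Tick` can never be stopped, so each migration leaked one. A standalone sketch of the stoppable pattern:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// time.Tick leaks its underlying ticker; time.NewTicker plus a
	// deferred Stop releases it when the loop returns.
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	deadline := time.After(time.Second)
	for {
		select {
		case t := <-ticker.C:
			fmt.Println("heartbeat at", t)
		case <-deadline:
			return
		}
	}
}
```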
@@ -463,97 +349,62 @@ func (this *Applier) ExecuteThrottleQuery() (int64, error) {
 	}
 	var result int64
 	if err := this.db.QueryRow(throttleQuery).Scan(&result); err != nil {
-		return 0, this.migrationContext.Log.Errore(err)
+		return 0, log.Errore(err)
 	}
 	return result, nil
 }

-// readMigrationMinValues returns the minimum values to be iterated on rowcopy
-func (this *Applier) readMigrationMinValues(tx *gosql.Tx, uniqueKey *sql.UniqueKey) error {
-	this.migrationContext.Log.Debugf("Reading migration range according to key: %s", uniqueKey.Name)
+// ReadMigrationMinValues returns the minimum values to be iterated on rowcopy
+func (this *Applier) ReadMigrationMinValues(uniqueKey *sql.UniqueKey) error {
+	log.Debugf("Reading migration range according to key: %s", uniqueKey.Name)
 	query, err := sql.BuildUniqueKeyMinValuesPreparedQuery(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &uniqueKey.Columns)
 	if err != nil {
 		return err
 	}
-	rows, err := tx.Query(query)
+	rows, err := this.db.Query(query)
 	if err != nil {
 		return err
 	}
-	defer rows.Close()

 	for rows.Next() {
 		this.migrationContext.MigrationRangeMinValues = sql.NewColumnValues(uniqueKey.Len())
 		if err = rows.Scan(this.migrationContext.MigrationRangeMinValues.ValuesPointers...); err != nil {
 			return err
 		}
 	}
-	this.migrationContext.Log.Infof("Migration min values: [%s]", this.migrationContext.MigrationRangeMinValues)
+	log.Infof("Migration min values: [%s]", this.migrationContext.MigrationRangeMinValues)
-	return rows.Err()
+	return err
 }
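
Besides moving the query onto a transaction, the min-values hunk adds `defer rows.Close()` and returns `rows.Err()` instead of the stale `err`. The idiomatic shape, as a drop-in sketch (imports `database/sql` as `gosql`, as in this file):

```go
// Idiomatic database/sql iteration: always close the rows, and surface
// whatever error terminated the loop via rows.Err().
func queryOne(db *gosql.DB, query string, dest ...interface{}) error {
	rows, err := db.Query(query)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		if err := rows.Scan(dest...); err != nil {
			return err
		}
	}
	return rows.Err() // non-nil if iteration stopped early
}
```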

-// readMigrationMaxValues returns the maximum values to be iterated on rowcopy
-func (this *Applier) readMigrationMaxValues(tx *gosql.Tx, uniqueKey *sql.UniqueKey) error {
-	this.migrationContext.Log.Debugf("Reading migration range according to key: %s", uniqueKey.Name)
+// ReadMigrationMaxValues returns the maximum values to be iterated on rowcopy
+func (this *Applier) ReadMigrationMaxValues(uniqueKey *sql.UniqueKey) error {
+	log.Debugf("Reading migration range according to key: %s", uniqueKey.Name)
 	query, err := sql.BuildUniqueKeyMaxValuesPreparedQuery(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &uniqueKey.Columns)
 	if err != nil {
 		return err
 	}
-	rows, err := tx.Query(query)
+	rows, err := this.db.Query(query)
 	if err != nil {
 		return err
 	}
-	defer rows.Close()

 	for rows.Next() {
 		this.migrationContext.MigrationRangeMaxValues = sql.NewColumnValues(uniqueKey.Len())
 		if err = rows.Scan(this.migrationContext.MigrationRangeMaxValues.ValuesPointers...); err != nil {
 			return err
 		}
 	}
-	this.migrationContext.Log.Infof("Migration max values: [%s]", this.migrationContext.MigrationRangeMaxValues)
+	log.Infof("Migration max values: [%s]", this.migrationContext.MigrationRangeMaxValues)
-	return rows.Err()
+	return err
 }

-// ReadMigrationRangeValues reads min/max values that will be used for rowcopy.
-// Before read min/max, write a changelog state into the ghc table to avoid lost data in mysql two-phase commit.
-/*
-	Detail description of the lost data in mysql two-phase commit issue by @Fanduzi:
-	When using semi-sync and setting rpl_semi_sync_master_wait_point=AFTER_SYNC,
-	if an INSERT statement is being committed but blocks due to an unmet ack count,
-	the data inserted by the transaction is not visible to ReadMigrationRangeValues,
-	so the copy of the existing data in the table does not include the new row inserted by the transaction.
-	However, the binlog event for the transaction is already written to the binlog,
-	so the addDMLEventsListener only captures the binlog event after the transaction,
-	and thus the transaction's binlog event is not captured, resulting in data loss.
-
-	If write a changelog into ghc table before ReadMigrationRangeValues, and the transaction commit blocks
-	because the ack is not met, then the changelog will not be able to write, so the ReadMigrationRangeValues
-	will not be run. When the changelog writes successfully, the ReadMigrationRangeValues will read the
-	newly inserted data, thus Avoiding data loss due to the above problem.
-*/
+// ReadMigrationRangeValues reads min/max values that will be used for rowcopy
 func (this *Applier) ReadMigrationRangeValues() error {
-	if _, err := this.WriteChangelogState(string(ReadMigrationRangeValues)); err != nil {
+	if err := this.ReadMigrationMinValues(this.migrationContext.UniqueKey); err != nil {
 		return err
 	}
-	tx, err := this.db.Begin()
-	if err != nil {
+	if err := this.ReadMigrationMaxValues(this.migrationContext.UniqueKey); err != nil {
 		return err
 	}
-	defer tx.Rollback()
-
-	if err := this.readMigrationMinValues(tx, this.migrationContext.UniqueKey); err != nil {
-		return err
-	}
-	if err := this.readMigrationMaxValues(tx, this.migrationContext.UniqueKey); err != nil {
-		return err
-	}
-
-	return tx.Commit()
+	return nil
 }

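
Condensing the removed comment block: under semi-sync replication with `rpl_semi_sync_master_wait_point=AFTER_SYNC`, a row can already sit in the binlog while its transaction is still invisible to readers, so the range snapshot could miss it even though the binlog listener also starts too late to replay it. Writing a changelog row first queues behind the same ack, which serializes the range read after the pending commit. Schematically — a restatement of the master-side flow above, where `readRangeSafely` is a made-up name and every call referenced appears in this diff:

```go
// Schematic restatement of the master-side ReadMigrationRangeValues flow.
func (this *Applier) readRangeSafely() error {
	// 1. Write a changelog state row first. Under semi-sync AFTER_SYNC this
	//    write queues behind any commit that is binlogged but not yet acked.
	if _, err := this.WriteChangelogState(string(ReadMigrationRangeValues)); err != nil {
		return err
	}
	// 2. By the time the write returns, every transaction whose binlog event
	//    precedes ours is visible, so the min/max snapshot below cannot miss
	//    a row that the binlog stream will also never replay.
	tx, err := this.db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()
	if err := this.readMigrationMinValues(tx, this.migrationContext.UniqueKey); err != nil {
		return err
	}
	if err := this.readMigrationMaxValues(tx, this.migrationContext.UniqueKey); err != nil {
		return err
	}
	return tx.Commit()
}
```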
 // CalculateNextIterationRangeEndValues reads the next-iteration-range-end unique key values,
@@ -583,13 +434,10 @@ func (this *Applier) CalculateNextIterationRangeEndValues() (hasFurtherRange boo
 		if err != nil {
 			return hasFurtherRange, err
 		}

 		rows, err := this.db.Query(query, explodedArgs...)
 		if err != nil {
 			return hasFurtherRange, err
 		}
-		defer rows.Close()

 		iterationRangeMaxValues := sql.NewColumnValues(this.migrationContext.UniqueKey.Len())
 		for rows.Next() {
 			if err = rows.Scan(iterationRangeMaxValues.ValuesPointers...); err != nil {
@@ -597,15 +445,12 @@ func (this *Applier) CalculateNextIterationRangeEndValues() (hasFurtherRange boo
 			}
 			hasFurtherRange = true
 		}
-		if err = rows.Err(); err != nil {
-			return hasFurtherRange, err
-		}
 		if hasFurtherRange {
 			this.migrationContext.MigrationIterationRangeMaxValues = iterationRangeMaxValues
 			return hasFurtherRange, nil
 		}
 	}
-	this.migrationContext.Log.Debugf("Iteration complete: no further range to iterate")
+	log.Debugf("Iteration complete: no further range to iterate")
 	return hasFurtherRange, nil
 }

@@ -638,9 +483,12 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected
 		return nil, err
 	}
 	defer tx.Rollback()

 	sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
-	sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
+	sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
+	if !this.migrationContext.SkipStrictMode {
+		sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
+	}
+	sessionQuery = fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)

 	if _, err := tx.Exec(sessionQuery); err != nil {
 		return nil, err
@@ -660,7 +508,7 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected
 	}
 	rowsAffected, _ = sqlResult.RowsAffected()
 	duration = time.Since(startTime)
-	this.migrationContext.Log.Debugf(
+	log.Debugf(
 		"Issued INSERT on range: [%s]..[%s]; iteration: %d; chunk-size: %d",
 		this.migrationContext.MigrationIterationRangeMinValues,
 		this.migrationContext.MigrationIterationRangeMaxValues,
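
Both sides aim at the same per-chunk session state; the master side just reaches it through the shared `generateSqlModeQuery` shown earlier in this diff. A literal standalone trace of the older (right-hand) construction, assuming an applier time zone of `+00:00` and `SkipStrictMode == false` — note the addendum literal already starts with a comma, exactly as in the hunk:

```go
package main

import "fmt"

// Literal trace of the older inline sessionQuery construction above.
func main() {
	sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, "+00:00")
	sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
	sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
	sessionQuery = fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, ',%s')", sessionQuery, sqlModeAddendum)
	fmt.Println(sessionQuery)
}
```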
@@ -675,7 +523,7 @@ func (this *Applier) LockOriginalTable() error {
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.OriginalTableName),
 	)
-	this.migrationContext.Log.Infof("Locking %s.%s",
+	log.Infof("Locking %s.%s",
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.OriginalTableName),
 	)
@@ -683,18 +531,18 @@ func (this *Applier) LockOriginalTable() error {
 	if _, err := sqlutils.ExecNoPrepare(this.singletonDB, query); err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("Table locked")
+	log.Infof("Table locked")
 	return nil
 }

 // UnlockTables makes tea. No wait, it unlocks tables.
 func (this *Applier) UnlockTables() error {
 	query := `unlock /* gh-ost */ tables`
-	this.migrationContext.Log.Infof("Unlocking tables")
+	log.Infof("Unlocking tables")
 	if _, err := sqlutils.ExecNoPrepare(this.singletonDB, query); err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("Tables unlocked")
+	log.Infof("Tables unlocked")
 	return nil
 }

@@ -708,7 +556,7 @@ func (this *Applier) SwapTablesQuickAndBumpy() error {
 		sql.EscapeName(this.migrationContext.OriginalTableName),
 		sql.EscapeName(this.migrationContext.GetOldTableName()),
 	)
-	this.migrationContext.Log.Infof("Renaming original table")
+	log.Infof("Renaming original table")
 	this.migrationContext.RenameTablesStartTime = time.Now()
 	if _, err := sqlutils.ExecNoPrepare(this.singletonDB, query); err != nil {
 		return err
@@ -718,13 +566,13 @@ func (this *Applier) SwapTablesQuickAndBumpy() error {
 		sql.EscapeName(this.migrationContext.GetGhostTableName()),
 		sql.EscapeName(this.migrationContext.OriginalTableName),
 	)
-	this.migrationContext.Log.Infof("Renaming ghost table")
+	log.Infof("Renaming ghost table")
 	if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
 		return err
 	}
 	this.migrationContext.RenameTablesEndTime = time.Now()

-	this.migrationContext.Log.Infof("Tables renamed")
+	log.Infof("Tables renamed")
 	return nil
 }

@@ -743,7 +591,7 @@ func (this *Applier) RenameTablesRollback() (renameError error) {
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.OriginalTableName),
 	)
-	this.migrationContext.Log.Infof("Renaming back both tables")
+	log.Infof("Renaming back both tables")
 	if _, err := sqlutils.ExecNoPrepare(this.db, query); err == nil {
 		return nil
 	}
@@ -754,7 +602,7 @@ func (this *Applier) RenameTablesRollback() (renameError error) {
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.GetGhostTableName()),
 	)
-	this.migrationContext.Log.Infof("Renaming back to ghost table")
+	log.Infof("Renaming back to ghost table")
 	if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
 		renameError = err
 	}
@@ -764,11 +612,11 @@ func (this *Applier) RenameTablesRollback() (renameError error) {
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.OriginalTableName),
 	)
-	this.migrationContext.Log.Infof("Renaming back to original table")
+	log.Infof("Renaming back to original table")
 	if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
 		renameError = err
 	}
-	return this.migrationContext.Log.Errore(renameError)
+	return log.Errore(renameError)
 }

 // StopSlaveIOThread is applicable with --test-on-replica; it stops the IO thread, duh.
@@ -776,44 +624,44 @@ func (this *Applier) RenameTablesRollback() (renameError error) {
 // and have them written to the binary log, so that we can then read them via streamer.
 func (this *Applier) StopSlaveIOThread() error {
 	query := `stop /* gh-ost */ slave io_thread`
-	this.migrationContext.Log.Infof("Stopping replication IO thread")
+	log.Infof("Stopping replication IO thread")
 	if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("Replication IO thread stopped")
+	log.Infof("Replication IO thread stopped")
 	return nil
 }

 // StartSlaveIOThread is applicable with --test-on-replica
 func (this *Applier) StartSlaveIOThread() error {
 	query := `start /* gh-ost */ slave io_thread`
-	this.migrationContext.Log.Infof("Starting replication IO thread")
+	log.Infof("Starting replication IO thread")
 	if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("Replication IO thread started")
+	log.Infof("Replication IO thread started")
 	return nil
 }

 // StartSlaveSQLThread is applicable with --test-on-replica
 func (this *Applier) StopSlaveSQLThread() error {
 	query := `stop /* gh-ost */ slave sql_thread`
-	this.migrationContext.Log.Infof("Verifying SQL thread is stopped")
+	log.Infof("Verifying SQL thread is stopped")
 	if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("SQL thread stopped")
+	log.Infof("SQL thread stopped")
 	return nil
 }

 // StartSlaveSQLThread is applicable with --test-on-replica
 func (this *Applier) StartSlaveSQLThread() error {
 	query := `start /* gh-ost */ slave sql_thread`
-	this.migrationContext.Log.Infof("Verifying SQL thread is running")
+	log.Infof("Verifying SQL thread is running")
 	if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("SQL thread started")
+	log.Infof("SQL thread started")
 	return nil
 }

@@ -830,7 +678,7 @@ func (this *Applier) StopReplication() error {
 	if err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("Replication IO thread at %+v. SQL thread is at %+v", *readBinlogCoordinates, *executeBinlogCoordinates)
+	log.Infof("Replication IO thread at %+v. SQL thread is at %+v", *readBinlogCoordinates, *executeBinlogCoordinates)
 	return nil
 }

@@ -842,7 +690,7 @@ func (this *Applier) StartReplication() error {
 	if err := this.StartSlaveSQLThread(); err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("Replication started")
+	log.Infof("Replication started")
 	return nil
 }

@@ -856,7 +704,7 @@ func (this *Applier) ExpectUsedLock(sessionId int64) error {
 	var result int64
 	query := `select is_used_lock(?)`
 	lockName := this.GetSessionLockName(sessionId)
-	this.migrationContext.Log.Infof("Checking session lock: %s", lockName)
+	log.Infof("Checking session lock: %s", lockName)
 	if err := this.db.QueryRow(query, lockName).Scan(&result); err != nil || result != sessionId {
 		return fmt.Errorf("Session lock %s expected to be found but wasn't", lockName)
 	}
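
The cut-over machinery here and below leans on MySQL user-level locks. A standalone sketch of the `get_lock`/`is_used_lock` handshake, assuming a reachable MySQL server and the go-sql-driver dependency; the DSN and lock name are illustrative:

```go
package main

import (
	gosql "database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // driver registration
)

func main() {
	db, err := gosql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pin one connection via a transaction: user-level locks are
	// session-scoped, exactly as in AtomicCutOverMagicLock below.
	tx, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback()

	var got int
	if err := tx.QueryRow(`select get_lock('gh-ost.sketch.lock', 0)`).Scan(&got); err != nil || got != 1 {
		log.Fatalf("unable to acquire lock: %v", err)
	}

	// Another session can verify the holder with is_used_lock(), which
	// returns the holding connection id or NULL (cf. ExpectUsedLock above).
	var holder gosql.NullInt64
	if err := db.QueryRow(`select is_used_lock('gh-ost.sketch.lock')`).Scan(&holder); err != nil {
		log.Fatal(err)
	}
	fmt.Println("lock held by connection", holder.Int64)
}
```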
@@ -891,7 +739,7 @@ func (this *Applier) ExpectProcess(sessionId int64, stateHint, infoHint string)
 // DropAtomicCutOverSentryTableIfExists checks if the "old" table name
 // happens to be a cut-over magic table; if so, it drops it.
 func (this *Applier) DropAtomicCutOverSentryTableIfExists() error {
-	this.migrationContext.Log.Infof("Looking for magic cut-over table")
+	log.Infof("Looking for magic cut-over table")
 	tableName := this.migrationContext.GetOldTableName()
 	rowMap := this.showTableStatus(tableName)
 	if rowMap == nil {
@@ -901,7 +749,7 @@ func (this *Applier) DropAtomicCutOverSentryTableIfExists() error {
 	if rowMap["Comment"].String != atomicCutOverMagicHint {
 		return fmt.Errorf("Expected magic comment on %s, did not find it", tableName)
 	}
-	this.migrationContext.Log.Infof("Dropping magic cut-over table")
+	log.Infof("Dropping magic cut-over table")
 	return this.dropTable(tableName)
 }

@@ -921,14 +769,14 @@ func (this *Applier) CreateAtomicCutOverSentryTable() error {
 		this.migrationContext.TableEngine,
 		atomicCutOverMagicHint,
 	)
-	this.migrationContext.Log.Infof("Creating magic cut-over table %s.%s",
+	log.Infof("Creating magic cut-over table %s.%s",
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(tableName),
 	)
 	if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("Magic cut-over table created")
+	log.Infof("Magic cut-over table created")

 	return nil
 }
@@ -945,7 +793,6 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
 		tableLocked <- fmt.Errorf("Unexpected error in AtomicCutOverMagicLock(), injected to release blocking channel reads")
 		tableUnlocked <- fmt.Errorf("Unexpected error in AtomicCutOverMagicLock(), injected to release blocking channel reads")
 		tx.Rollback()
-		this.DropAtomicCutOverSentryTableIfExists()
 	}()
 
 	var sessionId int64
@@ -958,7 +805,7 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
 	lockResult := 0
 	query := `select get_lock(?, 0)`
 	lockName := this.GetSessionLockName(sessionId)
-	this.migrationContext.Log.Infof("Grabbing voluntary lock: %s", lockName)
+	log.Infof("Grabbing voluntary lock: %s", lockName)
 	if err := tx.QueryRow(query, lockName).Scan(&lockResult); err != nil || lockResult != 1 {
 		err := fmt.Errorf("Unable to acquire lock %s", lockName)
 		tableLocked <- err
@@ -966,7 +813,7 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
 	}
 
 	tableLockTimeoutSeconds := this.migrationContext.CutOverLockTimeoutSeconds * 2
-	this.migrationContext.Log.Infof("Setting LOCK timeout as %d seconds", tableLockTimeoutSeconds)
+	log.Infof("Setting LOCK timeout as %d seconds", tableLockTimeoutSeconds)
 	query = fmt.Sprintf(`set session lock_wait_timeout:=%d`, tableLockTimeoutSeconds)
 	if _, err := tx.Exec(query); err != nil {
 		tableLocked <- err
@@ -984,7 +831,7 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.GetOldTableName()),
 	)
-	this.migrationContext.Log.Infof("Locking %s.%s, %s.%s",
+	log.Infof("Locking %s.%s, %s.%s",
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.OriginalTableName),
 		sql.EscapeName(this.migrationContext.DatabaseName),
@@ -995,7 +842,7 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
 		tableLocked <- err
 		return err
 	}
-	this.migrationContext.Log.Infof("Tables locked")
+	log.Infof("Tables locked")
 	tableLocked <- nil // No error.
 
 	// From this point on, we are committed to UNLOCK TABLES. No matter what happens,
@@ -1004,23 +851,22 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
 	// The cut-over phase will proceed to apply remaining backlog onto ghost table,
 	// and issue RENAME. We wait here until told to proceed.
 	<-okToUnlockTable
-	this.migrationContext.Log.Infof("Will now proceed to drop magic table and unlock tables")
+	log.Infof("Will now proceed to drop magic table and unlock tables")
 
 	// The magic table is here because we locked it. And we are the only ones allowed to drop it.
 	// And in fact, we will:
-	this.migrationContext.Log.Infof("Dropping magic cut-over table")
+	log.Infof("Dropping magic cut-over table")
 	query = fmt.Sprintf(`drop /* gh-ost */ table if exists %s.%s`,
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.GetOldTableName()),
 	)
-
 	if _, err := tx.Exec(query); err != nil {
-		this.migrationContext.Log.Errore(err)
+		log.Errore(err)
 		// We DO NOT return here because we must `UNLOCK TABLES`!
 	}
 
 	// Tables still locked
-	this.migrationContext.Log.Infof("Releasing lock from %s.%s, %s.%s",
+	log.Infof("Releasing lock from %s.%s, %s.%s",
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.OriginalTableName),
 		sql.EscapeName(this.migrationContext.DatabaseName),
@@ -1029,9 +875,9 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
 	query = `unlock tables`
 	if _, err := tx.Exec(query); err != nil {
 		tableUnlocked <- err
-		return this.migrationContext.Log.Errore(err)
+		return log.Errore(err)
 	}
-	this.migrationContext.Log.Infof("Tables unlocked")
+	log.Infof("Tables unlocked")
 	tableUnlocked <- nil
 	return nil
 }
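A working rule in the locking goroutine above: once LOCK TABLES succeeds, the session must reach UNLOCK TABLES no matter which intermediate step fails, and lock_wait_timeout is bounded so a contended lock fails fast instead of queueing behind application traffic indefinitely. A condensed sketch of that discipline on a pinned connection; the table names and the 6-second timeout are illustrative, not gh-ost's defaults:

package cutover

import (
	"context"
	"database/sql"
)

// lockAndHold sketches the "lock, then unconditionally unlock" discipline.
func lockAndHold(ctx context.Context, conn *sql.Conn, okToUnlock <-chan struct{}) error {
	// Bound how long LOCK TABLES may queue behind running queries.
	if _, err := conn.ExecContext(ctx, `set session lock_wait_timeout:=6`); err != nil {
		return err
	}
	if _, err := conn.ExecContext(ctx, "lock /* gh-ost */ tables `test`.`mytable` write, `test`.`_mytable_del` write"); err != nil {
		return err
	}
	// From this point the session is committed to UNLOCK TABLES, whatever happens
	// in between; the deferred call's error is deliberately ignored in this sketch.
	defer conn.ExecContext(ctx, `unlock tables`)

	// The migrator signals that the backlog is applied and the RENAME is queued
	// behind our lock; only then may the lock be released.
	<-okToUnlock
	return nil
}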
@@ -1053,7 +899,7 @@ func (this *Applier) AtomicCutoverRename(sessionIdChan chan int64, tablesRenamed
 	}
 	sessionIdChan <- sessionId
 
-	this.migrationContext.Log.Infof("Setting RENAME timeout as %d seconds", this.migrationContext.CutOverLockTimeoutSeconds)
+	log.Infof("Setting RENAME timeout as %d seconds", this.migrationContext.CutOverLockTimeoutSeconds)
 	query := fmt.Sprintf(`set session lock_wait_timeout:=%d`, this.migrationContext.CutOverLockTimeoutSeconds)
 	if _, err := tx.Exec(query); err != nil {
 		return err
@@ -1069,13 +915,15 @@ func (this *Applier) AtomicCutoverRename(sessionIdChan chan int64, tablesRenamed
 		sql.EscapeName(this.migrationContext.DatabaseName),
 		sql.EscapeName(this.migrationContext.OriginalTableName),
 	)
-	this.migrationContext.Log.Infof("Issuing and expecting this to block: %s", query)
+	log.Infof("Issuing and expecting this to block: %s", query)
 	if _, err := tx.Exec(query); err != nil {
+		this.migrationContext.PanicAbortIfTableError(err)
+
 		tablesRenamed <- err
-		return this.migrationContext.Log.Errore(err)
+		return log.Errore(err)
 	}
 	tablesRenamed <- nil
-	this.migrationContext.Log.Infof("Tables renamed")
+	log.Infof("Tables renamed")
 	return nil
 }
 
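Aside from the logging churn, the functional change in this hunk is on the branch side: when the deliberately blocking RENAME fails, it first calls PanicAbortIfTableError, so that a missing-table error aborts the migration instead of being retried. The method's body is not shown anywhere in this diff; a plausible sketch under that assumption, keying off the MySQL server error codes for missing tables and gh-ost's PanicAbort channel on the migration context:

package base

import "github.com/go-sql-driver/mysql"

// PanicAbortIfTableError is a sketch, not the branch's actual implementation:
// if a statement failed because a table involved in the cut-over has gone
// missing, push the error onto PanicAbort so the migrator bails out.
func (this *MigrationContext) PanicAbortIfTableError(err error) {
	if mysqlErr, ok := err.(*mysql.MySQLError); ok {
		// 1017: ER_FILE_NOT_FOUND; 1146: ER_NO_SUCH_TABLE
		if mysqlErr.Number == 1017 || mysqlErr.Number == 1146 {
			this.PanicAbort <- err
		}
	}
}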
@@ -1137,6 +985,7 @@ func (this *Applier) buildDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) (result
 
 // ApplyDMLEventQueries applies multiple DML queries onto the _ghost_ table
 func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) error {
+
 	var totalDelta int64
 
 	err := func() error {
@@ -1151,7 +1000,12 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
 		}
 
 		sessionQuery := "SET SESSION time_zone = '+00:00'"
-		sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
+
+		sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO`
+		if !this.migrationContext.SkipStrictMode {
+			sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
+		}
+		sessionQuery = fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, '%s')", sessionQuery, sqlModeAddendum)
 
 		if _, err := tx.Exec(sessionQuery); err != nil {
 			return rollback(err)
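The branch side inlines what master factors into generateSqlModeQuery() (exercised by the deleted test file further down): the applier session always appends NO_AUTO_VALUE_ON_ZERO, so explicit zero values in AUTO_INCREMENT columns survive the copy, plus STRICT_ALL_TABLES unless --skip-strict-mode was given. A small reconstruction of the statement actually sent to MySQL:

package main

import "fmt"

// sessionQuery rebuilds the branch-side logic; skipStrictMode mirrors --skip-strict-mode.
func sessionQuery(skipStrictMode bool) string {
	query := "SET SESSION time_zone = '+00:00'"
	sqlModeAddendum := `,NO_AUTO_VALUE_ON_ZERO` // explicit 0 in AUTO_INCREMENT columns is copied verbatim
	if !skipStrictMode {
		sqlModeAddendum = fmt.Sprintf("%s,STRICT_ALL_TABLES", sqlModeAddendum)
	}
	return fmt.Sprintf("%s, sql_mode = CONCAT(@@session.sql_mode, '%s')", query, sqlModeAddendum)
}

func main() {
	fmt.Println(sessionQuery(false))
	// SET SESSION time_zone = '+00:00', sql_mode = CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO,STRICT_ALL_TABLES')
}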
@@ -1161,20 +1015,11 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
 			if buildResult.err != nil {
 				return rollback(buildResult.err)
 			}
-			result, err := tx.Exec(buildResult.query, buildResult.args...)
-			if err != nil {
-				err = fmt.Errorf("%w; query=%s; args=%+v", err, buildResult.query, buildResult.args)
+			if _, err := tx.Exec(buildResult.query, buildResult.args...); err != nil {
+				err = fmt.Errorf("%s; query=%s; args=%+v", err.Error(), buildResult.query, buildResult.args)
 				return rollback(err)
 			}
-
-			rowsAffected, err := result.RowsAffected()
-			if err != nil {
-				log.Warningf("error getting rows affected from DML event query: %s. i'm going to assume that the DML affected a single row, but this may result in inaccurate statistics", err)
-				rowsAffected = 1
-			}
-			// each DML is either a single insert (delta +1), update (delta +0) or delete (delta -1).
-			// multiplying by the rows actually affected (either 0 or 1) will give an accurate row delta for this DML event
-			totalDelta += buildResult.rowsDelta * rowsAffected
+			totalDelta += buildResult.rowsDelta
 		}
 	}
 	if err := tx.Commit(); err != nil {
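Master's side of this hunk refines the RowsDeltaEstimate accounting: the branch adds rowsDelta unconditionally per event, while master scales it by RowsAffected(), so a REPLACE or DELETE that matched no row contributes nothing, and a driver error falls back to assuming one row. The accounting step in isolation:

package apply

import "database/sql"

// applyDelta sketches master's per-event bookkeeping: rowsDelta is +1 for an
// insert, 0 for an update, -1 for a delete; scaling by the rows actually
// affected (0 or 1) keeps the running estimate honest for no-op DML events.
func applyDelta(result sql.Result, rowsDelta int64, totalDelta *int64) {
	rowsAffected, err := result.RowsAffected()
	if err != nil {
		// assume a single affected row rather than dropping the event from the estimate
		rowsAffected = 1
	}
	*totalDelta += rowsDelta * rowsAffected
}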
@@ -1184,19 +1029,19 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
 	}()
 
 	if err != nil {
-		return this.migrationContext.Log.Errore(err)
+		return log.Errore(err)
 	}
 	// no error
 	atomic.AddInt64(&this.migrationContext.TotalDMLEventsApplied, int64(len(dmlEvents)))
 	if this.migrationContext.CountTableRows {
 		atomic.AddInt64(&this.migrationContext.RowsDeltaEstimate, totalDelta)
 	}
-	this.migrationContext.Log.Debugf("ApplyDMLEventQueries() applied %d events in one transaction", len(dmlEvents))
+	log.Debugf("ApplyDMLEventQueries() applied %d events in one transaction", len(dmlEvents))
 	return nil
 }
 
 func (this *Applier) Teardown() {
-	this.migrationContext.Log.Debugf("Tearing down...")
+	log.Debugf("Tearing down...")
 	this.db.Close()
 	this.singletonDB.Close()
 	atomic.StoreInt64(&this.finishedMigrating, 1)
@@ -1,185 +0,0 @@
-/*
-   Copyright 2022 GitHub Inc.
-   See https://github.com/github/gh-ost/blob/master/LICENSE
-*/
-
-package logic
-
-import (
-	"strings"
-	"testing"
-
-	test "github.com/openark/golib/tests"
-
-	"github.com/github/gh-ost/go/base"
-	"github.com/github/gh-ost/go/binlog"
-	"github.com/github/gh-ost/go/sql"
-)
-
-func TestApplierGenerateSqlModeQuery(t *testing.T) {
-	migrationContext := base.NewMigrationContext()
-	applier := NewApplier(migrationContext)
-
-	{
-		test.S(t).ExpectEquals(
-			applier.generateSqlModeQuery(),
-			`sql_mode = CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO,STRICT_ALL_TABLES')`,
-		)
-	}
-	{
-		migrationContext.SkipStrictMode = true
-		migrationContext.AllowZeroInDate = false
-		test.S(t).ExpectEquals(
-			applier.generateSqlModeQuery(),
-			`sql_mode = CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO')`,
-		)
-	}
-	{
-		migrationContext.SkipStrictMode = false
-		migrationContext.AllowZeroInDate = true
-		test.S(t).ExpectEquals(
-			applier.generateSqlModeQuery(),
-			`sql_mode = REPLACE(REPLACE(CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO,STRICT_ALL_TABLES'), 'NO_ZERO_IN_DATE', ''), 'NO_ZERO_DATE', '')`,
-		)
-	}
-	{
-		migrationContext.SkipStrictMode = true
-		migrationContext.AllowZeroInDate = true
-		test.S(t).ExpectEquals(
-			applier.generateSqlModeQuery(),
-			`sql_mode = REPLACE(REPLACE(CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO'), 'NO_ZERO_IN_DATE', ''), 'NO_ZERO_DATE', '')`,
-		)
-	}
-}
-
-func TestApplierUpdateModifiesUniqueKeyColumns(t *testing.T) {
-	columns := sql.NewColumnList([]string{"id", "item_id"})
-	columnValues := sql.ToColumnValues([]interface{}{123456, 42})
-
-	migrationContext := base.NewMigrationContext()
-	migrationContext.OriginalTableColumns = columns
-	migrationContext.UniqueKey = &sql.UniqueKey{
-		Name:    t.Name(),
-		Columns: *columns,
-	}
-
-	applier := NewApplier(migrationContext)
-
-	t.Run("unmodified", func(t *testing.T) {
-		modifiedColumn, isModified := applier.updateModifiesUniqueKeyColumns(&binlog.BinlogDMLEvent{
-			DatabaseName:      "test",
-			DML:               binlog.UpdateDML,
-			NewColumnValues:   columnValues,
-			WhereColumnValues: columnValues,
-		})
-		test.S(t).ExpectEquals(modifiedColumn, "")
-		test.S(t).ExpectFalse(isModified)
-	})
-
-	t.Run("modified", func(t *testing.T) {
-		modifiedColumn, isModified := applier.updateModifiesUniqueKeyColumns(&binlog.BinlogDMLEvent{
-			DatabaseName:      "test",
-			DML:               binlog.UpdateDML,
-			NewColumnValues:   sql.ToColumnValues([]interface{}{123456, 24}),
-			WhereColumnValues: columnValues,
-		})
-		test.S(t).ExpectEquals(modifiedColumn, "item_id")
-		test.S(t).ExpectTrue(isModified)
-	})
-}
-
-func TestApplierBuildDMLEventQuery(t *testing.T) {
-	columns := sql.NewColumnList([]string{"id", "item_id"})
-	columnValues := sql.ToColumnValues([]interface{}{123456, 42})
-
-	migrationContext := base.NewMigrationContext()
-	migrationContext.OriginalTableName = "test"
-	migrationContext.OriginalTableColumns = columns
-	migrationContext.SharedColumns = columns
-	migrationContext.MappedSharedColumns = columns
-	migrationContext.UniqueKey = &sql.UniqueKey{
-		Name:    t.Name(),
-		Columns: *columns,
-	}
-
-	applier := NewApplier(migrationContext)
-
-	t.Run("delete", func(t *testing.T) {
-		binlogEvent := &binlog.BinlogDMLEvent{
-			DatabaseName:      "test",
-			DML:               binlog.DeleteDML,
-			WhereColumnValues: columnValues,
-		}
-
-		res := applier.buildDMLEventQuery(binlogEvent)
-		test.S(t).ExpectEquals(len(res), 1)
-		test.S(t).ExpectNil(res[0].err)
-		test.S(t).ExpectEquals(strings.TrimSpace(res[0].query),
-			`delete /* gh-ost `+"`test`.`_test_gho`"+` */
-				from
-					`+"`test`.`_test_gho`"+`
-				where
-					((`+"`id`"+` = ?) and (`+"`item_id`"+` = ?))`)
-		test.S(t).ExpectEquals(len(res[0].args), 2)
-		test.S(t).ExpectEquals(res[0].args[0], 123456)
-		test.S(t).ExpectEquals(res[0].args[1], 42)
-	})
-
-	t.Run("insert", func(t *testing.T) {
-		binlogEvent := &binlog.BinlogDMLEvent{
-			DatabaseName:    "test",
-			DML:             binlog.InsertDML,
-			NewColumnValues: columnValues,
-		}
-		res := applier.buildDMLEventQuery(binlogEvent)
-		test.S(t).ExpectEquals(len(res), 1)
-		test.S(t).ExpectNil(res[0].err)
-		test.S(t).ExpectEquals(strings.TrimSpace(res[0].query),
-			`replace /* gh-ost `+"`test`.`_test_gho`"+` */ into
-				`+"`test`.`_test_gho`"+`
-					`+"(`id`, `item_id`)"+`
-				values
-					(?, ?)`)
-		test.S(t).ExpectEquals(len(res[0].args), 2)
-		test.S(t).ExpectEquals(res[0].args[0], 123456)
-		test.S(t).ExpectEquals(res[0].args[1], 42)
-	})
-
-	t.Run("update", func(t *testing.T) {
-		binlogEvent := &binlog.BinlogDMLEvent{
-			DatabaseName:      "test",
-			DML:               binlog.UpdateDML,
-			NewColumnValues:   columnValues,
-			WhereColumnValues: columnValues,
-		}
-		res := applier.buildDMLEventQuery(binlogEvent)
-		test.S(t).ExpectEquals(len(res), 1)
-		test.S(t).ExpectNil(res[0].err)
-		test.S(t).ExpectEquals(strings.TrimSpace(res[0].query),
-			`update /* gh-ost `+"`test`.`_test_gho`"+` */
-				`+"`test`.`_test_gho`"+`
-				set
-					`+"`id`"+`=?, `+"`item_id`"+`=?
-				where
-					((`+"`id`"+` = ?) and (`+"`item_id`"+` = ?))`)
-		test.S(t).ExpectEquals(len(res[0].args), 4)
-		test.S(t).ExpectEquals(res[0].args[0], 123456)
-		test.S(t).ExpectEquals(res[0].args[1], 42)
-		test.S(t).ExpectEquals(res[0].args[2], 123456)
-		test.S(t).ExpectEquals(res[0].args[3], 42)
-	})
-}
-
-func TestApplierInstantDDL(t *testing.T) {
-	migrationContext := base.NewMigrationContext()
-	migrationContext.DatabaseName = "test"
-	migrationContext.OriginalTableName = "mytable"
-	migrationContext.AlterStatementOptions = "ADD INDEX (foo)"
-	applier := NewApplier(migrationContext)
-
-	t.Run("instantDDLstmt", func(t *testing.T) {
-		stmt := applier.generateInstantDDLQuery()
-		test.S(t).ExpectEquals(stmt, "ALTER /* gh-ost */ TABLE `test`.`mytable` ADD INDEX (foo), ALGORITHM=INSTANT")
-	})
-}
@@ -1,5 +1,6 @@
 /*
-   Copyright 2022 GitHub Inc.
+/*
+   Copyright 2016 GitHub Inc.
    See https://github.com/github/gh-ost/blob/master/LICENSE
 */
 
@@ -7,14 +8,13 @@ package logic
 
 import (
 	"fmt"
-	"io"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"sync/atomic"
 
 	"github.com/github/gh-ost/go/base"
-	"github.com/openark/golib/log"
+	"github.com/outbrain/golib/log"
 )
 
 const (
@@ -35,16 +35,18 @@ const (
 
 type HooksExecutor struct {
 	migrationContext *base.MigrationContext
-	writer           io.Writer
 }
 
 func NewHooksExecutor(migrationContext *base.MigrationContext) *HooksExecutor {
 	return &HooksExecutor{
 		migrationContext: migrationContext,
-		writer:           os.Stderr,
 	}
 }
 
+func (this *HooksExecutor) initHooks() error {
+	return nil
+}
+
 func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) []string {
 	env := os.Environ()
 	env = append(env, fmt.Sprintf("GH_OST_DATABASE_NAME=%s", this.migrationContext.DatabaseName))
@@ -62,26 +64,26 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [
 	env = append(env, fmt.Sprintf("GH_OST_INSPECTED_HOST=%s", this.migrationContext.GetInspectorHostname()))
 	env = append(env, fmt.Sprintf("GH_OST_EXECUTING_HOST=%s", this.migrationContext.Hostname))
 	env = append(env, fmt.Sprintf("GH_OST_INSPECTED_LAG=%f", this.migrationContext.GetCurrentLagDuration().Seconds()))
-	env = append(env, fmt.Sprintf("GH_OST_HEARTBEAT_LAG=%f", this.migrationContext.TimeSinceLastHeartbeatOnChangelog().Seconds()))
 	env = append(env, fmt.Sprintf("GH_OST_PROGRESS=%f", this.migrationContext.GetProgressPct()))
-	env = append(env, fmt.Sprintf("GH_OST_ETA_SECONDS=%d", this.migrationContext.GetETASeconds()))
 	env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT=%s", this.migrationContext.HooksHintMessage))
 	env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_OWNER=%s", this.migrationContext.HooksHintOwner))
 	env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_TOKEN=%s", this.migrationContext.HooksHintToken))
 	env = append(env, fmt.Sprintf("GH_OST_DRY_RUN=%t", this.migrationContext.Noop))
 
-	env = append(env, extraVariables...)
+	for _, variable := range extraVariables {
+		env = append(env, variable)
+	}
 	return env
 }
 
 // executeHook executes a command, and sets relevant environment variables
-// combined output & error are printed to the configured writer.
+// combined output & error are printed to gh-ost's standard error.
 func (this *HooksExecutor) executeHook(hook string, extraVariables ...string) error {
 	cmd := exec.Command(hook)
 	cmd.Env = this.applyEnvironmentVariables(extraVariables...)
 
 	combinedOutput, err := cmd.CombinedOutput()
-	fmt.Fprintln(this.writer, string(combinedOutput))
+	fmt.Fprintln(os.Stderr, string(combinedOutput))
 	return log.Errore(err)
 }
 
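Hooks are plain executables: executeHook runs each one as a child process, passing migration state through GH_OST_* environment variables and surfacing the hook's combined output (on master via the injectable writer this diff removes, on the branch directly to stderr). A minimal standalone sketch of that mechanism; the hook path and variable values are placeholders:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Placeholder hook path; gh-ost resolves hooks under --hooks-path by name prefix.
	cmd := exec.Command("/path/to/hooks/gh-ost-on-status")
	cmd.Env = append(os.Environ(),
		"GH_OST_DATABASE_NAME=test",
		"GH_OST_TABLE_NAME=mytable",
		"GH_OST_DRY_RUN=false", // illustrative values only
	)
	// Hook stdout and stderr are captured together and surfaced to the operator.
	combinedOutput, err := cmd.CombinedOutput()
	fmt.Fprintln(os.Stderr, string(combinedOutput))
	if err != nil {
		fmt.Fprintln(os.Stderr, "hook failed:", err)
	}
}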
@@ -1,113 +0,0 @@
-/*
-   Copyright 2022 GitHub Inc.
-   See https://github.com/github/gh-ost/blob/master/LICENSE
-*/
-
-package logic
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/openark/golib/tests"
-
-	"github.com/github/gh-ost/go/base"
-)
-
-func TestHooksExecutorExecuteHooks(t *testing.T) {
-	migrationContext := base.NewMigrationContext()
-	migrationContext.AlterStatement = "ENGINE=InnoDB"
-	migrationContext.DatabaseName = "test"
-	migrationContext.Hostname = "test.example.com"
-	migrationContext.OriginalTableName = "tablename"
-	migrationContext.RowsDeltaEstimate = 1
-	migrationContext.RowsEstimate = 122
-	migrationContext.TotalRowsCopied = 123456
-	migrationContext.SetETADuration(time.Minute)
-	migrationContext.SetProgressPct(50)
-	hooksExecutor := NewHooksExecutor(migrationContext)
-
-	writeTmpHookFunc := func(testName, hookName, script string) (path string, err error) {
-		if path, err = os.MkdirTemp("", testName); err != nil {
-			return path, err
-		}
-		err = os.WriteFile(filepath.Join(path, hookName), []byte(script), 0777)
-		return path, err
-	}
-
-	t.Run("does-not-exist", func(t *testing.T) {
-		migrationContext.HooksPath = "/does/not/exist"
-		tests.S(t).ExpectNil(hooksExecutor.executeHooks("test-hook"))
-	})
-
-	t.Run("failed", func(t *testing.T) {
-		var err error
-		if migrationContext.HooksPath, err = writeTmpHookFunc(
-			"TestHooksExecutorExecuteHooks-failed",
-			"failed-hook",
-			"#!/bin/sh\nexit 1",
-		); err != nil {
-			panic(err)
-		}
-		defer os.RemoveAll(migrationContext.HooksPath)
-		tests.S(t).ExpectNotNil(hooksExecutor.executeHooks("failed-hook"))
-	})
-
-	t.Run("success", func(t *testing.T) {
-		var err error
-		if migrationContext.HooksPath, err = writeTmpHookFunc(
-			"TestHooksExecutorExecuteHooks-success",
-			"success-hook",
-			"#!/bin/sh\nenv",
-		); err != nil {
-			panic(err)
-		}
-		defer os.RemoveAll(migrationContext.HooksPath)
-
-		var buf bytes.Buffer
-		hooksExecutor.writer = &buf
-		tests.S(t).ExpectNil(hooksExecutor.executeHooks("success-hook", "TEST="+t.Name()))
-
-		scanner := bufio.NewScanner(&buf)
-		for scanner.Scan() {
-			split := strings.SplitN(scanner.Text(), "=", 2)
-			switch split[0] {
-			case "GH_OST_COPIED_ROWS":
-				copiedRows, _ := strconv.ParseInt(split[1], 10, 64)
-				tests.S(t).ExpectEquals(copiedRows, migrationContext.TotalRowsCopied)
-			case "GH_OST_DATABASE_NAME":
-				tests.S(t).ExpectEquals(split[1], migrationContext.DatabaseName)
-			case "GH_OST_DDL":
-				tests.S(t).ExpectEquals(split[1], migrationContext.AlterStatement)
-			case "GH_OST_DRY_RUN":
-				tests.S(t).ExpectEquals(split[1], "false")
-			case "GH_OST_ESTIMATED_ROWS":
-				estimatedRows, _ := strconv.ParseInt(split[1], 10, 64)
-				tests.S(t).ExpectEquals(estimatedRows, int64(123))
-			case "GH_OST_ETA_SECONDS":
-				etaSeconds, _ := strconv.ParseInt(split[1], 10, 64)
-				tests.S(t).ExpectEquals(etaSeconds, int64(60))
-			case "GH_OST_EXECUTING_HOST":
-				tests.S(t).ExpectEquals(split[1], migrationContext.Hostname)
-			case "GH_OST_GHOST_TABLE_NAME":
-				tests.S(t).ExpectEquals(split[1], fmt.Sprintf("_%s_gho", migrationContext.OriginalTableName))
-			case "GH_OST_OLD_TABLE_NAME":
-				tests.S(t).ExpectEquals(split[1], fmt.Sprintf("_%s_del", migrationContext.OriginalTableName))
-			case "GH_OST_PROGRESS":
-				progress, _ := strconv.ParseFloat(split[1], 64)
-				tests.S(t).ExpectEquals(progress, 50.0)
-			case "GH_OST_TABLE_NAME":
-				tests.S(t).ExpectEquals(split[1], migrationContext.OriginalTableName)
-			case "TEST":
-				tests.S(t).ExpectEquals(split[1], t.Name())
-			}
-		}
-	})
-}
@@ -1,14 +1,12 @@
 /*
-   Copyright 2022 GitHub Inc.
+   Copyright 2016 GitHub Inc.
    See https://github.com/github/gh-ost/blob/master/LICENSE
 */
 
 package logic
 
 import (
-	"context"
 	gosql "database/sql"
-	"errors"
 	"fmt"
 	"reflect"
 	"strings"
@@ -19,7 +17,8 @@ import (
 	"github.com/github/gh-ost/go/mysql"
 	"github.com/github/gh-ost/go/sql"
 
-	"github.com/openark/golib/sqlutils"
+	"github.com/outbrain/golib/log"
+	"github.com/outbrain/golib/sqlutils"
 )
 
 const startSlavePostWaitMilliseconds = 500 * time.Millisecond
@@ -31,14 +30,12 @@ type Inspector struct {
 	db                  *gosql.DB
 	informationSchemaDb *gosql.DB
 	migrationContext    *base.MigrationContext
-	name                string
 }
 
 func NewInspector(migrationContext *base.MigrationContext) *Inspector {
 	return &Inspector{
 		connectionConfig: migrationContext.InspectorConnectionConfig,
 		migrationContext: migrationContext,
-		name:             "inspector",
 	}
 }
 
@@ -56,7 +53,7 @@ func (this *Inspector) InitDBConnections() (err error) {
 	if err := this.validateConnection(); err != nil {
 		return err
 	}
-	if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform && !this.migrationContext.AzureMySQL {
+	if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
 		if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
 			return err
 		} else {
@@ -72,7 +69,7 @@ func (this *Inspector) InitDBConnections() (err error) {
 	if err := this.applyBinlogFormat(); err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("Inspector initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.InspectorMySQLVersion)
+	log.Infof("Inspector initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.InspectorMySQLVersion)
 	return nil
 }
 
@@ -113,10 +110,6 @@ func (this *Inspector) InspectOriginalTable() (err error) {
 	if err != nil {
 		return err
 	}
-	this.migrationContext.OriginalTableAutoIncrement, err = this.getAutoIncrementValue(this.migrationContext.OriginalTableName)
-	if err != nil {
-		return err
-	}
 	return nil
 }
 
@@ -133,7 +126,10 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
 	if err != nil {
 		return err
 	}
-	sharedUniqueKeys := this.getSharedUniqueKeys(this.migrationContext.OriginalTableUniqueKeys, this.migrationContext.GhostTableUniqueKeys)
+	sharedUniqueKeys, err := this.getSharedUniqueKeys(this.migrationContext.OriginalTableUniqueKeys, this.migrationContext.GhostTableUniqueKeys)
+	if err != nil {
+		return err
+	}
 	for i, sharedUniqueKey := range sharedUniqueKeys {
 		this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &sharedUniqueKey.Columns)
 		uniqueKeyIsValid := true
@@ -141,14 +137,14 @@
 			switch column.Type {
 			case sql.FloatColumnType:
 				{
-					this.migrationContext.Log.Warning("Will not use %+v as shared key due to FLOAT data type", sharedUniqueKey.Name)
+					log.Warning("Will not use %+v as shared key due to FLOAT data type", sharedUniqueKey.Name)
 					uniqueKeyIsValid = false
 				}
 			case sql.JSONColumnType:
 				{
 					// Noteworthy that at this time MySQL does not allow JSON indexing anyhow, but this code
 					// will remain in place to potentially handle the future case where JSON is supported in indexes.
-					this.migrationContext.Log.Warning("Will not use %+v as shared key due to JSON data type", sharedUniqueKey.Name)
+					log.Warning("Will not use %+v as shared key due to JSON data type", sharedUniqueKey.Name)
 					uniqueKeyIsValid = false
 				}
 			}
@@ -161,17 +157,17 @@
 	if this.migrationContext.UniqueKey == nil {
 		return fmt.Errorf("No shared unique key can be found after ALTER! Bailing out")
 	}
-	this.migrationContext.Log.Infof("Chosen shared unique key is %s", this.migrationContext.UniqueKey.Name)
+	log.Infof("Chosen shared unique key is %s", this.migrationContext.UniqueKey.Name)
 	if this.migrationContext.UniqueKey.HasNullable {
 		if this.migrationContext.NullableUniqueKeyAllowed {
-			this.migrationContext.Log.Warningf("Chosen key (%s) has nullable columns. You have supplied with --allow-nullable-unique-key and so this migration proceeds. As long as there aren't NULL values in this key's column, migration should be fine. NULL values will corrupt migration's data", this.migrationContext.UniqueKey)
+			log.Warningf("Chosen key (%s) has nullable columns. You have supplied with --allow-nullable-unique-key and so this migration proceeds. As long as there aren't NULL values in this key's column, migration should be fine. NULL values will corrupt migration's data", this.migrationContext.UniqueKey)
 		} else {
 			return fmt.Errorf("Chosen key (%s) has nullable columns. Bailing out. To force this operation to continue, supply --allow-nullable-unique-key flag. Only do so if you are certain there are no actual NULL values in this key. As long as there aren't, migration should be fine. NULL values in columns of this key will corrupt migration's data", this.migrationContext.UniqueKey)
 		}
 	}
 
 	this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns = this.getSharedColumns(this.migrationContext.OriginalTableColumns, this.migrationContext.GhostTableColumns, this.migrationContext.OriginalTableVirtualColumns, this.migrationContext.GhostTableVirtualColumns, this.migrationContext.ColumnRenameMap)
-	this.migrationContext.Log.Infof("Shared columns are %s", this.migrationContext.SharedColumns)
+	log.Infof("Shared columns are %s", this.migrationContext.SharedColumns)
 	// By fact that a non-empty unique key exists we also know the shared columns are non-empty
 
 	// This additional step looks at which columns are unsigned. We could have merged this within
@@ -186,20 +182,9 @@
 		if column.Name == mappedColumn.Name && column.Type == sql.DateTimeColumnType && mappedColumn.Type == sql.TimestampColumnType {
 			this.migrationContext.MappedSharedColumns.SetConvertDatetimeToTimestamp(column.Name, this.migrationContext.ApplierTimeZone)
 		}
-		if column.Name == mappedColumn.Name && column.Type == sql.EnumColumnType && mappedColumn.Charset != "" {
-			this.migrationContext.MappedSharedColumns.SetEnumToTextConversion(column.Name)
-			this.migrationContext.MappedSharedColumns.SetEnumValues(column.Name, column.EnumValues)
-		}
-		if column.Name == mappedColumn.Name && column.Charset != mappedColumn.Charset {
-			this.migrationContext.SharedColumns.SetCharsetConversion(column.Name, column.Charset, mappedColumn.Charset)
-		}
 	}
 
 	for _, column := range this.migrationContext.UniqueKey.Columns.Columns() {
-		if this.migrationContext.GhostTableVirtualColumns.GetColumn(column.Name) != nil {
-			// this is a virtual column
-			continue
-		}
 		if this.migrationContext.MappedSharedColumns.HasTimezoneConversion(column.Name) {
 			return fmt.Errorf("No support at this time for converting a column from DATETIME to TIMESTAMP that is also part of the chosen unique key. Column: %s, key: %s", column.Name, this.migrationContext.UniqueKey.Name)
 		}
@@ -214,7 +199,7 @@ func (this *Inspector) validateConnection() error {
 		return fmt.Errorf("MySQL replication length limited to 32 characters. See https://dev.mysql.com/doc/refman/5.7/en/assigning-passwords.html")
 	}
 
-	version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name)
+	version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
 	this.migrationContext.InspectorMySQLVersion = version
 	return err
 }
@@ -265,19 +250,19 @@ func (this *Inspector) validateGrants() error {
 	this.migrationContext.HasSuperPrivilege = foundSuper
 
 	if foundAll {
-		this.migrationContext.Log.Infof("User has ALL privileges")
+		log.Infof("User has ALL privileges")
 		return nil
 	}
 	if foundSuper && foundReplicationSlave && foundDBAll {
-		this.migrationContext.Log.Infof("User has SUPER, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
+		log.Infof("User has SUPER, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
 		return nil
 	}
 	if foundReplicationClient && foundReplicationSlave && foundDBAll {
-		this.migrationContext.Log.Infof("User has REPLICATION CLIENT, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
+		log.Infof("User has REPLICATION CLIENT, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
 		return nil
 	}
-	this.migrationContext.Log.Debugf("Privileges: Super: %t, REPLICATION CLIENT: %t, REPLICATION SLAVE: %t, ALL on *.*: %t, ALL on %s.*: %t", foundSuper, foundReplicationClient, foundReplicationSlave, foundAll, sql.EscapeName(this.migrationContext.DatabaseName), foundDBAll)
-	return this.migrationContext.Log.Errorf("User has insufficient privileges for migration. Needed: SUPER|REPLICATION CLIENT, REPLICATION SLAVE and ALL on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
+	log.Debugf("Privileges: Super: %t, REPLICATION CLIENT: %t, REPLICATION SLAVE: %t, ALL on *.*: %t, ALL on %s.*: %t", foundSuper, foundReplicationClient, foundReplicationSlave, foundAll, sql.EscapeName(this.migrationContext.DatabaseName), foundDBAll)
+	return log.Errorf("User has insufficient privileges for migration. Needed: SUPER|REPLICATION CLIENT, REPLICATION SLAVE and ALL on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
 }
 
 // restartReplication is required so that we are _certain_ the binlog format and
@@ -285,7 +270,7 @@ func (this *Inspector) validateGrants() error {
 // It is entirely possible, for example, that the replication is using 'STATEMENT'
 // binlog format even as the variable says 'ROW'
 func (this *Inspector) restartReplication() error {
-	this.migrationContext.Log.Infof("Restarting replication on %s to make sure binlog settings apply to replication thread", this.connectionConfig.Key.String())
+	log.Infof("Restarting replication on %s:%d to make sure binlog settings apply to replication thread", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
 
 	masterKey, _ := mysql.GetMasterKeyFromSlaveStatus(this.connectionConfig)
 	if masterKey == nil {
@@ -304,7 +289,7 @@ func (this *Inspector) restartReplication() error {
 	}
 	time.Sleep(startSlavePostWaitMilliseconds)
 
-	this.migrationContext.Log.Debugf("Replication restarted")
+	log.Debugf("Replication restarted")
 	return nil
 }
 
@@ -324,7 +309,7 @@ func (this *Inspector) applyBinlogFormat() error {
 		if err := this.restartReplication(); err != nil {
 			return err
 		}
-		this.migrationContext.Log.Debugf("'ROW' binlog format applied")
+		log.Debugf("'ROW' binlog format applied")
 		return nil
 	}
 	// We already have RBR, no explicit switch
@@ -344,13 +329,13 @@ func (this *Inspector) validateBinlogs() error {
 		return err
 	}
 	if !hasBinaryLogs {
-		return fmt.Errorf("%s must have binary logs enabled", this.connectionConfig.Key.String())
+		return fmt.Errorf("%s:%d must have binary logs enabled", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
 	}
 	if this.migrationContext.RequiresBinlogFormatChange() {
 		if !this.migrationContext.SwitchToRowBinlogFormat {
-			return fmt.Errorf("You must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s doesn't have replicas", this.connectionConfig.Key.String())
+			return fmt.Errorf("You must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s:%d doesn't have replicas", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
 		}
-		query := `show /* gh-ost */ slave hosts`
+		query := fmt.Sprintf(`show /* gh-ost */ slave hosts`)
 		countReplicas := 0
 		err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error {
 			countReplicas++
@@ -360,20 +345,21 @@
 			return err
 		}
 		if countReplicas > 0 {
-			return fmt.Errorf("%s has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat)
+			return fmt.Errorf("%s:%d has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat)
 		}
-		this.migrationContext.Log.Infof("%s has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat)
+		log.Infof("%s:%d has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat)
 	}
 	query = `select @@global.binlog_row_image`
 	if err := this.db.QueryRow(query).Scan(&this.migrationContext.OriginalBinlogRowImage); err != nil {
-		return err
+		// Only as of 5.6. We wish to support 5.5 as well
+		this.migrationContext.OriginalBinlogRowImage = "FULL"
 	}
 	this.migrationContext.OriginalBinlogRowImage = strings.ToUpper(this.migrationContext.OriginalBinlogRowImage)
 	if this.migrationContext.OriginalBinlogRowImage != "FULL" {
-		return fmt.Errorf("%s has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogRowImage)
+		return fmt.Errorf("%s:%d has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogRowImage)
 	}
 
-	this.migrationContext.Log.Infof("binary logs validated on %s", this.connectionConfig.Key.String())
+	log.Infof("binary logs validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
 	return nil
 }
 
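The binlog_row_image change just above is a compatibility trade-off: @@global.binlog_row_image only exists as of MySQL 5.6, so the branch treats a failed read as the 5.5 case and assumes FULL, whereas master requires the variable and propagates the error. The tolerant read in isolation:

package inspect

import gosql "database/sql"

// binlogRowImage sketches the branch-side fallback: on servers older than
// MySQL 5.6 the variable does not exist, and 5.5's row image is effectively FULL.
func binlogRowImage(db *gosql.DB) string {
	var image string
	if err := db.QueryRow(`select @@global.binlog_row_image`).Scan(&image); err != nil {
		image = "FULL"
	}
	return image
}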
@@ -386,25 +372,25 @@ func (this *Inspector) validateLogSlaveUpdates() error {
 	}
 
 	if logSlaveUpdates {
-		this.migrationContext.Log.Infof("log_slave_updates validated on %s", this.connectionConfig.Key.String())
+		log.Infof("log_slave_updates validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
 		return nil
 	}
 
 	if this.migrationContext.IsTungsten {
-		this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.String())
+		log.Warningf("log_slave_updates not found on %s:%d, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
 		return nil
 	}
 
 	if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica {
-		return fmt.Errorf("%s must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.String())
+		return fmt.Errorf("%s:%d must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
 	}
 
 	if this.migrationContext.InspectorIsAlsoApplier() {
-		this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.String())
+		log.Warningf("log_slave_updates not found on %s:%d, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
 		return nil
 	}
 
-	return fmt.Errorf("%s must have log_slave_updates enabled for executing migration", this.connectionConfig.Key.String())
+	return fmt.Errorf("%s:%d must have log_slave_updates enabled for executing migration", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
 }
 
 // validateTable makes sure the table we need to operate on actually exists
@@ -427,17 +413,17 @@ func (this *Inspector) validateTable() error {
 		return err
 	}
 	if !tableFound {
-		return this.migrationContext.Log.Errorf("Cannot find table %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
+		return log.Errorf("Cannot find table %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
 	}
-	this.migrationContext.Log.Infof("Table found. Engine=%s", this.migrationContext.TableEngine)
-	this.migrationContext.Log.Debugf("Estimated number of rows via STATUS: %d", this.migrationContext.RowsEstimate)
+	log.Infof("Table found. Engine=%s", this.migrationContext.TableEngine)
+	log.Debugf("Estimated number of rows via STATUS: %d", this.migrationContext.RowsEstimate)
 	return nil
 }
 
 // validateTableForeignKeys makes sure no foreign keys exist on the migrated table
 func (this *Inspector) validateTableForeignKeys(allowChildForeignKeys bool) error {
 	if this.migrationContext.SkipForeignKeyChecks {
-		this.migrationContext.Log.Warning("--skip-foreign-key-checks provided: will not check for foreign keys")
+		log.Warning("--skip-foreign-key-checks provided: will not check for foreign keys")
 		return nil
 	}
 	query := `
@@ -471,16 +457,16 @@
 		return err
 	}
 	if numParentForeignKeys > 0 {
-		return this.migrationContext.Log.Errorf("Found %d parent-side foreign keys on %s.%s. Parent-side foreign keys are not supported. Bailing out", numParentForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
+		return log.Errorf("Found %d parent-side foreign keys on %s.%s. Parent-side foreign keys are not supported. Bailing out", numParentForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
 	}
 	if numChildForeignKeys > 0 {
 		if allowChildForeignKeys {
-			this.migrationContext.Log.Debugf("Foreign keys found and will be dropped, as per given --discard-foreign-keys flag")
+			log.Debugf("Foreign keys found and will be dropped, as per given --discard-foreign-keys flag")
 			return nil
 		}
-		return this.migrationContext.Log.Errorf("Found %d child-side foreign keys on %s.%s. Child-side foreign keys are not supported. Bailing out", numChildForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
+		return log.Errorf("Found %d child-side foreign keys on %s.%s. Child-side foreign keys are not supported. Bailing out", numChildForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
 	}
-	this.migrationContext.Log.Debugf("Validated no foreign keys exist on table")
+	log.Debugf("Validated no foreign keys exist on table")
 	return nil
 }
 
@@ -506,9 +492,9 @@ func (this *Inspector) validateTableTriggers() error {
 		return err
 	}
 	if numTriggers > 0 {
-		return this.migrationContext.Log.Errorf("Found triggers on %s.%s. Triggers are not supported at this time. Bailing out", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
+		return log.Errorf("Found triggers on %s.%s. Triggers are not supported at this time. Bailing out", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
 	}
-	this.migrationContext.Log.Debugf("Validated no triggers exist on table")
+	log.Debugf("Validated no triggers exist on table")
 	return nil
 }
 
@@ -528,48 +514,28 @@ func (this *Inspector) estimateTableRowsViaExplain() error {
 		return err
 	}
 	if !outputFound {
-		return this.migrationContext.Log.Errorf("Cannot run EXPLAIN on %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
+		return log.Errorf("Cannot run EXPLAIN on %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
 	}
-	this.migrationContext.Log.Infof("Estimated number of rows via EXPLAIN: %d", this.migrationContext.RowsEstimate)
+	log.Infof("Estimated number of rows via EXPLAIN: %d", this.migrationContext.RowsEstimate)
 	return nil
 }
 
 // CountTableRows counts exact number of rows on the original table
-func (this *Inspector) CountTableRows(ctx context.Context) error {
+func (this *Inspector) CountTableRows() error {
 	atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 1)
 	defer atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 0)
 
-	this.migrationContext.Log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while")
+	log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while")
 
-	conn, err := this.db.Conn(ctx)
-	if err != nil {
-		return err
-	}
-	defer conn.Close()
-
-	var connectionID string
-	if err := conn.QueryRowContext(ctx, `SELECT /* gh-ost */ CONNECTION_ID()`).Scan(&connectionID); err != nil {
-		return err
-	}
-
-	query := fmt.Sprintf(`select /* gh-ost */ count(*) as count_rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
+	query := fmt.Sprintf(`select /* gh-ost */ count(*) as rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
 	var rowsEstimate int64
-	if err := conn.QueryRowContext(ctx, query).Scan(&rowsEstimate); err != nil {
-		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
-			this.migrationContext.Log.Infof("exact row count cancelled (%s), likely because I'm about to cut over. I'm going to kill that query.", ctx.Err())
-			return mysql.Kill(this.db, connectionID)
-		}
+	if err := this.db.QueryRow(query).Scan(&rowsEstimate); err != nil {
 		return err
 	}
 
-	// row count query finished. nil out the cancel func, so the main migration thread
-	// doesn't bother calling it after row copy is done.
-	this.migrationContext.SetCountTableRowsCancelFunc(nil)
|
|
||||||
|
|
||||||
atomic.StoreInt64(&this.migrationContext.RowsEstimate, rowsEstimate)
|
atomic.StoreInt64(&this.migrationContext.RowsEstimate, rowsEstimate)
|
||||||
this.migrationContext.UsedRowsEstimateMethod = base.CountRowsEstimate
|
this.migrationContext.UsedRowsEstimateMethod = base.CountRowsEstimate
|
||||||
|
|
||||||
this.migrationContext.Log.Infof("Exact number of rows via COUNT: %d", rowsEstimate)
|
log.Infof("Exact number of rows via COUNT: %d", rowsEstimate)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -588,7 +554,6 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
|
|||||||
err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
|
err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
|
||||||
columnName := m.GetString("COLUMN_NAME")
|
columnName := m.GetString("COLUMN_NAME")
|
||||||
columnType := m.GetString("COLUMN_TYPE")
|
columnType := m.GetString("COLUMN_TYPE")
|
||||||
columnOctetLength := m.GetUint("CHARACTER_OCTET_LENGTH")
|
|
||||||
for _, columnsList := range columnsLists {
|
for _, columnsList := range columnsLists {
|
||||||
column := columnsList.GetColumn(columnName)
|
column := columnsList.GetColumn(columnName)
|
||||||
if column == nil {
|
if column == nil {
|
||||||
@ -615,11 +580,6 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
|
|||||||
}
|
}
|
||||||
if strings.HasPrefix(columnType, "enum") {
|
if strings.HasPrefix(columnType, "enum") {
|
||||||
column.Type = sql.EnumColumnType
|
column.Type = sql.EnumColumnType
|
||||||
column.EnumValues = sql.ParseEnumValues(m.GetString("COLUMN_TYPE"))
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(columnType, "binary") {
|
|
||||||
column.Type = sql.BinaryColumnType
|
|
||||||
column.BinaryOctetLength = columnOctetLength
|
|
||||||
}
|
}
|
||||||
if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
|
if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
|
||||||
column.Charset = charset
|
column.Charset = charset
|
||||||
@ -630,24 +590,6 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// getAutoIncrementValue get's the original table's AUTO_INCREMENT value, if exists (0 value if not exists)
|
|
||||||
func (this *Inspector) getAutoIncrementValue(tableName string) (autoIncrement uint64, err error) {
|
|
||||||
query := `
|
|
||||||
SELECT
|
|
||||||
AUTO_INCREMENT
|
|
||||||
FROM INFORMATION_SCHEMA.TABLES
|
|
||||||
WHERE
|
|
||||||
TABLES.TABLE_SCHEMA = ?
|
|
||||||
AND TABLES.TABLE_NAME = ?
|
|
||||||
AND AUTO_INCREMENT IS NOT NULL
|
|
||||||
`
|
|
||||||
err = sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
|
|
||||||
autoIncrement = m.GetUint64("AUTO_INCREMENT")
|
|
||||||
return nil
|
|
||||||
}, this.migrationContext.DatabaseName, tableName)
|
|
||||||
return autoIncrement, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// getCandidateUniqueKeys investigates a table and returns the list of unique keys
|
// getCandidateUniqueKeys investigates a table and returns the list of unique keys
|
||||||
// candidate for chunking
|
// candidate for chunking
|
||||||
func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*sql.UniqueKey), err error) {
|
func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*sql.UniqueKey), err error) {
|
||||||
@ -721,13 +663,13 @@ func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return uniqueKeys, err
|
return uniqueKeys, err
|
||||||
}
|
}
|
||||||
this.migrationContext.Log.Debugf("Potential unique keys in %+v: %+v", tableName, uniqueKeys)
|
log.Debugf("Potential unique keys in %+v: %+v", tableName, uniqueKeys)
|
||||||
return uniqueKeys, nil
|
return uniqueKeys, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getSharedUniqueKeys returns the intersection of two given unique keys,
|
// getSharedUniqueKeys returns the intersection of two given unique keys,
|
||||||
// testing by list of columns
|
// testing by list of columns
|
||||||
func (this *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys []*sql.UniqueKey) (uniqueKeys []*sql.UniqueKey) {
|
func (this *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys [](*sql.UniqueKey)) (uniqueKeys [](*sql.UniqueKey), err error) {
|
||||||
// We actually do NOT rely on key name, just on the set of columns. This is because maybe
|
// We actually do NOT rely on key name, just on the set of columns. This is because maybe
|
||||||
// the ALTER is on the name itself...
|
// the ALTER is on the name itself...
|
||||||
for _, originalUniqueKey := range originalUniqueKeys {
|
for _, originalUniqueKey := range originalUniqueKeys {
|
||||||
@ -737,7 +679,7 @@ func (this *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys [
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return uniqueKeys
|
return uniqueKeys, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getSharedColumns returns the intersection of two lists of columns in same order as the first list
|
// getSharedColumns returns the intersection of two lists of columns in same order as the first list
|
||||||
@ -811,7 +753,7 @@ func (this *Inspector) readChangelogState(hint string) (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (this *Inspector) getMasterConnectionConfig() (applierConfig *mysql.ConnectionConfig, err error) {
|
func (this *Inspector) getMasterConnectionConfig() (applierConfig *mysql.ConnectionConfig, err error) {
|
||||||
this.migrationContext.Log.Infof("Recursively searching for replication master")
|
log.Infof("Recursively searching for replication master")
|
||||||
visitedKeys := mysql.NewInstanceKeyMap()
|
visitedKeys := mysql.NewInstanceKeyMap()
|
||||||
return mysql.GetMasterConnectionConfigSafe(this.connectionConfig, visitedKeys, this.migrationContext.AllowedMasterMaster)
|
return mysql.GetMasterConnectionConfigSafe(this.connectionConfig, visitedKeys, this.migrationContext.AllowedMasterMaster)
|
||||||
}
|
}
|
||||||
@ -826,4 +768,5 @@ func (this *Inspector) getReplicationLag() (replicationLag time.Duration, err er
|
|||||||
func (this *Inspector) Teardown() {
|
func (this *Inspector) Teardown() {
|
||||||
this.db.Close()
|
this.db.Close()
|
||||||
this.informationSchemaDb.Close()
|
this.informationSchemaDb.Close()
|
||||||
|
return
|
||||||
}
|
}
|
||||||
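A note on the removed CountTableRows above: the master side threads a context.Context through the exact row count so that cut-over can cancel a long-running COUNT(*), then KILLs the abandoned server-side query via its CONNECTION_ID. The branch side falls back to a plain blocking QueryRow, which cannot be interrupted once issued. A minimal standalone sketch of the cancellable pattern follows; it is not gh-ost's exact code — the DSN, table name, and use of the go-sql-driver/mysql driver are assumptions for illustration:

    package main

    import (
        "context"
        "database/sql"
        "errors"
        "fmt"
        "log"

        _ "github.com/go-sql-driver/mysql"
    )

    // countRows runs a cancellable COUNT(*). Cancelling ctx only abandons the
    // client side; the query keeps running on the server, so we note the
    // connection id up front and KILL the query if the context fires.
    func countRows(ctx context.Context, db *sql.DB, table string) (int64, error) {
        conn, err := db.Conn(ctx) // pin one connection so CONNECTION_ID() matches the query
        if err != nil {
            return 0, err
        }
        defer conn.Close()

        var connectionID string
        if err := conn.QueryRowContext(ctx, `SELECT CONNECTION_ID()`).Scan(&connectionID); err != nil {
            return 0, err
        }

        var count int64
        query := fmt.Sprintf("select count(*) from %s", table) // placeholder table name
        if err := conn.QueryRowContext(ctx, query).Scan(&count); err != nil {
            if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
                // Kill the still-running server-side query on a fresh connection.
                _, killErr := db.Exec("KILL QUERY " + connectionID)
                return 0, killErr
            }
            return 0, err
        }
        return count, nil
    }

    func main() {
        db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/mydb") // placeholder DSN
        if err != nil {
            log.Fatal(err)
        }
        n, err := countRows(context.Background(), db, "mytable")
        log.Println(n, err)
    }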
@@ -1,31 +0,0 @@
-/*
-   Copyright 2022 GitHub Inc.
-   See https://github.com/github/gh-ost/blob/master/LICENSE
-*/
-
-package logic
-
-import (
-	"testing"
-
-	test "github.com/openark/golib/tests"
-
-	"github.com/github/gh-ost/go/sql"
-)
-
-func TestInspectGetSharedUniqueKeys(t *testing.T) {
-	origUniqKeys := []*sql.UniqueKey{
-		{Columns: *sql.NewColumnList([]string{"id", "item_id"})},
-		{Columns: *sql.NewColumnList([]string{"id", "org_id"})},
-	}
-	ghostUniqKeys := []*sql.UniqueKey{
-		{Columns: *sql.NewColumnList([]string{"id", "item_id"})},
-		{Columns: *sql.NewColumnList([]string{"id", "org_id"})},
-		{Columns: *sql.NewColumnList([]string{"item_id", "user_id"})},
-	}
-	inspector := &Inspector{}
-	sharedUniqKeys := inspector.getSharedUniqueKeys(origUniqKeys, ghostUniqKeys)
-	test.S(t).ExpectEquals(len(sharedUniqKeys), 2)
-	test.S(t).ExpectEquals(sharedUniqKeys[0].Columns.String(), "id,item_id")
-	test.S(t).ExpectEquals(sharedUniqKeys[1].Columns.String(), "id,org_id")
-}
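The deleted test above exercised getSharedUniqueKeys, whose point (per the comment visible in the inspect.go diff) is to intersect unique keys by their ordered column lists rather than by key name, since an ALTER may rename the key itself. A self-contained sketch of that intersection technique, using simplified stand-in types rather than gh-ost's sql.UniqueKey:

    package main

    import (
        "fmt"
        "strings"
    )

    // UniqueKey is a simplified stand-in: identity is the ordered list of
    // covered columns, never the key name.
    type UniqueKey struct {
        Name    string
        Columns []string
    }

    // signature flattens the ordered column list, e.g. "id,item_id".
    func signature(k UniqueKey) string {
        return strings.Join(k.Columns, ",")
    }

    // sharedUniqueKeys returns the original table's keys whose column
    // signature also exists on the ghost table, preserving original order.
    func sharedUniqueKeys(original, ghost []UniqueKey) (shared []UniqueKey) {
        ghostSigs := make(map[string]bool, len(ghost))
        for _, g := range ghost {
            ghostSigs[signature(g)] = true
        }
        for _, o := range original {
            if ghostSigs[signature(o)] {
                shared = append(shared, o)
            }
        }
        return shared
    }

    func main() {
        orig := []UniqueKey{{"uidx_a", []string{"id", "item_id"}}, {"uidx_b", []string{"id", "org_id"}}}
        ghost := []UniqueKey{{"renamed_a", []string{"id", "item_id"}}, {"uidx_c", []string{"item_id", "user_id"}}}
        fmt.Println(sharedUniqueKeys(orig, ghost)) // [{uidx_a [id item_id]}]
    }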
(File diff suppressed because it is too large)
@@ -1,256 +0,0 @@
-/*
-   Copyright 2022 GitHub Inc.
-   See https://github.com/github/gh-ost/blob/master/LICENSE
-*/
-
-package logic
-
-import (
-	"errors"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"testing"
-	"time"
-
-	"github.com/openark/golib/tests"
-
-	"github.com/github/gh-ost/go/base"
-	"github.com/github/gh-ost/go/binlog"
-	"github.com/github/gh-ost/go/sql"
-)
-
-func TestMigratorOnChangelogEvent(t *testing.T) {
-	migrationContext := base.NewMigrationContext()
-	migrator := NewMigrator(migrationContext, "1.2.3")
-
-	t.Run("heartbeat", func(t *testing.T) {
-		columnValues := sql.ToColumnValues([]interface{}{
-			123,
-			time.Now().Unix(),
-			"heartbeat",
-			"2022-08-16T00:45:10.52Z",
-		})
-		tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
-			DatabaseName:    "test",
-			DML:             binlog.InsertDML,
-			NewColumnValues: columnValues,
-		}))
-	})
-
-	t.Run("state-AllEventsUpToLockProcessed", func(t *testing.T) {
-		var wg sync.WaitGroup
-		wg.Add(1)
-		go func(wg *sync.WaitGroup) {
-			defer wg.Done()
-			es := <-migrator.applyEventsQueue
-			tests.S(t).ExpectNotNil(es)
-			tests.S(t).ExpectNotNil(es.writeFunc)
-		}(&wg)
-
-		columnValues := sql.ToColumnValues([]interface{}{
-			123,
-			time.Now().Unix(),
-			"state",
-			AllEventsUpToLockProcessed,
-		})
-		tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
-			DatabaseName:    "test",
-			DML:             binlog.InsertDML,
-			NewColumnValues: columnValues,
-		}))
-		wg.Wait()
-	})
-
-	t.Run("state-GhostTableMigrated", func(t *testing.T) {
-		go func() {
-			tests.S(t).ExpectTrue(<-migrator.ghostTableMigrated)
-		}()
-
-		columnValues := sql.ToColumnValues([]interface{}{
-			123,
-			time.Now().Unix(),
-			"state",
-			GhostTableMigrated,
-		})
-		tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
-			DatabaseName:    "test",
-			DML:             binlog.InsertDML,
-			NewColumnValues: columnValues,
-		}))
-	})
-
-	t.Run("state-Migrated", func(t *testing.T) {
-		columnValues := sql.ToColumnValues([]interface{}{
-			123,
-			time.Now().Unix(),
-			"state",
-			Migrated,
-		})
-		tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
-			DatabaseName:    "test",
-			DML:             binlog.InsertDML,
-			NewColumnValues: columnValues,
-		}))
-	})
-
-	t.Run("state-ReadMigrationRangeValues", func(t *testing.T) {
-		columnValues := sql.ToColumnValues([]interface{}{
-			123,
-			time.Now().Unix(),
-			"state",
-			ReadMigrationRangeValues,
-		})
-		tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
-			DatabaseName:    "test",
-			DML:             binlog.InsertDML,
-			NewColumnValues: columnValues,
-		}))
-	})
-}
-
-func TestMigratorValidateStatement(t *testing.T) {
-	t.Run("add-column", func(t *testing.T) {
-		migrationContext := base.NewMigrationContext()
-		migrator := NewMigrator(migrationContext, "1.2.3")
-		tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test ADD test_new VARCHAR(64) NOT NULL`))
-
-		tests.S(t).ExpectNil(migrator.validateAlterStatement())
-		tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 0)
-	})
-
-	t.Run("drop-column", func(t *testing.T) {
-		migrationContext := base.NewMigrationContext()
-		migrator := NewMigrator(migrationContext, "1.2.3")
-		tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test DROP abc`))
-
-		tests.S(t).ExpectNil(migrator.validateAlterStatement())
-		tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 1)
-		_, exists := migrator.migrationContext.DroppedColumnsMap["abc"]
-		tests.S(t).ExpectTrue(exists)
-	})
-
-	t.Run("rename-column", func(t *testing.T) {
-		migrationContext := base.NewMigrationContext()
-		migrator := NewMigrator(migrationContext, "1.2.3")
-		tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test CHANGE test123 test1234 bigint unsigned`))
-
-		err := migrator.validateAlterStatement()
-		tests.S(t).ExpectNotNil(err)
-		tests.S(t).ExpectTrue(strings.HasPrefix(err.Error(), "gh-ost believes the ALTER statement renames columns"))
-		tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 0)
-	})
-
-	t.Run("rename-column-approved", func(t *testing.T) {
-		migrationContext := base.NewMigrationContext()
-		migrator := NewMigrator(migrationContext, "1.2.3")
-		migrator.migrationContext.ApproveRenamedColumns = true
-		tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test CHANGE test123 test1234 bigint unsigned`))
-
-		tests.S(t).ExpectNil(migrator.validateAlterStatement())
-		tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 0)
-	})
-
-	t.Run("rename-table", func(t *testing.T) {
-		migrationContext := base.NewMigrationContext()
-		migrator := NewMigrator(migrationContext, "1.2.3")
-		tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test RENAME TO test_new`))
-
-		err := migrator.validateAlterStatement()
-		tests.S(t).ExpectNotNil(err)
-		tests.S(t).ExpectTrue(errors.Is(err, ErrMigratorUnsupportedRenameAlter))
-		tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 0)
-	})
-}
-
-func TestMigratorCreateFlagFiles(t *testing.T) {
-	tmpdir, err := os.MkdirTemp("", t.Name())
-	if err != nil {
-		panic(err)
-	}
-	defer os.RemoveAll(tmpdir)
-
-	migrationContext := base.NewMigrationContext()
-	migrationContext.PostponeCutOverFlagFile = filepath.Join(tmpdir, "cut-over.flag")
-	migrator := NewMigrator(migrationContext, "1.2.3")
-	tests.S(t).ExpectNil(migrator.createFlagFiles())
-	tests.S(t).ExpectNil(migrator.createFlagFiles()) // twice to test already-exists
-
-	_, err = os.Stat(migrationContext.PostponeCutOverFlagFile)
-	tests.S(t).ExpectNil(err)
-}
-
-func TestMigratorGetProgressPercent(t *testing.T) {
-	migrationContext := base.NewMigrationContext()
-	migrator := NewMigrator(migrationContext, "1.2.3")
-
-	{
-		tests.S(t).ExpectEquals(migrator.getProgressPercent(0), float64(100.0))
-	}
-	{
-		migrationContext.TotalRowsCopied = 250
-		tests.S(t).ExpectEquals(migrator.getProgressPercent(1000), float64(25.0))
-	}
-}
-
-func TestMigratorGetMigrationStateAndETA(t *testing.T) {
-	migrationContext := base.NewMigrationContext()
-	migrator := NewMigrator(migrationContext, "1.2.3")
-	now := time.Now()
-	migrationContext.RowCopyStartTime = now.Add(-time.Minute)
-	migrationContext.RowCopyEndTime = now
-
-	{
-		migrationContext.TotalRowsCopied = 456
-		state, eta, etaDuration := migrator.getMigrationStateAndETA(123456)
-		tests.S(t).ExpectEquals(state, "migrating")
-		tests.S(t).ExpectEquals(eta, "4h29m44s")
-		tests.S(t).ExpectEquals(etaDuration.String(), "4h29m44s")
-	}
-	{
-		migrationContext.TotalRowsCopied = 456
-		state, eta, etaDuration := migrator.getMigrationStateAndETA(456)
-		tests.S(t).ExpectEquals(state, "migrating")
-		tests.S(t).ExpectEquals(eta, "due")
-		tests.S(t).ExpectEquals(etaDuration.String(), "0s")
-	}
-	{
-		migrationContext.TotalRowsCopied = 123456
-		state, eta, etaDuration := migrator.getMigrationStateAndETA(456)
-		tests.S(t).ExpectEquals(state, "migrating")
-		tests.S(t).ExpectEquals(eta, "due")
-		tests.S(t).ExpectEquals(etaDuration.String(), "0s")
-	}
-	{
-		atomic.StoreInt64(&migrationContext.CountingRowsFlag, 1)
-		state, eta, etaDuration := migrator.getMigrationStateAndETA(123456)
-		tests.S(t).ExpectEquals(state, "counting rows")
-		tests.S(t).ExpectEquals(eta, "due")
-		tests.S(t).ExpectEquals(etaDuration.String(), "0s")
-	}
-	{
-		atomic.StoreInt64(&migrationContext.CountingRowsFlag, 0)
-		atomic.StoreInt64(&migrationContext.IsPostponingCutOver, 1)
-		state, eta, etaDuration := migrator.getMigrationStateAndETA(123456)
-		tests.S(t).ExpectEquals(state, "postponing cut-over")
-		tests.S(t).ExpectEquals(eta, "due")
-		tests.S(t).ExpectEquals(etaDuration.String(), "0s")
-	}
-}
-
-func TestMigratorShouldPrintStatus(t *testing.T) {
-	migrationContext := base.NewMigrationContext()
-	migrator := NewMigrator(migrationContext, "1.2.3")
-
-	tests.S(t).ExpectTrue(migrator.shouldPrintStatus(NoPrintStatusRule, 10, time.Second))                  // test 'rule != HeuristicPrintStatusRule' return
-	tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 10, time.Second))           // test 'etaDuration.Seconds() <= 60'
-	tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 90, time.Second))           // test 'etaDuration.Seconds() <= 60' again
-	tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 90, time.Minute))           // test 'etaDuration.Seconds() <= 180'
-	tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 60, 90*time.Second))        // test 'elapsedSeconds <= 180'
-	tests.S(t).ExpectFalse(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 61, 90*time.Second))       // test 'elapsedSeconds <= 180'
-	tests.S(t).ExpectFalse(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 99, 210*time.Second))      // test 'elapsedSeconds <= 180'
-	tests.S(t).ExpectFalse(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 12345, 86400*time.Second)) // test 'else'
-	tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 30030, 86400*time.Second))  // test 'else' again
-}
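The PanicAbortOnError calls introduced throughout this branch replace direct sends on the migration context's PanicAbort channel. The helper's implementation is not part of this excerpt; one plausible shape — assuming the goal is to deliver the first fatal error exactly once without blocking callers that fire after an abort is already underway — is sketched below with a pared-down stand-in for gh-ost's context type:

    package main

    import (
        "fmt"
        "sync"
    )

    // MigrationContext here is a hypothetical stand-in; gh-ost's real context
    // lives in go/base and carries far more state.
    type MigrationContext struct {
        PanicAbort chan error
        abortOnce  sync.Once
    }

    // PanicAbortOnError forwards the first non-nil error to the abort channel
    // exactly once; later callers return immediately instead of blocking on a
    // channel nobody is draining anymore.
    func (c *MigrationContext) PanicAbortOnError(err error) {
        if err == nil {
            return
        }
        c.abortOnce.Do(func() {
            c.PanicAbort <- err
        })
    }

    func main() {
        ctx := &MigrationContext{PanicAbort: make(chan error, 1)}
        go ctx.PanicAbortOnError(fmt.Errorf("critical-load met"))
        go ctx.PanicAbortOnError(fmt.Errorf("a later error, dropped"))
        fmt.Println(<-ctx.PanicAbort) // whichever error reached Do first
    }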
@@ -1,5 +1,5 @@
 /*
-   Copyright 2022 GitHub Inc.
+   Copyright 2016 GitHub Inc.
   See https://github.com/github/gh-ost/blob/master/LICENSE
 */
 
@@ -16,6 +16,7 @@ import (
 	"sync/atomic"
 
 	"github.com/github/gh-ost/go/base"
+	"github.com/outbrain/golib/log"
 )
 
 type printStatusFunc func(PrintStatusRule, io.Writer)
@@ -48,12 +49,12 @@ func (this *Server) BindSocketFile() (err error) {
 	if err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("Listening on unix socket file: %s", this.migrationContext.ServeSocketFile)
+	log.Infof("Listening on unix socket file: %s", this.migrationContext.ServeSocketFile)
 	return nil
 }
 
 func (this *Server) RemoveSocketFile() (err error) {
-	this.migrationContext.Log.Infof("Removing socket file: %s", this.migrationContext.ServeSocketFile)
+	log.Infof("Removing socket file: %s", this.migrationContext.ServeSocketFile)
 	return os.Remove(this.migrationContext.ServeSocketFile)
 }
 
@@ -65,7 +66,7 @@ func (this *Server) BindTCPPort() (err error) {
 	if err != nil {
 		return err
 	}
-	this.migrationContext.Log.Infof("Listening on tcp port: %d", this.migrationContext.ServeTCPPort)
+	log.Infof("Listening on tcp port: %d", this.migrationContext.ServeTCPPort)
 	return nil
 }
 
@@ -75,7 +76,7 @@ func (this *Server) Serve() (err error) {
 	for {
 		conn, err := this.unixListener.Accept()
 		if err != nil {
-			this.migrationContext.Log.Errore(err)
+			log.Errore(err)
 		}
 		go this.handleConnection(conn)
 	}
@@ -87,7 +88,7 @@ func (this *Server) Serve() (err error) {
 	for {
 		conn, err := this.tcpListener.Accept()
 		if err != nil {
-			this.migrationContext.Log.Errore(err)
+			log.Errore(err)
 		}
 		go this.handleConnection(conn)
 	}
@@ -117,11 +118,13 @@ func (this *Server) onServerCommand(command string, writer *bufio.Writer) (err e
 	} else {
 		fmt.Fprintf(writer, "%s\n", err.Error())
 	}
-	return this.migrationContext.Log.Errore(err)
+	return log.Errore(err)
 }
 
 // applyServerCommand parses and executes commands by user
 func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (printStatusRule PrintStatusRule, err error) {
+	printStatusRule = NoPrintStatusRule
+
 	tokens := strings.SplitN(command, "=", 2)
 	command = strings.TrimSpace(tokens[0])
 	arg := ""
@@ -132,7 +135,7 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
 		}
 	}
 	argIsQuestion := (arg == "?")
-	throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged"
+	throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged\n"
 
 	if err := this.hooksExecutor.onInteractiveCommand(command); err != nil {
 		return NoPrintStatusRule, err
@@ -145,8 +148,6 @@
 status                               # Print a detailed status message
 sup                                  # Print a short status message
 coordinates                          # Print the currently inspected coordinates
-applier                              # Print the hostname of the applier
-inspector                            # Print the hostname of the inspector
 chunk-size=<newsize>                 # Set a new chunk-size
 dml-batch-size=<newsize>             # Set a new dml-batch-size
 nice-ratio=<ratio>                   # Set a new nice-ratio, immediate sleep after each row-copy operation, float (examples: 0 is aggressive, 0.7 adds 70% runtime, 1.0 doubles runtime, 2.0 triples runtime, ...)
@@ -177,22 +178,6 @@ help # This message
 		}
 		return NoPrintStatusRule, fmt.Errorf("coordinates are read-only")
 	}
-	case "applier":
-		if this.migrationContext.ApplierConnectionConfig != nil && this.migrationContext.ApplierConnectionConfig.ImpliedKey != nil {
-			fmt.Fprintf(writer, "Host: %s, Version: %s\n",
-				this.migrationContext.ApplierConnectionConfig.ImpliedKey.String(),
-				this.migrationContext.ApplierMySQLVersion,
-			)
-		}
-		return NoPrintStatusRule, nil
-	case "inspector":
-		if this.migrationContext.InspectorConnectionConfig != nil && this.migrationContext.InspectorConnectionConfig.ImpliedKey != nil {
-			fmt.Fprintf(writer, "Host: %s, Version: %s\n",
-				this.migrationContext.InspectorConnectionConfig.ImpliedKey.String(),
-				this.migrationContext.InspectorMySQLVersion,
-			)
-		}
-		return NoPrintStatusRule, nil
 	case "chunk-size":
 		{
 			if argIsQuestion {
@@ -280,7 +265,7 @@ help # This message
 			return NoPrintStatusRule, nil
 		}
 		this.migrationContext.SetThrottleQuery(arg)
-		fmt.Fprintln(writer, throttleHint)
+		fmt.Fprintf(writer, throttleHint)
 		return ForcePrintStatusAndHintRule, nil
 	}
 	case "throttle-http":
@@ -290,7 +275,7 @@ help # This message
 			return NoPrintStatusRule, nil
 		}
 		this.migrationContext.SetThrottleHTTP(arg)
-		fmt.Fprintln(writer, throttleHint)
+		fmt.Fprintf(writer, throttleHint)
 		return ForcePrintStatusAndHintRule, nil
 	}
 	case "throttle-control-replicas":
@@ -313,7 +298,7 @@ help # This message
 			return NoPrintStatusRule, err
 		}
 		atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 1)
-		fmt.Fprintln(writer, throttleHint)
+		fmt.Fprintf(writer, throttleHint)
 		return ForcePrintStatusAndHintRule, nil
 	}
 	case "no-throttle", "unthrottle", "resume", "continue":
@@ -357,7 +342,7 @@ help # This message
 			return NoPrintStatusRule, err
 		}
 		err := fmt.Errorf("User commanded 'panic'. The migration will be aborted without cleanup. Please drop the gh-ost tables before trying again.")
-		this.migrationContext.PanicAbort <- err
+		this.migrationContext.PanicAbortOnError(err)
 		return NoPrintStatusRule, err
 	}
 	default:
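A remark on the Fprintln-to-Fprintf changes above: passing a runtime string such as throttleHint as the format argument of fmt.Fprintf is fragile, because any '%' in the hint text is parsed as a formatting verb, and modern go vet flags non-constant format strings. A small demonstration of the failure mode and the safe alternatives:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        hint := "# Note: 100% of binlogs must be retained\n" // contains a '%'

        // Fragile: "% o" is parsed as a verb with a missing argument and the
        // output is mangled with a %!o(MISSING) marker; vet also warns here.
        fmt.Fprintf(os.Stdout, hint)

        // Safe: Fprint performs no format interpretation at all.
        fmt.Fprint(os.Stdout, hint)

        // Also safe: Fprintln appends the newline itself, so the hint string
        // does not need a trailing "\n" baked in.
        fmt.Fprintln(os.Stdout, "# Note: you may only throttle while binlogs are retained")
    }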
@@ -1,5 +1,5 @@
 /*
-   Copyright 2022 GitHub Inc.
+   Copyright 2016 GitHub Inc.
   See https://github.com/github/gh-ost/blob/master/LICENSE
 */
 
@@ -16,7 +16,8 @@ import (
 	"github.com/github/gh-ost/go/binlog"
 	"github.com/github/gh-ost/go/mysql"
 
-	"github.com/openark/golib/sqlutils"
+	"github.com/outbrain/golib/log"
+	"github.com/outbrain/golib/sqlutils"
 )
 
 type BinlogEventListener struct {
@@ -42,7 +43,6 @@ type EventsStreamer struct {
 	listenersMutex *sync.Mutex
 	eventsChannel  chan *binlog.BinlogEntry
 	binlogReader   *binlog.GoMySQLReader
-	name           string
 }
 
 func NewEventsStreamer(migrationContext *base.MigrationContext) *EventsStreamer {
@@ -52,13 +52,13 @@ func NewEventsStreamer(migrationContext *base.MigrationContext) *EventsStreamer
 		listeners:      [](*BinlogEventListener){},
 		listenersMutex: &sync.Mutex{},
 		eventsChannel:  make(chan *binlog.BinlogEntry, EventsChannelBufferSize),
-		name:           "streamer",
 	}
 }
 
 // AddListener registers a new listener for binlog events, on a per-table basis
 func (this *EventsStreamer) AddListener(
 	async bool, databaseName string, tableName string, onDmlEvent func(event *binlog.BinlogDMLEvent) error) (err error) {
 
 	this.listenersMutex.Lock()
 	defer this.listenersMutex.Unlock()
 
@@ -86,10 +86,10 @@ func (this *EventsStreamer) notifyListeners(binlogEvent *binlog.BinlogDMLEvent)
 
 	for _, listener := range this.listeners {
 		listener := listener
-		if !strings.EqualFold(listener.databaseName, binlogEvent.DatabaseName) {
+		if strings.ToLower(listener.databaseName) != strings.ToLower(binlogEvent.DatabaseName) {
 			continue
 		}
-		if !strings.EqualFold(listener.tableName, binlogEvent.TableName) {
+		if strings.ToLower(listener.tableName) != strings.ToLower(binlogEvent.TableName) {
 			continue
 		}
 		if listener.async {
@@ -107,7 +107,7 @@ func (this *EventsStreamer) InitDBConnections() (err error) {
 	if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, EventsStreamerUri); err != nil {
 		return err
 	}
-	if _, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name); err != nil {
+	if _, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext); err != nil {
 		return err
 	}
 	if err := this.readCurrentBinlogCoordinates(); err != nil {
@@ -122,7 +122,10 @@ func (this *EventsStreamer) InitDBConnections() (err error) {
 
 // initBinlogReader creates and connects the reader: we hook up to a MySQL server as a replica
 func (this *EventsStreamer) initBinlogReader(binlogCoordinates *mysql.BinlogCoordinates) error {
-	goMySQLReader := binlog.NewGoMySQLReader(this.migrationContext)
+	goMySQLReader, err := binlog.NewGoMySQLReader(this.migrationContext)
+	if err != nil {
+		return err
+	}
 	if err := goMySQLReader.ConnectBinlogStreamer(*binlogCoordinates); err != nil {
 		return err
 	}
@@ -157,7 +160,7 @@ func (this *EventsStreamer) readCurrentBinlogCoordinates() error {
 	if !foundMasterStatus {
 		return fmt.Errorf("Got no results from SHOW MASTER STATUS. Bailing out")
 	}
-	this.migrationContext.Log.Debugf("Streamer binlog coordinates: %+v", *this.initialBinlogCoordinates)
+	log.Debugf("Streamer binlog coordinates: %+v", *this.initialBinlogCoordinates)
 	return nil
 }
 
@@ -183,7 +186,7 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error {
 			return nil
 		}
 
-		this.migrationContext.Log.Infof("StreamEvents encountered unexpected error: %+v", err)
+		log.Infof("StreamEvents encountered unexpected error: %+v", err)
 		this.migrationContext.MarkPointOfInterest()
 		time.Sleep(ReconnectStreamerSleepSeconds * time.Second)
 
@@ -199,7 +202,7 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error {
 
 		// Reposition at same binlog file.
 		lastAppliedRowsEventHint = this.binlogReader.LastAppliedRowsEventHint
-		this.migrationContext.Log.Infof("Reconnecting... Will resume at %+v", lastAppliedRowsEventHint)
+		log.Infof("Reconnecting... Will resume at %+v", lastAppliedRowsEventHint)
 		if err := this.initBinlogReader(this.GetReconnectBinlogCoordinates()); err != nil {
 			return err
 		}
@@ -210,10 +213,11 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error {
 
 func (this *EventsStreamer) Close() (err error) {
 	err = this.binlogReader.Close()
-	this.migrationContext.Log.Infof("Closed streamer connection. err=%+v", err)
+	log.Infof("Closed streamer connection. err=%+v", err)
 	return err
 }
 
 func (this *EventsStreamer) Teardown() {
 	this.db.Close()
+	return
 }
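On the notifyListeners change above: both forms match database and table names case-insensitively, but strings.EqualFold folds runes in place while the ToLower comparison allocates two lowered copies of each string, which adds up on a path that runs once per binlog event per listener. For illustration:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        db, event := "MyDB", "mydb"

        // Allocates two temporary lowered strings on every comparison:
        fmt.Println(strings.ToLower(db) == strings.ToLower(event)) // true

        // Folds case rune-by-rune with no allocation — the preferable form on
        // a hot path such as a per-event listener filter:
        fmt.Println(strings.EqualFold(db, event)) // true
    }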
@ -1,12 +1,11 @@
|
|||||||
/*
|
/*
|
||||||
Copyright 2022 GitHub Inc.
|
Copyright 2016 GitHub Inc.
|
||||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package logic
|
package logic
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
@ -16,25 +15,24 @@ import (
|
|||||||
"github.com/github/gh-ost/go/base"
|
"github.com/github/gh-ost/go/base"
|
||||||
"github.com/github/gh-ost/go/mysql"
|
"github.com/github/gh-ost/go/mysql"
|
||||||
"github.com/github/gh-ost/go/sql"
|
"github.com/github/gh-ost/go/sql"
|
||||||
|
"github.com/outbrain/golib/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
httpStatusMessages = map[int]string{
|
httpStatusMessages map[int]string = map[int]string{
|
||||||
200: "OK",
|
200: "OK",
|
||||||
404: "Not found",
|
404: "Not found",
|
||||||
417: "Expectation failed",
|
417: "Expectation failed",
|
||||||
429: "Too many requests",
|
429: "Too many requests",
|
||||||
500: "Internal server error",
|
500: "Internal server error",
|
||||||
-1: "Connection error",
|
|
||||||
}
|
}
|
||||||
// See https://github.com/github/freno/blob/master/doc/http.md
|
// See https://github.com/github/freno/blob/master/doc/http.md
|
||||||
httpStatusFrenoMessages = map[int]string{
|
httpStatusFrenoMessages map[int]string = map[int]string{
|
||||||
200: "OK",
|
200: "OK",
|
||||||
404: "freno: unknown metric",
|
404: "freno: unknown metric",
|
||||||
417: "freno: access forbidden",
|
417: "freno: access forbidden",
|
||||||
429: "freno: threshold exceeded",
|
429: "freno: threshold exceeded",
|
||||||
500: "freno: internal error",
|
500: "freno: internal error",
|
||||||
-1: "freno: connection error",
|
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -43,22 +41,16 @@ const frenoMagicHint = "freno"
|
|||||||
// Throttler collects metrics related to throttling and makes informed decision
|
// Throttler collects metrics related to throttling and makes informed decision
|
||||||
// whether throttling should take place.
|
// whether throttling should take place.
|
||||||
type Throttler struct {
|
type Throttler struct {
|
||||||
appVersion string
|
|
||||||
migrationContext *base.MigrationContext
|
migrationContext *base.MigrationContext
|
||||||
applier *Applier
|
applier *Applier
|
||||||
httpClient *http.Client
|
|
||||||
httpClientTimeout time.Duration
|
|
||||||
inspector *Inspector
|
inspector *Inspector
|
||||||
finishedMigrating int64
|
finishedMigrating int64
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector, appVersion string) *Throttler {
|
func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector) *Throttler {
|
||||||
return &Throttler{
|
return &Throttler{
|
||||||
appVersion: appVersion,
|
|
||||||
migrationContext: migrationContext,
|
migrationContext: migrationContext,
|
||||||
applier: applier,
|
applier: applier,
|
||||||
httpClient: &http.Client{},
|
|
||||||
httpClientTimeout: time.Duration(migrationContext.ThrottleHTTPTimeoutMillis) * time.Millisecond,
|
|
||||||
inspector: inspector,
|
inspector: inspector,
|
||||||
finishedMigrating: 0,
|
finishedMigrating: 0,
|
||||||
}
|
}
|
||||||
@ -92,7 +84,6 @@ func (this *Throttler) shouldThrottle() (result bool, reason string, reasonHint
|
|||||||
if statusCode != 0 && statusCode != http.StatusOK {
|
if statusCode != 0 && statusCode != http.StatusOK {
|
||||||
return true, this.throttleHttpMessage(int(statusCode)), base.NoThrottleReasonHint
|
return true, this.throttleHttpMessage(int(statusCode)), base.NoThrottleReasonHint
|
||||||
}
|
}
|
||||||
|
|
||||||
// Replication lag throttle
|
// Replication lag throttle
|
||||||
maxLagMillisecondsThrottleThreshold := atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold)
|
maxLagMillisecondsThrottleThreshold := atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold)
|
||||||
lag := atomic.LoadInt64(&this.migrationContext.CurrentLag)
|
lag := atomic.LoadInt64(&this.migrationContext.CurrentLag)
|
||||||
@ -129,7 +120,7 @@ func parseChangelogHeartbeat(heartbeatValue string) (lag time.Duration, err erro
|
|||||||
// parseChangelogHeartbeat parses a string timestamp and deduces replication lag
|
// parseChangelogHeartbeat parses a string timestamp and deduces replication lag
|
||||||
func (this *Throttler) parseChangelogHeartbeat(heartbeatValue string) (err error) {
|
func (this *Throttler) parseChangelogHeartbeat(heartbeatValue string) (err error) {
|
||||||
if lag, err := parseChangelogHeartbeat(heartbeatValue); err != nil {
|
if lag, err := parseChangelogHeartbeat(heartbeatValue); err != nil {
|
||||||
return this.migrationContext.Log.Errore(err)
|
return log.Errore(err)
|
||||||
} else {
|
} else {
|
||||||
atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag))
|
atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag))
|
||||||
return nil
|
return nil
|
||||||
@ -151,13 +142,13 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
|
|||||||
// This means we will always get a good heartbeat value.
|
// This means we will always get a good heartbeat value.
|
||||||
// When running on replica, we should instead check the `SHOW SLAVE STATUS` output.
|
// When running on replica, we should instead check the `SHOW SLAVE STATUS` output.
|
||||||
if lag, err := mysql.GetReplicationLagFromSlaveStatus(this.inspector.informationSchemaDb); err != nil {
|
if lag, err := mysql.GetReplicationLagFromSlaveStatus(this.inspector.informationSchemaDb); err != nil {
|
||||||
return this.migrationContext.Log.Errore(err)
|
return log.Errore(err)
|
||||||
} else {
|
} else {
|
||||||
atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag))
|
atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if heartbeatValue, err := this.inspector.readChangelogState("heartbeat"); err != nil {
|
if heartbeatValue, err := this.inspector.readChangelogState("heartbeat"); err != nil {
|
||||||
return this.migrationContext.Log.Errore(err)
|
return log.Errore(err)
|
||||||
} else {
|
} else {
|
||||||
this.parseChangelogHeartbeat(heartbeatValue)
|
this.parseChangelogHeartbeat(heartbeatValue)
|
||||||
}
|
}
|
||||||
@ -168,9 +159,8 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
|
|||||||
collectFunc()
|
collectFunc()
|
||||||
firstThrottlingCollected <- true
|
firstThrottlingCollected <- true
|
||||||
|
|
||||||
ticker := time.NewTicker(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
|
ticker := time.Tick(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
|
||||||
defer ticker.Stop()
|
for range ticker {
|
||||||
for range ticker.C {
|
|
||||||
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
|
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -180,6 +170,7 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
|
|||||||
|
|
||||||
// collectControlReplicasLag polls all the control replicas to get maximum lag value
|
// collectControlReplicasLag polls all the control replicas to get maximum lag value
|
||||||
func (this *Throttler) collectControlReplicasLag() {
|
func (this *Throttler) collectControlReplicasLag() {
|
||||||
|
|
||||||
if atomic.LoadInt64(&this.migrationContext.HibernateUntil) > 0 {
|
if atomic.LoadInt64(&this.migrationContext.HibernateUntil) > 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -195,12 +186,9 @@ func (this *Throttler) collectControlReplicasLag() {
|
|||||||
dbUri := connectionConfig.GetDBUri("information_schema")
|
dbUri := connectionConfig.GetDBUri("information_schema")
|
||||||
|
|
||||||
var heartbeatValue string
|
var heartbeatValue string
|
||||||
db, _, err := mysql.GetDB(this.migrationContext.Uuid, dbUri)
|
if db, _, err := mysql.GetDB(this.migrationContext.Uuid, dbUri); err != nil {
|
||||||
if err != nil {
|
|
||||||
return lag, err
|
return lag, err
|
||||||
}
|
} else if err = db.QueryRow(replicationLagQuery).Scan(&heartbeatValue); err != nil {
|
||||||
|
|
||||||
if err := db.QueryRow(replicationLagQuery).Scan(&heartbeatValue); err != nil {
|
|
||||||
return lag, err
|
return lag, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -221,6 +209,7 @@ func (this *Throttler) collectControlReplicasLag() {
|
|||||||
lagResult := &mysql.ReplicationLagResult{Key: connectionConfig.Key}
|
lagResult := &mysql.ReplicationLagResult{Key: connectionConfig.Key}
|
||||||
go func() {
|
go func() {
|
||||||
lagResult.Lag, lagResult.Err = readReplicaLag(connectionConfig)
|
lagResult.Lag, lagResult.Err = readReplicaLag(connectionConfig)
|
||||||
|
this.migrationContext.PanicAbortIfTableError(lagResult.Err)
|
||||||
lagResults <- lagResult
|
lagResults <- lagResult
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
@ -244,14 +233,12 @@ func (this *Throttler) collectControlReplicasLag() {
|
|||||||
}
|
}
|
||||||
this.migrationContext.SetControlReplicasLagResult(readControlReplicasLag())
|
this.migrationContext.SetControlReplicasLagResult(readControlReplicasLag())
|
||||||
}
|
}
|
||||||
|
aggressiveTicker := time.Tick(100 * time.Millisecond)
|
||||||
relaxedFactor := 10
|
relaxedFactor := 10
|
||||||
counter := 0
|
counter := 0
|
||||||
shouldReadLagAggressively := false
|
shouldReadLagAggressively := false
|
||||||
|
|
||||||
ticker := time.NewTicker(100 * time.Millisecond)
|
for range aggressiveTicker {
|
||||||
defer ticker.Stop()
|
|
||||||
for range ticker.C {
|
|
||||||
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
|
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -294,53 +281,24 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<-
|
|||||||
if url == "" {
|
if url == "" {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
resp, err := http.Head(url)
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), this.httpClientTimeout)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
req.Header.Set("User-Agent", fmt.Sprintf("gh-ost/%s", this.appVersion))
|
|
||||||
|
|
||||||
resp, err := this.httpClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(resp.StatusCode))
|
atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(resp.StatusCode))
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := collectFunc()
|
collectFunc()
|
||||||
if err != nil {
|
|
||||||
// If not told to ignore errors, we'll throttle on HTTP connection issues
|
|
||||||
if !this.migrationContext.IgnoreHTTPErrors {
|
|
||||||
atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(-1))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
firstThrottlingCollected <- true
|
firstThrottlingCollected <- true
|
||||||
|
|
||||||
collectInterval := time.Duration(this.migrationContext.ThrottleHTTPIntervalMillis) * time.Millisecond
|
ticker := time.Tick(100 * time.Millisecond)
|
||||||
ticker := time.NewTicker(collectInterval)
|
for range ticker {
|
||||||
defer ticker.Stop()
|
|
||||||
for range ticker.C {
|
|
||||||
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
|
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
sleep, err := collectFunc()
|
if sleep, _ := collectFunc(); sleep {
|
||||||
if err != nil {
|
|
||||||
// If not told to ignore errors, we'll throttle on HTTP connection issues
|
|
||||||
if !this.migrationContext.IgnoreHTTPErrors {
|
|
||||||
atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(-1))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if sleep {
|
|
||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -360,7 +318,7 @@ func (this *Throttler) collectGeneralThrottleMetrics() error {
|
|||||||
// Regardless of throttle, we take opportunity to check for panic-abort
|
// Regardless of throttle, we take opportunity to check for panic-abort
|
||||||
if this.migrationContext.PanicFlagFile != "" {
|
if this.migrationContext.PanicFlagFile != "" {
|
||||||
if base.FileExists(this.migrationContext.PanicFlagFile) {
|
if base.FileExists(this.migrationContext.PanicFlagFile) {
|
||||||
this.migrationContext.PanicAbort <- fmt.Errorf("Found panic-file %s. Aborting without cleanup", this.migrationContext.PanicFlagFile)
|
this.migrationContext.PanicAbortOnError(fmt.Errorf("Found panic-file %s. Aborting without cleanup", this.migrationContext.PanicFlagFile))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -373,7 +331,7 @@ func (this *Throttler) collectGeneralThrottleMetrics() error {
|
|||||||
hibernateDuration := time.Duration(this.migrationContext.CriticalLoadHibernateSeconds) * time.Second
|
hibernateDuration := time.Duration(this.migrationContext.CriticalLoadHibernateSeconds) * time.Second
|
||||||
hibernateUntilTime := time.Now().Add(hibernateDuration)
|
hibernateUntilTime := time.Now().Add(hibernateDuration)
|
||||||
atomic.StoreInt64(&this.migrationContext.HibernateUntil, hibernateUntilTime.UnixNano())
|
atomic.StoreInt64(&this.migrationContext.HibernateUntil, hibernateUntilTime.UnixNano())
|
||||||
this.migrationContext.Log.Errorf("critical-load met: %s=%d, >=%d. Will hibernate for the duration of %+v, until %+v", variableName, value, threshold, hibernateDuration, hibernateUntilTime)
|
log.Errorf("critical-load met: %s=%d, >=%d. Will hibernate for the duration of %+v, until %+v", variableName, value, threshold, hibernateDuration, hibernateUntilTime)
|
||||||
go func() {
|
go func() {
|
||||||
 			time.Sleep(hibernateDuration)
 			this.migrationContext.SetThrottleGeneralCheckResult(base.NewThrottleCheckResult(true, "leaving hibernation", base.LeavingHibernationThrottleReasonHint))
@@ -383,15 +341,15 @@ func (this *Throttler) collectGeneralThrottleMetrics() error {
 	}
 
 	if criticalLoadMet && this.migrationContext.CriticalLoadIntervalMilliseconds == 0 {
-		this.migrationContext.PanicAbort <- fmt.Errorf("critical-load met: %s=%d, >=%d", variableName, value, threshold)
+		this.migrationContext.PanicAbortOnError(fmt.Errorf("critical-load met: %s=%d, >=%d", variableName, value, threshold))
 	}
 	if criticalLoadMet && this.migrationContext.CriticalLoadIntervalMilliseconds > 0 {
-		this.migrationContext.Log.Errorf("critical-load met once: %s=%d, >=%d. Will check again in %d millis", variableName, value, threshold, this.migrationContext.CriticalLoadIntervalMilliseconds)
+		log.Errorf("critical-load met once: %s=%d, >=%d. Will check again in %d millis", variableName, value, threshold, this.migrationContext.CriticalLoadIntervalMilliseconds)
 		go func() {
 			timer := time.NewTimer(time.Millisecond * time.Duration(this.migrationContext.CriticalLoadIntervalMilliseconds))
 			<-timer.C
 			if criticalLoadMetAgain, variableName, value, threshold, _ := this.criticalLoadIsMet(); criticalLoadMetAgain {
-				this.migrationContext.PanicAbort <- fmt.Errorf("critical-load met again after %d millis: %s=%d, >=%d", this.migrationContext.CriticalLoadIntervalMilliseconds, variableName, value, threshold)
+				this.migrationContext.PanicAbortOnError(fmt.Errorf("critical-load met again after %d millis: %s=%d, >=%d", this.migrationContext.CriticalLoadIntervalMilliseconds, variableName, value, threshold))
 			}
 		}()
 	}
@@ -446,9 +404,8 @@ func (this *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan
 		this.collectGeneralThrottleMetrics()
 		firstThrottlingCollected <- true
 
-		ticker := time.NewTicker(time.Second)
-		defer ticker.Stop()
-		for range ticker.C {
+		throttlerMetricsTick := time.Tick(1 * time.Second)
+		for range throttlerMetricsTick {
 			if atomic.LoadInt64(&this.finishedMigrating) > 0 {
 				return
 			}
@@ -459,7 +416,9 @@ func (this *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan
 	}
 
 // initiateThrottlerChecks initiates the throttle ticker and sets the basic behavior of throttling.
-func (this *Throttler) initiateThrottlerChecks() {
+func (this *Throttler) initiateThrottlerChecks() error {
+	throttlerTick := time.Tick(100 * time.Millisecond)
 
 	throttlerFunction := func() {
 		alreadyThrottling, currentReason, _ := this.migrationContext.IsThrottled()
 		shouldThrottle, throttleReason, throttleReasonHint := this.shouldThrottle()
@@ -476,15 +435,14 @@ func (this *Throttler) initiateThrottlerChecks() {
 		this.migrationContext.SetThrottled(shouldThrottle, throttleReason, throttleReasonHint)
 	}
 	throttlerFunction()
-	ticker := time.NewTicker(100 * time.Millisecond)
-	defer ticker.Stop()
-	for range ticker.C {
+	for range throttlerTick {
 		if atomic.LoadInt64(&this.finishedMigrating) > 0 {
-			return
+			return nil
 		}
 		throttlerFunction()
 	}
 
+	return nil
 }
 
 // throttle sees if throttling needs take place, and if so, continuously sleeps (blocks)
@@ -504,6 +462,6 @@ func (this *Throttler) throttle(onThrottled func()) {
 	}
 
 func (this *Throttler) Teardown() {
-	this.migrationContext.Log.Debugf("Tearing down...")
+	log.Debugf("Tearing down...")
 	atomic.StoreInt64(&this.finishedMigrating, 1)
 }
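A note on the ticker change in initiateThrottlerCollection and initiateThrottlerChecks above: `time.Tick` keeps its underlying ticker alive forever, so a loop that can return (as these can, once finishedMigrating is set) leaks it, whereas `time.NewTicker` with a deferred `Stop()` releases it. A minimal sketch of the leak-free pattern, with the loop body standing in for the throttler's work:

package main

import (
	"fmt"
	"time"
)

func main() {
	// time.NewTicker allows cleanup: the deferred Stop releases the
	// ticker's resources when this function returns.
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	done := time.After(350 * time.Millisecond)
	for {
		select {
		case t := <-ticker.C:
			fmt.Println("tick at", t) // stand-in for collecting throttle metrics
		case <-done:
			return // ticker.Stop() runs via defer; nothing leaks
		}
	}
	// By contrast, `for range time.Tick(100 * time.Millisecond)` gives no
	// handle to stop: every early return leaves the ticker running.
}

On Go versions contemporary with this code, the standard-library documentation for time.Tick itself warns that the underlying ticker cannot be recovered by the garbage collector.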
@@ -1,21 +1,36 @@
 /*
    Copyright 2015 Shlomi Noach, courtesy Booking.com
-   Copyright 2022 GitHub Inc.
    See https://github.com/github/gh-ost/blob/master/LICENSE
 */
 
 package mysql
 
 import (
+	"errors"
 	"fmt"
+	"regexp"
 	"strconv"
 	"strings"
 )
 
+var detachPattern *regexp.Regexp
+
+func init() {
+	detachPattern, _ = regexp.Compile(`//([^/:]+):([\d]+)`) // e.g. `//binlog.01234:567890`
+}
+
+type BinlogType int
+
+const (
+	BinaryLog BinlogType = iota
+	RelayLog
+)
+
 // BinlogCoordinates described binary log coordinates in the form of log file & log position.
 type BinlogCoordinates struct {
 	LogFile string
 	LogPos  int64
+	Type    BinlogType
 }
 
 // ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306
@@ -47,7 +62,7 @@ func (this *BinlogCoordinates) Equals(other *BinlogCoordinates) bool {
 	if other == nil {
 		return false
 	}
-	return this.LogFile == other.LogFile && this.LogPos == other.LogPos
+	return this.LogFile == other.LogFile && this.LogPos == other.LogPos && this.Type == other.Type
 }
 
 // IsEmpty returns true if the log file is empty, unnamed
@@ -72,5 +87,76 @@ func (this *BinlogCoordinates) SmallerThanOrEquals(other *BinlogCoordinates) boo
 	if this.SmallerThan(other) {
 		return true
 	}
-	return this.LogFile == other.LogFile && this.LogPos == other.LogPos
+	return this.LogFile == other.LogFile && this.LogPos == other.LogPos // No Type comparison
+}
+
+// FileSmallerThan returns true if this coordinate's file is strictly smaller than the other's.
+func (this *BinlogCoordinates) FileSmallerThan(other *BinlogCoordinates) bool {
+	return this.LogFile < other.LogFile
+}
+
+// FileNumberDistance returns the numeric distance between this coordinate's file number and the other's.
+// Effectively it means "how many rotates/FLUSHes would make these coordinates's file reach the other's"
+func (this *BinlogCoordinates) FileNumberDistance(other *BinlogCoordinates) int {
+	thisNumber, _ := this.FileNumber()
+	otherNumber, _ := other.FileNumber()
+	return otherNumber - thisNumber
+}
+
+// FileNumber returns the numeric value of the file, and the length in characters representing the number in the filename.
+// Example: FileNumber() of mysqld.log.000789 is (789, 6)
+func (this *BinlogCoordinates) FileNumber() (int, int) {
+	tokens := strings.Split(this.LogFile, ".")
+	numPart := tokens[len(tokens)-1]
+	numLen := len(numPart)
+	fileNum, err := strconv.Atoi(numPart)
+	if err != nil {
+		return 0, 0
+	}
+	return fileNum, numLen
+}
+
+// PreviousFileCoordinatesBy guesses the filename of the previous binlog/relaylog, by given offset (number of files back)
+func (this *BinlogCoordinates) PreviousFileCoordinatesBy(offset int) (BinlogCoordinates, error) {
+	result := BinlogCoordinates{LogPos: 0, Type: this.Type}
+
+	fileNum, numLen := this.FileNumber()
+	if fileNum == 0 {
+		return result, errors.New("Log file number is zero, cannot detect previous file")
+	}
+	newNumStr := fmt.Sprintf("%d", (fileNum - offset))
+	newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr
+
+	tokens := strings.Split(this.LogFile, ".")
+	tokens[len(tokens)-1] = newNumStr
+	result.LogFile = strings.Join(tokens, ".")
+	return result, nil
+}
+
+// PreviousFileCoordinates guesses the filename of the previous binlog/relaylog
+func (this *BinlogCoordinates) PreviousFileCoordinates() (BinlogCoordinates, error) {
+	return this.PreviousFileCoordinatesBy(1)
+}
+
+// NextFileCoordinates guesses the filename of the next binlog/relaylog
+func (this *BinlogCoordinates) NextFileCoordinates() (BinlogCoordinates, error) {
+	result := BinlogCoordinates{LogPos: 0, Type: this.Type}
+
+	fileNum, numLen := this.FileNumber()
+	newNumStr := fmt.Sprintf("%d", (fileNum + 1))
+	newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr
+
+	tokens := strings.Split(this.LogFile, ".")
+	tokens[len(tokens)-1] = newNumStr
+	result.LogFile = strings.Join(tokens, ".")
+	return result, nil
+}
+
+// DetachedCoordinates returns whether these coordinates are detached, and if so, the original log file and position
+func (this *BinlogCoordinates) DetachedCoordinates() (isDetached bool, detachedLogFile string, detachedLogPos string) {
+	detachedCoordinatesSubmatch := detachPattern.FindStringSubmatch(this.LogFile)
+	if len(detachedCoordinatesSubmatch) == 0 {
+		return false, "", ""
+	}
+	return true, detachedCoordinatesSubmatch[1], detachedCoordinatesSubmatch[2]
 }
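The new helpers above all hinge on the same trick: the trailing dot-separated token of a binlog filename is a zero-padded sequence number, so "previous"/"next" files are computed by integer arithmetic plus re-padding. A self-contained sketch of that logic (standalone, not importing gh-ost; a guard is added so growth past the padded width cannot panic):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// nextLogFile reproduces the zero-padded increment used by NextFileCoordinates:
// split on ".", bump the numeric suffix, and left-pad it back to its width.
func nextLogFile(logFile string) string {
	tokens := strings.Split(logFile, ".")
	numPart := tokens[len(tokens)-1]
	fileNum, _ := strconv.Atoi(numPart)
	next := strconv.Itoa(fileNum + 1)
	if pad := len(numPart) - len(next); pad > 0 {
		next = strings.Repeat("0", pad) + next
	}
	tokens[len(tokens)-1] = next
	return strings.Join(tokens, ".")
}

func main() {
	fmt.Println(nextLogFile("mysql-bin.000789")) // mysql-bin.000790
	fmt.Println(nextLogFile("mysql-bin.00099"))  // mysql-bin.00100
}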
@@ -8,8 +8,8 @@ package mysql
 import (
 	"testing"
 
-	"github.com/openark/golib/log"
-	test "github.com/openark/golib/tests"
+	"github.com/outbrain/golib/log"
+	test "github.com/outbrain/golib/tests"
 )
 
 func init() {
@@ -37,6 +37,57 @@ func TestBinlogCoordinates(t *testing.T) {
 	test.S(t).ExpectTrue(c1.SmallerThanOrEquals(&c3))
 }
 
+func TestBinlogNext(t *testing.T) {
+	c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
+	cres, err := c1.NextFileCoordinates()
+
+	test.S(t).ExpectNil(err)
+	test.S(t).ExpectEquals(c1.Type, cres.Type)
+	test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00018")
+
+	c2 := BinlogCoordinates{LogFile: "mysql-bin.00099", LogPos: 104}
+	cres, err = c2.NextFileCoordinates()
+
+	test.S(t).ExpectNil(err)
+	test.S(t).ExpectEquals(c1.Type, cres.Type)
+	test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00100")
+
+	c3 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00099", LogPos: 104}
+	cres, err = c3.NextFileCoordinates()
+
+	test.S(t).ExpectNil(err)
+	test.S(t).ExpectEquals(c1.Type, cres.Type)
+	test.S(t).ExpectEquals(cres.LogFile, "mysql.00.prod.com.00100")
+}
+
+func TestBinlogPrevious(t *testing.T) {
+	c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
+	cres, err := c1.PreviousFileCoordinates()
+
+	test.S(t).ExpectNil(err)
+	test.S(t).ExpectEquals(c1.Type, cres.Type)
+	test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00016")
+
+	c2 := BinlogCoordinates{LogFile: "mysql-bin.00100", LogPos: 104}
+	cres, err = c2.PreviousFileCoordinates()
+
+	test.S(t).ExpectNil(err)
+	test.S(t).ExpectEquals(c1.Type, cres.Type)
+	test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00099")
+
+	c3 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00100", LogPos: 104}
+	cres, err = c3.PreviousFileCoordinates()
+
+	test.S(t).ExpectNil(err)
+	test.S(t).ExpectEquals(c1.Type, cres.Type)
+	test.S(t).ExpectEquals(cres.LogFile, "mysql.00.prod.com.00099")
+
+	c4 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00000", LogPos: 104}
+	_, err = c4.PreviousFileCoordinates()
+
+	test.S(t).ExpectNotNil(err)
+}
+
 func TestBinlogCoordinatesAsKey(t *testing.T) {
 	m := make(map[BinlogCoordinates]bool)
 
@@ -52,3 +103,20 @@ func TestBinlogCoordinatesAsKey(t *testing.T) {
 
 	test.S(t).ExpectEquals(len(m), 3)
 }
+
+func TestBinlogFileNumber(t *testing.T) {
+	c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
+	c2 := BinlogCoordinates{LogFile: "mysql-bin.00022", LogPos: 104}
+
+	test.S(t).ExpectEquals(c1.FileNumberDistance(&c1), 0)
+	test.S(t).ExpectEquals(c1.FileNumberDistance(&c2), 5)
+	test.S(t).ExpectEquals(c2.FileNumberDistance(&c1), -5)
+}
+
+func TestBinlogFileNumberDistance(t *testing.T) {
+	c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
+	fileNum, numLen := c1.FileNumber()
+
+	test.S(t).ExpectEquals(fileNum, 17)
+	test.S(t).ExpectEquals(numLen, 5)
+}
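The new tests above are straight-line assertions; the same coverage could also be expressed in Go's conventional table-driven style, which keeps each filename pattern on one line. A hypothetical variant of the "next file" cases (sketch only, not part of this change; it drops into the same test file and reuses its `test` helper):

func TestBinlogNextTableDriven(t *testing.T) { // hypothetical variant, not in the diff
	cases := []struct{ in, want string }{
		{"mysql-bin.00017", "mysql-bin.00018"},
		{"mysql-bin.00099", "mysql-bin.00100"},
		{"mysql.00.prod.com.00099", "mysql.00.prod.com.00100"},
	}
	for _, c := range cases {
		coords := BinlogCoordinates{LogFile: c.in, LogPos: 104}
		next, err := coords.NextFileCoordinates()
		test.S(t).ExpectNil(err)
		test.S(t).ExpectEquals(next.LogFile, c.want)
	}
}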
@@ -1,5 +1,5 @@
 /*
-   Copyright 2022 GitHub Inc.
+   Copyright 2016 GitHub Inc.
    See https://github.com/github/gh-ost/blob/master/LICENSE
 */
 
@@ -12,7 +12,6 @@ import (
 	"fmt"
 	"io/ioutil"
 	"net"
-	"strings"
 
 	"github.com/go-sql-driver/mysql"
 )
@@ -28,8 +27,6 @@ type ConnectionConfig struct {
 	Password   string
 	ImpliedKey *InstanceKey
 	tlsConfig  *tls.Config
-	Timeout    float64
-	TransactionIsolation string
 }
 
 func NewConnectionConfig() *ConnectionConfig {
@@ -47,8 +44,6 @@ func (this *ConnectionConfig) DuplicateCredentials(key InstanceKey) *ConnectionC
 		User:      this.User,
 		Password:  this.Password,
 		tlsConfig: this.tlsConfig,
-		Timeout:   this.Timeout,
-		TransactionIsolation: this.TransactionIsolation,
 	}
 	config.ImpliedKey = &config.Key
 	return config
@@ -95,7 +90,6 @@ func (this *ConnectionConfig) UseTLS(caCertificatePath, clientCertificate, clien
 	}
 
 	this.tlsConfig = &tls.Config{
-		ServerName:         this.Key.Hostname,
 		Certificates:       certs,
 		RootCAs:            rootCertPool,
 		InsecureSkipVerify: allowInsecure,
@@ -115,23 +109,12 @@ func (this *ConnectionConfig) GetDBUri(databaseName string) string {
 		// Wrap IPv6 literals in square brackets
 		hostname = fmt.Sprintf("[%s]", hostname)
 	}
+	interpolateParams := true
 	// go-mysql-driver defaults to false if tls param is not provided; explicitly setting here to
 	// simplify construction of the DSN below.
 	tlsOption := "false"
 	if this.tlsConfig != nil {
 		tlsOption = TLS_CONFIG_KEY
 	}
-	connectionParams := []string{
-		"autocommit=true",
-		"charset=utf8mb4,utf8,latin1",
-		"interpolateParams=true",
-		fmt.Sprintf("tls=%s", tlsOption),
-		fmt.Sprintf("transaction_isolation=%q", this.TransactionIsolation),
-		fmt.Sprintf("timeout=%fs", this.Timeout),
-		fmt.Sprintf("readTimeout=%fs", this.Timeout),
-		fmt.Sprintf("writeTimeout=%fs", this.Timeout),
-	}
-
-	return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?%s", this.User, this.Password, hostname, this.Key.Port, databaseName, strings.Join(connectionParams, "&"))
+	return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?interpolateParams=%t&autocommit=true&charset=utf8mb4,utf8,latin1&tls=%s", this.User, this.Password, hostname, this.Key.Port, databaseName, interpolateParams, tlsOption)
 }
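Both versions of GetDBUri above emit a go-sql-driver DSN of the form user:password@tcp(host:port)/dbname?param=value&…. For comparison, the driver's own mysql.Config/FormatDSN builds the same kind of string without manual joining; a sketch under the assumption of the hypothetical credentials used in the tests below:

package main

import (
	"fmt"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// mysql.Config assembles the DSN structurally instead of by string concatenation.
	cfg := mysql.NewConfig()
	cfg.User = "gromit" // hypothetical credentials, mirroring the test fixtures
	cfg.Passwd = "penguin"
	cfg.Net = "tcp"
	cfg.Addr = "myhost:3306"
	cfg.DBName = "test"
	cfg.InterpolateParams = true
	cfg.Params = map[string]string{
		"autocommit": "true",
		"charset":    "utf8mb4,utf8,latin1",
	}
	fmt.Println(cfg.FormatDSN())
}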
@@ -1,5 +1,5 @@
 /*
-   Copyright 2022 GitHub Inc.
+   Copyright 2016 GitHub Inc.
    See https://github.com/github/gh-ost/blob/master/LICENSE
 */
 
@@ -9,12 +9,8 @@ import (
 	"crypto/tls"
 	"testing"
 
-	"github.com/openark/golib/log"
-	test "github.com/openark/golib/tests"
-)
-
-const (
-	transactionIsolation = "REPEATABLE-READ"
+	"github.com/outbrain/golib/log"
+	test "github.com/outbrain/golib/tests"
 )
 
 func init() {
@@ -29,7 +25,6 @@ func TestNewConnectionConfig(t *testing.T) {
 	test.S(t).ExpectEquals(c.ImpliedKey.Port, 0)
 	test.S(t).ExpectEquals(c.User, "")
 	test.S(t).ExpectEquals(c.Password, "")
-	test.S(t).ExpectEquals(c.TransactionIsolation, "")
 }
 
 func TestDuplicateCredentials(t *testing.T) {
@@ -41,7 +36,6 @@ func TestDuplicateCredentials(t *testing.T) {
 		InsecureSkipVerify: true,
 		ServerName:         "feathers",
 	}
-	c.TransactionIsolation = transactionIsolation
 
 	dup := c.DuplicateCredentials(InstanceKey{Hostname: "otherhost", Port: 3310})
 	test.S(t).ExpectEquals(dup.Key.Hostname, "otherhost")
@@ -51,7 +45,6 @@ func TestDuplicateCredentials(t *testing.T) {
 	test.S(t).ExpectEquals(dup.User, "gromit")
 	test.S(t).ExpectEquals(dup.Password, "penguin")
 	test.S(t).ExpectEquals(dup.tlsConfig, c.tlsConfig)
-	test.S(t).ExpectEquals(dup.TransactionIsolation, c.TransactionIsolation)
 }
 
 func TestDuplicate(t *testing.T) {
@@ -59,7 +52,6 @@ func TestDuplicate(t *testing.T) {
 	c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
 	c.User = "gromit"
 	c.Password = "penguin"
-	c.TransactionIsolation = transactionIsolation
 
 	dup := c.Duplicate()
 	test.S(t).ExpectEquals(dup.Key.Hostname, "myhost")
@@ -68,7 +60,6 @@ func TestDuplicate(t *testing.T) {
 	test.S(t).ExpectEquals(dup.ImpliedKey.Port, 3306)
 	test.S(t).ExpectEquals(dup.User, "gromit")
 	test.S(t).ExpectEquals(dup.Password, "penguin")
-	test.S(t).ExpectEquals(dup.TransactionIsolation, transactionIsolation)
 }
 
 func TestGetDBUri(t *testing.T) {
@@ -76,11 +67,9 @@ func TestGetDBUri(t *testing.T) {
 	c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
 	c.User = "gromit"
 	c.Password = "penguin"
-	c.Timeout = 1.2345
-	c.TransactionIsolation = transactionIsolation
 
 	uri := c.GetDBUri("test")
-	test.S(t).ExpectEquals(uri, `gromit:penguin@tcp(myhost:3306)/test?autocommit=true&charset=utf8mb4,utf8,latin1&interpolateParams=true&tls=false&transaction_isolation="REPEATABLE-READ"&timeout=1.234500s&readTimeout=1.234500s&writeTimeout=1.234500s`)
+	test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1&tls=false")
 }
 
 func TestGetDBUriWithTLSSetup(t *testing.T) {
@@ -88,10 +77,8 @@ func TestGetDBUriWithTLSSetup(t *testing.T) {
 	c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
 	c.User = "gromit"
 	c.Password = "penguin"
-	c.Timeout = 1.2345
 	c.tlsConfig = &tls.Config{}
-	c.TransactionIsolation = transactionIsolation
 
 	uri := c.GetDBUri("test")
-	test.S(t).ExpectEquals(uri, `gromit:penguin@tcp(myhost:3306)/test?autocommit=true&charset=utf8mb4,utf8,latin1&interpolateParams=true&tls=ghost&transaction_isolation="REPEATABLE-READ"&timeout=1.234500s&readTimeout=1.234500s&writeTimeout=1.234500s`)
+	test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1&tls=ghost")
 }
@@ -1,6 +1,5 @@
 /*
    Copyright 2015 Shlomi Noach, courtesy Booking.com
-   Copyright 2022 GitHub Inc.
    See https://github.com/github/gh-ost/blob/master/LICENSE
 */
 
@@ -8,21 +7,12 @@ package mysql
 
 import (
 	"fmt"
-	"regexp"
 	"strconv"
 	"strings"
 )
 
-const DefaultInstancePort = 3306
-
-var (
-	ipv4HostPortRegexp = regexp.MustCompile("^([^:]+):([0-9]+)$")
-	ipv4HostRegexp     = regexp.MustCompile("^([^:]+)$")
-
-	// e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308
-	ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") //nolint:gosimple
-	// e.g. 2001:db8:1f70::999:de8:7648:6e8
-	ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$")
+const (
+	DefaultInstancePort = 3306
 )
 
 // InstanceKey is an instance indicator, identified by hostname and port
@@ -35,34 +25,25 @@ const detachHint = "//"
 
 // ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306
 func NewRawInstanceKey(hostPort string) (*InstanceKey, error) {
-	var hostname, port string
-	if submatch := ipv4HostPortRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
-		hostname = submatch[1]
-		port = submatch[2]
-	} else if submatch := ipv4HostRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
-		hostname = submatch[1]
-	} else if submatch := ipv6HostPortRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
-		hostname = submatch[1]
-		port = submatch[2]
-	} else if submatch := ipv6HostRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
-		hostname = submatch[1]
-	} else {
-		return nil, fmt.Errorf("Cannot parse address: %s", hostPort)
+	tokens := strings.SplitN(hostPort, ":", 2)
+	if len(tokens) != 2 {
+		return nil, fmt.Errorf("Cannot parse InstanceKey from %s. Expected format is host:port", hostPort)
 	}
-	instanceKey := &InstanceKey{Hostname: hostname, Port: DefaultInstancePort}
-	if port != "" {
-		var err error
-		if instanceKey.Port, err = strconv.Atoi(port); err != nil {
-			return instanceKey, fmt.Errorf("Invalid port: %s", port)
-		}
+	instanceKey := &InstanceKey{Hostname: tokens[0]}
+	var err error
+	if instanceKey.Port, err = strconv.Atoi(tokens[1]); err != nil {
+		return instanceKey, fmt.Errorf("Invalid port: %s", tokens[1])
 	}
 
 	return instanceKey, nil
 }
 
-// ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306.
+// ParseRawInstanceKeyLoose will parse an InstanceKey from a string representation such as 127.0.0.1:3306.
 // The port part is optional; there will be no name resolve
-func ParseInstanceKey(hostPort string) (*InstanceKey, error) {
+func ParseRawInstanceKeyLoose(hostPort string) (*InstanceKey, error) {
+	if !strings.Contains(hostPort, ":") {
+		return &InstanceKey{Hostname: hostPort, Port: DefaultInstancePort}, nil
+	}
 	return NewRawInstanceKey(hostPort)
 }
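One caveat with the NewRawInstanceKey rewrite above: splitting on the first ":" only works for hostnames and IPv4 literals. A bare or bracketed IPv6 literal splits at its first colon, which is exactly the case the removed regexps (and the IPv6 test cases deleted further down) covered. A small sketch of the difference, using the standard library's bracket-aware parser:

package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	addr := "[2001:db8:1f70::999:de8:7648:6e8]:3308"

	// Naive split: breaks IPv6, reporting "[2001" as the "host".
	tokens := strings.SplitN(addr, ":", 2)
	fmt.Println(tokens[0]) // [2001

	// net.SplitHostPort understands the bracket convention.
	host, port, err := net.SplitHostPort(addr)
	fmt.Println(host, port, err) // 2001:db8:1f70::999:de8:7648:6e8 3308 <nil>
}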
@@ -92,7 +92,7 @@ func (this *InstanceKeyMap) ReadCommaDelimitedList(list string) error {
 	}
 	tokens := strings.Split(list, ",")
 	for _, token := range tokens {
-		key, err := ParseInstanceKey(token)
+		key, err := ParseRawInstanceKeyLoose(token)
 		if err != nil {
 			return err
 		}
@@ -1,74 +0,0 @@
-/*
-   Copyright 2016 GitHub Inc.
-   See https://github.com/github/gh-ost/blob/master/LICENSE
-*/
-
-package mysql
-
-import (
-	"testing"
-
-	"github.com/openark/golib/log"
-	test "github.com/openark/golib/tests"
-)
-
-func init() {
-	log.SetLevel(log.ERROR)
-}
-
-func TestParseInstanceKey(t *testing.T) {
-	{
-		key, err := ParseInstanceKey("myhost:1234")
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(key.Hostname, "myhost")
-		test.S(t).ExpectEquals(key.Port, 1234)
-	}
-	{
-		key, err := ParseInstanceKey("myhost")
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(key.Hostname, "myhost")
-		test.S(t).ExpectEquals(key.Port, 3306)
-	}
-	{
-		key, err := ParseInstanceKey("10.0.0.3:3307")
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(key.Hostname, "10.0.0.3")
-		test.S(t).ExpectEquals(key.Port, 3307)
-	}
-	{
-		key, err := ParseInstanceKey("10.0.0.3")
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(key.Hostname, "10.0.0.3")
-		test.S(t).ExpectEquals(key.Port, 3306)
-	}
-	{
-		key, err := ParseInstanceKey("[2001:db8:1f70::999:de8:7648:6e8]:3308")
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(key.Hostname, "2001:db8:1f70::999:de8:7648:6e8")
-		test.S(t).ExpectEquals(key.Port, 3308)
-	}
-	{
-		key, err := ParseInstanceKey("::1")
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(key.Hostname, "::1")
-		test.S(t).ExpectEquals(key.Port, 3306)
-	}
-	{
-		key, err := ParseInstanceKey("0:0:0:0:0:0:0:0")
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(key.Hostname, "0:0:0:0:0:0:0:0")
-		test.S(t).ExpectEquals(key.Port, 3306)
-	}
-	{
-		_, err := ParseInstanceKey("[2001:xxxx:1f70::999:de8:7648:6e8]:3308")
-		test.S(t).ExpectNotNil(err)
-	}
-	{
-		_, err := ParseInstanceKey("10.0.0.4:")
-		test.S(t).ExpectNotNil(err)
-	}
-	{
-		_, err := ParseInstanceKey("10.0.0.4:5.6.7")
-		test.S(t).ExpectNotNil(err)
-	}
-}
@@ -14,16 +14,18 @@ import (
 
 	"github.com/github/gh-ost/go/sql"
 
-	"github.com/openark/golib/log"
-	"github.com/openark/golib/sqlutils"
+	"github.com/outbrain/golib/log"
+	"github.com/outbrain/golib/sqlutils"
 )
 
 const (
-	MaxTableNameLength           = 64
-	MaxReplicationPasswordLength = 32
-	MaxDBPoolConnections         = 3
+	Error1017CantFindFile     = "Error 1017:"
+	Error1146TableDoesntExist = "Error 1146:"
 )
 
+const MaxTableNameLength = 64
+const MaxReplicationPasswordLength = 32
+
 type ReplicationLagResult struct {
 	Key InstanceKey
 	Lag time.Duration
@@ -42,22 +44,23 @@ func (this *ReplicationLagResult) HasLag() bool {
 var knownDBs map[string]*gosql.DB = make(map[string]*gosql.DB)
 var knownDBsMutex = &sync.Mutex{}
 
-func GetDB(migrationUuid string, mysql_uri string) (db *gosql.DB, exists bool, err error) {
+func GetDB(migrationUuid string, mysql_uri string) (*gosql.DB, bool, error) {
 	cacheKey := migrationUuid + ":" + mysql_uri
 
 	knownDBsMutex.Lock()
-	defer knownDBsMutex.Unlock()
+	defer func() {
+		knownDBsMutex.Unlock()
+	}()
 
-	if db, exists = knownDBs[cacheKey]; !exists {
-		db, err = gosql.Open("mysql", mysql_uri)
-		if err != nil {
-			return nil, false, err
-		}
-		db.SetMaxOpenConns(MaxDBPoolConnections)
-		db.SetMaxIdleConns(MaxDBPoolConnections)
+	var exists bool
+	if _, exists = knownDBs[cacheKey]; !exists {
+		if db, err := gosql.Open("mysql", mysql_uri); err == nil {
 			knownDBs[cacheKey] = db
+		} else {
+			return db, exists, err
+		}
 	}
-	return db, exists, nil
+	return knownDBs[cacheKey], exists, nil
 }
 
 // GetReplicationLagFromSlaveStatus returns replication lag for a given db; via SHOW SLAVE STATUS
@@ -205,9 +208,3 @@ func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnL
 	}
 	return sql.NewColumnList(columnNames), sql.NewColumnList(virtualColumnNames), nil
 }
-
-// Kill executes a KILL QUERY by connection id
-func Kill(db *gosql.DB, connectionID string) error {
-	_, err := db.Exec(`KILL QUERY %s`, connectionID)
-	return err
-}
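GetDB above is a mutex-guarded lazily-initialized cache: lock, look up, open and insert on miss, then return the cached handle. A minimal standalone sketch of that pattern (all names hypothetical), holding the lock across lookup and insert so concurrent callers cannot open the same resource twice:

package main

import (
	"fmt"
	"sync"
)

type resource struct{ uri string }

var (
	cache   = make(map[string]*resource)
	cacheMu sync.Mutex
)

// getResource returns the cached entry for uri, creating it on first use.
func getResource(uri string) (*resource, bool) {
	cacheMu.Lock()
	defer cacheMu.Unlock()

	if r, exists := cache[uri]; exists {
		return r, true
	}
	r := &resource{uri: uri} // stand-in for gosql.Open
	cache[uri] = r
	return r, false
}

func main() {
	_, cached := getResource("mysql://myhost:3306/test") // hypothetical URI
	fmt.Println(cached)                                  // false: first open
	_, cached = getResource("mysql://myhost:3306/test")
	fmt.Println(cached) // true: served from cache
}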
go/os/process.go (new file, 65 lines)
@@ -0,0 +1,65 @@
+/*
+   Copyright 2014 Outbrain Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package os
+
+import (
+	"github.com/outbrain/golib/log"
+	"io/ioutil"
+	"os"
+	"os/exec"
+)
+
+func execCmd(commandText string, arguments ...string) (*exec.Cmd, string, error) {
+	commandBytes := []byte(commandText)
+	tmpFile, err := ioutil.TempFile("", "gh-ost-process-cmd-")
+	if err != nil {
+		return nil, "", log.Errore(err)
+	}
+	ioutil.WriteFile(tmpFile.Name(), commandBytes, 0644)
+	log.Debugf("execCmd: %s", commandText)
+	shellArguments := append([]string{}, tmpFile.Name())
+	shellArguments = append(shellArguments, arguments...)
+	log.Debugf("%+v", shellArguments)
+	return exec.Command("bash", shellArguments...), tmpFile.Name(), nil
+}
+
+// CommandRun executes a command
+func CommandRun(commandText string, arguments ...string) error {
+	cmd, tmpFileName, err := execCmd(commandText, arguments...)
+	defer os.Remove(tmpFileName)
+	if err != nil {
+		return log.Errore(err)
+	}
+	err = cmd.Run()
+	return log.Errore(err)
+}
+
+// RunCommandWithOutput executes a command and return output bytes
+func RunCommandWithOutput(commandText string) ([]byte, error) {
+	cmd, tmpFileName, err := execCmd(commandText)
+	defer os.Remove(tmpFileName)
+	if err != nil {
+		return nil, log.Errore(err)
+	}
+
+	outputBytes, err := cmd.Output()
+	if err != nil {
+		return nil, log.Errore(err)
+	}
+
+	return outputBytes, nil
+}
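CommandRun and RunCommandWithOutput above write the command text to a temporary file and execute it via bash, so multi-line shell snippets work as-is. A hedged usage sketch (the import path is assumed from the file's location in the tree; the commands are arbitrary examples):

package main

import (
	ghos "github.com/github/gh-ost/go/os" // import path assumed from the file's location
)

func main() {
	// Runs via `bash <tmpfile>`; multi-line shell text is fine.
	script := "echo hello\necho world"
	if err := ghos.CommandRun(script); err != nil {
		panic(err)
	}

	out, err := ghos.RunCommandWithOutput("hostname") // arbitrary example command
	if err != nil {
		panic(err)
	}
	println(string(out))
}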
@@ -1,5 +1,5 @@
 /*
-   Copyright 2022 GitHub Inc.
+   Copyright 2016 GitHub Inc.
    See https://github.com/github/gh-ost/blob/master/LICENSE
 */
 
@@ -33,13 +33,11 @@ func EscapeName(name string) string {
 }
 
 func buildColumnsPreparedValues(columns *ColumnList) []string {
-	values := make([]string, columns.Len())
+	values := make([]string, columns.Len(), columns.Len())
 	for i, column := range columns.Columns() {
 		var token string
 		if column.timezoneConversion != nil {
 			token = fmt.Sprintf("convert_tz(?, '%s', '%s')", column.timezoneConversion.ToTimezone, "+00:00")
-		} else if column.enumToTextConversion {
-			token = fmt.Sprintf("ELT(?, %s)", column.EnumValues)
 		} else if column.Type == JSONColumnType {
 			token = "convert(? using utf8mb4)"
 		} else {
@@ -51,7 +49,7 @@ func buildColumnsPreparedValues(columns *ColumnList) []string {
 }
 
 func buildPreparedValues(length int) []string {
-	values := make([]string, length)
+	values := make([]string, length, length)
 	for i := 0; i < length; i++ {
 		values[i] = "?"
 	}
@@ -59,7 +57,7 @@ func buildPreparedValues(length int) []string {
 }
 
 func duplicateNames(names []string) []string {
-	duplicate := make([]string, len(names))
+	duplicate := make([]string, len(names), len(names))
 	copy(duplicate, names)
 	return duplicate
 }
@@ -110,8 +108,6 @@ func BuildSetPreparedClause(columns *ColumnList) (result string, err error) {
 		var setToken string
 		if column.timezoneConversion != nil {
 			setToken = fmt.Sprintf("%s=convert_tz(?, '%s', '%s')", EscapeName(column.Name), column.timezoneConversion.ToTimezone, "+00:00")
-		} else if column.enumToTextConversion {
-			setToken = fmt.Sprintf("%s=ELT(?, %s)", EscapeName(column.Name), column.EnumValues)
 		} else if column.Type == JSONColumnType {
 			setToken = fmt.Sprintf("%s=convert(? using utf8mb4)", EscapeName(column.Name))
 		} else {
@@ -167,7 +163,7 @@ func BuildRangeComparison(columns []string, values []string, args []interface{},
 	if includeEquals {
 		comparison, err := BuildEqualsComparison(columns, values)
 		if err != nil {
-			return "", explodedArgs, err
+			return "", explodedArgs, nil
 		}
 		comparisons = append(comparisons, comparison)
 		explodedArgs = append(explodedArgs, args...)
@@ -261,8 +257,8 @@ func BuildUniqueKeyRangeEndPreparedQueryViaOffset(databaseName, tableName string
 	explodedArgs = append(explodedArgs, rangeExplodedArgs...)
 
 	uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
-	uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames))
-	uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames))
+	uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
+	uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
 	for i, column := range uniqueKeyColumns.Columns() {
 		uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
 		if column.Type == EnumColumnType {
@@ -316,8 +312,8 @@ func BuildUniqueKeyRangeEndPreparedQueryViaTemptable(databaseName, tableName str
 	explodedArgs = append(explodedArgs, rangeExplodedArgs...)
 
 	uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
-	uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames))
-	uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames))
+	uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
+	uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
 	for i, column := range uniqueKeyColumns.Columns() {
 		uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
 		if column.Type == EnumColumnType {
@@ -368,7 +364,7 @@ func buildUniqueKeyMinMaxValuesPreparedQuery(databaseName, tableName string, uni
 	tableName = EscapeName(tableName)
 
 	uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
-	uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames))
+	uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
 	for i, column := range uniqueKeyColumns.Columns() {
 		uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
 		if column.Type == EnumColumnType {
@@ -400,7 +396,7 @@ func BuildDMLDeleteQuery(databaseName, tableName string, tableColumns, uniqueKey
 	}
 	for _, column := range uniqueKeyColumns.Columns() {
 		tableOrdinal := tableColumns.Ordinals[column.Name]
-		arg := column.convertArg(args[tableOrdinal], true)
+		arg := column.convertArg(args[tableOrdinal])
 		uniqueKeyArgs = append(uniqueKeyArgs, arg)
 	}
 	databaseName = EscapeName(databaseName)
@@ -437,7 +433,7 @@ func BuildDMLInsertQuery(databaseName, tableName string, tableColumns, sharedCol
 
 	for _, column := range sharedColumns.Columns() {
 		tableOrdinal := tableColumns.Ordinals[column.Name]
-		arg := column.convertArg(args[tableOrdinal], false)
+		arg := column.convertArg(args[tableOrdinal])
 		sharedArgs = append(sharedArgs, arg)
 	}
 
@@ -485,25 +481,19 @@ func BuildDMLUpdateQuery(databaseName, tableName string, tableColumns, sharedCol
 
 	for _, column := range sharedColumns.Columns() {
 		tableOrdinal := tableColumns.Ordinals[column.Name]
-		arg := column.convertArg(valueArgs[tableOrdinal], false)
+		arg := column.convertArg(valueArgs[tableOrdinal])
 		sharedArgs = append(sharedArgs, arg)
 	}
 
 	for _, column := range uniqueKeyColumns.Columns() {
 		tableOrdinal := tableColumns.Ordinals[column.Name]
-		arg := column.convertArg(whereArgs[tableOrdinal], true)
+		arg := column.convertArg(whereArgs[tableOrdinal])
 		uniqueKeyArgs = append(uniqueKeyArgs, arg)
 	}
 
 	setClause, err := BuildSetPreparedClause(mappedSharedColumns)
-	if err != nil {
-		return "", sharedArgs, uniqueKeyArgs, err
-	}
 
 	equalsComparison, err := BuildEqualsPreparedComparison(uniqueKeyColumns.Names())
-	if err != nil {
-		return "", sharedArgs, uniqueKeyArgs, err
-	}
 	result = fmt.Sprintf(`
 		update /* gh-ost %s.%s */
 			%s.%s
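Two observations on the builder.go hunks above. First, `make([]string, n)` and `make([]string, n, n)` are equivalent in Go -- capacity defaults to length -- so those repeated edits change nothing at runtime (the explicit-capacity form is what linters such as gosimple flag). Second, note that `return "", explodedArgs, nil` replaces `err` with `nil` on an error path, so callers of BuildRangeComparison can no longer see that BuildEqualsComparison failed. A tiny demonstration of the first point:

package main

import "fmt"

func main() {
	a := make([]string, 3)    // len 3, cap 3
	b := make([]string, 3, 3) // identical: explicit cap equals len
	fmt.Println(len(a), cap(a)) // 3 3
	fmt.Println(len(b), cap(b)) // 3 3
}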
@@ -12,8 +12,8 @@ import (
 	"regexp"
 	"strings"
 
-	"github.com/openark/golib/log"
-	test "github.com/openark/golib/tests"
+	"github.com/outbrain/golib/log"
+	test "github.com/outbrain/golib/tests"
 )
 
 var (
go/sql/parser.go (115 lines changed)
@@ -1,5 +1,5 @@
 /*
-   Copyright 2022 GitHub Inc.
+   Copyright 2016 GitHub Inc.
    See https://github.com/github/gh-ost/blob/master/LICENSE
 */
 
@@ -16,53 +16,22 @@ var (
 	renameColumnRegexp = regexp.MustCompile(`(?i)\bchange\s+(column\s+|)([\S]+)\s+([\S]+)\s+`)
 	dropColumnRegexp   = regexp.MustCompile(`(?i)\bdrop\s+(column\s+|)([\S]+)$`)
 	renameTableRegexp  = regexp.MustCompile(`(?i)\brename\s+(to|as)\s+`)
-	autoIncrementRegexp = regexp.MustCompile(`(?i)\bauto_increment[\s]*=[\s]*([0-9]+)`)
-	alterTableExplicitSchemaTableRegexps = []*regexp.Regexp{
-		// ALTER TABLE `scm`.`tbl` something
-		regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `[.]` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
-		// ALTER TABLE `scm`.tbl something
-		regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `[.]([\S]+)\s+(.*$)`),
-		// ALTER TABLE scm.`tbl` something
-		regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)[.]` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
-		// ALTER TABLE scm.tbl something
-		regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)[.]([\S]+)\s+(.*$)`),
-	}
-	alterTableExplicitTableRegexps = []*regexp.Regexp{
-		// ALTER TABLE `tbl` something
-		regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
-		// ALTER TABLE tbl something
-		regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)\s+(.*$)`),
-	}
-	enumValuesRegexp = regexp.MustCompile("^enum[(](.*)[)]$")
 )
 
-type AlterTableParser struct {
+type Parser struct {
 	columnRenameMap map[string]string
 	droppedColumns  map[string]bool
 	isRenameTable   bool
-	isAutoIncrementDefined bool
-
-	alterStatementOptions string
-	alterTokens           []string
-
-	explicitSchema string
-	explicitTable  string
 }
 
-func NewAlterTableParser() *AlterTableParser {
-	return &AlterTableParser{
+func NewParser() *Parser {
+	return &Parser{
 		columnRenameMap: make(map[string]string),
 		droppedColumns:  make(map[string]bool),
 	}
 }
 
-func NewParserFromAlterStatement(alterStatement string) *AlterTableParser {
-	parser := NewAlterTableParser()
-	parser.ParseAlterStatement(alterStatement)
-	return parser
-}
-
-func (this *AlterTableParser) tokenizeAlterStatement(alterStatement string) (tokens []string) {
+func (this *Parser) tokenizeAlterStatement(alterStatement string) (tokens []string, err error) {
 	terminatingQuote := rune(0)
 	f := func(c rune) bool {
 		switch {
@@ -86,16 +55,16 @@ func (this *AlterTableParser) tokenizeAlterStatement(alterStatement string) (tok
 	for i := range tokens {
 		tokens[i] = strings.TrimSpace(tokens[i])
 	}
-	return tokens
+	return tokens, nil
 }
 
-func (this *AlterTableParser) sanitizeQuotesFromAlterStatement(alterStatement string) (strippedStatement string) {
+func (this *Parser) sanitizeQuotesFromAlterStatement(alterStatement string) (strippedStatement string) {
 	strippedStatement = alterStatement
 	strippedStatement = sanitizeQuotesRegexp.ReplaceAllString(strippedStatement, "''")
 	return strippedStatement
 }
 
-func (this *AlterTableParser) parseAlterToken(alterToken string) {
+func (this *Parser) parseAlterToken(alterToken string) (err error) {
 	{
 		// rename
 		allStringSubmatch := renameColumnRegexp.FindAllStringSubmatch(alterToken, -1)
@@ -125,40 +94,19 @@ func (this *AlterTableParser) parseAlterToken(alterToken string) {
 			this.isRenameTable = true
 		}
 	}
-	{
-		// auto_increment
-		if autoIncrementRegexp.MatchString(alterToken) {
-			this.isAutoIncrementDefined = true
-		}
-	}
+	return nil
 }
 
-func (this *AlterTableParser) ParseAlterStatement(alterStatement string) (err error) {
-	this.alterStatementOptions = alterStatement
-	for _, alterTableRegexp := range alterTableExplicitSchemaTableRegexps {
-		if submatch := alterTableRegexp.FindStringSubmatch(this.alterStatementOptions); len(submatch) > 0 {
-			this.explicitSchema = submatch[1]
-			this.explicitTable = submatch[2]
-			this.alterStatementOptions = submatch[3]
-			break
-		}
-	}
-	for _, alterTableRegexp := range alterTableExplicitTableRegexps {
-		if submatch := alterTableRegexp.FindStringSubmatch(this.alterStatementOptions); len(submatch) > 0 {
-			this.explicitTable = submatch[1]
-			this.alterStatementOptions = submatch[2]
-			break
-		}
-	}
-	for _, alterToken := range this.tokenizeAlterStatement(this.alterStatementOptions) {
+func (this *Parser) ParseAlterStatement(alterStatement string) (err error) {
+	alterTokens, _ := this.tokenizeAlterStatement(alterStatement)
+	for _, alterToken := range alterTokens {
 		alterToken = this.sanitizeQuotesFromAlterStatement(alterToken)
 		this.parseAlterToken(alterToken)
-		this.alterTokens = append(this.alterTokens, alterToken)
 	}
 	return nil
 }
 
-func (this *AlterTableParser) GetNonTrivialRenames() map[string]string {
+func (this *Parser) GetNonTrivialRenames() map[string]string {
 	result := make(map[string]string)
 	for column, renamed := range this.columnRenameMap {
 		if column != renamed {
@@ -168,45 +116,14 @@ func (this *AlterTableParser) GetNonTrivialRenames() map[string]string {
 	return result
 }
 
-func (this *AlterTableParser) HasNonTrivialRenames() bool {
+func (this *Parser) HasNonTrivialRenames() bool {
 	return len(this.GetNonTrivialRenames()) > 0
 }
 
-func (this *AlterTableParser) DroppedColumnsMap() map[string]bool {
+func (this *Parser) DroppedColumnsMap() map[string]bool {
 	return this.droppedColumns
 }
 
-func (this *AlterTableParser) IsRenameTable() bool {
+func (this *Parser) IsRenameTable() bool {
 	return this.isRenameTable
 }
-
-func (this *AlterTableParser) IsAutoIncrementDefined() bool {
-	return this.isAutoIncrementDefined
-}
-
-func (this *AlterTableParser) GetExplicitSchema() string {
-	return this.explicitSchema
-}
-
-func (this *AlterTableParser) HasExplicitSchema() bool {
-	return this.GetExplicitSchema() != ""
-}
-
-func (this *AlterTableParser) GetExplicitTable() string {
-	return this.explicitTable
-}
-
-func (this *AlterTableParser) HasExplicitTable() bool {
-	return this.GetExplicitTable() != ""
-}
-
-func (this *AlterTableParser) GetAlterStatementOptions() string {
-	return this.alterStatementOptions
-}
-
-func ParseEnumValues(enumColumnType string) string {
-	if submatch := enumValuesRegexp.FindStringSubmatch(enumColumnType); len(submatch) > 0 {
-		return submatch[1]
-	}
-	return enumColumnType
-}
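The tokenizer kept in ParseAlterStatement splits the ALTER spec on commas while tracking a terminating quote, treating '(' as opening a region terminated by ')' -- which is why 'some comment, with comma' and decimal(10,2) survive intact in the tests below. A simplified standalone sketch of that stateful strings.FieldsFunc idea (single nesting level, as in the original):

package main

import (
	"fmt"
	"strings"
)

// splitAlterTokens splits a comma-separated ALTER spec, but not inside
// single-quoted strings or parentheses -- the same stateful-FieldsFunc
// technique as tokenizeAlterStatement above (no nested parens).
func splitAlterTokens(spec string) []string {
	terminator := rune(0)
	f := func(c rune) bool {
		switch {
		case c == terminator:
			terminator = 0 // closing quote/paren reached
			return false
		case terminator != 0:
			return false // inside a protected region: never split
		case c == '\'':
			terminator = '\''
			return false
		case c == '(':
			terminator = ')'
			return false
		default:
			return c == ',' // only bare commas separate tokens
		}
	}
	tokens := strings.FieldsFunc(spec, f)
	for i := range tokens {
		tokens[i] = strings.TrimSpace(tokens[i])
	}
	return tokens
}

func main() {
	fmt.Println(splitAlterTokens("add column t int, add column d decimal(10,2)"))
	// [add column t int add column d decimal(10,2)]
}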
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
Copyright 2022 GitHub Inc.
|
Copyright 2016 GitHub Inc.
|
||||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@ -9,8 +9,8 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/openark/golib/log"
|
"github.com/outbrain/golib/log"
|
||||||
test "github.com/openark/golib/tests"
|
test "github.com/outbrain/golib/tests"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@ -19,53 +19,28 @@ func init() {
|
|||||||
|
|
||||||
func TestParseAlterStatement(t *testing.T) {
|
func TestParseAlterStatement(t *testing.T) {
|
||||||
statement := "add column t int, engine=innodb"
|
statement := "add column t int, engine=innodb"
|
||||||
parser := NewAlterTableParser()
|
parser := NewParser()
|
||||||
err := parser.ParseAlterStatement(statement)
|
err := parser.ParseAlterStatement(statement)
|
||||||
test.S(t).ExpectNil(err)
|
test.S(t).ExpectNil(err)
|
||||||
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
|
|
||||||
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
|
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
|
||||||
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseAlterStatementTrivialRename(t *testing.T) {
|
func TestParseAlterStatementTrivialRename(t *testing.T) {
|
||||||
statement := "add column t int, change ts ts timestamp, engine=innodb"
|
statement := "add column t int, change ts ts timestamp, engine=innodb"
|
||||||
parser := NewAlterTableParser()
|
parser := NewParser()
|
||||||
err := parser.ParseAlterStatement(statement)
|
err := parser.ParseAlterStatement(statement)
|
||||||
test.S(t).ExpectNil(err)
|
test.S(t).ExpectNil(err)
|
||||||
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
|
|
||||||
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
|
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
|
||||||
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
|
|
||||||
test.S(t).ExpectEquals(len(parser.columnRenameMap), 1)
|
test.S(t).ExpectEquals(len(parser.columnRenameMap), 1)
|
||||||
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
|
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseAlterStatementWithAutoIncrement(t *testing.T) {
|
|
||||||
statements := []string{
|
|
||||||
"auto_increment=7",
|
|
||||||
"auto_increment = 7",
|
|
||||||
"AUTO_INCREMENT = 71",
|
|
||||||
"add column t int, change ts ts timestamp, auto_increment=7 engine=innodb",
|
|
||||||
"add column t int, change ts ts timestamp, auto_increment =7 engine=innodb",
|
|
||||||
"add column t int, change ts ts timestamp, AUTO_INCREMENT = 7 engine=innodb",
|
|
||||||
"add column t int, change ts ts timestamp, engine=innodb auto_increment=73425",
|
|
||||||
}
|
|
||||||
for _, statement := range statements {
|
|
||||||
parser := NewAlterTableParser()
|
|
||||||
err := parser.ParseAlterStatement(statement)
|
|
||||||
test.S(t).ExpectNil(err)
|
|
||||||
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
|
|
||||||
test.S(t).ExpectTrue(parser.IsAutoIncrementDefined())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParseAlterStatementTrivialRenames(t *testing.T) {
|
func TestParseAlterStatementTrivialRenames(t *testing.T) {
|
||||||
statement := "add column t int, change ts ts timestamp, CHANGE f `f` float, engine=innodb"
|
statement := "add column t int, change ts ts timestamp, CHANGE f `f` float, engine=innodb"
|
||||||
parser := NewAlterTableParser()
|
parser := NewParser()
|
||||||
err := parser.ParseAlterStatement(statement)
|
err := parser.ParseAlterStatement(statement)
|
||||||
test.S(t).ExpectNil(err)
|
test.S(t).ExpectNil(err)
|
||||||
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
|
|
||||||
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
|
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
|
||||||
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
|
|
||||||
test.S(t).ExpectEquals(len(parser.columnRenameMap), 2)
|
test.S(t).ExpectEquals(len(parser.columnRenameMap), 2)
|
||||||
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
|
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
|
||||||
test.S(t).ExpectEquals(parser.columnRenameMap["f"], "f")
|
test.S(t).ExpectEquals(parser.columnRenameMap["f"], "f")
|
||||||
@@ -83,11 +58,9 @@ func TestParseAlterStatementNonTrivial(t *testing.T) {
 	}
 
 	for _, statement := range statements {
-		parser := NewAlterTableParser()
+		parser := NewParser()
 		err := parser.ParseAlterStatement(statement)
 		test.S(t).ExpectNil(err)
-		test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
-		test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
 		renames := parser.GetNonTrivialRenames()
 		test.S(t).ExpectEquals(len(renames), 2)
 		test.S(t).ExpectEquals(renames["i"], "count")
@@ -96,46 +69,46 @@ func TestParseAlterStatementNonTrivial(t *testing.T) {
 }
 
 func TestTokenizeAlterStatement(t *testing.T) {
-	parser := NewAlterTableParser()
+	parser := NewParser()
 	{
 		alterStatement := "add column t int"
-		tokens := parser.tokenizeAlterStatement(alterStatement)
+		tokens, _ := parser.tokenizeAlterStatement(alterStatement)
 		test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int"}))
 	}
 	{
 		alterStatement := "add column t int, change column i int"
-		tokens := parser.tokenizeAlterStatement(alterStatement)
+		tokens, _ := parser.tokenizeAlterStatement(alterStatement)
 		test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "change column i int"}))
 	}
 	{
 		alterStatement := "add column t int, change column i int 'some comment'"
-		tokens := parser.tokenizeAlterStatement(alterStatement)
+		tokens, _ := parser.tokenizeAlterStatement(alterStatement)
 		test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "change column i int 'some comment'"}))
 	}
 	{
 		alterStatement := "add column t int, change column i int 'some comment, with comma'"
-		tokens := parser.tokenizeAlterStatement(alterStatement)
+		tokens, _ := parser.tokenizeAlterStatement(alterStatement)
 		test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "change column i int 'some comment, with comma'"}))
 	}
 	{
 		alterStatement := "add column t int, add column d decimal(10,2)"
-		tokens := parser.tokenizeAlterStatement(alterStatement)
+		tokens, _ := parser.tokenizeAlterStatement(alterStatement)
 		test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "add column d decimal(10,2)"}))
 	}
 	{
 		alterStatement := "add column t int, add column e enum('a','b','c')"
-		tokens := parser.tokenizeAlterStatement(alterStatement)
+		tokens, _ := parser.tokenizeAlterStatement(alterStatement)
 		test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "add column e enum('a','b','c')"}))
 	}
 	{
 		alterStatement := "add column t int(11), add column e enum('a','b','c')"
-		tokens := parser.tokenizeAlterStatement(alterStatement)
+		tokens, _ := parser.tokenizeAlterStatement(alterStatement)
 		test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int(11)", "add column e enum('a','b','c')"}))
 	}
 }
 
 func TestSanitizeQuotesFromAlterStatement(t *testing.T) {
-	parser := NewAlterTableParser()
+	parser := NewParser()
 	{
 		alterStatement := "add column e enum('a','b','c')"
 		strippedStatement := parser.sanitizeQuotesFromAlterStatement(alterStatement)
@@ -149,8 +122,9 @@ func TestSanitizeQuotesFromAlterStatement(t *testing.T) {
 }
 
 func TestParseAlterStatementDroppedColumns(t *testing.T) {
+
 	{
-		parser := NewAlterTableParser()
+		parser := NewParser()
 		statement := "drop column b"
 		err := parser.ParseAlterStatement(statement)
 		test.S(t).ExpectNil(err)
@@ -158,17 +132,16 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) {
 		test.S(t).ExpectTrue(parser.droppedColumns["b"])
 	}
 	{
-		parser := NewAlterTableParser()
+		parser := NewParser()
 		statement := "drop column b, drop key c_idx, drop column `d`"
 		err := parser.ParseAlterStatement(statement)
 		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
 		test.S(t).ExpectEquals(len(parser.droppedColumns), 2)
 		test.S(t).ExpectTrue(parser.droppedColumns["b"])
 		test.S(t).ExpectTrue(parser.droppedColumns["d"])
 	}
 	{
-		parser := NewAlterTableParser()
+		parser := NewParser()
 		statement := "drop column b, drop key c_idx, drop column `d`, drop `e`, drop primary key, drop foreign key fk_1"
 		err := parser.ParseAlterStatement(statement)
 		test.S(t).ExpectNil(err)
@@ -178,7 +151,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) {
 		test.S(t).ExpectTrue(parser.droppedColumns["e"])
 	}
 	{
-		parser := NewAlterTableParser()
+		parser := NewParser()
 		statement := "drop column b, drop bad statement, add column i int"
 		err := parser.ParseAlterStatement(statement)
 		test.S(t).ExpectNil(err)
@@ -188,151 +161,40 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) {
 	}
 
 func TestParseAlterStatementRenameTable(t *testing.T) {
 
 	{
-		parser := NewAlterTableParser()
+		parser := NewParser()
 		statement := "drop column b"
 		err := parser.ParseAlterStatement(statement)
 		test.S(t).ExpectNil(err)
 		test.S(t).ExpectFalse(parser.isRenameTable)
 	}
 	{
-		parser := NewAlterTableParser()
+		parser := NewParser()
 		statement := "rename as something_else"
 		err := parser.ParseAlterStatement(statement)
 		test.S(t).ExpectNil(err)
 		test.S(t).ExpectTrue(parser.isRenameTable)
 	}
 	{
-		parser := NewAlterTableParser()
+		parser := NewParser()
 		statement := "drop column b, rename as something_else"
 		err := parser.ParseAlterStatement(statement)
 		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
 		test.S(t).ExpectTrue(parser.isRenameTable)
 	}
 	{
-		parser := NewAlterTableParser()
+		parser := NewParser()
 		statement := "engine=innodb rename as something_else"
 		err := parser.ParseAlterStatement(statement)
 		test.S(t).ExpectNil(err)
 		test.S(t).ExpectTrue(parser.isRenameTable)
 	}
 	{
-		parser := NewAlterTableParser()
+		parser := NewParser()
 		statement := "rename as something_else, engine=innodb"
 		err := parser.ParseAlterStatement(statement)
 		test.S(t).ExpectNil(err)
 		test.S(t).ExpectTrue(parser.isRenameTable)
 	}
 }
 
-func TestParseAlterStatementExplicitTable(t *testing.T) {
-	{
-		parser := NewAlterTableParser()
-		statement := "drop column b"
-		err := parser.ParseAlterStatement(statement)
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(parser.explicitSchema, "")
-		test.S(t).ExpectEquals(parser.explicitTable, "")
-		test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
-		test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
-	}
-	{
-		parser := NewAlterTableParser()
-		statement := "alter table tbl drop column b"
-		err := parser.ParseAlterStatement(statement)
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(parser.explicitSchema, "")
-		test.S(t).ExpectEquals(parser.explicitTable, "tbl")
-		test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
-		test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
-	}
-	{
-		parser := NewAlterTableParser()
-		statement := "alter table `tbl` drop column b"
-		err := parser.ParseAlterStatement(statement)
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(parser.explicitSchema, "")
-		test.S(t).ExpectEquals(parser.explicitTable, "tbl")
-		test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
-		test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
-	}
-	{
-		parser := NewAlterTableParser()
-		statement := "alter table `scm with spaces`.`tbl` drop column b"
-		err := parser.ParseAlterStatement(statement)
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(parser.explicitSchema, "scm with spaces")
-		test.S(t).ExpectEquals(parser.explicitTable, "tbl")
-		test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
-		test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
-	}
-	{
-		parser := NewAlterTableParser()
-		statement := "alter table `scm`.`tbl with spaces` drop column b"
-		err := parser.ParseAlterStatement(statement)
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(parser.explicitSchema, "scm")
-		test.S(t).ExpectEquals(parser.explicitTable, "tbl with spaces")
-		test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
-		test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
-	}
-	{
-		parser := NewAlterTableParser()
-		statement := "alter table `scm`.tbl drop column b"
-		err := parser.ParseAlterStatement(statement)
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(parser.explicitSchema, "scm")
-		test.S(t).ExpectEquals(parser.explicitTable, "tbl")
-		test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
-		test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
-	}
-	{
-		parser := NewAlterTableParser()
-		statement := "alter table scm.`tbl` drop column b"
-		err := parser.ParseAlterStatement(statement)
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(parser.explicitSchema, "scm")
-		test.S(t).ExpectEquals(parser.explicitTable, "tbl")
-		test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
-		test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
-	}
-	{
-		parser := NewAlterTableParser()
-		statement := "alter table scm.tbl drop column b"
-		err := parser.ParseAlterStatement(statement)
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(parser.explicitSchema, "scm")
-		test.S(t).ExpectEquals(parser.explicitTable, "tbl")
-		test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
-		test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
-	}
-	{
-		parser := NewAlterTableParser()
-		statement := "alter table scm.tbl drop column b, add index idx(i)"
-		err := parser.ParseAlterStatement(statement)
-		test.S(t).ExpectNil(err)
-		test.S(t).ExpectEquals(parser.explicitSchema, "scm")
-		test.S(t).ExpectEquals(parser.explicitTable, "tbl")
-		test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b, add index idx(i)")
-		test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b", "add index idx(i)"}))
-	}
-}
-
-func TestParseEnumValues(t *testing.T) {
-	{
-		s := "enum('red','green','blue','orange')"
-		values := ParseEnumValues(s)
-		test.S(t).ExpectEquals(values, "'red','green','blue','orange'")
-	}
-	{
-		s := "('red','green','blue','orange')"
-		values := ParseEnumValues(s)
-		test.S(t).ExpectEquals(values, "('red','green','blue','orange')")
-	}
-	{
-		s := "zzz"
-		values := ParseEnumValues(s)
-		test.S(t).ExpectEquals(values, "zzz")
-	}
-}
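Taken together, the parser_test.go hunks above capture the API drift between the two sides of this compare: master constructs the parser with NewAlterTableParser() and adds IsAutoIncrementDefined(), explicit schema.table parsing, and ParseEnumValues(), while this branch still uses NewParser() and a tokenizer whose second return value the tests discard. A minimal sketch of the master-side exported surface; the import path is assumed from the repository layout:

```go
package main

import (
	"fmt"

	"github.com/github/gh-ost/go/sql"
)

func main() {
	// Master-side constructor; this branch still names it NewParser().
	parser := sql.NewAlterTableParser()
	if err := parser.ParseAlterStatement("change i count int, auto_increment=7"); err != nil {
		panic(err)
	}
	fmt.Println(parser.HasNonTrivialRenames())   // true: column i becomes count
	fmt.Println(parser.IsAutoIncrementDefined()) // true: AUTO_INCREMENT is set
	fmt.Println(parser.GetNonTrivialRenames())   // map[i:count]

	// Per the deleted TestParseEnumValues: the enum(...) wrapper is stripped,
	// anything else passes through unchanged.
	fmt.Println(sql.ParseEnumValues("enum('red','green','blue','orange')"))
}
```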
@@ -6,7 +6,6 @@
 package sql
 
 import (
-	"bytes"
 	"fmt"
 	"reflect"
 	"strconv"
@@ -23,7 +22,6 @@ const (
 	MediumIntColumnType
 	JSONColumnType
 	FloatColumnType
-	BinaryColumnType
 )
 
 const maxMediumintUnsigned int32 = 16777215
@@ -32,48 +30,20 @@ type TimezoneConversion struct {
 	ToTimezone string
 }
 
-type CharacterSetConversion struct {
-	ToCharset   string
-	FromCharset string
-}
-
 type Column struct {
 	Name               string
 	IsUnsigned         bool
 	Charset            string
 	Type               ColumnType
-	EnumValues         string
 	timezoneConversion *TimezoneConversion
-	enumToTextConversion bool
-	// add Octet length for binary type, fix bytes with suffix "00" get clipped in mysql binlog.
-	// https://github.com/github/gh-ost/issues/909
-	BinaryOctetLength uint
-	charsetConversion *CharacterSetConversion
 }
 
-func (this *Column) convertArg(arg interface{}, isUniqueKeyColumn bool) interface{} {
+func (this *Column) convertArg(arg interface{}) interface{} {
 	if s, ok := arg.(string); ok {
-		arg2Bytes := []byte(s)
-		// convert to bytes if character string without charsetConversion.
-		if this.Charset != "" && this.charsetConversion == nil {
-			arg = arg2Bytes
-		} else {
+		// string, charset conversion
 		if encoding, ok := charsetEncodingMap[this.Charset]; ok {
 			arg, _ = encoding.NewDecoder().String(s)
 		}
-		}
-
-		if this.Type == BinaryColumnType && isUniqueKeyColumn {
-			size := len(arg2Bytes)
-			if uint(size) < this.BinaryOctetLength {
-				buf := bytes.NewBuffer(arg2Bytes)
-				for i := uint(0); i < (this.BinaryOctetLength - uint(size)); i++ {
-					buf.Write([]byte{0})
-				}
-				arg = buf.String()
-			}
-		}
-
 		return arg
 	}
 
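The removed branch of convertArg is where master right-pads BINARY values: as the deleted comment notes (github/gh-ost#909), fixed-length BINARY(N) values can lose their trailing 0x00 bytes in the binlog, so when such a column serves as a unique key, master pads the value back out to the column's BinaryOctetLength before using it. A standalone sketch of just that padding step, mirroring the removed lines; the function name here is hypothetical:

```go
package main

import (
	"bytes"
	"fmt"
)

// padBinary right-pads a BINARY value with 0x00 up to octetLength, as the
// removed Column.convertArg lines do (hypothetical standalone form).
func padBinary(s string, octetLength uint) string {
	raw := []byte(s)
	if uint(len(raw)) >= octetLength {
		return s
	}
	buf := bytes.NewBuffer(raw)
	for i := uint(0); i < octetLength-uint(len(raw)); i++ {
		buf.Write([]byte{0})
	}
	return buf.String()
}

func main() {
	// A BINARY(4) value whose trailing zero bytes were clipped by the binlog.
	fmt.Printf("%q\n", padBinary("ab", 4)) // "ab\x00\x00"
}
```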
@@ -209,18 +179,6 @@ func (this *ColumnList) HasTimezoneConversion(columnName string) bool {
 	return this.GetColumn(columnName).timezoneConversion != nil
 }
 
-func (this *ColumnList) SetEnumToTextConversion(columnName string) {
-	this.GetColumn(columnName).enumToTextConversion = true
-}
-
-func (this *ColumnList) IsEnumToTextConversion(columnName string) bool {
-	return this.GetColumn(columnName).enumToTextConversion
-}
-
-func (this *ColumnList) SetEnumValues(columnName string, enumValues string) {
-	this.GetColumn(columnName).EnumValues = enumValues
-}
-
 func (this *ColumnList) String() string {
 	return strings.Join(this.Names(), ",")
 }
@@ -248,10 +206,6 @@ func (this *ColumnList) Len() int {
 	return len(this.columns)
 }
 
-func (this *ColumnList) SetCharsetConversion(columnName string, fromCharset string, toCharset string) {
-	this.GetColumn(columnName).charsetConversion = &CharacterSetConversion{FromCharset: fromCharset, ToCharset: toCharset}
-}
-
 // UniqueKey is the combination of a key's name and columns
 type UniqueKey struct {
 	Name string
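These deleted ColumnList methods are how master records per-column conversion metadata (enum-to-text conversions, enum value lists, charset conversions) for the row-copy and binlog-apply paths; none of them exist on this branch. An in-package usage sketch, under the assumption that master's NewColumnList([]string) constructor is available with this signature:

```go
package sql

// Sketch only: exercises the accessors removed in the hunks above.
func exampleConversionMetadata() bool {
	columns := NewColumnList([]string{"c", "e"})

	// Mark column "e" as an ENUM being rewritten to TEXT, and stash its
	// value list in the shape ParseEnumValues returns.
	columns.SetEnumToTextConversion("e")
	columns.SetEnumValues("e", "'red','green','blue','orange'")

	// Record a latin1 -> utf8mb4 conversion for text column "c".
	columns.SetCharsetConversion("c", "latin1", "utf8mb4")

	return columns.IsEnumToTextConversion("e")
}
```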
@@ -10,8 +10,8 @@ import (
 
 	"reflect"
 
-	"github.com/openark/golib/log"
-	test "github.com/openark/golib/tests"
+	"github.com/outbrain/golib/log"
+	test "github.com/outbrain/golib/tests"
 )
 
 func init() {
@@ -1,13 +0,0 @@
-drop table if exists gh_ost_test;
-create table gh_ost_test (
-  id int auto_increment,
-  i int not null,
-  color varchar(32),
-  primary key(id)
-) auto_increment=1;
-
-drop event if exists gh_ost_test;
-
-insert into gh_ost_test values (null, 11, 'red');
-insert into gh_ost_test values (null, 13, 'green');
-insert into gh_ost_test values (null, 17, 'blue');

@@ -1 +0,0 @@
---attempt-instant-ddl
@@ -1,17 +0,0 @@
-drop event if exists gh_ost_test;
-
-drop table if exists gh_ost_test;
-create table gh_ost_test (
-  id int auto_increment,
-  i int not null,
-  primary key(id)
-) auto_increment=1;
-
-insert into gh_ost_test values (NULL, 11);
-insert into gh_ost_test values (NULL, 13);
-insert into gh_ost_test values (NULL, 17);
-insert into gh_ost_test values (NULL, 23);
-insert into gh_ost_test values (NULL, 29);
-insert into gh_ost_test values (NULL, 31);
-insert into gh_ost_test values (NULL, 37);
-delete from gh_ost_test where id>=5;

@@ -1 +0,0 @@
-AUTO_INCREMENT=7

@@ -1 +0,0 @@
---alter='AUTO_INCREMENT=7'
@@ -1,17 +0,0 @@
-drop event if exists gh_ost_test;
-
-drop table if exists gh_ost_test;
-create table gh_ost_test (
-  id int auto_increment,
-  i int not null,
-  primary key(id)
-) auto_increment=1;
-
-insert into gh_ost_test values (NULL, 11);
-insert into gh_ost_test values (NULL, 13);
-insert into gh_ost_test values (NULL, 17);
-insert into gh_ost_test values (NULL, 23);
-insert into gh_ost_test values (NULL, 29);
-insert into gh_ost_test values (NULL, 31);
-insert into gh_ost_test values (NULL, 37);
-delete from gh_ost_test where id>=5;

@@ -1 +0,0 @@
-AUTO_INCREMENT=8
@@ -1,13 +0,0 @@
-drop event if exists gh_ost_test;
-
-drop table if exists gh_ost_test;
-create table gh_ost_test (
-  id int auto_increment,
-  i int not null,
-  primary key(id)
-) auto_increment=1;
-
-insert into gh_ost_test values (NULL, 11);
-insert into gh_ost_test values (NULL, 13);
-insert into gh_ost_test values (NULL, 17);
-insert into gh_ost_test values (NULL, 23);

@@ -1 +0,0 @@
-AUTO_INCREMENT=5
@@ -1,21 +0,0 @@
-drop table if exists gh_ost_test;
-create table gh_ost_test (
-  id bigint auto_increment,
-  val bigint not null,
-  primary key(id)
-) auto_increment=1;
-
-drop event if exists gh_ost_test;
-delimiter ;;
-create event gh_ost_test
-  on schedule every 1 second
-  starts current_timestamp
-  ends current_timestamp + interval 60 second
-  on completion not preserve
-  enable
-  do
-begin
-  insert into gh_ost_test values (null, 18446744073709551615);
-  insert into gh_ost_test values (null, 18446744073709551614);
-  insert into gh_ost_test values (null, 18446744073709551613);
-end ;;

@@ -1 +0,0 @@
---alter="change val val bigint"
@@ -1,40 +0,0 @@
-drop table if exists gh_ost_test;
-create table gh_ost_test (
-  id int auto_increment,
-  i int not null,
-  ts0 timestamp(6) default current_timestamp(6),
-  updated tinyint unsigned default 0,
-  primary key(id, ts0)
-) auto_increment=1;
-
-drop event if exists gh_ost_test;
-delimiter ;;
-create event gh_ost_test
-  on schedule every 1 second
-  starts current_timestamp
-  ends current_timestamp + interval 60 second
-  on completion not preserve
-  enable
-  do
-begin
-  insert into gh_ost_test values (null, 11, sysdate(6), 0);
-  update gh_ost_test set updated = 1 where i = 11 order by id desc limit 1;
-
-  insert into gh_ost_test values (null, 13, sysdate(6), 0);
-  update gh_ost_test set updated = 1 where i = 13 order by id desc limit 1;
-
-  insert into gh_ost_test values (null, 17, sysdate(6), 0);
-  update gh_ost_test set updated = 1 where i = 17 order by id desc limit 1;
-
-  insert into gh_ost_test values (null, 19, sysdate(6), 0);
-  update gh_ost_test set updated = 1 where i = 19 order by id desc limit 1;
-
-  insert into gh_ost_test values (null, 23, sysdate(6), 0);
-  update gh_ost_test set updated = 1 where i = 23 order by id desc limit 1;
-
-  insert into gh_ost_test values (null, 29, sysdate(6), 0);
-  insert into gh_ost_test values (null, 31, sysdate(6), 0);
-  insert into gh_ost_test values (null, 37, sysdate(6), 0);
-  insert into gh_ost_test values (null, 41, sysdate(6), 0);
-  delete from gh_ost_test where i = 31 order by id desc limit 1;
-end ;;
@@ -1,40 +0,0 @@
-drop table if exists gh_ost_test;
-create table gh_ost_test (
-  id int auto_increment,
-  i int not null,
-  v varchar(128),
-  updated tinyint unsigned default 0,
-  primary key(id, v)
-) auto_increment=1;
-
-drop event if exists gh_ost_test;
-delimiter ;;
-create event gh_ost_test
-  on schedule every 1 second
-  starts current_timestamp
-  ends current_timestamp + interval 60 second
-  on completion not preserve
-  enable
-  do
-begin
-  insert into gh_ost_test values (null, 11, 'eleven', 0);
-  update gh_ost_test set updated = 1 where i = 11 order by id desc limit 1;
-
-  insert into gh_ost_test values (null, 13, 'thirteen', 0);
-  update gh_ost_test set updated = 1 where i = 13 order by id desc limit 1;
-
-  insert into gh_ost_test values (null, 17, 'seventeen', 0);
-  update gh_ost_test set updated = 1 where i = 17 order by id desc limit 1;
-
-  insert into gh_ost_test values (null, 19, 'nineteen', 0);
-  update gh_ost_test set updated = 1 where i = 19 order by id desc limit 1;
-
-  insert into gh_ost_test values (null, 23, 'twenty three', 0);
-  update gh_ost_test set updated = 1 where i = 23 order by id desc limit 1;
-
-  insert into gh_ost_test values (null, 29, 'twenty nine', 0);
-  insert into gh_ost_test values (null, 31, 'thirty one', 0);
-  insert into gh_ost_test values (null, 37, 'thirty seven', 0);
-  insert into gh_ost_test values (null, 41, 'forty one', 0);
-  delete from gh_ost_test where i = 31 order by id desc limit 1;
-end ;;
@@ -7,6 +7,9 @@ create table gh_ost_test (
   primary key(id)
 ) auto_increment=1;
 
+insert into gh_ost_test values (null, 'átesting');
+
+
 insert into gh_ost_test values (null, 'Hello world, Καλημέρα κόσμε, コンニチハ', 'átesting0', 'initial');
 
 drop event if exists gh_ost_test;

@@ -0,0 +1 @@
+(5.5)

1 localtests/datetime-submillis/ignore_versions Normal file
@@ -0,0 +1 @@
+(5.5)

1 localtests/datetime-to-timestamp-pk-fail/ignore_versions Normal file
@@ -0,0 +1 @@
+(5.5)

@@ -1 +0,0 @@
---allow-zero-in-date --alter="change column dt dt datetime not null default '1970-00-00 00:00:00'"

1 localtests/datetime/ignore_versions Normal file
@@ -0,0 +1 @@
+(5.5)

@@ -1 +0,0 @@
-Percona
@@ -1,26 +0,0 @@
-drop table if exists gh_ost_test;
-create table gh_ost_test (
-  id int auto_increment,
-  i int not null,
-  e enum('red', 'green', 'blue', 'orange') null default null collate 'utf8_bin',
-  primary key(id)
-) auto_increment=1;
-
-insert into gh_ost_test values (null, 7, 'red');
-
-drop event if exists gh_ost_test;
-delimiter ;;
-create event gh_ost_test
-  on schedule every 1 second
-  starts current_timestamp
-  ends current_timestamp + interval 60 second
-  on completion not preserve
-  enable
-  do
-begin
-  insert into gh_ost_test values (null, 11, 'red');
-  insert into gh_ost_test values (null, 13, 'green');
-  insert into gh_ost_test values (null, 17, 'blue');
-  set @last_insert_id := last_insert_id();
-  update gh_ost_test set e='orange' where id = @last_insert_id;
-end ;;

@@ -1 +0,0 @@
---alter="change e e varchar(32) not null default ''"

@@ -1 +0,0 @@
---allow-zero-in-date --alter="engine=innodb"

@@ -1 +0,0 @@
-Invalid default value for 'dt'

@@ -1 +0,0 @@
---alter="change column dt dt datetime not null default '1970-00-00 00:00:00'"
@@ -1,21 +1,23 @@
-set session sql_mode='';
 drop table if exists gh_ost_test;
 create table gh_ost_test (
-  id int unsigned auto_increment,
+  id int auto_increment,
   i int not null,
-  dt datetime not null default '1970-00-00 00:00:00',
+  color varchar(32),
   primary key(id)
 ) auto_increment=1;
 
+insert into gh_ost_test values (null, 1, 'red');
+
 drop event if exists gh_ost_test;
 delimiter ;;
 create event gh_ost_test
   on schedule every 1 second
-  starts current_timestamp
+  starts current_timestamp + interval 3 second
   ends current_timestamp + interval 60 second
   on completion not preserve
   enable
   do
 begin
-  insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
+  insert into gh_ost_test values (null, 1, 'blue');
+  drop table if exists _gh_ost_test_ghc;
 end ;;

1 localtests/fail-drop-ghc-table-no-read/expect_failure Normal file
@@ -0,0 +1 @@
+Error 1146: Table 'test._gh_ost_test_ghc' doesn't exist

1 localtests/fail-drop-ghc-table-no-read/extra_args Normal file
@@ -0,0 +1 @@
+--throttle-query='select sleep(1)'
@@ -1,21 +1,23 @@
-set session sql_mode='';
 drop table if exists gh_ost_test;
 create table gh_ost_test (
-  id int unsigned auto_increment,
+  id int auto_increment,
   i int not null,
-  dt datetime not null default '1970-00-00 00:00:00',
+  color varchar(32),
   primary key(id)
 ) auto_increment=1;
 
+insert into gh_ost_test values (null, 1, 'red');
+
 drop event if exists gh_ost_test;
 delimiter ;;
 create event gh_ost_test
   on schedule every 1 second
-  starts current_timestamp
+  starts current_timestamp + interval 3 second
   ends current_timestamp + interval 60 second
   on completion not preserve
   enable
   do
 begin
-  insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
+  insert into gh_ost_test values (null, 1, 'blue');
+  drop table if exists _gh_ost_test_ghc;
 end ;;

1 localtests/fail-drop-ghc-table/expect_failure Normal file
@@ -0,0 +1 @@
+Error 1146: Table 'test._gh_ost_test_ghc' doesn't exist
@@ -1,11 +1,13 @@
 drop table if exists gh_ost_test;
 create table gh_ost_test (
-  id int unsigned auto_increment,
+  id int auto_increment,
   i int not null,
-  dt datetime,
+  color varchar(32),
   primary key(id)
 ) auto_increment=1;
 
+insert into gh_ost_test values (null, 1, 'blue');
+
 drop event if exists gh_ost_test;
 delimiter ;;
 create event gh_ost_test
@@ -16,5 +18,5 @@ create event gh_ost_test
   enable
   do
 begin
-  insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
+  drop table if exists _gh_ost_test_gho;
 end ;;

1 localtests/fail-drop-gho-table-no-data/expect_failure Normal file
@@ -0,0 +1 @@
+Error 1146: Table 'test._gh_ost_test_gho' doesn't exist

1 localtests/fail-drop-gho-table-no-data/extra_args Normal file
@@ -0,0 +1 @@
+--throttle-query='select timestampdiff(second, min(last_update), now()) < 5 from _gh_ost_test_ghc'
@@ -1,11 +1,12 @@
 drop table if exists gh_ost_test;
 create table gh_ost_test (
-  id int unsigned auto_increment,
+  id int auto_increment,
   i int not null,
-  dt datetime,
+  color varchar(32),
   primary key(id)
 ) auto_increment=1;
 
+
 drop event if exists gh_ost_test;
 delimiter ;;
 create event gh_ost_test
@@ -16,5 +17,6 @@ create event gh_ost_test
   enable
   do
 begin
-  insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
+  insert into gh_ost_test values (null, 1, 'blue');
+  drop table if exists _gh_ost_test_gho;
 end ;;

1 localtests/fail-drop-gho-table/expect_failure Normal file
@@ -0,0 +1 @@
+Error 1146: Table 'test._gh_ost_test_gho' doesn't exist
@@ -1 +0,0 @@
-Invalid default value for 'dt'

@@ -1 +0,0 @@
---alter="engine=innodb"

@@ -1 +0,0 @@
-Percona

@@ -1 +0,0 @@
-Percona

@@ -1 +0,0 @@
-Percona
Some files were not shown because too many files have changed in this diff.