Compare commits
4 Commits
master...utf8-in-la

| SHA1 |
| --- |
| c636dfdd55 |
| 294c71c4d5 |
| c6df7aa016 |
| 8b4a32cedd |

.github/PULL_REQUEST_TEMPLATE.md (vendored): 6 lines changed

@@ -11,9 +11,11 @@ Related issue: https://github.com/github/gh-ost/issues/0123456789

### Description

This PR [briefly explain what it does]
This PR [briefly explain what is does]

> In case this PR introduced Go code changes:

- [ ] contributed code is using same conventions as original code
- [ ] `script/cibuild` returns with no formatting errors, build errors or unit test errors.
- [ ] code is formatted via `gofmt` (please avoid `goimports`)
- [ ] code is built via `./build.sh`
- [ ] code is tested via `./test.sh`

.github/workflows/ci.yml (vendored): 25 lines changed

@@ -1,25 +0,0 @@
name: CI

on: [pull_request]

jobs:
  build:

    runs-on: ubuntu-20.04

    steps:
    - uses: actions/checkout@v2

    - name: Set up Go
      uses: actions/setup-go@v1
      with:
        go-version: 1.17

    - name: Build
      run: script/cibuild

    - name: Upload gh-ost binary artifact
      uses: actions/upload-artifact@v1
      with:
        name: gh-ost
        path: bin/gh-ost

.github/workflows/codeql.yml (vendored): 25 lines changed

@@ -1,25 +0,0 @@
name: "CodeQL analysis"

on:
  push:
  pull_request:
  schedule:
    - cron: '0 0 * * 0'

jobs:
  codeql:

    strategy:
      fail-fast: false

    runs-on: ubuntu-latest # windows-latest and ubuntu-latest are supported. macos-latest is not supported at this time.

    steps:
    - name: Checkout repository
      uses: actions/checkout@v2

    - name: Initialize CodeQL
      uses: github/codeql-action/init@v1

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v1

.github/workflows/golangci-lint.yml (vendored): 23 lines changed

@@ -1,23 +0,0 @@
name: golangci-lint
on:
  push:
    branches:
      - master
  pull_request:
permissions:
  contents: read
  # Optional: allow read access to pull request. Use with `only-new-issues` option.
  # pull-requests: read
jobs:
  golangci:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: 1.17
      - uses: actions/checkout@v3
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v3
        with:
          version: v1.46.2

.github/workflows/replica-tests.yml (vendored): 24 lines changed

@@ -1,24 +0,0 @@
name: migration tests

on: [pull_request]

jobs:
  build:

    runs-on: ubuntu-20.04
    strategy:
      matrix:
        version: [mysql-5.7.25,mysql-8.0.16,PerconaServer-8.0.21]

    steps:
    - uses: actions/checkout@v2

    - name: Set up Go
      uses: actions/setup-go@v1
      with:
        go-version: 1.17

    - name: migration tests
      env:
        TEST_MYSQL_VERSION: ${{ matrix.version }}
      run: script/cibuild-gh-ost-replica-tests

.gitignore (vendored): 1 line changed

@@ -2,4 +2,3 @@
/bin/
/libexec/
/.vendor/
.idea/

@@ -1,30 +0,0 @@
run:
  timeout: 5m
linters:
  disable:
    - errcheck
  enable:
    - bodyclose
    - containedctx
    - contextcheck
    - dogsled
    - durationcheck
    - errname
    - errorlint
    - execinquery
    - gofmt
    - ifshort
    - misspell
    - nilerr
    - nilnil
    - noctx
    - nolintlint
    - nosprintfhostport
    - prealloc
    - rowserrcheck
    - sqlclosecheck
    - unconvert
    - unparam
    - unused
    - wastedassign
    - whitespace

.travis.yml (Normal file): 7 lines added

@@ -0,0 +1,7 @@
language: go

go:
  - 1.6
  - tip

script: ./test.sh

@@ -1,20 +0,0 @@
FROM golang:1.17

RUN apt-get update
RUN apt-get install -y ruby ruby-dev rubygems build-essential
RUN gem install --no-ri --no-rdoc fpm
ENV GOPATH=/tmp/go

RUN apt-get install -y curl
RUN apt-get install -y rsync
RUN apt-get install -y gcc
RUN apt-get install -y g++
RUN apt-get install -y bash
RUN apt-get install -y git
RUN apt-get install -y tar
RUN apt-get install -y rpm

RUN mkdir -p $GOPATH/src/github.com/github/gh-ost
WORKDIR $GOPATH/src/github.com/github/gh-ost
COPY . .
RUN bash build.sh

@@ -1,11 +0,0 @@
FROM golang:1.17
LABEL maintainer="github@github.com"

RUN apt-get update
RUN apt-get install -y lsb-release
RUN rm -rf /var/lib/apt/lists/*

COPY . /go/src/github.com/github/gh-ost
WORKDIR /go/src/github.com/github/gh-ost

CMD ["script/test"]

README.md: 16 lines changed

@@ -1,7 +1,5 @@
# gh-ost

[![ci](https://github.com/github/gh-ost/actions/workflows/ci.yml/badge.svg)](https://github.com/github/gh-ost/actions/workflows/ci.yml) [![replica-tests](https://github.com/github/gh-ost/actions/workflows/replica-tests.yml/badge.svg)](https://github.com/github/gh-ost/actions/workflows/replica-tests.yml) [![downloads](https://img.shields.io/github/downloads/github/gh-ost/total.svg)](https://github.com/github/gh-ost/releases) [![release](https://img.shields.io/github/release/github/gh-ost.svg)](https://github.com/github/gh-ost/releases)

#### GitHub's online schema migration for MySQL <img src="doc/images/gh-ost-logo-light-160.png" align="right">

`gh-ost` is a triggerless online schema migration solution for MySQL. It is testable and provides pausability, dynamic control/reconfiguration, auditing, and many operational perks.

@@ -60,12 +58,9 @@ More tips:

Also see:

- [requirements and limitations](doc/requirements-and-limitations.md)
- [common questions](doc/questions.md)
- [what if?](doc/what-if.md)
- [the fine print](doc/the-fine-print.md)
- [Community questions](https://github.com/github/gh-ost/issues?q=label%3Aquestion)
- [Using `gh-ost` on AWS RDS](doc/rds.md)
- [Using `gh-ost` on Azure Database for MySQL](doc/azure.md)
- [Questions](https://github.com/github/gh-ost/issues?q=label%3Aquestion)

## What's in a name?

@@ -85,8 +80,6 @@ But then a rare genetic mutation happened, and the `c` transformed into `t`. And

We develop `gh-ost` at GitHub and for the community. We may have different priorities than others. From time to time we may suggest a contribution that is not on our immediate roadmap but which may appeal to others.

Please see [Coding gh-ost](doc/coding-ghost.md) for a guide to getting started developing with gh-ost.

## Download/binaries/source

`gh-ost` is now GA and stable.

@@ -95,9 +88,7 @@

[Download latest release here](https://github.com/github/gh-ost/releases/latest)

`gh-ost` is a Go project; it is built with Go `1.15` and above. To build on your own, use either:
- [script/build](https://github.com/github/gh-ost/blob/master/script/build) - this is the same build script used by CI hence the authoritative; artifact is `./bin/gh-ost` binary.
- [build.sh](https://github.com/github/gh-ost/blob/master/build.sh) for building `tar.gz` artifacts in `/tmp/gh-ost`
`gh-ost` is a Go project; it is built with Go 1.5 with "experimental vendor". Soon to migrate to Go 1.6. See and use [build file](https://github.com/github/gh-ost/blob/master/build.sh) for compiling it on your own.

Generally speaking, `master` branch is stable, but only [releases](https://github.com/github/gh-ost/releases) are to be used in production.
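
For illustration, a minimal local build sketch using the scripts referenced above, assuming a Go toolchain of 1.15 or later is installed:

```shell
# Build via the CI build script; the resulting binary lands in ./bin/gh-ost
script/build
./bin/gh-ost --version

# Or build packaged tar.gz artifacts under /tmp/gh-ost
./build.sh
```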

@@ -108,6 +99,3 @@ Generally speaking, `master` branch is stable, but only [releases](https://githu

- [@ggunson](https://github.com/ggunson)
- [@tomkrouper](https://github.com/tomkrouper)
- [@shlomi-noach](https://github.com/shlomi-noach)
- [@jessbreckenridge](https://github.com/jessbreckenridge)
- [@gtowey](https://github.com/gtowey)
- [@timvaillancourt](https://github.com/timvaillancourt)

@@ -1 +1 @@
1.1.2
1.0.28

build.sh: 89 lines changed

@@ -2,77 +2,36 @@
#
#

RELEASE_VERSION=
buildpath=

function setuptree() {
  b=$( mktemp -d $buildpath/gh-ostXXXXXX ) || return 1
  mkdir -p $b/gh-ost
  mkdir -p $b/gh-ost/usr/bin
  echo $b
}
RELEASE_VERSION=$(cat RELEASE_VERSION)

function build {
  osname=$1
  osshort=$2
  GOOS=$3
  GOARCH=$4
  osname=$1
  osshort=$2
  GOOS=$3
  GOARCH=$4

  if ! go version | egrep -q 'go1\.(1[5-9]|[2-9][0-9]{1})' ; then
    echo "go version must be 1.15 or above"
    exit 1
  fi
  echo "Building ${osname} binary"
  export GOOS
  export GOARCH
  go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go

  echo "Building ${osname}-${GOARCH} binary"
  export GOOS
  export GOARCH
  go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
  if [ $? -ne 0 ]; then
    echo "Build failed for ${osname}"
    exit 1
  fi

  if [ $? -ne 0 ]; then
    echo "Build failed for ${osname} ${GOARCH}."
    exit 1
  fi

  (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${GOARCH}-${timestamp}.tar.gz $target)

  # build RPM and deb for Linux, x86-64 only
  if [ "$GOOS" == "linux" ] && [ "$GOARCH" == "amd64" ] ; then
    echo "Creating Distro full packages"
    builddir=$(setuptree)
    cp $buildpath/$target $builddir/gh-ost/usr/bin
    cd $buildpath
    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm --rpm-rpmbuild-define "_build_id_links none" --rpm-os linux .
    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
    cd -
  fi
  (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${timestamp}.tar.gz $target)
}

main() {
  if [ -z "${RELEASE_VERSION}" ] ; then
    RELEASE_VERSION=$(git describe --abbrev=0 --tags | tr -d 'v')
  fi
  if [ -z "${RELEASE_VERSION}" ] ; then
    RELEASE_VERSION=$(cat RELEASE_VERSION)
  fi
  buildpath=/tmp/gh-ost
  target=gh-ost
  timestamp=$(date "+%Y%m%d%H%M%S")
  ldflags="-X main.AppVersion=${RELEASE_VERSION}"
  export GO15VENDOREXPERIMENT=1

  mkdir -p ${buildpath}
  build macOS osx darwin amd64
  build GNU/Linux linux linux amd64

  buildpath=/tmp/gh-ost-release
  target=gh-ost
  timestamp=$(date "+%Y%m%d%H%M%S")
  ldflags="-X main.AppVersion=${RELEASE_VERSION}"

  mkdir -p ${buildpath}
  rm -rf ${buildpath:?}/*
  build GNU/Linux linux linux amd64
  build GNU/Linux linux linux arm64
  build macOS osx darwin amd64
  build macOS osx darwin arm64

  echo "Binaries found in:"
  find $buildpath/gh-ost* -type f -maxdepth 1

  echo "Checksums:"
  (cd $buildpath && shasum -a256 gh-ost* 2>/dev/null)
}

main "$@"
echo "Binaries found in:"
ls -1 $buildpath/gh-ost-binary*${timestamp}.tar.gz

doc/azure.md: 26 lines changed

@@ -1,26 +0,0 @@
`gh-ost` has been updated to work with Azure Database for MySQL however due to GitHub does not use it, this documentation is community driven so if you find a bug please [open an issue][new_issue]!

# Azure Database for MySQL

## Limitations

- `gh-ost` runs should be setup use [`--assume-rbr`][assume_rbr_docs] and use `binlog_row_image=FULL`.
- Azure Database for MySQL does not use same user name suffix for master and replica, so master host, user and password need to be pointed out.

## Step
1. Change the replica server's `binlog_row_image` from `MINIMAL` to `FULL`. See [guide](https://docs.microsoft.com/en-us/azure/mysql/howto-server-parameters) on Azure document.
2. Use your `gh-ost` always with additional 5 parameter
```{bash}
gh-ost \
--azure \
--assume-master-host=master-server-dns-name \
--master-user="master-user-name" \
--master-password="master-password" \
--assume-rbr \
[-- other paramters you need]
```


[new_issue]: https://github.com/github/gh-ost/issues/new
[assume_rbr_docs]: https://github.com/github/gh-ost/blob/master/doc/command-line-flags.md#assume-rbr
[migrate_test_on_replica_docs]: https://github.com/github/gh-ost/blob/master/doc/cheatsheet.md#c-migratetest-on-replica

@@ -1,13 +1,11 @@
# Cheatsheet

### Operation modes

![operation modes](images/gh-ost-operation-modes.png)


`gh-ost` operates by connecting to potentially multiple servers, as well as imposing itself as a replica in order to streamline binary log events directly from one of those servers. There are various operation modes, which depend on your setup, configuration, and where you want to run the migration.

#### a. Connect to replica, migrate on master
### a. Connect to replica, migrate on master

This is the mode `gh-ost` expects by default. `gh-ost` will investigate the replica, crawl up to find the topology's master, and will hook onto it as well. Migration will:

@@ -49,7 +47,7 @@ gh-ost \
With `--execute`, migration actually copies data and flips tables. Without it this is a `noop` run.


#### b. Connect to master
### b. Connect to master

If you don't have replicas, or do not wish to use them, you are still able to operate directly on the master. `gh-ost` will do all operations directly on the master. You may still ask it to be considerate of replication lag.

@@ -82,7 +80,7 @@ gh-ost \
  [--execute]
```

#### c. Migrate/test on replica
### c. Migrate/test on replica

This will perform a migration on the replica. `gh-ost` will briefly connect to the master but will thereafter perform all operations on the replica without modifying anything on the master.
Throughout the operation, `gh-ost` will throttle such that the replica is up to date.

@@ -148,24 +146,8 @@ gh-ost --allow-master-master --assume-master-host=a.specific.master.com

Topologies using _tungsten replicator_ are peculiar in that the participating servers are not actually aware they are replicating. The _tungsten replicator_ looks just like another app issuing queries on those hosts. `gh-ost` is unable to identify that a server participates in a _tungsten_ topology.

If you choose to migrate directly on master (see above), there's nothing special you need to do.

If you choose to migrate via replica, then you need to make sure Tungsten is configured with log-slave-updates parameter (note this is different from MySQL's own log-slave-updates parameter), otherwise changes will not be in the replica's binlog, causing data to be corrupted after table swap. You must also supply the identity of the master, and indicate this is a tungsten setup, as follows:
If you choose to migrate directly on master (see above), there's nothing special you need to do. If you choose to migrate via replica, then you must supply the identity of the master, and indicate this is a tungsten setup, as follows:

```
gh-ost --tungsten --assume-master-host=the.topology.master.com
```

Also note that `--switch-to-rbr` does not work for a Tungsten setup as the replication process is external, so you need to make sure `binlog_format` is set to ROW before Tungsten Replicator connects to the server and starts applying events from the master.

### Concurrent migrations

It is possible to run concurrent `gh-ost` migrations.

- Never on the exact same table.
- If running on different replicas, (e.g. `table1` on `replica1` and `table2` on `replica2`) then no further configuration required.
- If running from same server (binaries run on same server, regardless of which replica/replicas are used):
  - Make sure not to specify same `-serve-socket-file` (or let `gh-ost` pick one for you).
  - You may choose to use same `-throttle-flag-file` (preferably use `-throttle-additional-flag-file`, this is exactly the reason there's two, this latter file is for sharing).
  - You may choose to use same `-panic-flag-file`. This all depends on your flow and how you'd like to control your migrations.
- If using same inspected box (either master or replica, `--host=everyone.uses.this.host`) then for each `gh-ost` process you must also provide a different, unique `--replica-server-id`. Optionally use process ID (`$$` in shell) ; but it's on you to choose a number that does not collide with another `gh-ost` or another running replica.
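
For illustration, a sketch of two concurrent migrations run from the same server (hostnames, schema and table names are hypothetical):

```shell
# Migration 1: alter `mydb`.`table1`, inspected via replica1
gh-ost \
  --host=replica1.example.com \
  --database=mydb --table=table1 \
  --alter="ADD COLUMN c1 INT NOT NULL DEFAULT 0" \
  --serve-socket-file=/tmp/gh-ost.mydb.table1.sock \
  --replica-server-id=99901 \
  --execute

# Migration 2: alter `mydb`.`table2`; same inspected host, so a distinct
# socket file and a distinct --replica-server-id are required
gh-ost \
  --host=replica1.example.com \
  --database=mydb --table=table2 \
  --alter="ADD COLUMN c2 INT NOT NULL DEFAULT 0" \
  --serve-socket-file=/tmp/gh-ost.mydb.table2.sock \
  --replica-server-id=99902 \
  --execute
```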

@@ -1,25 +0,0 @@
# Getting started with gh-ost development.

## Overview

Getting started with gh-ost development is simple!

- First obtain the repository with `git clone` or `go get`.
- From inside of the repository run `script/cibuild`.
- This will bootstrap the environment if needed, format the code, build the code, and then run the unit test.

## CI build workflow

`script/cibuild` performs the following actions will bootstrap the environment to build `gh-ost` correctly, build, perform syntax checks and run unit tests.

If additional steps are needed, please add them into this workflow so that the workflow remains simple.

## `golang-ci` linter

To enfore best-practices, Pull Requests are automatically linted by [`golang-ci`](https://golangci-lint.run/). The linter config is located at [`.golangci.yml`](https://github.com/github/gh-ost/blob/master/.golangci.yml) and the `golangci-lint` GitHub Action is located at [`.github/workflows/golangci-lint.yml`](https://github.com/github/gh-ost/blob/master/.github/workflows/golangci-lint.yml).

To run the `golang-ci` linters locally _(recommended before push)_, use `script/lint`.

## Notes:

Currently, `script/ensure-go-installed` will install `go` for Mac OS X and Linux. We welcome PR's to add other platforms.

@@ -2,22 +2,6 @@

A more in-depth discussion of various `gh-ost` command line flags: implementation, implication, use cases.

### aliyun-rds

Add this flag when executing on Aliyun RDS.

### allow-zero-in-date

Allows the user to make schema changes that include a zero date or zero in date (e.g. adding a `datetime default '0000-00-00 00:00:00'` column), even if global `sql_mode` on MySQL has `NO_ZERO_IN_DATE,NO_ZERO_DATE`.

### azure

Add this flag when executing on Azure Database for MySQL.

### allow-master-master

See [`--assume-master-host`](#assume-master-host).

### allow-on-master

By default, `gh-ost` would like you to connect to a replica, from where it figures out the master by itself. This wiring is required should your master execute using `binlog_format=STATEMENT`.

@@ -26,43 +10,24 @@ If, for some reason, you do not wish `gh-ost` to connect to a replica, you may c

### approve-renamed-columns

When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise, the new structure may also look like some column was dropped and another was added.
When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try an associate the old column name with new column name. Otherwise the new structure may also look like some column was dropped and another was added.

`gh-ost` will print out what it thinks the _rename_ implied, but will not issue the migration unless you provide with `--approve-renamed-columns`.

If you think `gh-ost` is mistaken and that there's actually no _rename_ involved, you may pass [`--skip-renamed-columns`](#skip-renamed-columns) instead. This will cause `gh-ost` to disassociate the column values; data will not be copied between those columns.
If you think `gh-ost` is mistaken and that there's actually no _rename_ involved, you may pass `--skip-renamed-columns` instead. This will cause `gh-ost` to disassociate the column values; data will not be copied between those columns.

### assume-master-host

`gh-ost` infers the identity of the master server by crawling up the replication topology. You may explicitly tell `gh-ost` the identity of the master host via `--assume-master-host=the.master.com`. This is useful in:

- _master-master_ topologies (together with [`--allow-master-master`](#allow-master-master)), where `gh-ost` can arbitrarily pick one of the co-masters, and you prefer that it picks a specific one
- _tungsten replicator_ topologies (together with [`--tungsten`](#tungsten)), where `gh-ost` is unable to crawl and detect the master
- master-master topologies (together with `--allow-master-master`), where `gh-ost` can arbitrarily pick one of the co-master and you prefer that it picks a specific one
- _tungsten replicator_ topologies (together with `--tungsten`), where `gh-ost` is unable to crawl and detect the master

### assume-rbr

If you happen to _know_ your servers use RBR (Row Based Replication, i.e. `binlog_format=ROW`), you may specify `--assume-rbr`. This skips a verification step where `gh-ost` would issue a `STOP SLAVE; START SLAVE`.
Skipping this step means `gh-ost` would not need the `SUPER` privilege in order to operate.
You may want to use this on Amazon RDS.

### attempt-instant-ddl

MySQL 8.0 supports "instant DDL" for some operations. If an alter statement can be completed with instant DDL, only a metadata change is required internally. Instant operations include:

- Adding a column
- Dropping a column
- Dropping an index
- Extending a varchar column
- Adding a virtual generated column

It is not reliable to parse the `ALTER` statement to determine if it is instant or not. This is because the table might be in an older row format, or have some other incompatibility that is difficult to identify.

`--attempt-instant-ddl` is disabled by default, but the risks of enabling it are relatively minor: `gh-ost` may need to acquire a metadata lock at the start of the operation. This is not a problem for most scenarios, but it could be a problem for users that start the DDL during a period with long running transactions.

`gh-ost` will automatically fallback to the normal DDL process if the attempt to use instant DDL is unsuccessful.

### binlogsyncer-max-reconnect-attempts
`--binlogsyncer-max-reconnect-attempts=0`, the maximum number of attempts to re-establish a broken inspector connection for sync binlog. `0` or `negative number` means infinite retry, default `0`
You may want to use this on Amazon RDS

### conf

@@ -76,23 +41,13 @@ password=123456

### concurrent-rowcount

Defaults to `true`. See [`exact-rowcount`](#exact-rowcount)
See `exact-rowcount`

### critical-load

Comma delimited status-name=threshold, same format as [`--max-load`](#max-load).
### critical-load-interval-millis

`--critical-load` defines a threshold that, when met, `gh-ost` panics and bails out. The default behavior is to bail out immediately when meeting this threshold.

This may sometimes lead to migrations bailing out on a very short spike, that, while in itself is impacting production and is worth investigating, isn't reason enough to kill a 10-hour migration.

### critical-load-hibernate-seconds

When `--critical-load-hibernate-seconds` is non-zero (e.g. `--critical-load-hibernate-seconds=300`), `critical-load` does not panic and bail out; instead, `gh-ost` goes into hibernation for the specified duration. It will not read/write anything from/to any server during this time. Execution then continues upon waking from hibernation.

If `critical-load` is met again, `gh-ost` will repeat this cycle, and never panic and bail out.

### critical-load-interval-millis
This may sometimes lead to migrations bailing out on a very short spike, that, while in itself is impacting production and is worth investigating, isn't reason enough to kill a 10 hour migration.

When `--critical-load-interval-millis` is specified (e.g. `--critical-load-interval-millis=2500`), `gh-ost` gives a second chance: when it meets `critical-load` threshold, it doesn't bail out. Instead, it starts a timer (in this example: `2.5` seconds) and re-checks `critical-load` when the timer expires. If `critical-load` is met again, `gh-ost` panics and bails out. If not, execution continues.

@@ -100,11 +55,7 @@ This is somewhat similar to a Nagios `n`-times test, where `n` in our case is al
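
For illustration, a minimal sketch of how these thresholds might be combined on the command line (the values are hypothetical, not defaults):

```shell
# Re-check Threads_running=1000 after 2.5s before acting on it; rather than
# bailing out, hibernate for 5 minutes and then resume.
gh-ost \
  --critical-load="Threads_running=1000" \
  --critical-load-interval-millis=2500 \
  --critical-load-hibernate-seconds=300 \
  [-- other parameters you need]
```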

### cut-over

Optional. Default is `safe`. See more discussion in [`cut-over`](cut-over.md)

### cut-over-lock-timeout-seconds

Default `3`. Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout).
Optional. Default is `safe`. See more discussion in [cut-over](cut-over.md)

### discard-foreign-keys

@@ -112,29 +63,16 @@ Default `3`. Max number of seconds to hold locks on tables while attempting to

At this time (10-2016) `gh-ost` does not support foreign keys on migrated tables (it bails out when it notices a FK on the migrated table). However, it is able to support _dropping_ of foreign keys via this flag. If you're trying to get rid of foreign keys in your environment, this is a useful flag.

See also: [`skip-foreign-key-checks`](#skip-foreign-key-checks)


### dml-batch-size

`gh-ost` reads event from the binary log and applies them onto the _ghost_ table. It does so in batched writes: grouping multiple events to apply in a single transaction. This gives better write throughput as we don't need to sync the transaction log to disk for each event.

The `--dml-batch-size` flag controls the size of the batched write. Allowed values are `1 - 100`, where `1` means no batching (every event from the binary log is applied onto the _ghost_ table on its own transaction). Default value is `10`.

Why is this behavior configurable? Different workloads have different characteristics. Some workloads have very large writes, such that aggregating even `50` writes into a transaction makes for a significant transaction size. On other workloads write rate is high such that one just can't allow for a hundred more syncs to disk per second. The default value of `10` is a modest compromise that should probably work very well for most workloads. Your mileage may vary.

Noteworthy is that setting `--dml-batch-size` to higher value _does not_ mean `gh-ost` blocks or waits on writes. The batch size is an upper limit on transaction size, not a minimal one. If `gh-ost` doesn't have "enough" events in the pipe, it does not wait on the binary log, it just writes what it already has. This conveniently suggests that if write load is light enough for `gh-ost` to only see a few events in the binary log at a given time, then it is also light enough for `gh-ost` to apply a fraction of the batch size.
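
For illustration, a sketch of raising the batch size on a write-heavy workload (the value is hypothetical):

```shell
# Apply up to 50 binlog events per transaction on the ghost table
gh-ost \
  --dml-batch-size=50 \
  [-- other parameters you need]
```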

### exact-rowcount

A `gh-ost` execution need to copy whatever rows you have in your existing table onto the ghost table. This can and often will be, a large number. Exactly what that number is?
A `gh-ost` execution need to copy whatever rows you have in your existing table onto the ghost table. This can, and often be, a large number. Exactly what that number is?
`gh-ost` initially estimates the number of rows in your table by issuing an `explain select * from your_table`. This will use statistics on your table and return with a rough estimate. How rough? It might go as low as half or as high as double the actual number of rows in your table. This is the same method as used in [`pt-online-schema-change`](https://www.percona.com/doc/percona-toolkit/2.2/pt-online-schema-change.html).

`gh-ost` also supports the `--exact-rowcount` flag. When this flag is given, two things happen:
- An initial, authoritative `select count(*) from your_table`.
  This query may take a long time to complete, but is performed before we begin the massive operations.
  When [`--concurrent-rowcount`](#concurrent-rowcount) is also specified, this runs in parallel to row copy.
  Note: [`--concurrent-rowcount`](#concurrent-rowcount) now defaults to `true`.
  When `--concurrent-rowcount` is also specified, this runs in parallel to row copy.
  Note: `--concurrent-rowcount` now defaults to `true`.
- A continuous update to the estimate as we make progress applying events.
  We heuristically update the number of rows based on the queries we process from the binlogs.
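
A small sketch for illustration (database and table names are hypothetical):

```shell
# Use an exact, rather than estimated, row count for progress reporting
gh-ost \
  --exact-rowcount \
  --concurrent-rowcount \
  --database=mydb --table=mytable \
  [-- other parameters you need]
```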

@@ -144,30 +82,6 @@ While the ongoing estimated number of rows is still heuristic, it's almost exact

Without this parameter, migration is a _noop_: testing table creation and validity of migration, but not touching data.

### force-named-cut-over

If given, a `cut-over` command must name the migrated table, or else ignored.

### force-named-panic

If given, a `panic` command must name the migrated table, or else ignored.

### force-table-names

Table name prefix to be used on the temporary tables.

### gcp

Add this flag when executing on a 1st generation Google Cloud Platform (GCP).

### heartbeat-interval-millis

Default 100. See [`subsecond-lag`](subsecond-lag.md) for details.

### hooks-status-interval

Defaults to 60 seconds. Configures how often the `gh-ost-on-status` hook is called, see [`hooks`](hooks.md) for full details on how to use hooks.

### initially-drop-ghost-table

`gh-ost` maintains two tables while migrating: the _ghost_ table (which is synced from your original table and finally replaces it) and a changelog table, which is used internally for bookkeeping. By default, it panics and aborts if it sees those tables upon startup. Provide `--initially-drop-ghost-table` and `--initially-drop-old-table` to let `gh-ost` know it's OK to drop them beforehand.

@@ -176,119 +90,29 @@ We think `gh-ost` should not take chances or make assumptions about the user's t

### initially-drop-old-table

See [`initially-drop-ghost-table`](#initially-drop-ghost-table)

### initially-drop-socket-file

Default False. Should `gh-ost` forcibly delete an existing socket file. Be careful: this might drop the socket file of a running migration!
See #initially-drop-ghost-table

### max-lag-millis

On a replication topology, this is perhaps the most important migration throttling factor: the maximum lag allowed for migration to work. If lag exceeds this value, migration throttles.

When using [Connect to replica, migrate on master](cheatsheet.md#a-connect-to-replica-migrate-on-master), this lag is primarily tested on the very replica `gh-ost` operates on. Lag is measured by checking the heartbeat events injected by `gh-ost` itself on the utility changelog table. That is, to measure this replica's lag, `gh-ost` doesn't need to issue `show slave status` nor have any external heartbeat mechanism.
When using [Connect to replica, migrate on master](cheatsheet.md), this lag is primarily tested on the very replica `gh-ost` operates on. Lag is measured by checking the heartbeat events injected by `gh-ost` itself on the utility changelog table. That is, to measure this replica's lag, `gh-ost` doesn't need to issue `show slave status` nor have any external heartbeat mechanism.

When [`--throttle-control-replicas`](#throttle-control-replicas) is provided, throttling also considers lag on specified hosts. Lag measurements on listed hosts is done by querying `gh-ost`'s _changelog_ table, where `gh-ost` injects a heartbeat.
When `--throttle-control-replicas` is provided, throttling also considers lag on specified hosts. Measuring lag on these hosts works as follows:

- If `--replication-lag-query` is provided, use the query, trust its result to indicate lag seconds (fraction, i.e. float, allowed)
- Otherwise, issue `show slave status` and read `Seconds_behind_master` (`1sec` granularity)

See also: [Sub-second replication lag throttling](subsecond-lag.md)
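
For illustration, a sketch of lag-based throttling against several replicas (hostnames are hypothetical):

```shell
# Throttle whenever any listed replica lags more than 1.5 seconds
gh-ost \
  --max-lag-millis=1500 \
  --throttle-control-replicas="replica1.example.com:3306,replica2.example.com:3306" \
  [-- other parameters you need]
```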

### max-load

List of metrics and threshold values; topping the threshold of any will cause throttler to kick in. See also: [`throttling`](throttle.md#status-thresholds)

### migrate-on-replica

Typically `gh-ost` is used to migrate tables on a master. If you wish to only perform the migration in full on a replica, connect `gh-ost` to said replica and pass `--migrate-on-replica`. `gh-ost` will briefly connect to the master but otherwise will make no changes on the master. Migration will be fully executed on the replica, while making sure to maintain a small replication lag.

### postpone-cut-over-flag-file

Indicate a file name, such that the final [cut-over](cut-over.md) step does not take place as long as the file exists.
When this flag is set, `gh-ost` expects the file to exist on startup, or else tries to create it. `gh-ost` exits with error if the file does not exist and `gh-ost` is unable to create it.
With this flag set, the migration will cut-over upon deletion of the file or upon `cut-over` [interactive command](interactive-commands.md).
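
A usage sketch (the file path is hypothetical):

```shell
# Run the migration but hold the final cut-over while the flag file exists
gh-ost \
  --postpone-cut-over-flag-file=/tmp/ghost.postpone.flag \
  --execute \
  [-- other parameters you need]

# ...later, when ready to cut over:
rm /tmp/ghost.postpone.flag
```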

### replica-server-id

Defaults to 99999. If you run multiple migrations then you must provide a different, unique `--replica-server-id` for each `gh-ost` process.
Optionally involve the process ID, for example: `--replica-server-id=$((1000000000+$$))`.

It's on you to choose a number that does not collide with another `gh-ost` or another running replica.
See also: [`concurrent-migrations`](cheatsheet.md#concurrent-migrations) on the cheatsheet.

### serve-socket-file

Defaults to an auto-determined and advertised upon startup file. Defines Unix socket file to serve on.
### skip-foreign-key-checks

By default `gh-ost` verifies no foreign keys exist on the migrated table. On servers with large number of tables this check can take a long time. If you're absolutely certain no foreign keys exist (table does not reference other table nor is referenced by other tables) and wish to save the check time, provide with `--skip-foreign-key-checks`.

### skip-strict-mode

By default `gh-ost` enforces STRICT_ALL_TABLES sql_mode as a safety measure. In some cases this changes the behaviour of other modes (namely ERROR_FOR_DIVISION_BY_ZERO, NO_ZERO_DATE, and NO_ZERO_IN_DATE) which may lead to errors during migration. Use `--skip-strict-mode` to explicitly tell `gh-ost` not to enforce this. **Danger** This may have some unexpected disastrous side effects.
Typically `gh-ost` is used to migrate tables on a master. If you wish to only perform the migration in full on a replica, connect `gh-ost` to said replica and pass `--migrate-on-replica`. `gh-ost` will briefly connect to the master but other issue no changes on the master. Migration will be fully executed on the replica, while making sure to maintain a small replication lag.

### skip-renamed-columns

See [`approve-renamed-columns`](#approve-renamed-columns)

### ssl

By default `gh-ost` does not use ssl/tls connections to the database servers when performing migrations. This flag instructs `gh-ost` to use encrypted connections. If enabled, `gh-ost` will use the system's ca certificate pool for server certificate verification. If a different certificate is needed for server verification, see `--ssl-ca`. If you wish to skip server verification, but still use encrypted connections, use with `--ssl-allow-insecure`.

### ssl-allow-insecure

Allows `gh-ost` to connect to the MySQL servers using encrypted connections, but without verifying the validity of the certificate provided by the server during the connection. Requires `--ssl`.

### ssl-ca

`--ssl-ca=/path/to/ca-cert.pem`: ca certificate file (in PEM format) to use for server certificate verification. If specified, the default system ca cert pool will not be used for verification, only the ca cert provided here. Requires `--ssl`.

### ssl-cert

`--ssl-cert=/path/to/ssl-cert.crt`: SSL public key certificate file (in PEM format).

### ssl-key

`--ssl-key=/path/to/ssl-key.key`: SSL private key file (in PEM format).
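
For illustration, a sketch requesting encrypted connections with a custom CA (file paths are hypothetical):

```shell
# Use TLS for MySQL connections, verifying servers against the given CA
gh-ost \
  --ssl \
  --ssl-ca=/path/to/ca-cert.pem \
  --ssl-cert=/path/to/ssl-cert.crt \
  --ssl-key=/path/to/ssl-key.key \
  [-- other parameters you need]
```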

### storage-engine
Default is `innodb`, and `rocksdb` support is currently experimental. InnoDB and RocksDB are both transactional engines, supporting both shared and exclusive row locks.

But RocksDB currently lacks a few features support compared to InnoDB:
- Gap Locks
- Foreign Key
- Generated Columns
- Spatial
- Geometry

When `--storage-engine=rocksdb`, `gh-ost` will make some changes necessary (e.g. sets isolation level to `READ_COMMITTED`) to support RocksDB.
See `approve-renamed-columns`

### test-on-replica

Issue the migration on a replica; do not modify data on master. Useful for validating, testing and benchmarking. See [`testing-on-replica`](testing-on-replica.md)

### test-on-replica-skip-replica-stop

Default `False`. When `--test-on-replica` is enabled, do not issue commands stop replication (requires `--test-on-replica`).

### throttle-control-replicas

Provide a command delimited list of replicas; `gh-ost` will throttle when any of the given replicas lag beyond [`--max-lag-millis`](#max-lag-millis). The list can be queried and updated dynamically via [interactive commands](interactive-commands.md)

### throttle-http

Provide an HTTP endpoint; `gh-ost` will issue `HEAD` requests on given URL and throttle whenever response status code is not `200`. The URL can be queried and updated dynamically via [interactive commands](interactive-commands.md). Empty URL disables the HTTP check.

### throttle-http-interval-millis

Defaults to 100. Configures the HTTP throttle check interval in milliseconds.

### throttle-http-timeout-millis

Defaults to 1000 (1 second). Configures the HTTP throttler check timeout in milliseconds.

### timestamp-old-table

Makes the _old_ table include a timestamp value. The _old_ table is what the original table is renamed to at the end of a successful migration. For example, if the table is `gh_ost_test`, then the _old_ table would normally be `_gh_ost_test_del`. With `--timestamp-old-table` it would be, for example, `_gh_ost_test_20170221103147_del`.

### tungsten

See [`tungsten`](cheatsheet.md#tungsten) on the cheatsheet.
Issue the migration on a replica; do not modify data on master. Useful for validating, testing and benchmarking. See [testing-on-replica](testing-on-replica.md)

@@ -44,7 +44,6 @@ The full list of supported hooks is best found in code: [hooks.go](https://githu
- `gh-ost-on-interactive-command`
- `gh-ost-on-row-copy-complete`
- `gh-ost-on-stop-replication`
- `gh-ost-on-start-replication`
- `gh-ost-on-begin-postponed`
- `gh-ost-on-before-cut-over`
- `gh-ost-on-success`

@@ -65,17 +64,10 @@ The following variables are available on all hooks:
- `GH_OST_ELAPSED_COPY_SECONDS` - row-copy time (excluding startup, row-count and postpone time)
- `GH_OST_ESTIMATED_ROWS` - estimated total rows in table
- `GH_OST_COPIED_ROWS` - number of rows copied by `gh-ost`
- `GH_OST_INSPECTED_LAG` - lag in seconds (floating point) of inspected server
- `GH_OST_HEARTBEAT_LAG` - lag in seconds (floating point) of heartbeat
- `GH_OST_PROGRESS` - progress pct ([0..100], floating point) of migration
- `GH_OST_ETA_SECONDS` - estimated duration until migration finishes in seconds
- `GH_OST_MIGRATED_HOST`
- `GH_OST_INSPECTED_HOST`
- `GH_OST_EXECUTING_HOST`
- `GH_OST_HOOKS_HINT` - copy of `--hooks-hint` value
- `GH_OST_HOOKS_HINT_OWNER` - copy of `--hooks-hint-owner` value
- `GH_OST_HOOKS_HINT_TOKEN` - copy of `--hooks-hint-token` value
- `GH_OST_DRY_RUN` - whether or not the `gh-ost` run is a dry run
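
For illustration, a hook is simply an executable file that receives these variables in its environment; a minimal sketch of a `gh-ost-on-status` hook (the log path is hypothetical) might be:

```shell
#!/bin/bash
# Hypothetical gh-ost-on-status hook: append progress to a log file.
# The GH_OST_* variables listed above are provided in the hook's environment.
echo "$(date +%FT%T) host=${GH_OST_MIGRATED_HOST} copied=${GH_OST_COPIED_ROWS}/${GH_OST_ESTIMATED_ROWS} progress=${GH_OST_PROGRESS}% eta=${GH_OST_ETA_SECONDS}s" >> /tmp/gh-ost-status.log
```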

The following variable are available on particular hooks:

@@ -17,11 +17,7 @@ Both interfaces may serve at the same time. Both respond to simple text command,
- `help`: shows a brief list of available commands
- `status`: returns a detailed status summary of migration progress and configuration
- `sup`: returns a brief status summary of migration progress
- `coordinates`: returns recent (though not exactly up to date) binary log coordinates of the inspected server
- `applier`: returns the hostname of the applier
- `inspector`: returns the hostname of the inspector
- `chunk-size=<newsize>`: modify the `chunk-size`; applies on next running copy-iteration
- `dml-batch-size=<newsize>`: modify the `dml-batch-size`; applies on next applying of binary log events
- `max-lag-millis=<max-lag>`: modify the maximum replication lag threshold (milliseconds, minimum value is `100`, i.e. `0.1` second)
- `max-load=<max-load-thresholds>`: modify the `max-load` config; applies on next running copy-iteration
  - The `max-load` format must be: `some_status=<numeric-threshold>[,some_status=<numeric-threshold>...]`'
@@ -30,12 +26,11 @@ Both interfaces may serve at the same time. Both respond to simple text command,
  - The `critical-load` format must be: `some_status=<numeric-threshold>[,some_status=<numeric-threshold>...]`'
  - For example: `Threads_running=1000,threads_connected=5000`, and you would then write/echo `critical-load=Threads_running=1000,threads_connected=5000` to the socket.
- `nice-ratio=<ratio>`: change _nice_ ratio: 0 for aggressive (not nice, not sleeping), positive integer `n`:
  - For any `1ms` spent copying rows, spend `n*1ms` units of time sleeping.
  - Examples: assume a single rows chunk copy takes `100ms` to complete.
  - `nice-ratio=0.5` will cause `gh-ost` to sleep for `50ms` immediately following.
  - For any `1ms` spent copying rows, spend `n*1ms` units of time sleeping.
  - Examples: assume a single rows chunk copy takes `100ms` to complete.
  - `nice-ratio=0.5` will cause `gh-ost` to sleep for `50ms` immediately following.
  - `nice-ratio=1` will cause `gh-ost` to sleep for `100ms`, effectively doubling runtime
  - value of `2` will effectively triple the runtime; etc.
- `throttle-http`: change throttle HTTP endpoint
- `throttle-query`: change throttle query
- `throttle-control-replicas='replica1,replica2'`: change list of throttle-control replicas, these are replicas `gh-ost` will check. This takes a comma separated list of replica's to check and replaces the previous list.
- `throttle`: force migration suspend
@@ -43,10 +38,6 @@ Both interfaces may serve at the same time. Both respond to simple text command,
- `unpostpone`: at a time where `gh-ost` is postponing the [cut-over](cut-over.md) phase, instruct `gh-ost` to stop postponing and proceed immediately to cut-over.
- `panic`: immediately panic and abort operation
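
For illustration, a sketch of sending such commands over the serving socket (the socket path mirrors the examples below; replica hostnames are hypothetical):

```shell
# Slow the migration down: sleep 0.5ms for every 1ms spent copying rows
echo "nice-ratio=0.5" | nc -U /tmp/gh-ost.test.sample_data_0.sock

# Replace the list of replicas checked for lag
echo "throttle-control-replicas=replica1:3306,replica2:3306" | nc -U /tmp/gh-ost.test.sample_data_0.sock
```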

### Querying for data

For commands that accept an argument as value, pass `?` (question mark) to _get_ current value rather than _set_ a new one.

### Examples

While migration is running:

@@ -55,7 +46,7 @@ While migration is running:
$ echo status | nc -U /tmp/gh-ost.test.sample_data_0.sock
# Migrating `test`.`sample_data_0`; Ghost table is `test`.`_sample_data_0_gst`
# Migration started at Tue Jun 07 11:45:16 +0200 2016
# chunk-size: 200; max lag: 1500ms; dml-batch-size: 10; max-load: map[Threads_connected:20]
# chunk-size: 200; max lag: 1500ms; max-load: map[Threads_connected:20]
# Throttle additional flag file: /tmp/gh-ost.throttle
# Serving on unix socket: /tmp/gh-ost.test.sample_data_0.sock
# Serving on TCP port: 10001
@@ -66,17 +57,12 @@ Copy: 0/2915 0.0%; Applied: 0; Backlog: 0/100; Elapsed: 40s(copy), 41s(total); s
$ echo "chunk-size=250" | nc -U /tmp/gh-ost.test.sample_data_0.sock
# Migrating `test`.`sample_data_0`; Ghost table is `test`.`_sample_data_0_gst`
# Migration started at Tue Jun 07 11:56:03 +0200 2016
# chunk-size: 250; max lag: 1500ms; dml-batch-size: 10; max-load: map[Threads_connected:20]
# chunk-size: 250; max lag: 1500ms; max-load: map[Threads_connected:20]
# Throttle additional flag file: /tmp/gh-ost.throttle
# Serving on unix socket: /tmp/gh-ost.test.sample_data_0.sock
# Serving on TCP port: 10001
```

```shell
$ echo "chunk-size=?" | nc -U /tmp/gh-ost.test.sample_data_0.sock
250
```

```shell
$ echo throttle | nc -U /tmp/gh-ost.test.sample_data_0.sock

@@ -1,36 +0,0 @@
# How?

### How does the cut-over work? Is it really atomic?

The cut-over phase, where the original table is swapped away, and the _ghost_ table takes its place, is an atomic, blocking, controlled operation.

- Atomic: the tables are swapped together. There is no gap where your table does not exist.
- Blocking: all app queries involving the migrated (original) table are either operate on the original table, or are blocked, or proceed to operate on the _new_ table (formerly the _ghost_ table, now swapped in).
- Controlled: the cut-over times out at pre-defined threshold, and is atomically aborted, then re-attempted. Cut-over only takes place when no lags are present, and otherwise no throttling reason is found. Cut-over step itself gets high priority and is never throttled.

Read more on [cut-over](cut-over.md) and on the [cut-over design Issue](https://github.com/github/gh-ost/issues/82)


# Is it possible to?

### Is it possible to add a UNIQUE KEY?

Adding a `UNIQUE KEY` is possible, in the condition that no violation will occur. That is, you must make sure there aren't any violating rows on your table before, and during the migration.

At this time there is no equivalent to `ALTER IGNORE`, where duplicates are implicitly and silently thrown away. The MySQL `5.7` docs say:

> As of MySQL 5.7.4, the IGNORE clause for ALTER TABLE is removed and its use produces an error.

It is therefore unlikely that `gh-ost` will support this behavior.

### Run concurrent migrations?

Yes. TL;DR if running all on same replica/master, make sure to provide `--replica-server-id`. [Read more](cheatsheet.md#concurrent-migrations)

# Why

### Why Is the "Connect to Replica" mode preferred?

To avoid placing extra load on the master. `gh-ost` connects as a replication client. Each additional replica adds some load to the master.

To monitor replication lag from a replica. This makes the replication lag throttle, `--max-lag-millis`, more representative of the lag experienced by other replicas following the master (perhaps N levels deep in a tree of replicas).

doc/rds.md: 55 lines changed

@@ -1,55 +0,0 @@
`gh-ost` has been updated to work with Amazon RDS however due to GitHub not using AWS for databases, this documentation is community driven so if you find a bug please [open an issue][new_issue]!

# Amazon RDS

## Limitations

- No `SUPER` privileges.
- `gh-ost` runs should be setup use [`--assume-rbr`][assume_rbr_docs] and use `binlog_format=ROW`.
- Aurora does not allow editing of the `read_only` parameter. While it is defined as `{TrueIfReplica}`, the parameter is non-modifiable field.

## Aurora

#### Replication

In Aurora replication, you have separate reader and writer endpoints however because the cluster shares the underlying storage layer, `gh-ost` will detect it is running on the master. This becomes an issue when you wish to use [migrate/test on replica][migrate_test_on_replica_docs] because you won't be able to use a single cluster in the same way you would with MySQL RDS.

To work around this, you can follow along the [AWS replication between clusters documentation][aws_replication_docs] for Aurora with one small caveat. For the "Create a Snapshot of Your Replication Master" step, the binlog position is not available in the AWS console. You will need to issue the SQL query `SHOW SLAVE STATUS` or `aws rds describe-events` API call to get the correct position.

#### Percona Toolkit

If you use `pt-table-checksum` as a part of your data integrity checks, you might want to check out [this patch][percona_toolkit_patch] which will enable you to run `pt-table-checksum` with the `--no-binlog-format-check` flag and prevent errors like the following:

```
03-24T12:51:06 Failed to /*!50108 SET @@binlog_format := 'STATEMENT'*/: DBD::mysql::db do failed: Access denied; you need (at least one of) the SUPER privilege(s) for this operation [for Statement "/*!50108 SET @@binlog_format := 'STATEMENT'*/"] at pt-table-checksum line 9292.

This tool requires binlog_format=STATEMENT, but the current binlog_format is set to ROW and an error occurred while attempting to change it. If running MySQL 5.1.29 or newer, setting binlog_format requires the SUPER privilege. You will need to manually set binlog_format to 'STATEMENT' before running this tool.
```

#### Binlog filtering

In Aurora, the [binlog filtering feature][aws_replication_docs_bin_log_filtering] is enabled by default. This becomes an issue when gh-ost tries to do the cut-over, because gh-ost waits for an entry in the binlog to proceed but this entry will never end up in the binlog because it gets filtered out by the binlog filtering feature.
You need to turn this feature off during the migration process.
Set the `aurora_enable_repl_bin_log_filtering` parameter to 0 in the Parameter Group for your cluster.
When the migration is done, set it back to 1 (default).
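
For illustration, a sketch of toggling that parameter with the AWS CLI, assuming a (hypothetical) cluster parameter group named `my-aurora-cluster-params`:

```shell
# Disable Aurora binlog filtering for the duration of the migration
aws rds modify-db-cluster-parameter-group \
  --db-cluster-parameter-group-name my-aurora-cluster-params \
  --parameters "ParameterName=aurora_enable_repl_bin_log_filtering,ParameterValue=0,ApplyMethod=immediate"

# When the migration is done, set it back to 1 (the default)
aws rds modify-db-cluster-parameter-group \
  --db-cluster-parameter-group-name my-aurora-cluster-params \
  --parameters "ParameterName=aurora_enable_repl_bin_log_filtering,ParameterValue=1,ApplyMethod=immediate"
```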

#### Preflight checklist

Before trying to run any `gh-ost` migrations you will want to confirm the following:

- [ ] You have a secondary cluster available that will act as a replica. Rule of thumb here has been a 1 instance per cluster to mimic MySQL-style replication as opposed to Aurora style.
- [ ] The database instance parameters and database cluster parameters are consistent between your master and replicas
- [ ] Executing `SHOW SLAVE STATUS\G` on your replica cluster displays the correct master host, binlog position, etc.
- [ ] Database backup retention is greater than 1 day to enable binlogs
- [ ] You have setup [`hooks`][ghost_hooks] to issue RDS procedures for stopping and starting replication. (see [github/gh-ost#163][ghost_rds_issue_tracking] for examples)
- [ ] The parameter `aurora_enable_repl_bin_log_filtering` is set to 0

[new_issue]: https://github.com/github/gh-ost/issues/new
[assume_rbr_docs]: https://github.com/github/gh-ost/blob/master/doc/command-line-flags.md#assume-rbr
[migrate_test_on_replica_docs]: https://github.com/github/gh-ost/blob/master/doc/cheatsheet.md#c-migratetest-on-replica
[aws_replication_docs]: http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Aurora.Overview.Replication.MySQLReplication.html
[percona_toolkit_patch]: https://github.com/jacobbednarz/percona-toolkit/commit/0271ba6a094da446a5e5bb8d99b5c26f1777f2b9
[ghost_hooks]: https://github.com/github/gh-ost/blob/master/doc/hooks.md
[ghost_rds_issue_tracking]: https://github.com/github/gh-ost/issues/163
[aws_replication_docs_bin_log_filtering]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Replication.html#AuroraMySQL.Replication.Performance
@ -2,8 +2,6 @@
|
||||
|
||||
### Requirements
|
||||
|
||||
- `gh-ost` currently requires MySQL versions 5.7 and greater.
|
||||
|
||||
- You will need to have one server serving Row Based Replication (RBR) format binary logs. Right now `FULL` row image is supported. `MINIMAL` to be supported in the near future. `gh-ost` prefers to work with replicas. You may [still have your master configured with Statement Based Replication](migrating-with-sbr.md) (SBR).
|
||||
|
||||
- If you are using a replica, the table must have an identical schema between the master and replica.
|
||||
@ -20,40 +18,34 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
|
||||
- Switching your `binlog_format` to `ROW`, in the case where it is _not_ `ROW` and you explicitly specified `--switch-to-rbr`
|
||||
- If your replication is already in RBR (`binlog_format=ROW`) you can specify `--assume-rbr` to avoid the `STOP SLAVE/START SLAVE` operations, hence no need for `SUPER`.
|
||||
|
||||
- `gh-ost` uses the `REPEATABLE_READ` transaction isolation level for all MySQL connections, regardless of the server default.
|
||||
|
||||
- Running `--test-on-replica`: before the cut-over phase, `gh-ost` stops replication so that you can compare the two tables and satisfy that the migration is sound.
|
||||
|
||||
### Limitations
|
||||
|
||||
- Foreign key constraints are not supported. They may be supported in the future, to some extent.
|
||||
- Foreign keys not supported. They may be supported in the future, to some extent.
|
||||
|
||||
- Triggers are not supported. They may be supported in the future.
|
||||
|
||||
- MySQL 5.7 `JSON` columns are supported but not as part of `PRIMARY KEY`
|
||||
- MySQL 5.7 generated columns are not supported. They may be supported in the future.
|
||||
|
||||
- The two _before_ & _after_ tables must share a `PRIMARY KEY` or other `UNIQUE KEY`. This key will be used by `gh-ost` to iterate through the table rows when copying. [Read more](shared-key.md)
|
||||
- The migration key must not include columns with NULL values. This means either:
|
||||
1. The columns are `NOT NULL`, or
|
||||
2. The columns are nullable but don't contain any NULL values.
|
||||
- by default, `gh-ost` will not run if the only `UNIQUE KEY` includes nullable columns.
|
||||
- You may override this via `--allow-nullable-unique-key` but make sure there are no actual `NULL` values in those columns. Existing NULL values can't guarantee data integrity on the migrated table.
|
||||
- MySQL 5.7 `JSON` columns are not supported. They are likely to be supported shortly.
|
||||
|
||||
- The two _before_ & _after_ tables must share some `UNIQUE KEY`. Such key would be used by `gh-ost` to iterate the table.
|
||||
- As an example, if your table has a single `UNIQUE KEY` and no `PRIMARY KEY`, and you wish to replace it with a `PRIMARY KEY`, you will need two migrations: one to add the `PRIMARY KEY` (this migration will use the existing `UNIQUE KEY`), another to drop the now redundant `UNIQUE KEY` (this migration will use the `PRIMARY KEY`).
|
||||
|
||||
- The chosen migration key must not include columns with `NULL` values.
|
||||
- `gh-ost` will do its best to pick a migration key with non-nullable columns. It will by default refuse a migration where the only possible `UNIQUE KEY` includes nullable-columns. You may override this refusal via `--allow-nullable-unique-key` but **you must** be sure there are no actual `NULL` values in those columns. Such `NULL` values would cause a data integrity problem and potentially a corrupted migration.
|
||||
|
||||
- It is not allowed to migrate a table where another table exists with same name and different upper/lower case.
|
||||
- For example, you may not migrate `MyTable` if another table called `MYtable` exists in the same schema.
|
||||
|
||||
- Amazon RDS works, but has its own [limitations](rds.md).
|
||||
- Google Cloud SQL works, `--gcp` flag required.
|
||||
- Aliyun RDS works, `--aliyun-rds` flag required.
|
||||
- Azure Database for MySQL works, `--azure` flag required; see [azure.md](azure.md) for a detailed document about it.
|
||||
- Amazon RDS and Google Cloud SQL are currently not supported
|
||||
- We began working towards removing this limitation. See tracking issue: https://github.com/github/gh-ost/issues/163
|
||||
|
||||
- Multisource is not supported when migrating via replica. It _should_ work (but has never been tested) when connecting directly to master (`--allow-on-master`).
|
||||
|
||||
- Master-master setups are only supported in active-passive mode. Active-active (where the table is being written to on both masters concurrently) is unsupported. It may be supported in the future.
|
||||
|
||||
- If you have an `enum` field as part of your migration key (typically the `PRIMARY KEY`), migration performance will be degraded and potentially bad. [Read more](https://github.com/github/gh-ost/pull/277#issuecomment-254811520)
|
||||
- If you have en `enum` field as part of your migration key (typically the `PRIMARY KEY`), migration performance will be degraded and potentially bad. [Read more](https://github.com/github/gh-ost/pull/277#issuecomment-254811520)
|
||||
|
||||
- Migrating a `FEDERATED` table is unsupported and is irrelevant to the problem `gh-ost` tackles.
|
||||
|
||||
- [Encrypted binary logs](https://www.percona.com/blog/2018/03/08/binlog-encryption-percona-server-mysql/) are not supported.
|
||||
- `ALTER TABLE ... RENAME TO some_other_name` is not supported (and you shouldn't use `gh-ost` for such a trivial operation).
|
||||
- Migrating a `FEDERATED` table is unsupported and is irrelevant to the problem `gh-ost` tackles.
|
||||
|
@ -1,86 +0,0 @@
|
||||
# Shared key
|
||||
|
||||
gh-ost requires for every migration that both the _before_ and _after_ versions of the table share the same unique not-null key columns. This page illustrates this rule.
|
||||
|
||||
### Introduction
|
||||
|
||||
Consider a simple migration, with a normal table,
|
||||
|
||||
```sql
|
||||
CREATE TABLE tbl (
|
||||
id bigint unsigned not null auto_increment,
|
||||
data varchar(255),
|
||||
more_data int,
|
||||
PRIMARY KEY(id)
|
||||
)
|
||||
```
|
||||
|
||||
and the migration `add column ts timestamp`. The _after_ table version would be:
|
||||
|
||||
```sql
|
||||
CREATE TABLE tbl (
|
||||
id bigint unsigned not null auto_increment,
|
||||
data varchar(255),
|
||||
more_data int,
|
||||
ts timestamp,
|
||||
PRIMARY KEY(id)
|
||||
)
|
||||
```
|
||||
|
||||
(This is also the definition of the _ghost_ table, except that that table would be called `_tbl_gho`).
|
||||
|
||||
In this migration, the _before_ and _after_ versions contain the same unique not-null key (the PRIMARY KEY). To run this migration, `gh-ost` would iterate through the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` in primary key order, while also applying the binlog event writes from `tbl` onto `_tbl_gho`.
|
||||
|
||||
The applying of the binlog events is what requires the shared unique key. For example, an `UPDATE` statement to `tbl` translates to a `REPLACE` statement which `gh-ost` applies to `_tbl_gho`. A `REPLACE` statement expects to insert or replace an existing row based on its row's values and the table's unique key constraints. In particular, if inserting that row would result in a unique key violation (e.g., a row with that primary key already exists), it would _replace_ that existing row with the new values.
|
||||
|
||||
So `gh-ost` correlates `tbl` and `_tbl_gho` rows one to one using a unique key. In the above example that would be the `PRIMARY KEY`.
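
To make the translation concrete, here is an illustrative rendering (not `gh-ost`'s literal internal statements) of how a single row `UPDATE` on `tbl` maps onto the ghost table:

```sql
-- A row update captured from tbl's binary log (row image)...
UPDATE tbl SET data = 'new value' WHERE id = 7;

-- ...is applied to the ghost table as a REPLACE over the shared columns,
-- keyed by the shared unique key (here, the PRIMARY KEY on id). If _tbl_gho
-- already has a row with id = 7 it is replaced; otherwise the row is inserted.
REPLACE INTO _tbl_gho (id, data, more_data) VALUES (7, 'new value', 3);
```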
|
||||
|
||||
### Interpreting the rule
|
||||
|
||||
The _before_ and _after_ versions of the table share the same unique not-null key, but:
|
||||
- the key doesn't have to be the PRIMARY KEY
|
||||
- the key can have a different name between the _before_ and _after_ versions (e.g., renamed via DROP INDEX and ADD INDEX) so long as it contains the exact same column(s)
|
||||
|
||||
At the start of the migration, `gh-ost` inspects both the original and _ghost_ table it created, and attempts to find at least one such unique key (or rather, a set of columns) that is shared between the two. Typically this would just be the `PRIMARY KEY`, but some tables don't have primary keys, or sometimes it is the primary key that is being modified by the migration. In these cases `gh-ost` will look for other options.
|
||||
|
||||
`gh-ost` expects unique keys where no `NULL` values are found, i.e. all columns contained in the unique key are defined as `NOT NULL`. This is implicitly true for primary keys. If no such key can be found, `gh-ost` bails out.
|
||||
|
||||
If the table contains a unique key with nullable columns, but you know your columns contain no `NULL` values, use the `--allow-nullable-unique-key` option. The migration will run well as long as no `NULL` values are found in the unique key's columns. **Any actual `NULL`s may corrupt the migration.**
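
If you are unsure whether a nullable candidate key actually contains `NULL`s, a quick check along these lines (table and column names are illustrative) is worthwhile before reaching for `--allow-nullable-unique-key`; keep in mind that rows written during the migration could still introduce new `NULL`s:

```sql
-- Must return 0 before relying on --allow-nullable-unique-key.
SELECT COUNT(*) AS null_key_rows
FROM some_table
WHERE nullable_key_column IS NULL;
```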
|
||||
|
||||
### Examples: Allowed and Not Allowed
|
||||
|
||||
```sql
|
||||
create table some_table (
|
||||
id int not null auto_increment,
|
||||
ts timestamp,
|
||||
name varchar(128) not null,
|
||||
owner_id int not null,
|
||||
loc_id int not null,
|
||||
primary key(id),
|
||||
unique key name_uidx(name)
|
||||
)
|
||||
```
|
||||
|
||||
Note the two unique, not-null indexes: the primary key and `name_uidx`.
|
||||
|
||||
Allowed migrations:
|
||||
|
||||
- `add column i int`
|
||||
- `add key owner_idx (owner_id)`
|
||||
- `add unique key owner_name_idx (owner_id, name)` - **be careful not to write conflicting rows while this migration runs**
|
||||
- `drop key name_uidx` - `primary key` is shared between the tables
|
||||
- `drop primary key, add primary key(owner_id, loc_id)` - `name_uidx` is shared between the tables
|
||||
- `change id bigint unsigned not null auto_increment` - the `primary key` changes datatype but not value, and can be used
|
||||
- `drop primary key, drop key name_uidx, add primary key(name), add unique key id_uidx(id)` - swapping the two keys. Either `id` or `name` could be used
|
||||
|
||||
Not allowed:
|
||||
|
||||
- `drop primary key, drop key name_uidx` - the _ghost_ table has no unique key
|
||||
- `drop primary key, drop key name_uidx, create primary key(name, owner_id)` - no shared columns to the unique keys on both tables. Even though `name` exists in the _ghost_ table's `primary key`, it is only part of the key and in itself does not guarantee uniqueness in the _ghost_ table.
|
||||
|
||||
|
||||
### Workarounds
|
||||
|
||||
If you need to change your primary key or your only not-null unique index to use different columns, you will want to do it as two separate migrations (a concrete sketch follows the steps):
|
||||
1. `ADD UNIQUE KEY temp_pk (temp_pk_column,...)`
|
||||
1. `DROP PRIMARY KEY, DROP KEY temp_pk, ADD PRIMARY KEY (temp_pk_column,...)`
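
As a concrete sketch, using the `some_table` example above and assuming you want the key to become `(owner_id, loc_id)` (each statement would be passed to `gh-ost` via `--alter`, one migration at a time, not executed directly):

```sql
-- Migration 1: introduce a temporary unique key over the target columns.
ALTER TABLE some_table ADD UNIQUE KEY temp_pk (owner_id, loc_id);

-- Migration 2: move the PRIMARY KEY onto those columns and drop the temporary key.
ALTER TABLE some_table DROP PRIMARY KEY, DROP KEY temp_pk, ADD PRIMARY KEY (owner_id, loc_id);
```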
|
@ -2,7 +2,7 @@
|
||||
|
||||
`gh-ost` is able to utilize sub-second replication lag measurements.
|
||||
|
||||
At GitHub, small replication lag is crucial, and we like to keep it below `1s` at all times.
|
||||
At GitHub, small replication lag is crucial, and we like to keep it below `1s` at all times. If you have similar concern, we strongly urge you to proceed to implement sub-second lag throttling.
|
||||
|
||||
`gh-ost` will do sub-second throttling when `--max-lag-millis` is smaller than `1000`, i.e. smaller than `1sec`.
|
||||
Replication lag is measured on:
|
||||
@ -10,10 +10,24 @@ Replication lag is measured on:
|
||||
- The "inspected" server (the server `gh-ost` connects to; replica is desired but not mandatory)
|
||||
- The `throttle-control-replicas` list
|
||||
|
||||
In both cases, `gh-ost` uses an internal heartbeat mechanism. It injects heartbeat events onto the utility changelog table, then reads those entries on replicas, and compares times. This measurement is on by default and by definition supports sub-second resolution.
|
||||
For the inspected server, `gh-ost` uses an internal heartbeat mechanism. It injects heartbeat events onto the utility changelog table, then reads those events in the binary log, and compares times. This measurement is by default and by definition sub-second enabled.
|
||||
|
||||
You can explicitly define how frequently `gh-ost` will inject heartbeat events, via `heartbeat-interval-millis`. You should set `heartbeat-interval-millis <= max-lag-millis`. It still works if not, but loses granularity and effect.
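
For intuition, the comparison behind the measurement amounts to something like the following sketch (a hypothetical heartbeat table, not `gh-ost`'s actual changelog schema):

```sql
-- ts holds the master-side time at which the latest heartbeat event was written.
-- Reading it on a replica and subtracting from NOW(6) yields sub-second lag.
SELECT (UNIX_TIMESTAMP(NOW(6)) - UNIX_TIMESTAMP(MAX(ts))) * 1000 AS lag_millis
FROM my_heartbeat;
```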
|
||||
|
||||
In earlier versions, the `--throttle-control-replicas` list was subjected to `1` second resolution or to 3rd party heartbeat injections such as `pt-heartbeat`. This is no longer the case. The argument `--replication-lag-query` has been deprecated and is no longer needed.
|
||||
On the `throttle-control-replicas`, `gh-ost` only issues SQL queries, and does not attempt to read the binary log stream. Perhaps those other replicas don't have binary logs in the first place.
|
||||
|
||||
Our production migrations use sub-second lag throttling and are able to keep our entire fleet of replicas well below `1sec` lag. We use `--heartbeat-interval-millis=100` on our production migrations with a `--max-lag-millis` value of between `300` and `500`.
|
||||
The standard way of getting replication lag on a replica is to issue `SHOW SLAVE STATUS`, then reading `Seconds_behind_master` value. But that value has a `1sec` granularity.
|
||||
|
||||
To be able to throttle on your production replicas fleet when replication lag exceeds a sub-second threshold, you must provide with a `replication-lag-query` that returns a sub-second resolution lag.
|
||||
|
||||
As a common example, many use [pt-heartbeat](https://www.percona.com/doc/percona-toolkit/2.2/pt-heartbeat.html) to inject heartbeat events on the master. You would issue something like:
|
||||
|
||||
/usr/bin/pt-heartbeat -- -D your_schema --create-table --update --replace --interval=0.1 --daemonize --pid ...
|
||||
|
||||
Note `--interval=0.1` to indicate `10` heartbeats per second.
|
||||
|
||||
You would then provide
|
||||
|
||||
gh-ost ... --replication-lag-query="select unix_timestamp(now(6)) - unix_timestamp(ts) as ghost_lag_check from your_schema.heartbeat order by ts desc limit 1"
|
||||
|
||||
Our production migrations use sub-second lag throttling and are able to keep our entire fleet of replicas well below `1sec` lag.
|
||||
|
@ -28,7 +28,15 @@ Otherwise you may specify your own list of replica servers you wish it to observ
|
||||
|
||||
- `--max-lag-millis`: maximum allowed lag; any controlled replica lagging more than this value will cause throttling to kick in. When all control replicas have smaller lag than indicated, operation resumes.
|
||||
|
||||
Note that you may dynamically change both `--max-lag-millis` and the `throttle-control-replicas` list via [interactive commands](interactive-commands.md)
|
||||
- `--replication-lag-query`: `gh-ost` will, by default, issue a `show slave status` query to find replication lag. However, this is a notoriously flaky value. If you're using your own `heartbeat` mechanism, e.g. via [`pt-heartbeat`](https://www.percona.com/doc/percona-toolkit/2.2/pt-heartbeat.html), you may provide your own custom query to return a single decimal (floating point) value indicating replication lag.
|
||||
|
||||
Example: `--replication-lag-query="SELECT UNIX_TIMESTAMP() - MAX(UNIX_TIMESTAMP(ts)) AS lag FROM mydb.heartbeat"`
|
||||
|
||||
We encourage you to use [sub-second replication lag throttling](subsecond-lag.md). Your query may then look like:
|
||||
|
||||
`--replication-lag-query="SELECT UNIX_TIMESTAMP(6) - MAX(UNIX_TIMESTAMP(ts)) AS lag FROM mydb.heartbeat"`
|
||||
|
||||
Note that you may dynamically change both `replication-lag-query` and the `throttle-control-replicas` list via [interactive commands](interactive-commands.md)
|
||||
|
||||
#### Status thresholds
|
||||
|
||||
@ -38,7 +46,7 @@ Note that you may dynamically change both `--max-lag-millis` and the `throttle-c
|
||||
|
||||
`--max-load='Threads_running=100,Threads_connected=500'`
|
||||
|
||||
Metrics must be valid, numeric [status variables](https://dev.mysql.com/doc/refman/5.7/en/server-status-variables.html)
|
||||
Metrics must be valid, numeric [status variables](http://dev.mysql.com/doc/refman/5.6/en/server-status-variables.html)
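
Before picking thresholds, it can help to look at the live values of candidate status variables on the migrated server (standard MySQL statements, nothing `gh-ost`-specific):

```sql
SHOW GLOBAL STATUS LIKE 'Threads_running';
SHOW GLOBAL STATUS LIKE 'Threads_connected';
```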
|
||||
|
||||
#### Throttle query
|
||||
|
||||
@ -46,14 +54,6 @@ Note that you may dynamically change both `--max-lag-millis` and the `throttle-c
|
||||
|
||||
An example query could be: `--throttle-query="select hour(now()) between 8 and 17"`, which implies throttling auto-starts at `8:00am` and migration auto-resumes at `6:00pm`.
|
||||
|
||||
#### HTTP Throttle
|
||||
|
||||
The `--throttle-http` flag allows for throttling via HTTP. Every 100ms `gh-ost` issues a `HEAD` request to the provided URL. If the response status code is not `200` throttling will kick in until a `200` response status code is returned.
|
||||
|
||||
If no URL is provided or the URL provided doesn't contain the scheme then the HTTP check will be disabled. For example `--throttle-http="http://1.2.3.4:6789/throttle"` will enable the HTTP check/throttling, but `--throttle-http="1.2.3.4:6789/throttle"` will not.
|
||||
|
||||
The URL can be queried and updated dynamically via [interactive interface](interactive-commands.md).
|
||||
|
||||
#### Manual control
|
||||
|
||||
In addition to the above, you are able to take control and throttle the operation any time you like.
|
||||
@ -97,7 +97,7 @@ Copy: 0/2915 0.0%; Applied: 0; Backlog: 0/100; Elapsed: 42s(copy), 42s(total); s
|
||||
|
||||
Throttling time is limited by the availability of the binary logs. When throttling begins, `gh-ost` suspends reading the binary logs, and expects to resume reading from same binary log where it paused.
|
||||
|
||||
Your availability of binary logs is typically determined by the [expire_logs_days](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_expire_logs_days) variable. If you have `expire_logs_days = 10` (or check `select @@global.expire_logs_days`), then you should be able to throttle for up to `10` days.
|
||||
Your availability of binary logs is typically determined by the [expire_logs_days](https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_expire_logs_days) variable. If you have `expire_logs_days = 10` (or check `select @@global.expire_logs_days`), then you should be able to throttle for up to `10` days.
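
You can verify the retention setting and the binary logs currently available with standard statements:

```sql
SELECT @@global.expire_logs_days;  -- retention in days; 0 means no automatic expiry
SHOW BINARY LOGS;                  -- binlog files currently available on this server
```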
|
||||
|
||||
Having said that, throttling for so long is far-fetched, in that the `gh-ost` process itself must be kept alive during that time; and the amount of binary logs to process once it resumes will potentially take days to replay.
|
||||
|
||||
|
@ -112,7 +112,7 @@ It is also interesting to observe that `gh-ost` is the only application writing
|
||||
|
||||
When `gh-ost` pauses (throttles), it issues no writes on the ghost table. Because there are no triggers, write workload is decoupled from the `gh-ost` write workload. And because we're using an asynchronous approach, the algorithm already handles a time difference between a master write time and the ghost apply time. A difference of a few microseconds is no different from a difference of minutes or hours.
|
||||
|
||||
When `gh-ost` [throttles](throttle.md), either by replication lag, `max-load` setting or an explicit [interactive user command](interactive-commands.md), the master is back to normal. It sees no more writes on the ghost table.
|
||||
When `gh-ost` [throttles](throttle.md), either by replication lag, `max-load` setting or and explicit [interactive user command](interactive-commands.md), the master is back to normal. It sees no more writes on the ghost table.
|
||||
An exception is the ongoing heartbeat writes onto the changelog table, which we consider to be negligible.
|
||||
|
||||
#### Testability
|
||||
|
@ -24,15 +24,15 @@ Initial output lines may look like this:
|
||||
2016-05-19 17:57:11 INFO connection validated on 127.0.0.1:3306
|
||||
2016-05-19 17:57:11 INFO rotate to next log name: mysql-bin.002587
|
||||
2016-05-19 17:57:11 INFO connection validated on 127.0.0.1:3306
|
||||
2016-05-19 17:57:11 INFO Dropping table `mydb`.`_mytable_gst`
|
||||
2016-05-19 17:57:11 INFO Droppping table `mydb`.`_mytable_gst`
|
||||
2016-05-19 17:57:11 INFO Table dropped
|
||||
2016-05-19 17:57:11 INFO Dropping table `mydb`.`_mytable_old`
|
||||
2016-05-19 17:57:11 INFO Droppping table `mydb`.`_mytable_old`
|
||||
2016-05-19 17:57:11 INFO Table dropped
|
||||
2016-05-19 17:57:11 INFO Creating ghost table `mydb`.`_mytable_gst`
|
||||
2016-05-19 17:57:11 INFO Ghost table created
|
||||
2016-05-19 17:57:11 INFO Altering ghost table `mydb`.`_mytable_gst`
|
||||
2016-05-19 17:57:11 INFO Ghost table altered
|
||||
2016-05-19 17:57:11 INFO Dropping table `mydb`.`_mytable_osc`
|
||||
2016-05-19 17:57:11 INFO Droppping table `mydb`.`_mytable_osc`
|
||||
2016-05-19 17:57:11 INFO Table dropped
|
||||
2016-05-19 17:57:11 INFO Creating changelog table `mydb`.`_mytable_osc`
|
||||
2016-05-19 17:57:11 INFO Changelog table created
|
||||
|
@ -7,7 +7,7 @@ Existing MySQL schema migration tools:
|
||||
- [LHM](https://github.com/soundcloud/lhm)
|
||||
- [oak-online-alter-table](https://github.com/shlomi-noach/openarkkit)
|
||||
|
||||
are all using [triggers](https://dev.mysql.com/doc/refman/5.7/en/triggers.html) to propagate live changes on your table onto a ghost/shadow table that is slowly being synchronized. The tools not all work the same: while most use a synchronous approach (all changes applied on the ghost table), the Facebook tool uses an asynchronous approach (changes are appended to a changelog table, later reviewed and applied on ghost table).
|
||||
are all using [triggers](http://dev.mysql.com/doc/refman/5.6/en/triggers.html) to propagate live changes on your table onto a ghost/shadow table that is slowly being synchronized. The tools not all work the same: while most use a synchronous approach (all changes applied on the ghost table), the Facebook tool uses an asynchronous approach (changes are appended to a changelog table, later reviewed and applied on ghost table).
|
||||
|
||||
Use of triggers simplifies a lot of the flow in doing a live table migration, but also poses some limitations or difficulties. Here are reasons why we choose to [design a triggerless solution](triggerless-design.md) to schema migrations.
|
||||
|
||||
@ -16,7 +16,7 @@ Use of triggers simplifies a lot of the flow in doing a live table migration, bu
|
||||
|
||||
Triggers are stored routines which are invoked on a per-row operation upon `INSERT`, `DELETE`, `UPDATE` on a table.
|
||||
They were introduced in MySQL `5.0`.
|
||||
A trigger may contain a set of queries, and these queries run in the same transaction space as the query that manipulates the table. This makes for an atomicity of both the original operation on the table and the trigger-invoked operations.
|
||||
A trigger may contain a set of queries, and these queries run in the same transaction space as the query that manipulates the table. This makes for an atomicy of both the original operation on the table and the trigger-invoked operations.
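
For illustration, the kind of trigger such tools install looks roughly like this (names and columns are illustrative, not any specific tool's implementation):

```sql
-- Propagate every INSERT on the original table onto the shadow table,
-- within the same transaction as the original statement.
CREATE TRIGGER tbl_after_insert AFTER INSERT ON tbl FOR EACH ROW
  REPLACE INTO _tbl_shadow (id, data, more_data)
  VALUES (NEW.id, NEW.data, NEW.more_data);
```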
|
||||
|
||||
### Triggers, overhead
|
||||
|
||||
|
@ -1,7 +0,0 @@
|
||||
version: "3.5"
|
||||
services:
|
||||
app:
|
||||
image: app
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.test
|
27
go.mod
27
go.mod
@ -1,27 +0,0 @@
|
||||
module github.com/github/gh-ost
|
||||
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/go-ini/ini v1.62.0
|
||||
github.com/go-mysql-org/go-mysql v1.3.0
|
||||
github.com/go-sql-driver/mysql v1.6.0
|
||||
github.com/openark/golib v0.0.0-20210531070646-355f37940af8
|
||||
github.com/satori/go.uuid v1.2.0
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
|
||||
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7
|
||||
golang.org/x/text v0.3.6
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 // indirect
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
|
||||
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect
|
||||
github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 // indirect
|
||||
github.com/smartystreets/goconvey v1.6.4 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||
gopkg.in/ini.v1 v1.62.0 // indirect
|
||||
)
|
136
go.sum
136
go.sum
@ -1,136 +0,0 @@
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
|
||||
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
|
||||
github.com/cznic/parser v0.0.0-20160622100904-31edd927e5b1/go.mod h1:2B43mz36vGZNZEwkWi8ayRSSUXLfjL8OkbzwW4NcPMM=
|
||||
github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
|
||||
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
|
||||
github.com/cznic/y v0.0.0-20170802143616-045f81c6662a/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/go-ini/ini v1.62.0 h1:7VJT/ZXjzqSrvtraFp4ONq80hTcRQth1c9ZnQ3uNQvU=
|
||||
github.com/go-ini/ini v1.62.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-mysql-org/go-mysql v1.3.0 h1:lpNqkwdPzIrYSZGdqt8HIgAXZaK6VxBNfr8f7Z4FgGg=
|
||||
github.com/go-mysql-org/go-mysql v1.3.0/go.mod h1:3lFZKf7l95Qo70+3XB2WpiSf9wu2s3na3geLMaIIrqQ=
|
||||
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/openark/golib v0.0.0-20210531070646-355f37940af8 h1:9ciIHNuyFqRWi9NpMNw9sVLB6z1ItpP5ZhTY9Q1xVu4=
|
||||
github.com/openark/golib v0.0.0-20210531070646-355f37940af8/go.mod h1:1jj8x1eDVZxgc/Z4VyamX4qTbAdHPUQA6NeVtCd8Sl8=
|
||||
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=
|
||||
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
|
||||
github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pingcap/errors v0.11.5-0.20201029093017-5a7df2af2ac7/go.mod h1:G7x87le1poQzLB/TqvTJI2ILrSgobnq4Ut7luOwvfvI=
|
||||
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 h1:LllgC9eGfqzkfubMgjKIDyZYaa609nNWAyNZtpy2B3M=
|
||||
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3/go.mod h1:G7x87le1poQzLB/TqvTJI2ILrSgobnq4Ut7luOwvfvI=
|
||||
github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
|
||||
github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
|
||||
github.com/pingcap/parser v0.0.0-20210415081931-48e7f467fd74/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=
|
||||
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
|
||||
github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 h1:oI+RNwuC9jF2g2lP0u0cVEEZrc/AYBCuFdvwrLWM/6Q=
|
||||
github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07/go.mod h1:yFdBgwXP24JziuRl2NMUahT7nGLNOKi1SIiFxMttVD4=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
|
||||
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM=
|
||||
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20201125231158-b5590deeca9b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
|
||||
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2022 GitHub Inc.
|
||||
Copyright 2016 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
@ -7,7 +7,6 @@ package base
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
@ -15,13 +14,11 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
uuid "github.com/satori/go.uuid"
|
||||
|
||||
"github.com/github/gh-ost/go/mysql"
|
||||
"github.com/github/gh-ost/go/sql"
|
||||
"github.com/openark/golib/log"
|
||||
|
||||
"github.com/go-ini/ini"
|
||||
"gopkg.in/gcfg.v1"
|
||||
gcfgscanner "gopkg.in/gcfg.v1/scanner"
|
||||
)
|
||||
|
||||
// RowsEstimateMethod is the type of row number estimation
|
||||
@ -29,29 +26,22 @@ type RowsEstimateMethod string
|
||||
|
||||
const (
|
||||
TableStatusRowsEstimate RowsEstimateMethod = "TableStatusRowsEstimate"
|
||||
ExplainRowsEstimate RowsEstimateMethod = "ExplainRowsEstimate"
|
||||
CountRowsEstimate RowsEstimateMethod = "CountRowsEstimate"
|
||||
ExplainRowsEstimate = "ExplainRowsEstimate"
|
||||
CountRowsEstimate = "CountRowsEstimate"
|
||||
)
|
||||
|
||||
type CutOver int
|
||||
|
||||
const (
|
||||
CutOverAtomic CutOver = iota
|
||||
CutOverTwoStep
|
||||
CutOverAtomic CutOver = iota
|
||||
CutOverTwoStep = iota
|
||||
)
|
||||
|
||||
type ThrottleReasonHint string
|
||||
|
||||
const (
|
||||
NoThrottleReasonHint ThrottleReasonHint = "NoThrottleReasonHint"
|
||||
UserCommandThrottleReasonHint ThrottleReasonHint = "UserCommandThrottleReasonHint"
|
||||
LeavingHibernationThrottleReasonHint ThrottleReasonHint = "LeavingHibernationThrottleReasonHint"
|
||||
)
|
||||
|
||||
const (
|
||||
HTTPStatusOK = 200
|
||||
MaxEventsBatchSize = 1000
|
||||
ETAUnknown = math.MinInt64
|
||||
NoThrottleReasonHint ThrottleReasonHint = "NoThrottleReasonHint"
|
||||
UserCommandThrottleReasonHint = "UserCommandThrottleReasonHint"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -75,76 +65,48 @@ func NewThrottleCheckResult(throttle bool, reason string, reasonHint ThrottleRea
|
||||
// MigrationContext has the general, global state of migration. It is used by
|
||||
// all components throughout the migration process.
|
||||
type MigrationContext struct {
|
||||
Uuid string
|
||||
DatabaseName string
|
||||
OriginalTableName string
|
||||
AlterStatement string
|
||||
|
||||
DatabaseName string
|
||||
OriginalTableName string
|
||||
AlterStatement string
|
||||
AlterStatementOptions string // anything following the 'ALTER TABLE [schema.]table' from AlterStatement
|
||||
|
||||
countMutex sync.Mutex
|
||||
countTableRowsCancelFunc func()
|
||||
CountTableRows bool
|
||||
ConcurrentCountTableRows bool
|
||||
AllowedRunningOnMaster bool
|
||||
AllowedMasterMaster bool
|
||||
SwitchToRowBinlogFormat bool
|
||||
AssumeRBR bool
|
||||
SkipForeignKeyChecks bool
|
||||
SkipStrictMode bool
|
||||
AllowZeroInDate bool
|
||||
NullableUniqueKeyAllowed bool
|
||||
ApproveRenamedColumns bool
|
||||
SkipRenamedColumns bool
|
||||
IsTungsten bool
|
||||
DiscardForeignKeys bool
|
||||
AliyunRDS bool
|
||||
GoogleCloudPlatform bool
|
||||
AzureMySQL bool
|
||||
AttemptInstantDDL bool
|
||||
|
||||
config ContextConfig
|
||||
configMutex *sync.Mutex
|
||||
ConfigFile string
|
||||
CliUser string
|
||||
CliPassword string
|
||||
UseTLS bool
|
||||
TLSAllowInsecure bool
|
||||
TLSCACertificate string
|
||||
TLSCertificate string
|
||||
TLSKey string
|
||||
CliMasterUser string
|
||||
CliMasterPassword string
|
||||
config ContextConfig
|
||||
configMutex *sync.Mutex
|
||||
ConfigFile string
|
||||
CliUser string
|
||||
CliPassword string
|
||||
|
||||
HeartbeatIntervalMilliseconds int64
|
||||
defaultNumRetries int64
|
||||
ChunkSize int64
|
||||
niceRatio float64
|
||||
MaxLagMillisecondsThrottleThreshold int64
|
||||
replicationLagQuery string
|
||||
throttleControlReplicaKeys *mysql.InstanceKeyMap
|
||||
ThrottleFlagFile string
|
||||
ThrottleAdditionalFlagFile string
|
||||
throttleQuery string
|
||||
throttleHTTP string
|
||||
IgnoreHTTPErrors bool
|
||||
ThrottleCommandedByUser int64
|
||||
HibernateUntil int64
|
||||
maxLoad LoadMap
|
||||
criticalLoad LoadMap
|
||||
CriticalLoadIntervalMilliseconds int64
|
||||
CriticalLoadHibernateSeconds int64
|
||||
PostponeCutOverFlagFile string
|
||||
CutOverLockTimeoutSeconds int64
|
||||
CutOverExponentialBackoff bool
|
||||
ExponentialBackoffMaxInterval int64
|
||||
ForceNamedCutOverCommand bool
|
||||
ForceNamedPanicCommand bool
|
||||
PanicFlagFile string
|
||||
HooksPath string
|
||||
HooksHintMessage string
|
||||
HooksHintOwner string
|
||||
HooksHintToken string
|
||||
HooksStatusIntervalSec int64
|
||||
|
||||
DropServeSocket bool
|
||||
ServeSocketFile string
|
||||
@ -157,9 +119,7 @@ type MigrationContext struct {
|
||||
OkToDropTable bool
|
||||
InitiallyDropOldTable bool
|
||||
InitiallyDropGhostTable bool
|
||||
TimestampOldTable bool // Should old table name include a timestamp
|
||||
CutOverType CutOver
|
||||
ReplicaServerId uint
|
||||
|
||||
Hostname string
|
||||
AssumeMasterHostname string
|
||||
@ -172,9 +132,7 @@ type MigrationContext struct {
|
||||
OriginalBinlogFormat string
|
||||
OriginalBinlogRowImage string
|
||||
InspectorConnectionConfig *mysql.ConnectionConfig
|
||||
InspectorMySQLVersion string
|
||||
ApplierConnectionConfig *mysql.ConnectionConfig
|
||||
ApplierMySQLVersion string
|
||||
StartTime time.Time
|
||||
RowCopyStartTime time.Time
|
||||
RowCopyEndTime time.Time
|
||||
@ -183,75 +141,39 @@ type MigrationContext struct {
|
||||
RenameTablesEndTime time.Time
|
||||
pointOfInterestTime time.Time
|
||||
pointOfInterestTimeMutex *sync.Mutex
|
||||
lastHeartbeatOnChangelogTime time.Time
|
||||
lastHeartbeatOnChangelogMutex *sync.Mutex
|
||||
CurrentLag int64
|
||||
currentProgress uint64
|
||||
etaNanoseonds int64
|
||||
ThrottleHTTPIntervalMillis int64
|
||||
ThrottleHTTPStatusCode int64
|
||||
ThrottleHTTPTimeoutMillis int64
|
||||
controlReplicasLagResult mysql.ReplicationLagResult
|
||||
TotalRowsCopied int64
|
||||
TotalDMLEventsApplied int64
|
||||
DMLBatchSize int64
|
||||
isThrottled bool
|
||||
throttleReason string
|
||||
throttleReasonHint ThrottleReasonHint
|
||||
throttleGeneralCheckResult ThrottleCheckResult
|
||||
throttleMutex *sync.Mutex
|
||||
throttleHTTPMutex *sync.Mutex
|
||||
IsPostponingCutOver int64
|
||||
CountingRowsFlag int64
|
||||
AllEventsUpToLockProcessedInjectedFlag int64
|
||||
CleanupImminentFlag int64
|
||||
UserCommandedUnpostponeFlag int64
|
||||
CutOverCompleteFlag int64
|
||||
InCutOverCriticalSectionFlag int64
|
||||
PanicAbort chan error
|
||||
|
||||
OriginalTableColumnsOnApplier *sql.ColumnList
|
||||
OriginalTableColumns *sql.ColumnList
|
||||
OriginalTableVirtualColumns *sql.ColumnList
|
||||
OriginalTableUniqueKeys [](*sql.UniqueKey)
|
||||
OriginalTableAutoIncrement uint64
|
||||
GhostTableColumns *sql.ColumnList
|
||||
GhostTableVirtualColumns *sql.ColumnList
|
||||
GhostTableUniqueKeys [](*sql.UniqueKey)
|
||||
UniqueKey *sql.UniqueKey
|
||||
SharedColumns *sql.ColumnList
|
||||
ColumnRenameMap map[string]string
|
||||
DroppedColumnsMap map[string]bool
|
||||
MappedSharedColumns *sql.ColumnList
|
||||
MigrationRangeMinValues *sql.ColumnValues
|
||||
MigrationRangeMaxValues *sql.ColumnValues
|
||||
Iteration int64
|
||||
MigrationIterationRangeMinValues *sql.ColumnValues
|
||||
MigrationIterationRangeMaxValues *sql.ColumnValues
|
||||
ForceTmpTableName string
|
||||
|
||||
recentBinlogCoordinates mysql.BinlogCoordinates
|
||||
|
||||
BinlogSyncerMaxReconnectAttempts int
|
||||
|
||||
Log Logger
|
||||
}
|
||||
|
||||
type Logger interface {
|
||||
Debug(args ...interface{})
|
||||
Debugf(format string, args ...interface{})
|
||||
Info(args ...interface{})
|
||||
Infof(format string, args ...interface{})
|
||||
Warning(args ...interface{}) error
|
||||
Warningf(format string, args ...interface{}) error
|
||||
Error(args ...interface{}) error
|
||||
Errorf(format string, args ...interface{}) error
|
||||
Errore(err error) error
|
||||
Fatal(args ...interface{}) error
|
||||
Fatalf(format string, args ...interface{}) error
|
||||
Fatale(err error) error
|
||||
SetLevel(level log.LogLevel)
|
||||
SetPrintStackTrace(printStackTraceFlag bool)
|
||||
CanStopStreaming func() bool
|
||||
}
|
||||
|
||||
type ContextConfig struct {
|
||||
@ -267,90 +189,55 @@ type ContextConfig struct {
|
||||
}
|
||||
}
|
||||
|
||||
func NewMigrationContext() *MigrationContext {
|
||||
var context *MigrationContext
|
||||
|
||||
func init() {
|
||||
context = newMigrationContext()
|
||||
}
|
||||
|
||||
func newMigrationContext() *MigrationContext {
|
||||
return &MigrationContext{
|
||||
Uuid: uuid.NewV4().String(),
|
||||
defaultNumRetries: 60,
|
||||
ChunkSize: 1000,
|
||||
InspectorConnectionConfig: mysql.NewConnectionConfig(),
|
||||
ApplierConnectionConfig: mysql.NewConnectionConfig(),
|
||||
MaxLagMillisecondsThrottleThreshold: 1500,
|
||||
CutOverLockTimeoutSeconds: 3,
|
||||
DMLBatchSize: 10,
|
||||
etaNanoseonds: ETAUnknown,
|
||||
maxLoad: NewLoadMap(),
|
||||
criticalLoad: NewLoadMap(),
|
||||
throttleMutex: &sync.Mutex{},
|
||||
throttleHTTPMutex: &sync.Mutex{},
|
||||
throttleControlReplicaKeys: mysql.NewInstanceKeyMap(),
|
||||
configMutex: &sync.Mutex{},
|
||||
pointOfInterestTimeMutex: &sync.Mutex{},
|
||||
lastHeartbeatOnChangelogMutex: &sync.Mutex{},
|
||||
ColumnRenameMap: make(map[string]string),
|
||||
PanicAbort: make(chan error),
|
||||
Log: NewDefaultLogger(),
|
||||
}
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetConnectionConfig(storageEngine string) error {
|
||||
var transactionIsolation string
|
||||
switch storageEngine {
|
||||
case "rocksdb":
|
||||
transactionIsolation = "READ-COMMITTED"
|
||||
default:
|
||||
transactionIsolation = "REPEATABLE-READ"
|
||||
}
|
||||
this.InspectorConnectionConfig.TransactionIsolation = transactionIsolation
|
||||
this.ApplierConnectionConfig.TransactionIsolation = transactionIsolation
|
||||
return nil
|
||||
}
|
||||
|
||||
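// getSafeTableName returns "_<baseName>_<suffix>", truncating baseName as needed so that the result fits within MySQL's maximum table name length.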
func getSafeTableName(baseName string, suffix string) string {
|
||||
name := fmt.Sprintf("_%s_%s", baseName, suffix)
|
||||
if len(name) <= mysql.MaxTableNameLength {
|
||||
return name
|
||||
}
|
||||
extraCharacters := len(name) - mysql.MaxTableNameLength
|
||||
return fmt.Sprintf("_%s_%s", baseName[0:len(baseName)-extraCharacters], suffix)
|
||||
// GetMigrationContext
|
||||
func GetMigrationContext() *MigrationContext {
|
||||
return context
|
||||
}
|
||||
|
||||
// GetGhostTableName generates the name of ghost table, based on original table name
|
||||
// or a given table name
|
||||
func (this *MigrationContext) GetGhostTableName() string {
|
||||
if this.ForceTmpTableName != "" {
|
||||
return getSafeTableName(this.ForceTmpTableName, "gho")
|
||||
} else {
|
||||
return getSafeTableName(this.OriginalTableName, "gho")
|
||||
}
|
||||
return fmt.Sprintf("_%s_gho", this.OriginalTableName)
|
||||
}
|
||||
|
||||
// GetOldTableName generates the name of the "old" table, into which the original table is renamed.
|
||||
func (this *MigrationContext) GetOldTableName() string {
|
||||
var tableName string
|
||||
if this.ForceTmpTableName != "" {
|
||||
tableName = this.ForceTmpTableName
|
||||
} else {
|
||||
tableName = this.OriginalTableName
|
||||
if this.TestOnReplica {
|
||||
return fmt.Sprintf("_%s_ght", this.OriginalTableName)
|
||||
}
|
||||
|
||||
if this.TimestampOldTable {
|
||||
t := this.StartTime
|
||||
timestamp := fmt.Sprintf("%d%02d%02d%02d%02d%02d",
|
||||
t.Year(), t.Month(), t.Day(),
|
||||
t.Hour(), t.Minute(), t.Second())
|
||||
return getSafeTableName(tableName, fmt.Sprintf("%s_del", timestamp))
|
||||
if this.MigrateOnReplica {
|
||||
return fmt.Sprintf("_%s_ghr", this.OriginalTableName)
|
||||
}
|
||||
return getSafeTableName(tableName, "del")
|
||||
return fmt.Sprintf("_%s_del", this.OriginalTableName)
|
||||
}
|
||||
|
||||
// GetChangelogTableName generates the name of changelog table, based on original table name
|
||||
// or a given table name.
|
||||
func (this *MigrationContext) GetChangelogTableName() string {
|
||||
if this.ForceTmpTableName != "" {
|
||||
return getSafeTableName(this.ForceTmpTableName, "ghc")
|
||||
} else {
|
||||
return getSafeTableName(this.OriginalTableName, "ghc")
|
||||
}
|
||||
return fmt.Sprintf("_%s_ghc", this.OriginalTableName)
|
||||
}
|
||||
|
||||
// GetVoluntaryLockName returns a name of a voluntary lock to be used throughout
|
||||
@ -410,14 +297,6 @@ func (this *MigrationContext) SetCutOverLockTimeoutSeconds(timeoutSeconds int64)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetExponentialBackoffMaxInterval(intervalSeconds int64) error {
|
||||
if intervalSeconds < 2 {
|
||||
return fmt.Errorf("Minimal maximum interval is 2sec. Timeout remains at %d", this.ExponentialBackoffMaxInterval)
|
||||
}
|
||||
this.ExponentialBackoffMaxInterval = intervalSeconds
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetDefaultNumRetries(retries int64) {
|
||||
this.throttleMutex.Lock()
|
||||
defer this.throttleMutex.Unlock()
|
||||
@ -443,44 +322,10 @@ func (this *MigrationContext) IsTransactionalTable() bool {
|
||||
{
|
||||
return true
|
||||
}
|
||||
case "rocksdb":
|
||||
{
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SetCountTableRowsCancelFunc sets the cancel function for the CountTableRows query context
|
||||
func (this *MigrationContext) SetCountTableRowsCancelFunc(f func()) {
|
||||
this.countMutex.Lock()
|
||||
defer this.countMutex.Unlock()
|
||||
|
||||
this.countTableRowsCancelFunc = f
|
||||
}
|
||||
|
||||
// IsCountingTableRows returns true if the migration has a table count query running
|
||||
func (this *MigrationContext) IsCountingTableRows() bool {
|
||||
this.countMutex.Lock()
|
||||
defer this.countMutex.Unlock()
|
||||
|
||||
return this.countTableRowsCancelFunc != nil
|
||||
}
|
||||
|
||||
// CancelTableRowsCount cancels the CountTableRows query context. It is safe to
|
||||
// call function even when IsCountingTableRows is false.
|
||||
func (this *MigrationContext) CancelTableRowsCount() {
|
||||
this.countMutex.Lock()
|
||||
defer this.countMutex.Unlock()
|
||||
|
||||
if this.countTableRowsCancelFunc == nil {
|
||||
return
|
||||
}
|
||||
|
||||
this.countTableRowsCancelFunc()
|
||||
this.countTableRowsCancelFunc = nil
|
||||
}
|
||||
|
||||
// ElapsedTime returns time since very beginning of the process
|
||||
func (this *MigrationContext) ElapsedTime() time.Duration {
|
||||
return time.Since(this.StartTime)
|
||||
@ -516,40 +361,6 @@ func (this *MigrationContext) MarkRowCopyEndTime() {
|
||||
this.RowCopyEndTime = time.Now()
|
||||
}
|
||||
|
||||
func (this *MigrationContext) TimeSinceLastHeartbeatOnChangelog() time.Duration {
|
||||
return time.Since(this.GetLastHeartbeatOnChangelogTime())
|
||||
}
|
||||
|
||||
func (this *MigrationContext) GetCurrentLagDuration() time.Duration {
|
||||
return time.Duration(atomic.LoadInt64(&this.CurrentLag))
|
||||
}
|
||||
|
||||
func (this *MigrationContext) GetProgressPct() float64 {
|
||||
return math.Float64frombits(atomic.LoadUint64(&this.currentProgress))
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetProgressPct(progressPct float64) {
|
||||
atomic.StoreUint64(&this.currentProgress, math.Float64bits(progressPct))
|
||||
}
|
||||
|
||||
func (this *MigrationContext) GetETADuration() time.Duration {
|
||||
return time.Duration(atomic.LoadInt64(&this.etaNanoseonds))
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetETADuration(etaDuration time.Duration) {
|
||||
atomic.StoreInt64(&this.etaNanoseonds, etaDuration.Nanoseconds())
|
||||
}
|
||||
|
||||
func (this *MigrationContext) GetETASeconds() int64 {
|
||||
nano := atomic.LoadInt64(&this.etaNanoseonds)
|
||||
if nano < 0 {
|
||||
return ETAUnknown
|
||||
}
|
||||
return nano / int64(time.Second)
|
||||
}
|
||||
|
||||
// math.Float64bits([f=0..100])
|
||||
|
||||
// GetTotalRowsCopied returns the accurate number of rows being copied (affected)
|
||||
// This is not exactly the same as the rows being iterated via chunks, but potentially close enough
|
||||
func (this *MigrationContext) GetTotalRowsCopied() int64 {
|
||||
@ -575,20 +386,6 @@ func (this *MigrationContext) TimeSincePointOfInterest() time.Duration {
|
||||
return time.Since(this.pointOfInterestTime)
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetLastHeartbeatOnChangelogTime(t time.Time) {
|
||||
this.lastHeartbeatOnChangelogMutex.Lock()
|
||||
defer this.lastHeartbeatOnChangelogMutex.Unlock()
|
||||
|
||||
this.lastHeartbeatOnChangelogTime = t
|
||||
}
|
||||
|
||||
func (this *MigrationContext) GetLastHeartbeatOnChangelogTime() time.Time {
|
||||
this.lastHeartbeatOnChangelogMutex.Lock()
|
||||
defer this.lastHeartbeatOnChangelogMutex.Unlock()
|
||||
|
||||
return this.lastHeartbeatOnChangelogTime
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetHeartbeatIntervalMilliseconds(heartbeatIntervalMilliseconds int64) {
|
||||
if heartbeatIntervalMilliseconds < 100 {
|
||||
heartbeatIntervalMilliseconds = 100
|
||||
@ -607,8 +404,8 @@ func (this *MigrationContext) SetMaxLagMillisecondsThrottleThreshold(maxLagMilli
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetChunkSize(chunkSize int64) {
|
||||
if chunkSize < 10 {
|
||||
chunkSize = 10
|
||||
if chunkSize < 100 {
|
||||
chunkSize = 100
|
||||
}
|
||||
if chunkSize > 100000 {
|
||||
chunkSize = 100000
|
||||
@ -616,16 +413,6 @@ func (this *MigrationContext) SetChunkSize(chunkSize int64) {
|
||||
atomic.StoreInt64(&this.ChunkSize, chunkSize)
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetDMLBatchSize(batchSize int64) {
|
||||
if batchSize < 1 {
|
||||
batchSize = 1
|
||||
}
|
||||
if batchSize > MaxEventsBatchSize {
|
||||
batchSize = MaxEventsBatchSize
|
||||
}
|
||||
atomic.StoreInt64(&this.DMLBatchSize, batchSize)
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetThrottleGeneralCheckResult(checkResult *ThrottleCheckResult) *ThrottleCheckResult {
|
||||
this.throttleMutex.Lock()
|
||||
defer this.throttleMutex.Unlock()
|
||||
@ -651,24 +438,33 @@ func (this *MigrationContext) SetThrottled(throttle bool, reason string, reasonH
|
||||
func (this *MigrationContext) IsThrottled() (bool, string, ThrottleReasonHint) {
|
||||
this.throttleMutex.Lock()
|
||||
defer this.throttleMutex.Unlock()
|
||||
|
||||
// we don't throttle when cutting over. We _do_ throttle:
|
||||
// - during copy phase
|
||||
// - just before cut-over
|
||||
// - in between cut-over retries
|
||||
// When cutting over, we need to be aggressive. Cut-over holds table locks.
|
||||
// We need to release those asap.
|
||||
if atomic.LoadInt64(&this.InCutOverCriticalSectionFlag) > 0 {
|
||||
return false, "critical section", NoThrottleReasonHint
|
||||
}
|
||||
return this.isThrottled, this.throttleReason, this.throttleReasonHint
|
||||
}
|
||||
|
||||
func (this *MigrationContext) GetThrottleQuery() string {
|
||||
func (this *MigrationContext) GetReplicationLagQuery() string {
|
||||
var query string
|
||||
|
||||
this.throttleMutex.Lock()
|
||||
defer this.throttleMutex.Unlock()
|
||||
|
||||
var query = this.throttleQuery
|
||||
query = this.replicationLagQuery
|
||||
return query
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetReplicationLagQuery(newQuery string) {
|
||||
this.throttleMutex.Lock()
|
||||
defer this.throttleMutex.Unlock()
|
||||
|
||||
this.replicationLagQuery = newQuery
|
||||
}
|
||||
|
||||
func (this *MigrationContext) GetThrottleQuery() string {
|
||||
var query string
|
||||
|
||||
this.throttleMutex.Lock()
|
||||
defer this.throttleMutex.Unlock()
|
||||
|
||||
query = this.throttleQuery
|
||||
return query
|
||||
}
|
||||
|
||||
@ -679,28 +475,6 @@ func (this *MigrationContext) SetThrottleQuery(newQuery string) {
|
||||
this.throttleQuery = newQuery
|
||||
}
|
||||
|
||||
func (this *MigrationContext) GetThrottleHTTP() string {
|
||||
this.throttleHTTPMutex.Lock()
|
||||
defer this.throttleHTTPMutex.Unlock()
|
||||
|
||||
var throttleHTTP = this.throttleHTTP
|
||||
return throttleHTTP
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetThrottleHTTP(throttleHTTP string) {
|
||||
this.throttleHTTPMutex.Lock()
|
||||
defer this.throttleHTTPMutex.Unlock()
|
||||
|
||||
this.throttleHTTP = throttleHTTP
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetIgnoreHTTPErrors(ignoreHTTPErrors bool) {
|
||||
this.throttleHTTPMutex.Lock()
|
||||
defer this.throttleHTTPMutex.Unlock()
|
||||
|
||||
this.IgnoreHTTPErrors = ignoreHTTPErrors
|
||||
}
|
||||
|
||||
func (this *MigrationContext) GetMaxLoad() LoadMap {
|
||||
this.throttleMutex.Lock()
|
||||
defer this.throttleMutex.Unlock()
|
||||
@ -735,19 +509,6 @@ func (this *MigrationContext) SetNiceRatio(newRatio float64) {
|
||||
this.niceRatio = newRatio
|
||||
}
|
||||
|
||||
func (this *MigrationContext) GetRecentBinlogCoordinates() mysql.BinlogCoordinates {
|
||||
this.throttleMutex.Lock()
|
||||
defer this.throttleMutex.Unlock()
|
||||
|
||||
return this.recentBinlogCoordinates
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetRecentBinlogCoordinates(coordinates mysql.BinlogCoordinates) {
|
||||
this.throttleMutex.Lock()
|
||||
defer this.throttleMutex.Unlock()
|
||||
this.recentBinlogCoordinates = coordinates
|
||||
}
|
||||
|
||||
// ReadMaxLoad parses the `--max-load` flag, which is in multiple key-value format,
|
||||
// such as: 'Threads_running=100,Threads_connected=500'
|
||||
// It only applies changes in case there's no parsing error.
|
||||
@ -789,11 +550,7 @@ func (this *MigrationContext) GetControlReplicasLagResult() mysql.ReplicationLag
|
||||
func (this *MigrationContext) SetControlReplicasLagResult(lagResult *mysql.ReplicationLagResult) {
|
||||
this.throttleMutex.Lock()
|
||||
defer this.throttleMutex.Unlock()
|
||||
if lagResult == nil {
|
||||
this.controlReplicasLagResult = *mysql.NewNoReplicationLagResult()
|
||||
} else {
|
||||
this.controlReplicasLagResult = *lagResult
|
||||
}
|
||||
this.controlReplicasLagResult = *lagResult
|
||||
}
|
||||
|
||||
func (this *MigrationContext) GetThrottleControlReplicaKeys() *mysql.InstanceKeyMap {
|
||||
@ -847,13 +604,6 @@ func (this *MigrationContext) ApplyCredentials() {
|
||||
}
|
||||
}
|
||||
|
||||
func (this *MigrationContext) SetupTLS() error {
|
||||
if this.UseTLS {
|
||||
return this.InspectorConnectionConfig.UseTLS(this.TLSCACertificate, this.TLSCertificate, this.TLSKey, this.TLSAllowInsecure)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadConfigFile attempts to read the config file, if it exists
|
||||
func (this *MigrationContext) ReadConfigFile() error {
|
||||
this.configMutex.Lock()
|
||||
@ -862,41 +612,12 @@ func (this *MigrationContext) ReadConfigFile() error {
|
||||
if this.ConfigFile == "" {
|
||||
return nil
|
||||
}
|
||||
cfg, err := ini.Load(this.ConfigFile)
|
||||
if err != nil {
|
||||
gcfg.RelaxedParserMode = true
|
||||
gcfgscanner.RelaxedScannerMode = true
|
||||
if err := gcfg.ReadFileInto(&this.config, this.ConfigFile); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cfg.Section("client").HasKey("user") {
|
||||
this.config.Client.User = cfg.Section("client").Key("user").String()
|
||||
}
|
||||
|
||||
if cfg.Section("client").HasKey("password") {
|
||||
this.config.Client.Password = cfg.Section("client").Key("password").String()
|
||||
}
|
||||
|
||||
if cfg.Section("osc").HasKey("chunk_size") {
|
||||
this.config.Osc.Chunk_Size, err = cfg.Section("osc").Key("chunk_size").Int64()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to read osc chunk size: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.Section("osc").HasKey("max_load") {
|
||||
this.config.Osc.Max_Load = cfg.Section("osc").Key("max_load").String()
|
||||
}
|
||||
|
||||
if cfg.Section("osc").HasKey("replication_lag_query") {
|
||||
this.config.Osc.Replication_Lag_Query = cfg.Section("osc").Key("replication_lag_query").String()
|
||||
}
|
||||
|
||||
if cfg.Section("osc").HasKey("max_lag_millis") {
|
||||
this.config.Osc.Max_Lag_Millis, err = cfg.Section("osc").Key("max_lag_millis").Int64()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to read max lag millis: %w", err)
|
||||
}
|
||||
}

	// We accept user & password in the form "${SOME_ENV_VARIABLE}" in which case we pull
	// the given variable from os env
	if submatch := envVariableRegexp.FindStringSubmatch(this.config.Client.User); len(submatch) > 1 {
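For context, ReadConfigFile above parses an ini-style file (the --conf flag). A minimal sketch of such a file, restricted to the keys handled in this hunk; the values are hypothetical, and user/password may also be given as ${SOME_ENV_VARIABLE} references resolved from the environment:

	[client]
	user = gh-ost
	password = ${GHOST_DB_PASSWORD}

	[osc]
	chunk_size = 2000
	max_load = Threads_running=100,Threads_connected=500
	max_lag_millis = 1500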
@ -1,122 +0,0 @@
|
||||
/*
|
||||
Copyright 2022 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package base
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/openark/golib/log"
|
||||
test "github.com/openark/golib/tests"
|
||||
)
|
||||
|
||||
func init() {
|
||||
log.SetLevel(log.ERROR)
|
||||
}
|
||||
|
||||
func TestGetTableNames(t *testing.T) {
|
||||
{
|
||||
context := NewMigrationContext()
|
||||
context.OriginalTableName = "some_table"
|
||||
test.S(t).ExpectEquals(context.GetOldTableName(), "_some_table_del")
|
||||
test.S(t).ExpectEquals(context.GetGhostTableName(), "_some_table_gho")
|
||||
test.S(t).ExpectEquals(context.GetChangelogTableName(), "_some_table_ghc")
|
||||
}
|
||||
{
|
||||
context := NewMigrationContext()
|
||||
context.OriginalTableName = "a123456789012345678901234567890123456789012345678901234567890"
|
||||
test.S(t).ExpectEquals(context.GetOldTableName(), "_a1234567890123456789012345678901234567890123456789012345678_del")
|
||||
test.S(t).ExpectEquals(context.GetGhostTableName(), "_a1234567890123456789012345678901234567890123456789012345678_gho")
|
||||
test.S(t).ExpectEquals(context.GetChangelogTableName(), "_a1234567890123456789012345678901234567890123456789012345678_ghc")
|
||||
}
|
||||
{
|
||||
context := NewMigrationContext()
|
||||
context.OriginalTableName = "a123456789012345678901234567890123456789012345678901234567890123"
|
||||
oldTableName := context.GetOldTableName()
|
||||
test.S(t).ExpectEquals(oldTableName, "_a1234567890123456789012345678901234567890123456789012345678_del")
|
||||
}
|
||||
{
|
||||
context := NewMigrationContext()
|
||||
context.OriginalTableName = "a123456789012345678901234567890123456789012345678901234567890123"
|
||||
context.TimestampOldTable = true
|
||||
longForm := "Jan 2, 2006 at 3:04pm (MST)"
|
||||
context.StartTime, _ = time.Parse(longForm, "Feb 3, 2013 at 7:54pm (PST)")
|
||||
oldTableName := context.GetOldTableName()
|
||||
test.S(t).ExpectEquals(oldTableName, "_a1234567890123456789012345678901234567890123_20130203195400_del")
|
||||
}
|
||||
{
|
||||
context := NewMigrationContext()
|
||||
context.OriginalTableName = "foo_bar_baz"
|
||||
context.ForceTmpTableName = "tmp"
|
||||
test.S(t).ExpectEquals(context.GetOldTableName(), "_tmp_del")
|
||||
test.S(t).ExpectEquals(context.GetGhostTableName(), "_tmp_gho")
|
||||
test.S(t).ExpectEquals(context.GetChangelogTableName(), "_tmp_ghc")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadConfigFile(t *testing.T) {
|
||||
{
|
||||
context := NewMigrationContext()
|
||||
context.ConfigFile = "/does/not/exist"
|
||||
if err := context.ReadConfigFile(); err == nil {
|
||||
t.Fatal("Expected .ReadConfigFile() to return an error, got nil")
|
||||
}
|
||||
}
|
||||
{
|
||||
f, err := ioutil.TempFile("", t.Name())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create tmp file: %v", err)
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
f.Write([]byte("[client]"))
|
||||
context := NewMigrationContext()
|
||||
context.ConfigFile = f.Name()
|
||||
if err := context.ReadConfigFile(); err != nil {
|
||||
t.Fatalf(".ReadConfigFile() failed: %v", err)
|
||||
}
|
||||
}
|
||||
{
|
||||
f, err := ioutil.TempFile("", t.Name())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create tmp file: %v", err)
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
f.Write([]byte("[client]\nuser=test\npassword=123456"))
|
||||
context := NewMigrationContext()
|
||||
context.ConfigFile = f.Name()
|
||||
if err := context.ReadConfigFile(); err != nil {
|
||||
t.Fatalf(".ReadConfigFile() failed: %v", err)
|
||||
}
|
||||
|
||||
if context.config.Client.User != "test" {
|
||||
t.Fatalf("Expected client user %q, got %q", "test", context.config.Client.User)
|
||||
} else if context.config.Client.Password != "123456" {
|
||||
t.Fatalf("Expected client password %q, got %q", "123456", context.config.Client.Password)
|
||||
}
|
||||
}
|
||||
{
|
||||
f, err := ioutil.TempFile("", t.Name())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create tmp file: %v", err)
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
f.Write([]byte("[osc]\nmax_load=10"))
|
||||
context := NewMigrationContext()
|
||||
context.ConfigFile = f.Name()
|
||||
if err := context.ReadConfigFile(); err != nil {
|
||||
t.Fatalf(".ReadConfigFile() failed: %v", err)
|
||||
}
|
||||
|
||||
if context.config.Osc.Max_Load != "10" {
|
||||
t.Fatalf("Expected osc 'max_load' %q, got %q", "10", context.config.Osc.Max_Load)
|
||||
}
|
||||
}
|
||||
}
|
@ -1,72 +0,0 @@
|
||||
/*
|
||||
Copyright 2022 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package base
|
||||
|
||||
import (
|
||||
"github.com/openark/golib/log"
|
||||
)
|
||||
|
||||
type simpleLogger struct{}
|
||||
|
||||
func NewDefaultLogger() *simpleLogger {
|
||||
return &simpleLogger{}
|
||||
}
|
||||
|
||||
func (*simpleLogger) Debug(args ...interface{}) {
|
||||
log.Debug(args[0].(string), args[1:])
|
||||
}
|
||||
|
||||
func (*simpleLogger) Debugf(format string, args ...interface{}) {
|
||||
log.Debugf(format, args...)
|
||||
}
|
||||
|
||||
func (*simpleLogger) Info(args ...interface{}) {
|
||||
log.Info(args[0].(string), args[1:])
|
||||
}
|
||||
|
||||
func (*simpleLogger) Infof(format string, args ...interface{}) {
|
||||
log.Infof(format, args...)
|
||||
}
|
||||
|
||||
func (*simpleLogger) Warning(args ...interface{}) error {
|
||||
return log.Warning(args[0].(string), args[1:])
|
||||
}
|
||||
|
||||
func (*simpleLogger) Warningf(format string, args ...interface{}) error {
|
||||
return log.Warningf(format, args...)
|
||||
}
|
||||
|
||||
func (*simpleLogger) Error(args ...interface{}) error {
|
||||
return log.Error(args[0].(string), args[1:])
|
||||
}
|
||||
|
||||
func (*simpleLogger) Errorf(format string, args ...interface{}) error {
|
||||
return log.Errorf(format, args...)
|
||||
}
|
||||
|
||||
func (*simpleLogger) Errore(err error) error {
|
||||
return log.Errore(err)
|
||||
}
|
||||
|
||||
func (*simpleLogger) Fatal(args ...interface{}) error {
|
||||
return log.Fatal(args[0].(string), args[1:])
|
||||
}
|
||||
|
||||
func (*simpleLogger) Fatalf(format string, args ...interface{}) error {
|
||||
return log.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
func (*simpleLogger) Fatale(err error) error {
|
||||
return log.Fatale(err)
|
||||
}
|
||||
|
||||
func (*simpleLogger) SetLevel(level log.LogLevel) {
|
||||
log.SetLevel(level)
|
||||
}
|
||||
|
||||
func (*simpleLogger) SetPrintStackTrace(printStackTraceFlag bool) {
|
||||
log.SetPrintStackTrace(printStackTraceFlag)
|
||||
}
|
@ -8,8 +8,8 @@ package base
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/openark/golib/log"
|
||||
test "github.com/openark/golib/tests"
|
||||
"github.com/outbrain/golib/log"
|
||||
test "github.com/outbrain/golib/tests"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2022 GitHub Inc.
|
||||
Copyright 2016 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
@ -11,10 +11,6 @@ import (
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
gosql "database/sql"
|
||||
|
||||
"github.com/github/gh-ost/go/mysql"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -25,7 +21,9 @@ func PrettifyDurationOutput(d time.Duration) string {
|
||||
if d < time.Second {
|
||||
return "0s"
|
||||
}
|
||||
return prettifyDurationRegexp.ReplaceAllString(d.String(), "")
|
||||
result := fmt.Sprintf("%s", d)
|
||||
result = prettifyDurationRegexp.ReplaceAllString(result, "")
|
||||
return result
|
||||
}
|
||||
|
||||
func FileExists(fileName string) bool {
|
||||
@ -35,14 +33,6 @@ func FileExists(fileName string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func TouchFile(fileName string) error {
|
||||
f, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE, 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
// StringContainsAll returns true if `s` contains all non empty given `substrings`
|
||||
// The function returns `false` if no non-empty arguments are given.
|
||||
func StringContainsAll(s string, substrings ...string) bool {
|
||||
@ -60,36 +50,3 @@ func StringContainsAll(s string, substrings ...string) bool {
|
||||
}
|
||||
return nonEmptyStringsFound
|
||||
}

func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext *MigrationContext, name string) (string, error) {
	versionQuery := `select @@global.version`
	var port, extraPort int
	var version string
	if err := db.QueryRow(versionQuery).Scan(&version); err != nil {
		return "", err
	}
	extraPortQuery := `select @@global.extra_port`
	if err := db.QueryRow(extraPortQuery).Scan(&extraPort); err != nil { // nolint:staticcheck
		// swallow this error. not all servers support extra_port
	}
	// AliyunRDS set users port to "NULL", replace it by gh-ost param
	// GCP set users port to "NULL", replace it by gh-ost param
	// Azure MySQL set users port to a different value by design, replace it by gh-ost para
	if migrationContext.AliyunRDS || migrationContext.GoogleCloudPlatform || migrationContext.AzureMySQL {
		port = connectionConfig.Key.Port
	} else {
		portQuery := `select @@global.port`
		if err := db.QueryRow(portQuery).Scan(&port); err != nil {
			return "", err
		}
	}

	if connectionConfig.Key.Port == port || (extraPort > 0 && connectionConfig.Key.Port == extraPort) {
		migrationContext.Log.Infof("%s connection validated on %+v", name, connectionConfig.Key)
		return version, nil
	} else if extraPort == 0 {
		return "", fmt.Errorf("Unexpected database port reported: %+v", port)
	} else {
		return "", fmt.Errorf("Unexpected database port reported: %+v / extra_port: %+v", port, extraPort)
	}
}
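A minimal usage sketch (hypothetical DSN and caller, not part of this diff) for the helper above, assuming a standard database/sql connection opened with the go-sql-driver/mysql driver:

	db, err := gosql.Open("mysql", "gh-ost:secret@tcp(replica.example.com:3306)/")
	if err != nil {
		return err
	}
	version, err := ValidateConnection(db, migrationContext.InspectorConnectionConfig, migrationContext, "inspector")
	if err != nil {
		return err
	}
	migrationContext.Log.Infof("connected to MySQL %s", version)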
|
@ -8,8 +8,8 @@ package base
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/openark/golib/log"
|
||||
test "github.com/openark/golib/tests"
|
||||
"github.com/outbrain/golib/log"
|
||||
test "github.com/outbrain/golib/tests"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -7,18 +7,17 @@ package binlog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/github/gh-ost/go/sql"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type EventDML string
|
||||
|
||||
const (
|
||||
NotDML EventDML = "NoDML"
|
||||
InsertDML EventDML = "Insert"
|
||||
UpdateDML EventDML = "Update"
|
||||
DeleteDML EventDML = "Delete"
|
||||
InsertDML = "Insert"
|
||||
UpdateDML = "Update"
|
||||
DeleteDML = "Delete"
|
||||
)
|
||||
|
||||
func ToEventDML(description string) EventDML {
|
||||
|
@ -26,7 +26,7 @@ func NewBinlogEntry(logFile string, logPos uint64) *BinlogEntry {
|
||||
return binlogEntry
|
||||
}
|
||||
|
||||
// NewBinlogEntryAt creates an empty, ready to go BinlogEntry object
|
||||
// NewBinlogEntry creates an empty, ready to go BinlogEntry object
|
||||
func NewBinlogEntryAt(coordinates mysql.BinlogCoordinates) *BinlogEntry {
|
||||
binlogEntry := &BinlogEntry{
|
||||
Coordinates: coordinates,
|
||||
@ -41,7 +41,7 @@ func (this *BinlogEntry) Duplicate() *BinlogEntry {
|
||||
return binlogEntry
|
||||
}
|
||||
|
||||
// String() returns a string representation of this binlog entry
|
||||
// Duplicate creates and returns a new binlog entry, with some of the attributes pre-assigned
|
||||
func (this *BinlogEntry) String() string {
|
||||
return fmt.Sprintf("[BinlogEntry at %+v; dml:%+v]", this.Coordinates, this.DmlEvent)
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2022 GitHub Inc.
|
||||
Copyright 2016 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
@ -9,17 +9,19 @@ import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/github/gh-ost/go/base"
|
||||
"github.com/github/gh-ost/go/mysql"
|
||||
"github.com/github/gh-ost/go/sql"
|
||||
|
||||
gomysql "github.com/go-mysql-org/go-mysql/mysql"
|
||||
"github.com/go-mysql-org/go-mysql/replication"
|
||||
"golang.org/x/net/context"
|
||||
"github.com/outbrain/golib/log"
|
||||
gomysql "github.com/siddontang/go-mysql/mysql"
|
||||
"github.com/siddontang/go-mysql/replication"
|
||||
)
|
||||
|
||||
const (
|
||||
serverId = 99999
|
||||
)
|
||||
|
||||
type GoMySQLReader struct {
|
||||
migrationContext *base.MigrationContext
|
||||
connectionConfig *mysql.ConnectionConfig
|
||||
binlogSyncer *replication.BinlogSyncer
|
||||
binlogStreamer *replication.BinlogStreamer
|
||||
@ -28,40 +30,33 @@ type GoMySQLReader struct {
|
||||
LastAppliedRowsEventHint mysql.BinlogCoordinates
|
||||
}
|
||||
|
||||
func NewGoMySQLReader(migrationContext *base.MigrationContext) *GoMySQLReader {
|
||||
connectionConfig := migrationContext.InspectorConnectionConfig
|
||||
return &GoMySQLReader{
|
||||
migrationContext: migrationContext,
|
||||
func NewGoMySQLReader(connectionConfig *mysql.ConnectionConfig) (binlogReader *GoMySQLReader, err error) {
|
||||
binlogReader = &GoMySQLReader{
|
||||
connectionConfig: connectionConfig,
|
||||
currentCoordinates: mysql.BinlogCoordinates{},
|
||||
currentCoordinatesMutex: &sync.Mutex{},
|
||||
binlogSyncer: replication.NewBinlogSyncer(replication.BinlogSyncerConfig{
|
||||
ServerID: uint32(migrationContext.ReplicaServerId),
|
||||
Flavor: gomysql.MySQLFlavor,
|
||||
Host: connectionConfig.Key.Hostname,
|
||||
Port: uint16(connectionConfig.Key.Port),
|
||||
User: connectionConfig.User,
|
||||
Password: connectionConfig.Password,
|
||||
TLSConfig: connectionConfig.TLSConfig(),
|
||||
UseDecimal: true,
|
||||
MaxReconnectAttempts: migrationContext.BinlogSyncerMaxReconnectAttempts,
|
||||
}),
|
||||
binlogSyncer: nil,
|
||||
binlogStreamer: nil,
|
||||
}
|
||||
binlogReader.binlogSyncer = replication.NewBinlogSyncer(serverId, "mysql")
|
||||
|
||||
return binlogReader, err
|
||||
}
|
||||

// ConnectBinlogStreamer
func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordinates) (err error) {
	if coordinates.IsEmpty() {
		return this.migrationContext.Log.Errorf("Empty coordinates at ConnectBinlogStreamer()")
		return log.Errorf("Emptry coordinates at ConnectBinlogStreamer()")
	}
	log.Infof("Registering replica at %+v:%+v", this.connectionConfig.Key.Hostname, uint16(this.connectionConfig.Key.Port))
	if err := this.binlogSyncer.RegisterSlave(this.connectionConfig.Key.Hostname, uint16(this.connectionConfig.Key.Port), this.connectionConfig.User, this.connectionConfig.Password); err != nil {
		return err
	}

	this.currentCoordinates = coordinates
	this.migrationContext.Log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates)
	// Start sync with specified binlog file and position
	this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{
		Name: this.currentCoordinates.LogFile,
		Pos:  uint32(this.currentCoordinates.LogPos),
	})
	log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates)
	// Start sync with sepcified binlog file and position
	this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{this.currentCoordinates.LogFile, uint32(this.currentCoordinates.LogPos)})

	return err
}
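A minimal consumption sketch (hypothetical caller) that ties the reader above together, following the newer constructor and streaming API shown on one side of this hunk:

	reader := NewGoMySQLReader(migrationContext)
	if err := reader.ConnectBinlogStreamer(startCoordinates); err != nil { // startCoordinates: a mysql.BinlogCoordinates obtained elsewhere
		return err
	}
	entriesChannel := make(chan *BinlogEntry)
	go func() {
		// StreamEvents blocks, pushing parsed row events onto the channel until canStopStreaming returns true
		_ = reader.StreamEvents(func() bool { return false }, entriesChannel)
	}()
	for entry := range entriesChannel {
		migrationContext.Log.Infof("binlog entry: %s", entry.String())
	}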
@ -76,7 +71,7 @@ func (this *GoMySQLReader) GetCurrentBinlogCoordinates() *mysql.BinlogCoordinate
|
||||
// StreamEvents
|
||||
func (this *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEvent *replication.RowsEvent, entriesChannel chan<- *BinlogEntry) error {
|
||||
if this.currentCoordinates.SmallerThanOrEquals(&this.LastAppliedRowsEventHint) {
|
||||
this.migrationContext.Log.Debugf("Skipping handled query at %+v", this.currentCoordinates)
|
||||
log.Debugf("Skipping handled query at %+v", this.currentCoordinates)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -111,8 +106,8 @@ func (this *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEven
|
||||
binlogEntry.DmlEvent.WhereColumnValues = sql.ToColumnValues(row)
|
||||
}
|
||||
}
|
||||
// The channel will do the throttling. Whoever is reading from the channel
|
||||
// decides whether action is taken synchronously (meaning we wait before
|
||||
// The channel will do the throttling. Whoever is reding from the channel
|
||||
// decides whether action is taken sycnhronously (meaning we wait before
|
||||
// next iteration) or asynchronously (we keep pushing more events)
|
||||
// In reality, reads will be synchronous
|
||||
entriesChannel <- binlogEntry
|
||||
@ -130,7 +125,7 @@ func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesCha
|
||||
if canStopStreaming() {
|
||||
break
|
||||
}
|
||||
ev, err := this.binlogStreamer.GetEvent(context.Background())
|
||||
ev, err := this.binlogStreamer.GetEvent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -139,22 +134,20 @@ func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesCha
|
||||
defer this.currentCoordinatesMutex.Unlock()
|
||||
this.currentCoordinates.LogPos = int64(ev.Header.LogPos)
|
||||
}()
|
||||
|
||||
switch binlogEvent := ev.Event.(type) {
|
||||
case *replication.RotateEvent:
|
||||
if rotateEvent, ok := ev.Event.(*replication.RotateEvent); ok {
|
||||
func() {
|
||||
this.currentCoordinatesMutex.Lock()
|
||||
defer this.currentCoordinatesMutex.Unlock()
|
||||
this.currentCoordinates.LogFile = string(binlogEvent.NextLogName)
|
||||
this.currentCoordinates.LogFile = string(rotateEvent.NextLogName)
|
||||
}()
|
||||
this.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), binlogEvent.NextLogName)
|
||||
case *replication.RowsEvent:
|
||||
if err := this.handleRowsEvent(ev, binlogEvent, entriesChannel); err != nil {
|
||||
log.Infof("rotate to next log name: %s", rotateEvent.NextLogName)
|
||||
} else if rowsEvent, ok := ev.Event.(*replication.RowsEvent); ok {
|
||||
if err := this.handleRowsEvent(ev, rowsEvent, entriesChannel); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
this.migrationContext.Log.Debugf("done streaming events")
|
||||
log.Debugf("done streaming events")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2022 GitHub Inc.
|
||||
Copyright 2016 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
@ -8,18 +8,13 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/github/gh-ost/go/base"
|
||||
"github.com/github/gh-ost/go/logic"
|
||||
"github.com/github/gh-ost/go/sql"
|
||||
_ "github.com/go-sql-driver/mysql"
|
||||
"github.com/openark/golib/log"
|
||||
|
||||
"golang.org/x/term"
|
||||
"github.com/outbrain/golib/log"
|
||||
)
|
||||
|
||||
var AppVersion string
|
||||
@ -33,7 +28,7 @@ func acceptSignals(migrationContext *base.MigrationContext) {
|
||||
for sig := range c {
|
||||
switch sig {
|
||||
case syscall.SIGHUP:
|
||||
migrationContext.Log.Infof("Received SIGHUP. Reloading configuration")
|
||||
log.Infof("Received SIGHUP. Reloading configuration")
|
||||
if err := migrationContext.ReadConfigFile(); err != nil {
|
||||
log.Errore(err)
|
||||
} else {
|
||||
@ -46,45 +41,27 @@ func acceptSignals(migrationContext *base.MigrationContext) {
|
||||
|
||||
// main is the application's entry point. It will either spawn a CLI or HTTP interfaces.
|
||||
func main() {
|
||||
migrationContext := base.NewMigrationContext()
|
||||
migrationContext := base.GetMigrationContext()
|
||||
|
||||
flag.StringVar(&migrationContext.InspectorConnectionConfig.Key.Hostname, "host", "127.0.0.1", "MySQL hostname (preferably a replica, not the master)")
|
||||
flag.StringVar(&migrationContext.AssumeMasterHostname, "assume-master-host", "", "(optional) explicitly tell gh-ost the identity of the master. Format: some.host.com[:port] This is useful in master-master setups where you wish to pick an explicit master, or in a tungsten-replicator where gh-ost is unable to determine the master")
|
||||
flag.StringVar(&migrationContext.AssumeMasterHostname, "assume-master-host", "", "(optional) explicitly tell gh-ost the identity of the master. Format: some.host.com[:port] This is useful in master-master setups where you wish to pick an explicit master, or in a tungsten-replicator where gh-ost is unabel to determine the master")
|
||||
flag.IntVar(&migrationContext.InspectorConnectionConfig.Key.Port, "port", 3306, "MySQL port (preferably a replica, not the master)")
|
||||
flag.Float64Var(&migrationContext.InspectorConnectionConfig.Timeout, "mysql-timeout", 0.0, "Connect, read and write timeout for MySQL")
|
||||
flag.StringVar(&migrationContext.CliUser, "user", "", "MySQL user")
|
||||
flag.StringVar(&migrationContext.CliPassword, "password", "", "MySQL password")
|
||||
flag.StringVar(&migrationContext.CliMasterUser, "master-user", "", "MySQL user on master, if different from that on replica. Requires --assume-master-host")
|
||||
flag.StringVar(&migrationContext.CliMasterPassword, "master-password", "", "MySQL password on master, if different from that on replica. Requires --assume-master-host")
|
||||
flag.StringVar(&migrationContext.ConfigFile, "conf", "", "Config file")
|
||||
askPass := flag.Bool("ask-pass", false, "prompt for MySQL password")
|
||||
|
||||
flag.BoolVar(&migrationContext.UseTLS, "ssl", false, "Enable SSL encrypted connections to MySQL hosts")
|
||||
flag.StringVar(&migrationContext.TLSCACertificate, "ssl-ca", "", "CA certificate in PEM format for TLS connections to MySQL hosts. Requires --ssl")
|
||||
flag.StringVar(&migrationContext.TLSCertificate, "ssl-cert", "", "Certificate in PEM format for TLS connections to MySQL hosts. Requires --ssl")
|
||||
flag.StringVar(&migrationContext.TLSKey, "ssl-key", "", "Key in PEM format for TLS connections to MySQL hosts. Requires --ssl")
|
||||
flag.BoolVar(&migrationContext.TLSAllowInsecure, "ssl-allow-insecure", false, "Skips verification of MySQL hosts' certificate chain and host name. Requires --ssl")
|
||||
|
||||
flag.StringVar(&migrationContext.DatabaseName, "database", "", "database name (mandatory)")
|
||||
flag.StringVar(&migrationContext.OriginalTableName, "table", "", "table name (mandatory)")
|
||||
flag.StringVar(&migrationContext.AlterStatement, "alter", "", "alter statement (mandatory)")
|
||||
flag.BoolVar(&migrationContext.AttemptInstantDDL, "attempt-instant-ddl", false, "Attempt to use instant DDL for this migration first")
|
||||
storageEngine := flag.String("storage-engine", "innodb", "Specify table storage engine (default: 'innodb'). When 'rocksdb': the session transaction isolation level is changed from REPEATABLE_READ to READ_COMMITTED.")
|
||||
|
||||
flag.BoolVar(&migrationContext.CountTableRows, "exact-rowcount", false, "actually count table rows as opposed to estimate them (results in more accurate progress estimation)")
|
||||
flag.BoolVar(&migrationContext.ConcurrentCountTableRows, "concurrent-rowcount", true, "(with --exact-rowcount), when true (default): count rows after row-copy begins, concurrently, and adjust row estimate later on; when false: first count rows, then start row copy")
|
||||
flag.BoolVar(&migrationContext.AllowedRunningOnMaster, "allow-on-master", false, "allow this migration to run directly on master. Preferably it would run on a replica")
|
||||
flag.BoolVar(&migrationContext.AllowedMasterMaster, "allow-master-master", false, "explicitly allow running in a master-master setup")
|
||||
flag.BoolVar(&migrationContext.NullableUniqueKeyAllowed, "allow-nullable-unique-key", false, "allow gh-ost to migrate based on a unique key with nullable columns. As long as no NULL values exist, this should be OK. If NULL values exist in chosen key, data may be corrupted. Use at your own risk!")
|
||||
flag.BoolVar(&migrationContext.ApproveRenamedColumns, "approve-renamed-columns", false, "in case your `ALTER` statement renames columns, gh-ost will note that and offer its interpretation of the rename. By default gh-ost does not proceed to execute. This flag approves that gh-ost's interpretation is correct")
|
||||
flag.BoolVar(&migrationContext.ApproveRenamedColumns, "approve-renamed-columns", false, "in case your `ALTER` statement renames columns, gh-ost will note that and offer its interpretation of the rename. By default gh-ost does not proceed to execute. This flag approves that gh-ost's interpretation si correct")
|
||||
flag.BoolVar(&migrationContext.SkipRenamedColumns, "skip-renamed-columns", false, "in case your `ALTER` statement renames columns, gh-ost will note that and offer its interpretation of the rename. By default gh-ost does not proceed to execute. This flag tells gh-ost to skip the renamed columns, i.e. to treat what gh-ost thinks are renamed columns as unrelated columns. NOTE: you may lose column data")
|
||||
flag.BoolVar(&migrationContext.IsTungsten, "tungsten", false, "explicitly let gh-ost know that you are running on a tungsten-replication based topology (you are likely to also provide --assume-master-host)")
|
||||
flag.BoolVar(&migrationContext.DiscardForeignKeys, "discard-foreign-keys", false, "DANGER! This flag will migrate a table that has foreign keys and will NOT create foreign keys on the ghost table, thus your altered table will have NO foreign keys. This is useful for intentional dropping of foreign keys")
|
||||
flag.BoolVar(&migrationContext.SkipForeignKeyChecks, "skip-foreign-key-checks", false, "set to 'true' when you know for certain there are no foreign keys on your table, and wish to skip the time it takes for gh-ost to verify that")
|
||||
flag.BoolVar(&migrationContext.SkipStrictMode, "skip-strict-mode", false, "explicitly tell gh-ost binlog applier not to enforce strict sql mode")
|
||||
flag.BoolVar(&migrationContext.AllowZeroInDate, "allow-zero-in-date", false, "explicitly tell gh-ost binlog applier to ignore NO_ZERO_IN_DATE,NO_ZERO_DATE in sql_mode")
|
||||
flag.BoolVar(&migrationContext.AliyunRDS, "aliyun-rds", false, "set to 'true' when you execute on Aliyun RDS.")
|
||||
flag.BoolVar(&migrationContext.GoogleCloudPlatform, "gcp", false, "set to 'true' when you execute on a 1st generation Google Cloud Platform (GCP).")
|
||||
flag.BoolVar(&migrationContext.AzureMySQL, "azure", false, "set to 'true' when you execute on Azure Database on MySQL.")
|
||||
|
||||
executeFlag := flag.Bool("execute", false, "actually execute the alter & migrate the table. Default is noop: do some tests and exit")
|
||||
flag.BoolVar(&migrationContext.TestOnReplica, "test-on-replica", false, "Have the migration run on a replica, not on the master. At the end of migration replication is stopped, and tables are swapped and immediately swap-revert. Replication remains stopped and you can compare the two tables for building trust")
|
||||
@ -94,30 +71,21 @@ func main() {
|
||||
flag.BoolVar(&migrationContext.OkToDropTable, "ok-to-drop-table", false, "Shall the tool drop the old table at end of operation. DROPping tables can be a long locking operation, which is why I'm not doing it by default. I'm an online tool, yes?")
|
||||
flag.BoolVar(&migrationContext.InitiallyDropOldTable, "initially-drop-old-table", false, "Drop a possibly existing OLD table (remains from a previous run?) before beginning operation. Default is to panic and abort if such table exists")
|
||||
flag.BoolVar(&migrationContext.InitiallyDropGhostTable, "initially-drop-ghost-table", false, "Drop a possibly existing Ghost table (remains from a previous run?) before beginning operation. Default is to panic and abort if such table exists")
|
||||
flag.BoolVar(&migrationContext.TimestampOldTable, "timestamp-old-table", false, "Use a timestamp in old table name. This makes old table names unique and non conflicting cross migrations")
|
||||
cutOver := flag.String("cut-over", "atomic", "choose cut-over type (default|atomic, two-step)")
|
||||
flag.BoolVar(&migrationContext.ForceNamedCutOverCommand, "force-named-cut-over", false, "When true, the 'unpostpone|cut-over' interactive command must name the migrated table")
|
||||
flag.BoolVar(&migrationContext.ForceNamedPanicCommand, "force-named-panic", false, "When true, the 'panic' interactive command must name the migrated table")
|
||||
|
||||
flag.BoolVar(&migrationContext.SwitchToRowBinlogFormat, "switch-to-rbr", false, "let this tool automatically switch binary log format to 'ROW' on the replica, if needed. The format will NOT be switched back. I'm too scared to do that, and wish to protect you if you happen to execute another migration while this one is running")
|
||||
flag.BoolVar(&migrationContext.AssumeRBR, "assume-rbr", false, "set to 'true' when you know for certain your server uses 'ROW' binlog_format. gh-ost is unable to tell, event after reading binlog_format, whether the replication process does indeed use 'ROW', and restarts replication to be certain RBR setting is applied. Such operation requires SUPER privileges which you might not have. Setting this flag avoids restarting replication and you can proceed to use gh-ost without SUPER privileges")
|
||||
flag.BoolVar(&migrationContext.CutOverExponentialBackoff, "cut-over-exponential-backoff", false, "Wait exponentially longer intervals between failed cut-over attempts. Wait intervals obey a maximum configurable with 'exponential-backoff-max-interval').")
|
||||
exponentialBackoffMaxInterval := flag.Int64("exponential-backoff-max-interval", 64, "Maximum number of seconds to wait between attempts when performing various operations with exponential backoff.")
|
||||
chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 10-100,000)")
|
||||
dmlBatchSize := flag.Int64("dml-batch-size", 10, "batch size for DML events to apply in a single transaction (range 1-100)")
|
||||
chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 100-100,000)")
|
||||
defaultRetries := flag.Int64("default-retries", 60, "Default number of retries for various operations before panicking")
|
||||
cutOverLockTimeoutSeconds := flag.Int64("cut-over-lock-timeout-seconds", 3, "Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout)")
|
||||
niceRatio := flag.Float64("nice-ratio", 0, "force being 'nice', imply sleep time per chunk time; range: [0.0..100.0]. Example values: 0 is aggressive. 1: for every 1ms spent copying rows, sleep additional 1ms (effectively doubling runtime); 0.7: for every 10ms spend in a rowcopy chunk, spend 7ms sleeping immediately after")
|
||||
|
||||
maxLagMillis := flag.Int64("max-lag-millis", 1500, "replication lag at which to throttle operation")
|
||||
replicationLagQuery := flag.String("replication-lag-query", "", "Deprecated. gh-ost uses an internal, subsecond resolution query")
|
||||
replicationLagQuery := flag.String("replication-lag-query", "", "Query that detects replication lag in seconds. Result can be a floating point (by default gh-ost issues SHOW SLAVE STATUS and reads Seconds_behind_master). If you're using pt-heartbeat, query would be something like: SELECT ROUND(UNIX_TIMESTAMP() - MAX(UNIX_TIMESTAMP(ts))) AS delay FROM my_schema.heartbeat")
|
||||
throttleControlReplicas := flag.String("throttle-control-replicas", "", "List of replicas on which to check for lag; comma delimited. Example: myhost1.com:3306,myhost2.com,myhost3.com:3307")
|
||||
throttleQuery := flag.String("throttle-query", "", "when given, issued (every second) to check if operation should throttle. Expecting to return zero for no-throttle, >0 for throttle. Query is issued on the migrated server. Make sure this query is lightweight")
|
||||
throttleHTTP := flag.String("throttle-http", "", "when given, gh-ost checks given URL via HEAD request; any response code other than 200 (OK) causes throttling; make sure it has low latency response")
|
||||
flag.Int64Var(&migrationContext.ThrottleHTTPIntervalMillis, "throttle-http-interval-millis", 100, "Number of milliseconds to wait before triggering another HTTP throttle check")
|
||||
flag.Int64Var(&migrationContext.ThrottleHTTPTimeoutMillis, "throttle-http-timeout-millis", 1000, "Number of milliseconds to use as an HTTP throttle check timeout")
|
||||
ignoreHTTPErrors := flag.Bool("ignore-http-errors", false, "ignore HTTP connection errors during throttle check")
|
||||
heartbeatIntervalMillis := flag.Int64("heartbeat-interval-millis", 100, "how frequently would gh-ost inject a heartbeat value")
|
||||
heartbeatIntervalMillis := flag.Int64("heartbeat-interval-millis", 500, "how frequently would gh-ost inject a heartbeat value")
|
||||
flag.StringVar(&migrationContext.ThrottleFlagFile, "throttle-flag-file", "", "operation pauses when this file exists; hint: use a file that is specific to the table being altered")
|
||||
flag.StringVar(&migrationContext.ThrottleAdditionalFlagFile, "throttle-additional-flag-file", "/tmp/gh-ost.throttle", "operation pauses when this file exists; hint: keep default, use for throttling multiple gh-ost operations")
|
||||
flag.StringVar(&migrationContext.PostponeCutOverFlagFile, "postpone-cut-over-flag-file", "", "while this file exists, migration will postpone the final stage of swapping tables, and will keep on syncing the ghost table. Cut-over/swapping would be ready to perform the moment the file is deleted.")
|
||||
@ -129,34 +97,20 @@ func main() {
|
||||
|
||||
flag.StringVar(&migrationContext.HooksPath, "hooks-path", "", "directory where hook files are found (default: empty, ie. hooks disabled). Hook files found on this path, and conforming to hook naming conventions will be executed")
|
||||
flag.StringVar(&migrationContext.HooksHintMessage, "hooks-hint", "", "arbitrary message to be injected to hooks via GH_OST_HOOKS_HINT, for your convenience")
|
||||
flag.StringVar(&migrationContext.HooksHintOwner, "hooks-hint-owner", "", "arbitrary name of owner to be injected to hooks via GH_OST_HOOKS_HINT_OWNER, for your convenience")
|
||||
flag.StringVar(&migrationContext.HooksHintToken, "hooks-hint-token", "", "arbitrary token to be injected to hooks via GH_OST_HOOKS_HINT_TOKEN, for your convenience")
|
||||
flag.Int64Var(&migrationContext.HooksStatusIntervalSec, "hooks-status-interval", 60, "how many seconds to wait between calling onStatus hook")
|
||||
|
||||
flag.UintVar(&migrationContext.ReplicaServerId, "replica-server-id", 99999, "server id used by gh-ost process. Default: 99999")
|
||||
flag.IntVar(&migrationContext.BinlogSyncerMaxReconnectAttempts, "binlogsyncer-max-reconnect-attempts", 0, "when master node fails, the maximum number of binlog synchronization attempts to reconnect. 0 is unlimited")
|
||||
|
||||
maxLoad := flag.String("max-load", "", "Comma delimited status-name=threshold. e.g: 'Threads_running=100,Threads_connected=500'. When status exceeds threshold, app throttles writes")
|
||||
criticalLoad := flag.String("critical-load", "", "Comma delimited status-name=threshold, same format as --max-load. When status exceeds threshold, app panics and quits")
|
||||
flag.Int64Var(&migrationContext.CriticalLoadIntervalMilliseconds, "critical-load-interval-millis", 0, "When 0, migration immediately bails out upon meeting critical-load. When non-zero, a second check is done after given interval, and migration only bails out if 2nd check still meets critical load")
|
||||
flag.Int64Var(&migrationContext.CriticalLoadHibernateSeconds, "critical-load-hibernate-seconds", 0, "When non-zero, critical-load does not panic and bail out; instead, gh-ost goes into hibernation for the specified duration. It will not read/write anything from/to any server")
|
||||
quiet := flag.Bool("quiet", false, "quiet")
|
||||
verbose := flag.Bool("verbose", false, "verbose")
|
||||
debug := flag.Bool("debug", false, "debug mode (very verbose)")
|
||||
stack := flag.Bool("stack", false, "add stack trace upon error")
|
||||
help := flag.Bool("help", false, "Display usage")
|
||||
version := flag.Bool("version", false, "Print version & exit")
|
||||
checkFlag := flag.Bool("check-flag", false, "Check if another flag exists/supported. This allows for cross-version scripting. Exits with 0 when all additional provided flags exist, nonzero otherwise. You must provide (dummy) values for flags that require a value. Example: gh-ost --check-flag --cut-over-lock-timeout-seconds --nice-ratio 0")
|
||||
flag.StringVar(&migrationContext.ForceTmpTableName, "force-table-names", "", "table name prefix to be used on the temporary tables")
|
||||
flag.CommandLine.SetOutput(os.Stdout)
|
||||
|
||||
flag.Parse()
|
||||
|
||||
if *checkFlag {
|
||||
return
|
||||
}
|
||||
if *help {
|
||||
fmt.Fprintf(os.Stdout, "Usage of gh-ost:\n")
|
||||
fmt.Fprintf(os.Stderr, "Usage of gh-ost:\n")
|
||||
flag.PrintDefaults()
|
||||
return
|
||||
}
|
||||
@ -169,92 +123,51 @@ func main() {
|
||||
return
|
||||
}
|
||||
|
||||
migrationContext.Log.SetLevel(log.ERROR)
|
||||
log.SetLevel(log.ERROR)
|
||||
if *verbose {
|
||||
migrationContext.Log.SetLevel(log.INFO)
|
||||
log.SetLevel(log.INFO)
|
||||
}
|
||||
if *debug {
|
||||
migrationContext.Log.SetLevel(log.DEBUG)
|
||||
log.SetLevel(log.DEBUG)
|
||||
}
|
||||
if *stack {
|
||||
migrationContext.Log.SetPrintStackTrace(*stack)
|
||||
log.SetPrintStackTrace(*stack)
|
||||
}
|
||||
if *quiet {
|
||||
// Override!!
|
||||
migrationContext.Log.SetLevel(log.ERROR)
|
||||
log.SetLevel(log.ERROR)
|
||||
}
|
||||
|
||||
if err := migrationContext.SetConnectionConfig(*storageEngine); err != nil {
|
||||
migrationContext.Log.Fatale(err)
|
||||
}
|
||||
|
||||
if migrationContext.AlterStatement == "" {
|
||||
log.Fatal("--alter must be provided and statement must not be empty")
|
||||
}
|
||||
parser := sql.NewParserFromAlterStatement(migrationContext.AlterStatement)
|
||||
migrationContext.AlterStatementOptions = parser.GetAlterStatementOptions()
|
||||
|
||||
if migrationContext.DatabaseName == "" {
|
||||
if parser.HasExplicitSchema() {
|
||||
migrationContext.DatabaseName = parser.GetExplicitSchema()
|
||||
} else {
|
||||
log.Fatal("--database must be provided and database name must not be empty, or --alter must specify database name")
|
||||
}
|
||||
log.Fatalf("--database must be provided and database name must not be empty")
|
||||
}
|
||||
|
||||
if err := flag.Set("database", url.QueryEscape(migrationContext.DatabaseName)); err != nil {
|
||||
migrationContext.Log.Fatale(err)
|
||||
}
|
||||
|
||||
if migrationContext.OriginalTableName == "" {
|
||||
if parser.HasExplicitTable() {
|
||||
migrationContext.OriginalTableName = parser.GetExplicitTable()
|
||||
} else {
|
||||
log.Fatal("--table must be provided and table name must not be empty, or --alter must specify table name")
|
||||
}
|
||||
log.Fatalf("--table must be provided and table name must not be empty")
|
||||
}
|
||||
if migrationContext.AlterStatement == "" {
|
||||
log.Fatalf("--alter must be provided and statement must not be empty")
|
||||
}
|
||||
migrationContext.Noop = !(*executeFlag)
|
||||
if migrationContext.AllowedRunningOnMaster && migrationContext.TestOnReplica {
|
||||
migrationContext.Log.Fatal("--allow-on-master and --test-on-replica are mutually exclusive")
|
||||
log.Fatalf("--allow-on-master and --test-on-replica are mutually exclusive")
|
||||
}
|
||||
if migrationContext.AllowedRunningOnMaster && migrationContext.MigrateOnReplica {
|
||||
migrationContext.Log.Fatal("--allow-on-master and --migrate-on-replica are mutually exclusive")
|
||||
log.Fatalf("--allow-on-master and --migrate-on-replica are mutually exclusive")
|
||||
}
|
||||
if migrationContext.MigrateOnReplica && migrationContext.TestOnReplica {
|
||||
migrationContext.Log.Fatal("--migrate-on-replica and --test-on-replica are mutually exclusive")
|
||||
log.Fatalf("--migrate-on-replica and --test-on-replica are mutually exclusive")
|
||||
}
|
||||
if migrationContext.SwitchToRowBinlogFormat && migrationContext.AssumeRBR {
|
||||
migrationContext.Log.Fatal("--switch-to-rbr and --assume-rbr are mutually exclusive")
|
||||
log.Fatalf("--switch-to-rbr and --assume-rbr are mutually exclusive")
|
||||
}
|
||||
if migrationContext.TestOnReplicaSkipReplicaStop {
|
||||
if !migrationContext.TestOnReplica {
|
||||
migrationContext.Log.Fatal("--test-on-replica-skip-replica-stop requires --test-on-replica to be enabled")
|
||||
log.Fatalf("--test-on-replica-skip-replica-stop requires --test-on-replica to be enabled")
|
||||
}
|
||||
migrationContext.Log.Warning("--test-on-replica-skip-replica-stop enabled. We will not stop replication before cut-over. Ensure you have a plugin that does this.")
|
||||
log.Warning("--test-on-replica-skip-replica-stop enabled. We will not stop replication before cut-over. Ensure you have a plugin that does this.")
|
||||
}
|
||||
if migrationContext.CliMasterUser != "" && migrationContext.AssumeMasterHostname == "" {
|
||||
migrationContext.Log.Fatal("--master-user requires --assume-master-host")
|
||||
}
|
||||
if migrationContext.CliMasterPassword != "" && migrationContext.AssumeMasterHostname == "" {
|
||||
migrationContext.Log.Fatal("--master-password requires --assume-master-host")
|
||||
}
|
||||
if migrationContext.TLSCACertificate != "" && !migrationContext.UseTLS {
|
||||
migrationContext.Log.Fatal("--ssl-ca requires --ssl")
|
||||
}
|
||||
if migrationContext.TLSCertificate != "" && !migrationContext.UseTLS {
|
||||
migrationContext.Log.Fatal("--ssl-cert requires --ssl")
|
||||
}
|
||||
if migrationContext.TLSKey != "" && !migrationContext.UseTLS {
|
||||
migrationContext.Log.Fatal("--ssl-key requires --ssl")
|
||||
}
|
||||
if migrationContext.TLSAllowInsecure && !migrationContext.UseTLS {
|
||||
migrationContext.Log.Fatal("--ssl-allow-insecure requires --ssl")
|
||||
}
|
||||
if *replicationLagQuery != "" {
|
||||
migrationContext.Log.Warning("--replication-lag-query is deprecated")
|
||||
}
|
||||
if *storageEngine == "rocksdb" {
|
||||
migrationContext.Log.Warning("RocksDB storage engine support is experimental")
|
||||
if migrationContext.AssumeMasterHostname != "" && !migrationContext.AllowedMasterMaster && !migrationContext.IsTungsten {
|
||||
log.Fatalf("--assume-master-host requires either --allow-master-master or --tungsten")
|
||||
}
|
||||
|
||||
switch *cutOver {
|
||||
@ -263,58 +176,43 @@ func main() {
|
||||
case "two-step":
|
||||
migrationContext.CutOverType = base.CutOverTwoStep
|
||||
default:
|
||||
migrationContext.Log.Fatalf("Unknown cut-over: %s", *cutOver)
|
||||
log.Fatalf("Unknown cut-over: %s", *cutOver)
|
||||
}
|
||||
if err := migrationContext.ReadConfigFile(); err != nil {
|
||||
migrationContext.Log.Fatale(err)
|
||||
log.Fatale(err)
|
||||
}
|
||||
if err := migrationContext.ReadThrottleControlReplicaKeys(*throttleControlReplicas); err != nil {
|
||||
migrationContext.Log.Fatale(err)
|
||||
log.Fatale(err)
|
||||
}
|
||||
if err := migrationContext.ReadMaxLoad(*maxLoad); err != nil {
|
||||
migrationContext.Log.Fatale(err)
|
||||
log.Fatale(err)
|
||||
}
|
||||
if err := migrationContext.ReadCriticalLoad(*criticalLoad); err != nil {
|
||||
migrationContext.Log.Fatale(err)
|
||||
log.Fatale(err)
|
||||
}
|
||||
if migrationContext.ServeSocketFile == "" {
|
||||
migrationContext.ServeSocketFile = fmt.Sprintf("/tmp/gh-ost.%s.%s.sock", migrationContext.DatabaseName, migrationContext.OriginalTableName)
|
||||
}
|
||||
if *askPass {
|
||||
fmt.Println("Password:")
|
||||
bytePassword, err := term.ReadPassword(syscall.Stdin)
|
||||
if err != nil {
|
||||
migrationContext.Log.Fatale(err)
|
||||
}
|
||||
migrationContext.CliPassword = string(bytePassword)
|
||||
}
|
||||
migrationContext.SetHeartbeatIntervalMilliseconds(*heartbeatIntervalMillis)
|
||||
migrationContext.SetNiceRatio(*niceRatio)
|
||||
migrationContext.SetChunkSize(*chunkSize)
|
||||
migrationContext.SetDMLBatchSize(*dmlBatchSize)
|
||||
migrationContext.SetMaxLagMillisecondsThrottleThreshold(*maxLagMillis)
|
||||
migrationContext.SetReplicationLagQuery(*replicationLagQuery)
|
||||
migrationContext.SetThrottleQuery(*throttleQuery)
|
||||
migrationContext.SetThrottleHTTP(*throttleHTTP)
|
||||
migrationContext.SetIgnoreHTTPErrors(*ignoreHTTPErrors)
|
||||
migrationContext.SetDefaultNumRetries(*defaultRetries)
|
||||
migrationContext.ApplyCredentials()
|
||||
if err := migrationContext.SetupTLS(); err != nil {
|
||||
migrationContext.Log.Fatale(err)
|
||||
}
|
||||
if err := migrationContext.SetCutOverLockTimeoutSeconds(*cutOverLockTimeoutSeconds); err != nil {
|
||||
migrationContext.Log.Errore(err)
|
||||
}
|
||||
if err := migrationContext.SetExponentialBackoffMaxInterval(*exponentialBackoffMaxInterval); err != nil {
|
||||
migrationContext.Log.Errore(err)
|
||||
log.Errore(err)
|
||||
}
|
||||
|
||||
log.Infof("starting gh-ost %+v", AppVersion)
|
||||
acceptSignals(migrationContext)
|
||||
|
||||
migrator := logic.NewMigrator(migrationContext, AppVersion)
|
||||
if err := migrator.Migrate(); err != nil {
|
||||
migrator := logic.NewMigrator()
|
||||
err := migrator.Migrate()
|
||||
if err != nil {
|
||||
migrator.ExecOnFailureHook()
|
||||
migrationContext.Log.Fatale(err)
|
||||
log.Fatale(err)
|
||||
}
|
||||
fmt.Fprintln(os.Stdout, "# Done")
|
||||
log.Info("Done")
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -1,185 +0,0 @@
|
||||
/*
|
||||
Copyright 2022 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package logic
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
test "github.com/openark/golib/tests"
|
||||
|
||||
"github.com/github/gh-ost/go/base"
|
||||
"github.com/github/gh-ost/go/binlog"
|
||||
"github.com/github/gh-ost/go/sql"
|
||||
)
|
||||
|
||||
func TestApplierGenerateSqlModeQuery(t *testing.T) {
|
||||
migrationContext := base.NewMigrationContext()
|
||||
applier := NewApplier(migrationContext)
|
||||
|
||||
{
|
||||
test.S(t).ExpectEquals(
|
||||
applier.generateSqlModeQuery(),
|
||||
`sql_mode = CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO,STRICT_ALL_TABLES')`,
|
||||
)
|
||||
}
|
||||
{
|
||||
migrationContext.SkipStrictMode = true
|
||||
migrationContext.AllowZeroInDate = false
|
||||
test.S(t).ExpectEquals(
|
||||
applier.generateSqlModeQuery(),
|
||||
`sql_mode = CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO')`,
|
||||
)
|
||||
}
|
||||
{
|
||||
migrationContext.SkipStrictMode = false
|
||||
migrationContext.AllowZeroInDate = true
|
||||
test.S(t).ExpectEquals(
|
||||
applier.generateSqlModeQuery(),
|
||||
`sql_mode = REPLACE(REPLACE(CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO,STRICT_ALL_TABLES'), 'NO_ZERO_IN_DATE', ''), 'NO_ZERO_DATE', '')`,
|
||||
)
|
||||
}
|
||||
{
|
||||
migrationContext.SkipStrictMode = true
|
||||
migrationContext.AllowZeroInDate = true
|
||||
test.S(t).ExpectEquals(
|
||||
applier.generateSqlModeQuery(),
|
||||
`sql_mode = REPLACE(REPLACE(CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO'), 'NO_ZERO_IN_DATE', ''), 'NO_ZERO_DATE', '')`,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplierUpdateModifiesUniqueKeyColumns(t *testing.T) {
|
||||
columns := sql.NewColumnList([]string{"id", "item_id"})
|
||||
columnValues := sql.ToColumnValues([]interface{}{123456, 42})
|
||||
|
||||
migrationContext := base.NewMigrationContext()
|
||||
migrationContext.OriginalTableColumns = columns
|
||||
migrationContext.UniqueKey = &sql.UniqueKey{
|
||||
Name: t.Name(),
|
||||
Columns: *columns,
|
||||
}
|
||||
|
||||
applier := NewApplier(migrationContext)
|
||||
|
||||
t.Run("unmodified", func(t *testing.T) {
|
||||
modifiedColumn, isModified := applier.updateModifiesUniqueKeyColumns(&binlog.BinlogDMLEvent{
|
||||
DatabaseName: "test",
|
||||
DML: binlog.UpdateDML,
|
||||
NewColumnValues: columnValues,
|
||||
WhereColumnValues: columnValues,
|
||||
})
|
||||
test.S(t).ExpectEquals(modifiedColumn, "")
|
||||
test.S(t).ExpectFalse(isModified)
|
||||
})
|
||||
|
||||
t.Run("modified", func(t *testing.T) {
|
||||
modifiedColumn, isModified := applier.updateModifiesUniqueKeyColumns(&binlog.BinlogDMLEvent{
|
||||
DatabaseName: "test",
|
||||
DML: binlog.UpdateDML,
|
||||
NewColumnValues: sql.ToColumnValues([]interface{}{123456, 24}),
|
||||
WhereColumnValues: columnValues,
|
||||
})
|
||||
test.S(t).ExpectEquals(modifiedColumn, "item_id")
|
||||
test.S(t).ExpectTrue(isModified)
|
||||
})
|
||||
}
|
||||
|
||||
func TestApplierBuildDMLEventQuery(t *testing.T) {
|
||||
columns := sql.NewColumnList([]string{"id", "item_id"})
|
||||
columnValues := sql.ToColumnValues([]interface{}{123456, 42})
|
||||
|
||||
migrationContext := base.NewMigrationContext()
|
||||
migrationContext.OriginalTableName = "test"
|
||||
migrationContext.OriginalTableColumns = columns
|
||||
migrationContext.SharedColumns = columns
|
||||
migrationContext.MappedSharedColumns = columns
|
||||
migrationContext.UniqueKey = &sql.UniqueKey{
|
||||
Name: t.Name(),
|
||||
Columns: *columns,
|
||||
}
|
||||
|
||||
applier := NewApplier(migrationContext)
|
||||
|
||||
t.Run("delete", func(t *testing.T) {
|
||||
binlogEvent := &binlog.BinlogDMLEvent{
|
||||
DatabaseName: "test",
|
||||
DML: binlog.DeleteDML,
|
||||
WhereColumnValues: columnValues,
|
||||
}
|
||||
|
||||
res := applier.buildDMLEventQuery(binlogEvent)
|
||||
test.S(t).ExpectEquals(len(res), 1)
|
||||
test.S(t).ExpectNil(res[0].err)
|
||||
test.S(t).ExpectEquals(strings.TrimSpace(res[0].query),
|
||||
`delete /* gh-ost `+"`test`.`_test_gho`"+` */
|
||||
from
|
||||
`+"`test`.`_test_gho`"+`
|
||||
where
|
||||
((`+"`id`"+` = ?) and (`+"`item_id`"+` = ?))`)
|
||||
|
||||
test.S(t).ExpectEquals(len(res[0].args), 2)
|
||||
test.S(t).ExpectEquals(res[0].args[0], 123456)
|
||||
test.S(t).ExpectEquals(res[0].args[1], 42)
|
||||
})
|
||||
|
||||
t.Run("insert", func(t *testing.T) {
|
||||
binlogEvent := &binlog.BinlogDMLEvent{
|
||||
DatabaseName: "test",
|
||||
DML: binlog.InsertDML,
|
||||
NewColumnValues: columnValues,
|
||||
}
|
||||
res := applier.buildDMLEventQuery(binlogEvent)
|
||||
test.S(t).ExpectEquals(len(res), 1)
|
||||
test.S(t).ExpectNil(res[0].err)
|
||||
test.S(t).ExpectEquals(strings.TrimSpace(res[0].query),
|
||||
`replace /* gh-ost `+"`test`.`_test_gho`"+` */ into
|
||||
`+"`test`.`_test_gho`"+`
|
||||
`+"(`id`, `item_id`)"+`
|
||||
values
|
||||
(?, ?)`)
|
||||
test.S(t).ExpectEquals(len(res[0].args), 2)
|
||||
test.S(t).ExpectEquals(res[0].args[0], 123456)
|
||||
test.S(t).ExpectEquals(res[0].args[1], 42)
|
||||
})
|
||||
|
||||
t.Run("update", func(t *testing.T) {
|
||||
binlogEvent := &binlog.BinlogDMLEvent{
|
||||
DatabaseName: "test",
|
||||
DML: binlog.UpdateDML,
|
||||
NewColumnValues: columnValues,
|
||||
WhereColumnValues: columnValues,
|
||||
}
|
||||
res := applier.buildDMLEventQuery(binlogEvent)
|
||||
test.S(t).ExpectEquals(len(res), 1)
|
||||
test.S(t).ExpectNil(res[0].err)
|
||||
test.S(t).ExpectEquals(strings.TrimSpace(res[0].query),
|
||||
`update /* gh-ost `+"`test`.`_test_gho`"+` */
|
||||
`+"`test`.`_test_gho`"+`
|
||||
set
|
||||
`+"`id`"+`=?, `+"`item_id`"+`=?
|
||||
where
|
||||
((`+"`id`"+` = ?) and (`+"`item_id`"+` = ?))`)
|
||||
test.S(t).ExpectEquals(len(res[0].args), 4)
|
||||
test.S(t).ExpectEquals(res[0].args[0], 123456)
|
||||
test.S(t).ExpectEquals(res[0].args[1], 42)
|
||||
test.S(t).ExpectEquals(res[0].args[2], 123456)
|
||||
test.S(t).ExpectEquals(res[0].args[3], 42)
|
||||
})
|
||||
}
|
||||
|
||||
func TestApplierInstantDDL(t *testing.T) {
	migrationContext := base.NewMigrationContext()
	migrationContext.DatabaseName = "test"
	migrationContext.OriginalTableName = "mytable"
	migrationContext.AlterStatementOptions = "ADD INDEX (foo)"
	applier := NewApplier(migrationContext)

	t.Run("instantDDLstmt", func(t *testing.T) {
		stmt := applier.generateInstantDDLQuery()
		test.S(t).ExpectEquals(stmt, "ALTER /* gh-ost */ TABLE `test`.`mytable` ADD INDEX (foo), ALGORITHM=INSTANT")
	})
}
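A note on what the test above asserts: generateInstantDDLQuery produces a single schema-qualified ALTER statement with a trailing ALGORITHM=INSTANT clause. As a rough, hedged sketch of that shape (the helper name below is made up for illustration; this is not the gh-ost implementation):

package main

import "fmt"

// buildInstantDDL mirrors the statement the test above expects: the gh-ost
// marker comment, the schema-qualified table, the ALTER options, and the
// ALGORITHM=INSTANT suffix.
func buildInstantDDL(database, table, alterOptions string) string {
	return fmt.Sprintf("ALTER /* gh-ost */ TABLE `%s`.`%s` %s, ALGORITHM=INSTANT",
		database, table, alterOptions)
}

func main() {
	fmt.Println(buildInstantDDL("test", "mytable", "ADD INDEX (foo)"))
	// ALTER /* gh-ost */ TABLE `test`.`mytable` ADD INDEX (foo), ALGORITHM=INSTANT
}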
|
@ -1,5 +1,6 @@
|
||||
/*
|
||||
Copyright 2022 GitHub Inc.
|
||||
/*
|
||||
Copyright 2016 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
@ -7,14 +8,13 @@ package logic
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/github/gh-ost/go/base"
|
||||
"github.com/openark/golib/log"
|
||||
"github.com/outbrain/golib/log"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -30,22 +30,23 @@ const (
|
||||
onFailure = "gh-ost-on-failure"
|
||||
onStatus = "gh-ost-on-status"
|
||||
onStopReplication = "gh-ost-on-stop-replication"
|
||||
onStartReplication = "gh-ost-on-start-replication"
|
||||
)
|
||||
|
||||
type HooksExecutor struct {
|
||||
migrationContext *base.MigrationContext
|
||||
writer io.Writer
|
||||
}
|
||||
|
||||
func NewHooksExecutor(migrationContext *base.MigrationContext) *HooksExecutor {
|
||||
func NewHooksExecutor() *HooksExecutor {
|
||||
return &HooksExecutor{
|
||||
migrationContext: migrationContext,
|
||||
writer: os.Stderr,
|
||||
migrationContext: base.GetMigrationContext(),
|
||||
}
|
||||
}
|
||||
|
||||
func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) []string {
|
||||
func (this *HooksExecutor) initHooks() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *HooksExecutor) applyEnvironmentVairables(extraVariables ...string) []string {
|
||||
env := os.Environ()
|
||||
env = append(env, fmt.Sprintf("GH_OST_DATABASE_NAME=%s", this.migrationContext.DatabaseName))
|
||||
env = append(env, fmt.Sprintf("GH_OST_TABLE_NAME=%s", this.migrationContext.OriginalTableName))
|
||||
@ -61,27 +62,22 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [
|
||||
env = append(env, fmt.Sprintf("GH_OST_MIGRATED_HOST=%s", this.migrationContext.GetApplierHostname()))
|
||||
env = append(env, fmt.Sprintf("GH_OST_INSPECTED_HOST=%s", this.migrationContext.GetInspectorHostname()))
|
||||
env = append(env, fmt.Sprintf("GH_OST_EXECUTING_HOST=%s", this.migrationContext.Hostname))
|
||||
env = append(env, fmt.Sprintf("GH_OST_INSPECTED_LAG=%f", this.migrationContext.GetCurrentLagDuration().Seconds()))
|
||||
env = append(env, fmt.Sprintf("GH_OST_HEARTBEAT_LAG=%f", this.migrationContext.TimeSinceLastHeartbeatOnChangelog().Seconds()))
|
||||
env = append(env, fmt.Sprintf("GH_OST_PROGRESS=%f", this.migrationContext.GetProgressPct()))
|
||||
env = append(env, fmt.Sprintf("GH_OST_ETA_SECONDS=%d", this.migrationContext.GetETASeconds()))
|
||||
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT=%s", this.migrationContext.HooksHintMessage))
|
||||
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_OWNER=%s", this.migrationContext.HooksHintOwner))
|
||||
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_TOKEN=%s", this.migrationContext.HooksHintToken))
|
||||
env = append(env, fmt.Sprintf("GH_OST_DRY_RUN=%t", this.migrationContext.Noop))
|
||||
|
||||
env = append(env, extraVariables...)
|
||||
for _, variable := range extraVariables {
|
||||
env = append(env, variable)
|
||||
}
|
||||
return env
|
||||
}
|
||||
|
||||
// executeHook executes a command, and sets relevant environment variables
|
||||
// combined output & error are printed to the configured writer.
|
||||
// combined output & error are printed to gh-ost's standard error.
|
||||
func (this *HooksExecutor) executeHook(hook string, extraVariables ...string) error {
|
||||
cmd := exec.Command(hook)
|
||||
cmd.Env = this.applyEnvironmentVariables(extraVariables...)
|
||||
cmd.Env = this.applyEnvironmentVairables(extraVariables...)
|
||||
|
||||
combinedOutput, err := cmd.CombinedOutput()
|
||||
fmt.Fprintln(this.writer, string(combinedOutput))
|
||||
fmt.Fprintln(os.Stderr, string(combinedOutput))
|
||||
return log.Errore(err)
|
||||
}
|
||||
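Hooks are plain executables: gh-ost runs them through exec.Command and passes all context via the GH_OST_* environment variables assembled above, printing their combined output to the configured writer (or to stderr on the older side of this diff). A minimal standalone hook could look like this sketch (illustrative only; any executable in the hooks path works, shell scripts included):

package main

import (
	"fmt"
	"os"
)

// A toy gh-ost hook: read a few GH_OST_* variables and report them.
// A non-zero exit status is what executeHook surfaces as a hook failure.
func main() {
	fmt.Printf("migrating %s.%s, progress %s%%, ETA %ss\n",
		os.Getenv("GH_OST_DATABASE_NAME"),
		os.Getenv("GH_OST_TABLE_NAME"),
		os.Getenv("GH_OST_PROGRESS"),
		os.Getenv("GH_OST_ETA_SECONDS"))
	os.Exit(0)
}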
|
||||
@ -156,7 +152,3 @@ func (this *HooksExecutor) onStatus(statusMessage string) error {
|
||||
func (this *HooksExecutor) onStopReplication() error {
|
||||
return this.executeHooks(onStopReplication)
|
||||
}
|
||||
|
||||
func (this *HooksExecutor) onStartReplication() error {
|
||||
return this.executeHooks(onStartReplication)
|
||||
}
|
||||
|
@ -1,113 +0,0 @@
|
||||
/*
|
||||
Copyright 2022 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package logic
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/openark/golib/tests"
|
||||
|
||||
"github.com/github/gh-ost/go/base"
|
||||
)
|
||||
|
||||
func TestHooksExecutorExecuteHooks(t *testing.T) {
|
||||
migrationContext := base.NewMigrationContext()
|
||||
migrationContext.AlterStatement = "ENGINE=InnoDB"
|
||||
migrationContext.DatabaseName = "test"
|
||||
migrationContext.Hostname = "test.example.com"
|
||||
migrationContext.OriginalTableName = "tablename"
|
||||
migrationContext.RowsDeltaEstimate = 1
|
||||
migrationContext.RowsEstimate = 122
|
||||
migrationContext.TotalRowsCopied = 123456
|
||||
migrationContext.SetETADuration(time.Minute)
|
||||
migrationContext.SetProgressPct(50)
|
||||
hooksExecutor := NewHooksExecutor(migrationContext)
|
||||
|
||||
writeTmpHookFunc := func(testName, hookName, script string) (path string, err error) {
|
||||
if path, err = os.MkdirTemp("", testName); err != nil {
|
||||
return path, err
|
||||
}
|
||||
err = os.WriteFile(filepath.Join(path, hookName), []byte(script), 0777)
|
||||
return path, err
|
||||
}
|
||||
|
||||
t.Run("does-not-exist", func(t *testing.T) {
|
||||
migrationContext.HooksPath = "/does/not/exist"
|
||||
tests.S(t).ExpectNil(hooksExecutor.executeHooks("test-hook"))
|
||||
})
|
||||
|
||||
t.Run("failed", func(t *testing.T) {
|
||||
var err error
|
||||
if migrationContext.HooksPath, err = writeTmpHookFunc(
|
||||
"TestHooksExecutorExecuteHooks-failed",
|
||||
"failed-hook",
|
||||
"#!/bin/sh\nexit 1",
|
||||
); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(migrationContext.HooksPath)
|
||||
tests.S(t).ExpectNotNil(hooksExecutor.executeHooks("failed-hook"))
|
||||
})
|
||||
|
||||
t.Run("success", func(t *testing.T) {
|
||||
var err error
|
||||
if migrationContext.HooksPath, err = writeTmpHookFunc(
|
||||
"TestHooksExecutorExecuteHooks-success",
|
||||
"success-hook",
|
||||
"#!/bin/sh\nenv",
|
||||
); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(migrationContext.HooksPath)
|
||||
|
||||
var buf bytes.Buffer
|
||||
hooksExecutor.writer = &buf
|
||||
tests.S(t).ExpectNil(hooksExecutor.executeHooks("success-hook", "TEST="+t.Name()))
|
||||
|
||||
scanner := bufio.NewScanner(&buf)
|
||||
for scanner.Scan() {
|
||||
split := strings.SplitN(scanner.Text(), "=", 2)
|
||||
switch split[0] {
|
||||
case "GH_OST_COPIED_ROWS":
|
||||
copiedRows, _ := strconv.ParseInt(split[1], 10, 64)
|
||||
tests.S(t).ExpectEquals(copiedRows, migrationContext.TotalRowsCopied)
|
||||
case "GH_OST_DATABASE_NAME":
|
||||
tests.S(t).ExpectEquals(split[1], migrationContext.DatabaseName)
|
||||
case "GH_OST_DDL":
|
||||
tests.S(t).ExpectEquals(split[1], migrationContext.AlterStatement)
|
||||
case "GH_OST_DRY_RUN":
|
||||
tests.S(t).ExpectEquals(split[1], "false")
|
||||
case "GH_OST_ESTIMATED_ROWS":
|
||||
estimatedRows, _ := strconv.ParseInt(split[1], 10, 64)
|
||||
tests.S(t).ExpectEquals(estimatedRows, int64(123))
|
||||
case "GH_OST_ETA_SECONDS":
|
||||
etaSeconds, _ := strconv.ParseInt(split[1], 10, 64)
|
||||
tests.S(t).ExpectEquals(etaSeconds, int64(60))
|
||||
case "GH_OST_EXECUTING_HOST":
|
||||
tests.S(t).ExpectEquals(split[1], migrationContext.Hostname)
|
||||
case "GH_OST_GHOST_TABLE_NAME":
|
||||
tests.S(t).ExpectEquals(split[1], fmt.Sprintf("_%s_gho", migrationContext.OriginalTableName))
|
||||
case "GH_OST_OLD_TABLE_NAME":
|
||||
tests.S(t).ExpectEquals(split[1], fmt.Sprintf("_%s_del", migrationContext.OriginalTableName))
|
||||
case "GH_OST_PROGRESS":
|
||||
progress, _ := strconv.ParseFloat(split[1], 64)
|
||||
tests.S(t).ExpectEquals(progress, 50.0)
|
||||
case "GH_OST_TABLE_NAME":
|
||||
tests.S(t).ExpectEquals(split[1], migrationContext.OriginalTableName)
|
||||
case "TEST":
|
||||
tests.S(t).ExpectEquals(split[1], t.Name())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
@ -1,67 +1,52 @@
|
||||
/*
|
||||
Copyright 2022 GitHub Inc.
|
||||
Copyright 2016 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package logic
|
||||
|
||||
import (
|
||||
"context"
|
||||
gosql "database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/github/gh-ost/go/base"
|
||||
"github.com/github/gh-ost/go/mysql"
|
||||
"github.com/github/gh-ost/go/sql"
|
||||
|
||||
"github.com/openark/golib/sqlutils"
|
||||
"github.com/outbrain/golib/log"
|
||||
"github.com/outbrain/golib/sqlutils"
|
||||
)
|
||||
|
||||
const startSlavePostWaitMilliseconds = 500 * time.Millisecond
|
||||
|
||||
// Inspector reads data from the read-MySQL-server (typically a replica, but can be the master)
|
||||
// It is used for gaining initial status and structure, and later also follow up on progress and changelog
|
||||
type Inspector struct {
|
||||
connectionConfig *mysql.ConnectionConfig
|
||||
db *gosql.DB
|
||||
informationSchemaDb *gosql.DB
|
||||
migrationContext *base.MigrationContext
|
||||
name string
|
||||
connectionConfig *mysql.ConnectionConfig
|
||||
db *gosql.DB
|
||||
migrationContext *base.MigrationContext
|
||||
}
|
||||
|
||||
func NewInspector(migrationContext *base.MigrationContext) *Inspector {
|
||||
func NewInspector() *Inspector {
|
||||
return &Inspector{
|
||||
connectionConfig: migrationContext.InspectorConnectionConfig,
|
||||
migrationContext: migrationContext,
|
||||
name: "inspector",
|
||||
connectionConfig: base.GetMigrationContext().InspectorConnectionConfig,
|
||||
migrationContext: base.GetMigrationContext(),
|
||||
}
|
||||
}
|
||||
|
||||
func (this *Inspector) InitDBConnections() (err error) {
|
||||
inspectorUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName)
|
||||
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, inspectorUri); err != nil {
|
||||
if this.db, _, err = sqlutils.GetDB(inspectorUri); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
informationSchemaUri := this.connectionConfig.GetDBUri("information_schema")
|
||||
if this.informationSchemaDb, _, err = mysql.GetDB(this.migrationContext.Uuid, informationSchemaUri); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := this.validateConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform && !this.migrationContext.AzureMySQL {
|
||||
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
|
||||
return err
|
||||
} else {
|
||||
this.connectionConfig.ImpliedKey = impliedKey
|
||||
}
|
||||
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
|
||||
return err
|
||||
} else {
|
||||
this.connectionConfig.ImpliedKey = impliedKey
|
||||
}
|
||||
if err := this.validateGrants(); err != nil {
|
||||
return err
|
||||
@ -72,7 +57,6 @@ func (this *Inspector) InitDBConnections() (err error) {
|
||||
if err := this.applyBinlogFormat(); err != nil {
|
||||
return err
|
||||
}
|
||||
this.migrationContext.Log.Infof("Inspector initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.InspectorMySQLVersion)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -92,29 +76,25 @@ func (this *Inspector) ValidateOriginalTable() (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *Inspector) InspectTableColumnsAndUniqueKeys(tableName string) (columns *sql.ColumnList, virtualColumns *sql.ColumnList, uniqueKeys [](*sql.UniqueKey), err error) {
|
||||
func (this *Inspector) InspectTableColumnsAndUniqueKeys(tableName string) (columns *sql.ColumnList, uniqueKeys [](*sql.UniqueKey), err error) {
|
||||
uniqueKeys, err = this.getCandidateUniqueKeys(tableName)
|
||||
if err != nil {
|
||||
return columns, virtualColumns, uniqueKeys, err
|
||||
return columns, uniqueKeys, err
|
||||
}
|
||||
if len(uniqueKeys) == 0 {
|
||||
return columns, virtualColumns, uniqueKeys, fmt.Errorf("No PRIMARY nor UNIQUE key found in table! Bailing out")
|
||||
return columns, uniqueKeys, fmt.Errorf("No PRIMARY nor UNIQUE key found in table! Bailing out")
|
||||
}
|
||||
columns, virtualColumns, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, tableName)
|
||||
columns, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, tableName)
|
||||
if err != nil {
|
||||
return columns, virtualColumns, uniqueKeys, err
|
||||
return columns, uniqueKeys, err
|
||||
}
|
||||
|
||||
return columns, virtualColumns, uniqueKeys, nil
|
||||
return columns, uniqueKeys, nil
|
||||
}
|
||||
|
||||
func (this *Inspector) InspectOriginalTable() (err error) {
|
||||
this.migrationContext.OriginalTableColumns, this.migrationContext.OriginalTableVirtualColumns, this.migrationContext.OriginalTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.OriginalTableName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
this.migrationContext.OriginalTableAutoIncrement, err = this.getAutoIncrementValue(this.migrationContext.OriginalTableName)
|
||||
if err != nil {
|
||||
this.migrationContext.OriginalTableColumns, this.migrationContext.OriginalTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.OriginalTableName)
|
||||
if err == nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@ -129,55 +109,41 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
|
||||
return fmt.Errorf("It seems like table structure is not identical between master and replica. This scenario is not supported.")
|
||||
}
|
||||
|
||||
this.migrationContext.GhostTableColumns, this.migrationContext.GhostTableVirtualColumns, this.migrationContext.GhostTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.GetGhostTableName())
|
||||
this.migrationContext.GhostTableColumns, this.migrationContext.GhostTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.GetGhostTableName())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sharedUniqueKeys := this.getSharedUniqueKeys(this.migrationContext.OriginalTableUniqueKeys, this.migrationContext.GhostTableUniqueKeys)
|
||||
for i, sharedUniqueKey := range sharedUniqueKeys {
|
||||
this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &sharedUniqueKey.Columns)
|
||||
uniqueKeyIsValid := true
|
||||
for _, column := range sharedUniqueKey.Columns.Columns() {
|
||||
switch column.Type {
|
||||
case sql.FloatColumnType:
|
||||
{
|
||||
this.migrationContext.Log.Warning("Will not use %+v as shared key due to FLOAT data type", sharedUniqueKey.Name)
|
||||
uniqueKeyIsValid = false
|
||||
}
|
||||
case sql.JSONColumnType:
|
||||
{
|
||||
// Noteworthy that at this time MySQL does not allow JSON indexing anyhow, but this code
|
||||
// will remain in place to potentially handle the future case where JSON is supported in indexes.
|
||||
this.migrationContext.Log.Warning("Will not use %+v as shared key due to JSON data type", sharedUniqueKey.Name)
|
||||
uniqueKeyIsValid = false
|
||||
}
|
||||
}
|
||||
}
|
||||
if uniqueKeyIsValid {
|
||||
this.migrationContext.UniqueKey = sharedUniqueKeys[i]
|
||||
break
|
||||
}
|
||||
sharedUniqueKeys, err := this.getSharedUniqueKeys(this.migrationContext.OriginalTableUniqueKeys, this.migrationContext.GhostTableUniqueKeys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if this.migrationContext.UniqueKey == nil {
|
||||
if len(sharedUniqueKeys) == 0 {
|
||||
return fmt.Errorf("No shared unique key can be found after ALTER! Bailing out")
|
||||
}
|
||||
this.migrationContext.Log.Infof("Chosen shared unique key is %s", this.migrationContext.UniqueKey.Name)
|
||||
this.migrationContext.UniqueKey = sharedUniqueKeys[0]
|
||||
log.Infof("Chosen shared unique key is %s", this.migrationContext.UniqueKey.Name)
|
||||
if this.migrationContext.UniqueKey.HasNullable {
|
||||
if this.migrationContext.NullableUniqueKeyAllowed {
|
||||
this.migrationContext.Log.Warningf("Chosen key (%s) has nullable columns. You have supplied with --allow-nullable-unique-key and so this migration proceeds. As long as there aren't NULL values in this key's column, migration should be fine. NULL values will corrupt migration's data", this.migrationContext.UniqueKey)
|
||||
log.Warningf("Chosen key (%s) has nullable columns. You have supplied with --allow-nullable-unique-key and so this migration proceeds. As long as there aren't NULL values in this key's column, migration should be fine. NULL values will corrupt migration's data", this.migrationContext.UniqueKey)
|
||||
} else {
|
||||
return fmt.Errorf("Chosen key (%s) has nullable columns. Bailing out. To force this operation to continue, supply --allow-nullable-unique-key flag. Only do so if you are certain there are no actual NULL values in this key. As long as there aren't, migration should be fine. NULL values in columns of this key will corrupt migration's data", this.migrationContext.UniqueKey)
|
||||
}
|
||||
}
|
||||
if !this.migrationContext.UniqueKey.IsPrimary() {
|
||||
if this.migrationContext.OriginalBinlogRowImage != "FULL" {
|
||||
return fmt.Errorf("binlog_row_image is '%s' and chosen key is %s, which is not the primary key. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.migrationContext.OriginalBinlogRowImage, this.migrationContext.UniqueKey)
|
||||
}
|
||||
}
|
||||
|
||||
this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns = this.getSharedColumns(this.migrationContext.OriginalTableColumns, this.migrationContext.GhostTableColumns, this.migrationContext.OriginalTableVirtualColumns, this.migrationContext.GhostTableVirtualColumns, this.migrationContext.ColumnRenameMap)
|
||||
this.migrationContext.Log.Infof("Shared columns are %s", this.migrationContext.SharedColumns)
|
||||
this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns = this.getSharedColumns(this.migrationContext.OriginalTableColumns, this.migrationContext.GhostTableColumns, this.migrationContext.ColumnRenameMap)
|
||||
log.Infof("Shared columns are %s", this.migrationContext.SharedColumns)
|
||||
// By fact that a non-empty unique key exists we also know the shared columns are non-empty
|
||||
|
||||
// This additional step looks at which columns are unsigned. We could have merged this within
|
||||
// the `getTableColumns()` function, but it's a later patch and introduces some complexity; I feel
|
||||
// comfortable in doing this as a separate step.
|
||||
this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns, &this.migrationContext.UniqueKey.Columns)
|
||||
this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns)
|
||||
this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &this.migrationContext.UniqueKey.Columns)
|
||||
this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.GetGhostTableName(), this.migrationContext.GhostTableColumns, this.migrationContext.MappedSharedColumns)
|
||||
|
||||
for i := range this.migrationContext.SharedColumns.Columns() {
|
||||
@ -186,20 +152,13 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
|
||||
if column.Name == mappedColumn.Name && column.Type == sql.DateTimeColumnType && mappedColumn.Type == sql.TimestampColumnType {
|
||||
this.migrationContext.MappedSharedColumns.SetConvertDatetimeToTimestamp(column.Name, this.migrationContext.ApplierTimeZone)
|
||||
}
|
||||
if column.Name == mappedColumn.Name && column.Type == sql.EnumColumnType && mappedColumn.Charset != "" {
|
||||
this.migrationContext.MappedSharedColumns.SetEnumToTextConversion(column.Name)
|
||||
this.migrationContext.MappedSharedColumns.SetEnumValues(column.Name, column.EnumValues)
|
||||
}
|
||||
if column.Name == mappedColumn.Name && column.Charset != mappedColumn.Charset {
|
||||
this.migrationContext.SharedColumns.SetCharsetConversion(column.Name, column.Charset, mappedColumn.Charset)
|
||||
if column.Name == mappedColumn.Name && column.Charset != "" && column.Charset == mappedColumn.Charset {
|
||||
this.migrationContext.SharedColumns.SetCharsetUnchanged(column.Name)
|
||||
this.migrationContext.MappedSharedColumns.SetCharsetUnchanged(column.Name)
|
||||
}
|
||||
}
|
||||
|
||||
for _, column := range this.migrationContext.UniqueKey.Columns.Columns() {
|
||||
if this.migrationContext.GhostTableVirtualColumns.GetColumn(column.Name) != nil {
|
||||
// this is a virtual column
|
||||
continue
|
||||
}
|
||||
if this.migrationContext.MappedSharedColumns.HasTimezoneConversion(column.Name) {
|
||||
return fmt.Errorf("No support at this time for converting a column from DATETIME to TIMESTAMP that is also part of the chosen unique key. Column: %s, key: %s", column.Name, this.migrationContext.UniqueKey.Name)
|
||||
}
|
||||
@ -210,17 +169,20 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
|
||||
|
||||
// validateConnection issues a simple can-connect to MySQL
|
||||
func (this *Inspector) validateConnection() error {
|
||||
if len(this.connectionConfig.Password) > mysql.MaxReplicationPasswordLength {
|
||||
return fmt.Errorf("MySQL replication length limited to 32 characters. See https://dev.mysql.com/doc/refman/5.7/en/assigning-passwords.html")
|
||||
query := `select @@global.port`
|
||||
var port int
|
||||
if err := this.db.QueryRow(query).Scan(&port); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name)
|
||||
this.migrationContext.InspectorMySQLVersion = version
|
||||
return err
|
||||
if port != this.connectionConfig.Key.Port {
|
||||
return fmt.Errorf("Unexpected database port reported: %+v", port)
|
||||
}
|
||||
log.Infof("connection validated on %+v", this.connectionConfig.Key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateGrants verifies the user by which we're executing has necessary grants
|
||||
// to do its thing.
|
||||
// to do its thang.
|
||||
func (this *Inspector) validateGrants() error {
|
||||
query := `show /* gh-ost */ grants for current_user()`
|
||||
foundAll := false
|
||||
@ -247,9 +209,6 @@ func (this *Inspector) validateGrants() error {
|
||||
if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", this.migrationContext.DatabaseName)) {
|
||||
foundDBAll = true
|
||||
}
|
||||
if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", strings.Replace(this.migrationContext.DatabaseName, "_", "\\_", -1))) {
|
||||
foundDBAll = true
|
||||
}
|
||||
if base.StringContainsAll(grant, `ALTER`, `CREATE`, `DELETE`, `DROP`, `INDEX`, `INSERT`, `LOCK TABLES`, `SELECT`, `TRIGGER`, `UPDATE`, ` ON *.*`) {
|
||||
foundDBAll = true
|
||||
}
|
||||
@ -265,27 +224,27 @@ func (this *Inspector) validateGrants() error {
|
||||
this.migrationContext.HasSuperPrivilege = foundSuper
|
||||
|
||||
if foundAll {
|
||||
this.migrationContext.Log.Infof("User has ALL privileges")
|
||||
log.Infof("User has ALL privileges")
|
||||
return nil
|
||||
}
|
||||
if foundSuper && foundReplicationSlave && foundDBAll {
|
||||
this.migrationContext.Log.Infof("User has SUPER, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
|
||||
log.Infof("User has SUPER, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
|
||||
return nil
|
||||
}
|
||||
if foundReplicationClient && foundReplicationSlave && foundDBAll {
|
||||
this.migrationContext.Log.Infof("User has REPLICATION CLIENT, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
|
||||
log.Infof("User has REPLICATION CLIENT, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
|
||||
return nil
|
||||
}
|
||||
this.migrationContext.Log.Debugf("Privileges: Super: %t, REPLICATION CLIENT: %t, REPLICATION SLAVE: %t, ALL on *.*: %t, ALL on %s.*: %t", foundSuper, foundReplicationClient, foundReplicationSlave, foundAll, sql.EscapeName(this.migrationContext.DatabaseName), foundDBAll)
|
||||
return this.migrationContext.Log.Errorf("User has insufficient privileges for migration. Needed: SUPER|REPLICATION CLIENT, REPLICATION SLAVE and ALL on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
|
||||
log.Debugf("Privileges: Super: %t, REPLICATION CLIENT: %t, REPLICATION SLAVE: %t, ALL on *.*: %t, ALL on %s.*: %t", foundSuper, foundReplicationClient, foundReplicationSlave, foundAll, sql.EscapeName(this.migrationContext.DatabaseName), foundDBAll)
|
||||
return log.Errorf("User has insufficient privileges for migration. Needed: SUPER|REPLICATION CLIENT, REPLICATION SLAVE and ALL on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
|
||||
}
|
||||
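The grant check above relies on base.StringContainsAll to detect a grant that spells out every required privilege on *.*. That helper's implementation is not part of this diff; the following is a plausible sketch of what such a helper does (an assumption, not the actual gh-ost code):

package main

import (
	"fmt"
	"strings"
)

// stringContainsAll reports whether s contains every one of the given
// substrings (and at least one substring was supplied).
func stringContainsAll(s string, substrings ...string) bool {
	if len(substrings) == 0 {
		return false
	}
	for _, sub := range substrings {
		if !strings.Contains(s, sub) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(stringContainsAll("GRANT SELECT, INSERT ON *.* TO 'gh-ost'", "SELECT", "INSERT", " ON *.*")) // true
}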
|
||||
// restartReplication is required so that we are _certain_ the binlog format and
|
||||
// row image settings have actually been applied to the replication thread.
|
||||
// It is entirely possible, for example, that the replication is using 'STATEMENT'
|
||||
// It is entriely possible, for example, that the replication is using 'STATEMENT'
|
||||
// binlog format even as the variable says 'ROW'
|
||||
func (this *Inspector) restartReplication() error {
|
||||
this.migrationContext.Log.Infof("Restarting replication on %s to make sure binlog settings apply to replication thread", this.connectionConfig.Key.String())
|
||||
log.Infof("Restarting replication on %s:%d to make sure binlog settings apply to replication thread", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
|
||||
|
||||
masterKey, _ := mysql.GetMasterKeyFromSlaveStatus(this.connectionConfig)
|
||||
if masterKey == nil {
|
||||
@ -302,9 +261,7 @@ func (this *Inspector) restartReplication() error {
|
||||
if startError != nil {
|
||||
return startError
|
||||
}
|
||||
time.Sleep(startSlavePostWaitMilliseconds)
|
||||
|
||||
this.migrationContext.Log.Debugf("Replication restarted")
|
||||
log.Debugf("Replication restarted")
|
||||
return nil
|
||||
}
|
||||
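The comment above is the important part: setting binlog_format on a replica does not affect an already-running replication SQL thread, so replication has to be bounced for the new setting to take effect. Roughly, assuming a database/sql handle (a sketch only, with minimal error handling; the gh-ost implementation also consults the slave status first):

package replication // illustrative sketch, not part of gh-ost

import (
	gosql "database/sql"
	"time"
)

// RestartReplica stops and restarts replication so that a freshly-applied
// binlog_format actually reaches the replication thread.
func RestartReplica(db *gosql.DB) error {
	if _, err := db.Exec("stop slave"); err != nil {
		return err
	}
	if _, err := db.Exec("start slave"); err != nil {
		return err
	}
	time.Sleep(500 * time.Millisecond) // let the replication threads come back up, as gh-ost does
	return nil
}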
|
||||
@ -324,7 +281,7 @@ func (this *Inspector) applyBinlogFormat() error {
|
||||
if err := this.restartReplication(); err != nil {
|
||||
return err
|
||||
}
|
||||
this.migrationContext.Log.Debugf("'ROW' binlog format applied")
|
||||
log.Debugf("'ROW' binlog format applied")
|
||||
return nil
|
||||
}
|
||||
// We already have RBR, no explicit switch
|
||||
@ -344,13 +301,13 @@ func (this *Inspector) validateBinlogs() error {
|
||||
return err
|
||||
}
|
||||
if !hasBinaryLogs {
|
||||
return fmt.Errorf("%s must have binary logs enabled", this.connectionConfig.Key.String())
|
||||
return fmt.Errorf("%s:%d must have binary logs enabled", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
|
||||
}
|
||||
if this.migrationContext.RequiresBinlogFormatChange() {
|
||||
if !this.migrationContext.SwitchToRowBinlogFormat {
|
||||
return fmt.Errorf("You must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s doesn't have replicas", this.connectionConfig.Key.String())
|
||||
return fmt.Errorf("You must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s:%d doesn't have replicas", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
|
||||
}
|
||||
query := `show /* gh-ost */ slave hosts`
|
||||
query := fmt.Sprintf(`show /* gh-ost */ slave hosts`)
|
||||
countReplicas := 0
|
||||
err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error {
|
||||
countReplicas++
|
||||
@ -360,20 +317,18 @@ func (this *Inspector) validateBinlogs() error {
|
||||
return err
|
||||
}
|
||||
if countReplicas > 0 {
|
||||
return fmt.Errorf("%s has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat)
|
||||
return fmt.Errorf("%s:%d has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat)
|
||||
}
|
||||
this.migrationContext.Log.Infof("%s has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat)
|
||||
log.Infof("%s:%d has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat)
|
||||
}
|
||||
query = `select @@global.binlog_row_image`
|
||||
if err := this.db.QueryRow(query).Scan(&this.migrationContext.OriginalBinlogRowImage); err != nil {
|
||||
return err
|
||||
// Only as of 5.6. We wish to support 5.5 as well
|
||||
this.migrationContext.OriginalBinlogRowImage = "FULL"
|
||||
}
|
||||
this.migrationContext.OriginalBinlogRowImage = strings.ToUpper(this.migrationContext.OriginalBinlogRowImage)
|
||||
if this.migrationContext.OriginalBinlogRowImage != "FULL" {
|
||||
return fmt.Errorf("%s has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogRowImage)
|
||||
}
|
||||
|
||||
this.migrationContext.Log.Infof("binary logs validated on %s", this.connectionConfig.Key.String())
|
||||
log.Infof("binary logs validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -386,25 +341,25 @@ func (this *Inspector) validateLogSlaveUpdates() error {
|
||||
}
|
||||
|
||||
if logSlaveUpdates {
|
||||
this.migrationContext.Log.Infof("log_slave_updates validated on %s", this.connectionConfig.Key.String())
|
||||
log.Infof("log_slave_updates validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
|
||||
return nil
|
||||
}
|
||||
|
||||
if this.migrationContext.IsTungsten {
|
||||
this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.String())
|
||||
log.Warning("log_slave_updates not found on %s:%d, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
|
||||
return nil
|
||||
}
|
||||
|
||||
if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica {
|
||||
return fmt.Errorf("%s must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.String())
|
||||
return fmt.Errorf("%s:%d must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
|
||||
}
|
||||
|
||||
if this.migrationContext.InspectorIsAlsoApplier() {
|
||||
this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.String())
|
||||
log.Warning("log_slave_updates not found on %s:%d, but executing directly on master, so I'm proceeeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("%s must have log_slave_updates enabled for executing migration", this.connectionConfig.Key.String())
|
||||
return fmt.Errorf("%s:%d must have log_slave_updates enabled for executing migration", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
|
||||
}
|
||||
|
||||
// validateTable makes sure the table we need to operate on actually exists
|
||||
@ -427,19 +382,15 @@ func (this *Inspector) validateTable() error {
|
||||
return err
|
||||
}
|
||||
if !tableFound {
|
||||
return this.migrationContext.Log.Errorf("Cannot find table %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
|
||||
return log.Errorf("Cannot find table %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
|
||||
}
|
||||
this.migrationContext.Log.Infof("Table found. Engine=%s", this.migrationContext.TableEngine)
|
||||
this.migrationContext.Log.Debugf("Estimated number of rows via STATUS: %d", this.migrationContext.RowsEstimate)
|
||||
log.Infof("Table found. Engine=%s", this.migrationContext.TableEngine)
|
||||
log.Debugf("Estimated number of rows via STATUS: %d", this.migrationContext.RowsEstimate)
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateTableForeignKeys makes sure no foreign keys exist on the migrated table
|
||||
func (this *Inspector) validateTableForeignKeys(allowChildForeignKeys bool) error {
|
||||
if this.migrationContext.SkipForeignKeyChecks {
|
||||
this.migrationContext.Log.Warning("--skip-foreign-key-checks provided: will not check for foreign keys")
|
||||
return nil
|
||||
}
|
||||
query := `
|
||||
SELECT
|
||||
SUM(REFERENCED_TABLE_NAME IS NOT NULL AND TABLE_SCHEMA=? AND TABLE_NAME=?) as num_child_side_fk,
|
||||
@ -471,16 +422,16 @@ func (this *Inspector) validateTableForeignKeys(allowChildForeignKeys bool) erro
|
||||
return err
|
||||
}
|
||||
if numParentForeignKeys > 0 {
|
||||
return this.migrationContext.Log.Errorf("Found %d parent-side foreign keys on %s.%s. Parent-side foreign keys are not supported. Bailing out", numParentForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
|
||||
return log.Errorf("Found %d parent-side foreign keys on %s.%s. Parent-side foreign keys are not supported. Bailing out", numParentForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
|
||||
}
|
||||
if numChildForeignKeys > 0 {
|
||||
if allowChildForeignKeys {
|
||||
this.migrationContext.Log.Debugf("Foreign keys found and will be dropped, as per given --discard-foreign-keys flag")
|
||||
log.Debugf("Foreign keys found and will be dropped, as per given --discard-foreign-keys flag")
|
||||
return nil
|
||||
}
|
||||
return this.migrationContext.Log.Errorf("Found %d child-side foreign keys on %s.%s. Child-side foreign keys are not supported. Bailing out", numChildForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
|
||||
return log.Errorf("Found %d child-side foreign keys on %s.%s. Child-side foreign keys are not supported. Bailing out", numChildForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
|
||||
}
|
||||
this.migrationContext.Log.Debugf("Validated no foreign keys exist on table")
|
||||
log.Debugf("Validated no foreign keys exist on table")
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -506,9 +457,9 @@ func (this *Inspector) validateTableTriggers() error {
|
||||
return err
|
||||
}
|
||||
if numTriggers > 0 {
|
||||
return this.migrationContext.Log.Errorf("Found triggers on %s.%s. Triggers are not supported at this time. Bailing out", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
|
||||
return log.Errorf("Found triggers on %s.%s. Triggers are not supported at this time. Bailing out", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
|
||||
}
|
||||
this.migrationContext.Log.Debugf("Validated no triggers exist on table")
|
||||
log.Debugf("Validated no triggers exist on table")
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -528,48 +479,28 @@ func (this *Inspector) estimateTableRowsViaExplain() error {
|
||||
return err
|
||||
}
|
||||
if !outputFound {
|
||||
return this.migrationContext.Log.Errorf("Cannot run EXPLAIN on %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
|
||||
return log.Errorf("Cannot run EXPLAIN on %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
|
||||
}
|
||||
this.migrationContext.Log.Infof("Estimated number of rows via EXPLAIN: %d", this.migrationContext.RowsEstimate)
|
||||
log.Infof("Estimated number of rows via EXPLAIN: %d", this.migrationContext.RowsEstimate)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CountTableRows counts exact number of rows on the original table
|
||||
func (this *Inspector) CountTableRows(ctx context.Context) error {
|
||||
func (this *Inspector) CountTableRows() error {
|
||||
atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 1)
|
||||
defer atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 0)
|
||||
|
||||
this.migrationContext.Log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while")
|
||||
log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while")
|
||||
|
||||
conn, err := this.db.Conn(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
var connectionID string
|
||||
if err := conn.QueryRowContext(ctx, `SELECT /* gh-ost */ CONNECTION_ID()`).Scan(&connectionID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
query := fmt.Sprintf(`select /* gh-ost */ count(*) as count_rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
|
||||
query := fmt.Sprintf(`select /* gh-ost */ count(*) as rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
|
||||
var rowsEstimate int64
|
||||
if err := conn.QueryRowContext(ctx, query).Scan(&rowsEstimate); err != nil {
|
||||
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
|
||||
this.migrationContext.Log.Infof("exact row count cancelled (%s), likely because I'm about to cut over. I'm going to kill that query.", ctx.Err())
|
||||
return mysql.Kill(this.db, connectionID)
|
||||
}
|
||||
if err := this.db.QueryRow(query).Scan(&rowsEstimate); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// row count query finished. nil out the cancel func, so the main migration thread
|
||||
// doesn't bother calling it after row copy is done.
|
||||
this.migrationContext.SetCountTableRowsCancelFunc(nil)
|
||||
|
||||
atomic.StoreInt64(&this.migrationContext.RowsEstimate, rowsEstimate)
|
||||
this.migrationContext.UsedRowsEstimateMethod = base.CountRowsEstimate
|
||||
|
||||
this.migrationContext.Log.Infof("Exact number of rows via COUNT: %d", rowsEstimate)
|
||||
log.Infof("Exact number of rows via COUNT: %d", rowsEstimate)
|
||||
|
||||
return nil
|
||||
}
|
||||
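On the newer side of this hunk, CountTableRows takes a context so the potentially long COUNT(*) can be abandoned at cut-over: the caller registers a cancel function on the migration context, and on cancellation the inspector kills its own counting connection. A hypothetical caller might look like the sketch below (wiring assumed; the real call sites live in the migrator, and the context and base imports are taken as given):

// startRowCount launches the exact row count in the background and registers
// the cancel func so that cut-over can abort it.
func startRowCount(migrationContext *base.MigrationContext, inspector *Inspector) {
	ctx, cancel := context.WithCancel(context.Background())
	migrationContext.SetCountTableRowsCancelFunc(cancel)
	go func() {
		if err := inspector.CountTableRows(ctx); err != nil {
			migrationContext.Log.Errorf("row count failed: %+v", err)
		}
	}()
}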
@ -588,41 +519,29 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
|
||||
err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
|
||||
columnName := m.GetString("COLUMN_NAME")
|
||||
columnType := m.GetString("COLUMN_TYPE")
|
||||
columnOctetLength := m.GetUint("CHARACTER_OCTET_LENGTH")
|
||||
for _, columnsList := range columnsLists {
|
||||
column := columnsList.GetColumn(columnName)
|
||||
if column == nil {
|
||||
continue
|
||||
if strings.Contains(columnType, "unsigned") {
|
||||
for _, columnsList := range columnsLists {
|
||||
columnsList.SetUnsigned(columnName)
|
||||
}
|
||||
|
||||
if strings.Contains(columnType, "unsigned") {
|
||||
column.IsUnsigned = true
|
||||
}
|
||||
if strings.Contains(columnType, "timestamp") {
|
||||
for _, columnsList := range columnsLists {
|
||||
columnsList.GetColumn(columnName).Type = sql.TimestampColumnType
|
||||
}
|
||||
if strings.Contains(columnType, "mediumint") {
|
||||
column.Type = sql.MediumIntColumnType
|
||||
}
|
||||
if strings.Contains(columnType, "datetime") {
|
||||
for _, columnsList := range columnsLists {
|
||||
columnsList.GetColumn(columnName).Type = sql.DateTimeColumnType
|
||||
}
|
||||
if strings.Contains(columnType, "timestamp") {
|
||||
column.Type = sql.TimestampColumnType
|
||||
}
|
||||
if strings.HasPrefix(columnType, "enum") {
|
||||
for _, columnsList := range columnsLists {
|
||||
columnsList.GetColumn(columnName).Type = sql.EnumColumnValue
|
||||
}
|
||||
if strings.Contains(columnType, "datetime") {
|
||||
column.Type = sql.DateTimeColumnType
|
||||
}
|
||||
if strings.Contains(columnType, "json") {
|
||||
column.Type = sql.JSONColumnType
|
||||
}
|
||||
if strings.Contains(columnType, "float") {
|
||||
column.Type = sql.FloatColumnType
|
||||
}
|
||||
if strings.HasPrefix(columnType, "enum") {
|
||||
column.Type = sql.EnumColumnType
|
||||
column.EnumValues = sql.ParseEnumValues(m.GetString("COLUMN_TYPE"))
|
||||
}
|
||||
if strings.HasPrefix(columnType, "binary") {
|
||||
column.Type = sql.BinaryColumnType
|
||||
column.BinaryOctetLength = columnOctetLength
|
||||
}
|
||||
if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
|
||||
column.Charset = charset
|
||||
}
|
||||
if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
|
||||
for _, columnsList := range columnsLists {
|
||||
columnsList.SetCharset(columnName, charset)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@ -630,24 +549,6 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
|
||||
return err
|
||||
}
|
||||
|
||||
// getAutoIncrementValue get's the original table's AUTO_INCREMENT value, if exists (0 value if not exists)
|
||||
func (this *Inspector) getAutoIncrementValue(tableName string) (autoIncrement uint64, err error) {
|
||||
query := `
|
||||
SELECT
|
||||
AUTO_INCREMENT
|
||||
FROM INFORMATION_SCHEMA.TABLES
|
||||
WHERE
|
||||
TABLES.TABLE_SCHEMA = ?
|
||||
AND TABLES.TABLE_NAME = ?
|
||||
AND AUTO_INCREMENT IS NOT NULL
|
||||
`
|
||||
err = sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
|
||||
autoIncrement = m.GetUint64("AUTO_INCREMENT")
|
||||
return nil
|
||||
}, this.migrationContext.DatabaseName, tableName)
|
||||
return autoIncrement, err
|
||||
}
|
||||
|
||||
// getCandidateUniqueKeys investigates a table and returns the list of unique keys
|
||||
// candidate for chunking
|
||||
func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*sql.UniqueKey), err error) {
|
||||
@ -680,6 +581,8 @@ func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*
|
||||
GROUP BY TABLE_SCHEMA, TABLE_NAME, INDEX_NAME
|
||||
) AS UNIQUES
|
||||
ON (
|
||||
COLUMNS.TABLE_SCHEMA = UNIQUES.TABLE_SCHEMA AND
|
||||
COLUMNS.TABLE_NAME = UNIQUES.TABLE_NAME AND
|
||||
COLUMNS.COLUMN_NAME = UNIQUES.FIRST_COLUMN_NAME
|
||||
)
|
||||
WHERE
|
||||
@ -721,13 +624,13 @@ func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*
|
||||
if err != nil {
|
||||
return uniqueKeys, err
|
||||
}
|
||||
this.migrationContext.Log.Debugf("Potential unique keys in %+v: %+v", tableName, uniqueKeys)
|
||||
log.Debugf("Potential unique keys in %+v: %+v", tableName, uniqueKeys)
|
||||
return uniqueKeys, nil
|
||||
}
|
||||
|
||||
// getSharedUniqueKeys returns the intersection of two given unique keys,
|
||||
// testing by list of columns
|
||||
func (this *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys []*sql.UniqueKey) (uniqueKeys []*sql.UniqueKey) {
|
||||
func (this *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys [](*sql.UniqueKey)) (uniqueKeys [](*sql.UniqueKey), err error) {
|
||||
// We actually do NOT rely on key name, just on the set of columns. This is because maybe
|
||||
// the ALTER is on the name itself...
|
||||
for _, originalUniqueKey := range originalUniqueKeys {
|
||||
@ -737,41 +640,18 @@ func (this *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys [
|
||||
}
|
||||
}
|
||||
}
|
||||
return uniqueKeys
|
||||
return uniqueKeys, nil
|
||||
}
|
||||
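As the comment above stresses, two unique keys are treated as "shared" when their column lists match, never by key name, because the ALTER may well be renaming the key itself. A toy approximation of that comparison (an assumption about the check, not the actual implementation; it reuses the ColumnList String() form seen in the tests):

// sameColumns: a shared key is one whose ordered column list matches,
// whatever the keys happen to be named.
func sameColumns(a, b *sql.UniqueKey) bool {
	return a.Columns.String() == b.Columns.String()
}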
|
||||
// getSharedColumns returns the intersection of two lists of columns in same order as the first list
|
||||
func (this *Inspector) getSharedColumns(originalColumns, ghostColumns *sql.ColumnList, originalVirtualColumns, ghostVirtualColumns *sql.ColumnList, columnRenameMap map[string]string) (*sql.ColumnList, *sql.ColumnList) {
|
||||
func (this *Inspector) getSharedColumns(originalColumns, ghostColumns *sql.ColumnList, columnRenameMap map[string]string) (*sql.ColumnList, *sql.ColumnList) {
|
||||
columnsInGhost := make(map[string]bool)
|
||||
for _, ghostColumn := range ghostColumns.Names() {
|
||||
columnsInGhost[ghostColumn] = true
|
||||
}
|
||||
sharedColumnNames := []string{}
|
||||
for _, originalColumn := range originalColumns.Names() {
|
||||
isSharedColumn := false
|
||||
for _, ghostColumn := range ghostColumns.Names() {
|
||||
if strings.EqualFold(originalColumn, ghostColumn) {
|
||||
isSharedColumn = true
|
||||
break
|
||||
}
|
||||
if strings.EqualFold(columnRenameMap[originalColumn], ghostColumn) {
|
||||
isSharedColumn = true
|
||||
break
|
||||
}
|
||||
}
|
||||
for droppedColumn := range this.migrationContext.DroppedColumnsMap {
|
||||
if strings.EqualFold(originalColumn, droppedColumn) {
|
||||
isSharedColumn = false
|
||||
break
|
||||
}
|
||||
}
|
||||
for _, virtualColumn := range originalVirtualColumns.Names() {
|
||||
if strings.EqualFold(originalColumn, virtualColumn) {
|
||||
isSharedColumn = false
|
||||
}
|
||||
}
|
||||
for _, virtualColumn := range ghostVirtualColumns.Names() {
|
||||
if strings.EqualFold(originalColumn, virtualColumn) {
|
||||
isSharedColumn = false
|
||||
}
|
||||
}
|
||||
if isSharedColumn {
|
||||
if columnsInGhost[originalColumn] || columnsInGhost[columnRenameMap[originalColumn]] {
|
||||
sharedColumnNames = append(sharedColumnNames, originalColumn)
|
||||
}
|
||||
}
|
||||
@ -795,35 +675,22 @@ func (this *Inspector) showCreateTable(tableName string) (createTableStatement s
|
||||
}
|
||||
|
||||
// readChangelogState reads changelog hints
|
||||
func (this *Inspector) readChangelogState(hint string) (string, error) {
|
||||
func (this *Inspector) readChangelogState() (map[string]string, error) {
|
||||
query := fmt.Sprintf(`
|
||||
select hint, value from %s.%s where hint = ? and id <= 255
|
||||
select hint, value from %s.%s where id <= 255
|
||||
`,
|
||||
sql.EscapeName(this.migrationContext.DatabaseName),
|
||||
sql.EscapeName(this.migrationContext.GetChangelogTableName()),
|
||||
)
|
||||
result := ""
|
||||
result := make(map[string]string)
|
||||
err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
|
||||
result = m.GetString("value")
|
||||
result[m.GetString("hint")] = m.GetString("value")
|
||||
return nil
|
||||
}, hint)
|
||||
})
|
||||
return result, err
|
||||
}
|
||||
|
||||
func (this *Inspector) getMasterConnectionConfig() (applierConfig *mysql.ConnectionConfig, err error) {
|
||||
this.migrationContext.Log.Infof("Recursively searching for replication master")
|
||||
visitedKeys := mysql.NewInstanceKeyMap()
|
||||
return mysql.GetMasterConnectionConfigSafe(this.connectionConfig, visitedKeys, this.migrationContext.AllowedMasterMaster)
|
||||
}
|
||||
|
||||
func (this *Inspector) getReplicationLag() (replicationLag time.Duration, err error) {
|
||||
replicationLag, err = mysql.GetReplicationLagFromSlaveStatus(
|
||||
this.informationSchemaDb,
|
||||
)
|
||||
return replicationLag, err
|
||||
}
|
||||
|
||||
func (this *Inspector) Teardown() {
|
||||
this.db.Close()
|
||||
this.informationSchemaDb.Close()
|
||||
}
|
||||
|
@ -1,31 +0,0 @@
/*
	Copyright 2022 GitHub Inc.
	See https://github.com/github/gh-ost/blob/master/LICENSE
*/

package logic

import (
	"testing"

	test "github.com/openark/golib/tests"

	"github.com/github/gh-ost/go/sql"
)

func TestInspectGetSharedUniqueKeys(t *testing.T) {
	origUniqKeys := []*sql.UniqueKey{
		{Columns: *sql.NewColumnList([]string{"id", "item_id"})},
		{Columns: *sql.NewColumnList([]string{"id", "org_id"})},
	}
	ghostUniqKeys := []*sql.UniqueKey{
		{Columns: *sql.NewColumnList([]string{"id", "item_id"})},
		{Columns: *sql.NewColumnList([]string{"id", "org_id"})},
		{Columns: *sql.NewColumnList([]string{"item_id", "user_id"})},
	}
	inspector := &Inspector{}
	sharedUniqKeys := inspector.getSharedUniqueKeys(origUniqKeys, ghostUniqKeys)
	test.S(t).ExpectEquals(len(sharedUniqKeys), 2)
	test.S(t).ExpectEquals(sharedUniqKeys[0].Columns.String(), "id,item_id")
	test.S(t).ExpectEquals(sharedUniqKeys[1].Columns.String(), "id,org_id")
}
File diff suppressed because it is too large
@ -1,256 +0,0 @@
|
||||
/*
|
||||
Copyright 2022 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package logic
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/openark/golib/tests"
|
||||
|
||||
"github.com/github/gh-ost/go/base"
|
||||
"github.com/github/gh-ost/go/binlog"
|
||||
"github.com/github/gh-ost/go/sql"
|
||||
)
|
||||
|
||||
func TestMigratorOnChangelogEvent(t *testing.T) {
|
||||
migrationContext := base.NewMigrationContext()
|
||||
migrator := NewMigrator(migrationContext, "1.2.3")
|
||||
|
||||
t.Run("heartbeat", func(t *testing.T) {
|
||||
columnValues := sql.ToColumnValues([]interface{}{
|
||||
123,
|
||||
time.Now().Unix(),
|
||||
"heartbeat",
|
||||
"2022-08-16T00:45:10.52Z",
|
||||
})
|
||||
tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
|
||||
DatabaseName: "test",
|
||||
DML: binlog.InsertDML,
|
||||
NewColumnValues: columnValues,
|
||||
}))
|
||||
})
|
||||
|
||||
t.Run("state-AllEventsUpToLockProcessed", func(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
es := <-migrator.applyEventsQueue
|
||||
tests.S(t).ExpectNotNil(es)
|
||||
tests.S(t).ExpectNotNil(es.writeFunc)
|
||||
}(&wg)
|
||||
|
||||
columnValues := sql.ToColumnValues([]interface{}{
|
||||
123,
|
||||
time.Now().Unix(),
|
||||
"state",
|
||||
AllEventsUpToLockProcessed,
|
||||
})
|
||||
tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
|
||||
DatabaseName: "test",
|
||||
DML: binlog.InsertDML,
|
||||
NewColumnValues: columnValues,
|
||||
}))
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("state-GhostTableMigrated", func(t *testing.T) {
|
||||
go func() {
|
||||
tests.S(t).ExpectTrue(<-migrator.ghostTableMigrated)
|
||||
}()
|
||||
|
||||
columnValues := sql.ToColumnValues([]interface{}{
|
||||
123,
|
||||
time.Now().Unix(),
|
||||
"state",
|
||||
GhostTableMigrated,
|
||||
})
|
||||
tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
|
||||
DatabaseName: "test",
|
||||
DML: binlog.InsertDML,
|
||||
NewColumnValues: columnValues,
|
||||
}))
|
||||
})
|
||||
|
||||
t.Run("state-Migrated", func(t *testing.T) {
|
||||
columnValues := sql.ToColumnValues([]interface{}{
|
||||
123,
|
||||
time.Now().Unix(),
|
||||
"state",
|
||||
Migrated,
|
||||
})
|
||||
tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
|
||||
DatabaseName: "test",
|
||||
DML: binlog.InsertDML,
|
||||
NewColumnValues: columnValues,
|
||||
}))
|
||||
})
|
||||
|
||||
t.Run("state-ReadMigrationRangeValues", func(t *testing.T) {
|
||||
columnValues := sql.ToColumnValues([]interface{}{
|
||||
123,
|
||||
time.Now().Unix(),
|
||||
"state",
|
||||
ReadMigrationRangeValues,
|
||||
})
|
||||
tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
|
||||
DatabaseName: "test",
|
||||
DML: binlog.InsertDML,
|
||||
NewColumnValues: columnValues,
|
||||
}))
|
||||
})
|
||||
}
|
||||
|
||||
func TestMigratorValidateStatement(t *testing.T) {
|
||||
t.Run("add-column", func(t *testing.T) {
|
||||
migrationContext := base.NewMigrationContext()
|
||||
migrator := NewMigrator(migrationContext, "1.2.3")
|
||||
tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test ADD test_new VARCHAR(64) NOT NULL`))
|
||||
|
||||
tests.S(t).ExpectNil(migrator.validateAlterStatement())
|
||||
tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 0)
|
||||
})
|
||||
|
||||
t.Run("drop-column", func(t *testing.T) {
|
||||
migrationContext := base.NewMigrationContext()
|
||||
migrator := NewMigrator(migrationContext, "1.2.3")
|
||||
tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test DROP abc`))
|
||||
|
||||
tests.S(t).ExpectNil(migrator.validateAlterStatement())
|
||||
tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 1)
|
||||
_, exists := migrator.migrationContext.DroppedColumnsMap["abc"]
|
||||
tests.S(t).ExpectTrue(exists)
|
||||
})
|
||||
|
||||
t.Run("rename-column", func(t *testing.T) {
|
||||
migrationContext := base.NewMigrationContext()
|
||||
migrator := NewMigrator(migrationContext, "1.2.3")
|
||||
tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test CHANGE test123 test1234 bigint unsigned`))
|
||||
|
||||
err := migrator.validateAlterStatement()
|
||||
tests.S(t).ExpectNotNil(err)
|
||||
tests.S(t).ExpectTrue(strings.HasPrefix(err.Error(), "gh-ost believes the ALTER statement renames columns"))
|
||||
tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 0)
|
||||
})
|
||||
|
||||
t.Run("rename-column-approved", func(t *testing.T) {
|
||||
migrationContext := base.NewMigrationContext()
|
||||
migrator := NewMigrator(migrationContext, "1.2.3")
|
||||
migrator.migrationContext.ApproveRenamedColumns = true
|
||||
tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test CHANGE test123 test1234 bigint unsigned`))
|
||||
|
||||
tests.S(t).ExpectNil(migrator.validateAlterStatement())
|
||||
tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 0)
|
||||
})
|
||||
|
||||
t.Run("rename-table", func(t *testing.T) {
|
||||
migrationContext := base.NewMigrationContext()
|
||||
migrator := NewMigrator(migrationContext, "1.2.3")
|
||||
tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test RENAME TO test_new`))
|
||||
|
||||
err := migrator.validateAlterStatement()
|
||||
tests.S(t).ExpectNotNil(err)
|
||||
tests.S(t).ExpectTrue(errors.Is(err, ErrMigratorUnsupportedRenameAlter))
|
||||
tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 0)
|
||||
})
|
||||
}
|
||||
|
||||
func TestMigratorCreateFlagFiles(t *testing.T) {
	tmpdir, err := os.MkdirTemp("", t.Name())
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmpdir)

	migrationContext := base.NewMigrationContext()
	migrationContext.PostponeCutOverFlagFile = filepath.Join(tmpdir, "cut-over.flag")
	migrator := NewMigrator(migrationContext, "1.2.3")
	tests.S(t).ExpectNil(migrator.createFlagFiles())
	tests.S(t).ExpectNil(migrator.createFlagFiles()) // twice to test already-exists

	_, err = os.Stat(migrationContext.PostponeCutOverFlagFile)
	tests.S(t).ExpectNil(err)
}

func TestMigratorGetProgressPercent(t *testing.T) {
	migrationContext := base.NewMigrationContext()
	migrator := NewMigrator(migrationContext, "1.2.3")

	{
		tests.S(t).ExpectEquals(migrator.getProgressPercent(0), float64(100.0))
	}
	{
		migrationContext.TotalRowsCopied = 250
		tests.S(t).ExpectEquals(migrator.getProgressPercent(1000), float64(25.0))
	}
}
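The two cases above pin the arithmetic down: with a zero row estimate the migration reports 100%, otherwise progress is rows copied over rows estimated, expressed as a percentage. A minimal sketch of that calculation (the assumed shape of getProgressPercent, not a copy of it):

// progressPercent: 100% when there is nothing to copy, otherwise the ratio
// of rows copied so far to the estimated total.
func progressPercent(rowsCopied, rowsEstimate int64) float64 {
	if rowsEstimate == 0 {
		return 100.0
	}
	return 100.0 * float64(rowsCopied) / float64(rowsEstimate)
}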
|
||||
|
||||
func TestMigratorGetMigrationStateAndETA(t *testing.T) {
|
||||
migrationContext := base.NewMigrationContext()
|
||||
migrator := NewMigrator(migrationContext, "1.2.3")
|
||||
now := time.Now()
|
||||
migrationContext.RowCopyStartTime = now.Add(-time.Minute)
|
||||
migrationContext.RowCopyEndTime = now
|
||||
|
||||
{
|
||||
migrationContext.TotalRowsCopied = 456
|
||||
state, eta, etaDuration := migrator.getMigrationStateAndETA(123456)
|
||||
tests.S(t).ExpectEquals(state, "migrating")
|
||||
tests.S(t).ExpectEquals(eta, "4h29m44s")
|
||||
tests.S(t).ExpectEquals(etaDuration.String(), "4h29m44s")
|
||||
}
|
||||
{
|
||||
migrationContext.TotalRowsCopied = 456
|
||||
state, eta, etaDuration := migrator.getMigrationStateAndETA(456)
|
||||
tests.S(t).ExpectEquals(state, "migrating")
|
||||
tests.S(t).ExpectEquals(eta, "due")
|
||||
tests.S(t).ExpectEquals(etaDuration.String(), "0s")
|
||||
}
|
||||
{
|
||||
migrationContext.TotalRowsCopied = 123456
|
||||
state, eta, etaDuration := migrator.getMigrationStateAndETA(456)
|
||||
tests.S(t).ExpectEquals(state, "migrating")
|
||||
tests.S(t).ExpectEquals(eta, "due")
|
||||
tests.S(t).ExpectEquals(etaDuration.String(), "0s")
|
||||
}
|
||||
{
|
||||
atomic.StoreInt64(&migrationContext.CountingRowsFlag, 1)
|
||||
state, eta, etaDuration := migrator.getMigrationStateAndETA(123456)
|
||||
tests.S(t).ExpectEquals(state, "counting rows")
|
||||
tests.S(t).ExpectEquals(eta, "due")
|
||||
tests.S(t).ExpectEquals(etaDuration.String(), "0s")
|
||||
}
|
||||
{
|
||||
atomic.StoreInt64(&migrationContext.CountingRowsFlag, 0)
|
||||
atomic.StoreInt64(&migrationContext.IsPostponingCutOver, 1)
|
||||
state, eta, etaDuration := migrator.getMigrationStateAndETA(123456)
|
||||
tests.S(t).ExpectEquals(state, "postponing cut-over")
|
||||
tests.S(t).ExpectEquals(eta, "due")
|
||||
tests.S(t).ExpectEquals(etaDuration.String(), "0s")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMigratorShouldPrintStatus(t *testing.T) {
|
||||
migrationContext := base.NewMigrationContext()
|
||||
migrator := NewMigrator(migrationContext, "1.2.3")
|
||||
|
||||
tests.S(t).ExpectTrue(migrator.shouldPrintStatus(NoPrintStatusRule, 10, time.Second)) // test 'rule != HeuristicPrintStatusRule' return
|
||||
tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 10, time.Second)) // test 'etaDuration.Seconds() <= 60'
|
||||
tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 90, time.Second)) // test 'etaDuration.Seconds() <= 60' again
|
||||
tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 90, time.Minute)) // test 'etaDuration.Seconds() <= 180'
|
||||
tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 60, 90*time.Second)) // test 'elapsedSeconds <= 180'
|
||||
tests.S(t).ExpectFalse(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 61, 90*time.Second)) // test 'elapsedSeconds <= 180'
|
||||
tests.S(t).ExpectFalse(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 99, 210*time.Second)) // test 'elapsedSeconds <= 180'
|
||||
tests.S(t).ExpectFalse(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 12345, 86400*time.Second)) // test 'else'
|
||||
tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 30030, 86400*time.Second)) // test 'else' again
|
||||
}
|
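The ETA figures asserted above follow from simple proportional math: with 456 of 123,456 rows copied after one minute of row-copy time, the remaining rows are projected at the same rate, which works out to roughly 4h29m44s. A minimal, hedged sketch of that proportion (illustration only, not gh-ost's actual implementation):

package main

import (
	"fmt"
	"time"
)

// etaFor projects the remaining copy time from rows copied so far and the
// elapsed row-copy time, assuming a constant copy rate.
func etaFor(rowsCopied, rowsExpected int64, elapsed time.Duration) time.Duration {
	if rowsCopied <= 0 || rowsExpected <= rowsCopied {
		return 0
	}
	etaSeconds := elapsed.Seconds() * float64(rowsExpected-rowsCopied) / float64(rowsCopied)
	return time.Duration(etaSeconds) * time.Second
}

func main() {
	// Prints 4h29m44s, matching the test expectation above.
	fmt.Println(etaFor(456, 123456, time.Minute))
}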
@@ -1,5 +1,5 @@
/*
Copyright 2022 GitHub Inc.
Copyright 2016 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/

@@ -16,6 +16,7 @@ import (
"sync/atomic"

"github.com/github/gh-ost/go/base"
"github.com/outbrain/golib/log"
)

type printStatusFunc func(PrintStatusRule, io.Writer)
@@ -29,9 +30,9 @@ type Server struct {
printStatus printStatusFunc
}

func NewServer(migrationContext *base.MigrationContext, hooksExecutor *HooksExecutor, printStatus printStatusFunc) *Server {
func NewServer(hooksExecutor *HooksExecutor, printStatus printStatusFunc) *Server {
return &Server{
migrationContext: migrationContext,
migrationContext: base.GetMigrationContext(),
hooksExecutor: hooksExecutor,
printStatus: printStatus,
}
@@ -48,12 +49,12 @@ func (this *Server) BindSocketFile() (err error) {
if err != nil {
return err
}
this.migrationContext.Log.Infof("Listening on unix socket file: %s", this.migrationContext.ServeSocketFile)
log.Infof("Listening on unix socket file: %s", this.migrationContext.ServeSocketFile)
return nil
}

func (this *Server) RemoveSocketFile() (err error) {
this.migrationContext.Log.Infof("Removing socket file: %s", this.migrationContext.ServeSocketFile)
log.Infof("Removing socket file: %s", this.migrationContext.ServeSocketFile)
return os.Remove(this.migrationContext.ServeSocketFile)
}

@@ -65,7 +66,7 @@ func (this *Server) BindTCPPort() (err error) {
if err != nil {
return err
}
this.migrationContext.Log.Infof("Listening on tcp port: %d", this.migrationContext.ServeTCPPort)
log.Infof("Listening on tcp port: %d", this.migrationContext.ServeTCPPort)
return nil
}

@@ -75,7 +76,7 @@ func (this *Server) Serve() (err error) {
for {
conn, err := this.unixListener.Accept()
if err != nil {
this.migrationContext.Log.Errore(err)
log.Errore(err)
}
go this.handleConnection(conn)
}
@@ -87,7 +88,7 @@ func (this *Server) Serve() (err error) {
for {
conn, err := this.tcpListener.Accept()
if err != nil {
this.migrationContext.Log.Errore(err)
log.Errore(err)
}
go this.handleConnection(conn)
}
@@ -97,13 +98,8 @@ func (this *Server) Serve() (err error) {
}

func (this *Server) handleConnection(conn net.Conn) (err error) {
if conn != nil {
defer conn.Close()
}
defer conn.Close()
command, _, err := bufio.NewReader(conn).ReadLine()
if err != nil {
return err
}
return this.onServerCommand(string(command), bufio.NewWriter(conn))
}

@@ -117,22 +113,21 @@ func (this *Server) onServerCommand(command string, writer *bufio.Writer) (err e
} else {
fmt.Fprintf(writer, "%s\n", err.Error())
}
return this.migrationContext.Log.Errore(err)
return log.Errore(err)
}

// applyServerCommand parses and executes commands by user
func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (printStatusRule PrintStatusRule, err error) {
printStatusRule = NoPrintStatusRule

tokens := strings.SplitN(command, "=", 2)
command = strings.TrimSpace(tokens[0])
arg := ""
if len(tokens) > 1 {
arg = strings.TrimSpace(tokens[1])
if unquoted, err := strconv.Unquote(arg); err == nil {
arg = unquoted
}
}
argIsQuestion := (arg == "?")
throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged"

throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged\n"

if err := this.hooksExecutor.onInteractiveCommand(command); err != nil {
return NoPrintStatusRule, err
@@ -141,64 +136,30 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
switch command {
case "help":
{
fmt.Fprint(writer, `available commands:
fmt.Fprintln(writer, `available commands:
status # Print a detailed status message
sup # Print a short status message
coordinates # Print the currently inspected coordinates
applier # Print the hostname of the applier
inspector # Print the hostname of the inspector
chunk-size=<newsize> # Set a new chunk-size
dml-batch-size=<newsize> # Set a new dml-batch-size
nice-ratio=<ratio> # Set a new nice-ratio, immediate sleep after each row-copy operation, float (examples: 0 is aggressive, 0.7 adds 70% runtime, 1.0 doubles runtime, 2.0 triples runtime, ...)
nice-ratio=<ratio> # Set a new nice-ratio, immediate sleep after each row-copy operation, float (examples: 0 is agrressive, 0.7 adds 70% runtime, 1.0 doubles runtime, 2.0 triples runtime, ...)
critical-load=<load> # Set a new set of max-load thresholds
max-lag-millis=<max-lag> # Set a new replication lag threshold
replication-lag-query=<query> # Set a new query that determines replication lag (no quotes)
max-load=<load> # Set a new set of max-load thresholds
throttle-query=<query> # Set a new throttle-query (no quotes)
throttle-http=<URL> # Set a new throttle URL
throttle-control-replicas=<replicas> # Set a new comma delimited list of throttle control replicas
throttle # Force throttling
no-throttle # End forced throttling (other throttling may still apply)
unpostpone # Bail out a cut-over postpone; proceed to cut-over
panic # panic and quit without cleanup
help # This message
- use '?' (question mark) as argument to get info rather than set. e.g. "max-load=?" will just print out current max-load.
`)
}
case "sup":
return ForcePrintStatusOnlyRule, nil
case "info", "status":
return ForcePrintStatusAndHintRule, nil
case "coordinates":
{
if argIsQuestion || arg == "" {
fmt.Fprintf(writer, "%+v\n", this.migrationContext.GetRecentBinlogCoordinates())
return NoPrintStatusRule, nil
}
return NoPrintStatusRule, fmt.Errorf("coordinates are read-only")
}
case "applier":
if this.migrationContext.ApplierConnectionConfig != nil && this.migrationContext.ApplierConnectionConfig.ImpliedKey != nil {
fmt.Fprintf(writer, "Host: %s, Version: %s\n",
this.migrationContext.ApplierConnectionConfig.ImpliedKey.String(),
this.migrationContext.ApplierMySQLVersion,
)
}
return NoPrintStatusRule, nil
case "inspector":
if this.migrationContext.InspectorConnectionConfig != nil && this.migrationContext.InspectorConnectionConfig.ImpliedKey != nil {
fmt.Fprintf(writer, "Host: %s, Version: %s\n",
this.migrationContext.InspectorConnectionConfig.ImpliedKey.String(),
this.migrationContext.InspectorMySQLVersion,
)
}
return NoPrintStatusRule, nil
case "chunk-size":
{
if argIsQuestion {
fmt.Fprintf(writer, "%+v\n", atomic.LoadInt64(&this.migrationContext.ChunkSize))
return NoPrintStatusRule, nil
}
if chunkSize, err := strconv.Atoi(arg); err != nil {
return NoPrintStatusRule, err
} else {
@@ -206,25 +167,8 @@ help # This message
return ForcePrintStatusAndHintRule, nil
}
}
case "dml-batch-size":
{
if argIsQuestion {
fmt.Fprintf(writer, "%+v\n", atomic.LoadInt64(&this.migrationContext.DMLBatchSize))
return NoPrintStatusRule, nil
}
if dmlBatchSize, err := strconv.Atoi(arg); err != nil {
return NoPrintStatusRule, err
} else {
this.migrationContext.SetDMLBatchSize(int64(dmlBatchSize))
return ForcePrintStatusAndHintRule, nil
}
}
case "max-lag-millis":
{
if argIsQuestion {
fmt.Fprintf(writer, "%+v\n", atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold))
return NoPrintStatusRule, nil
}
if maxLagMillis, err := strconv.Atoi(arg); err != nil {
return NoPrintStatusRule, err
} else {
@@ -234,14 +178,11 @@ help # This message
}
case "replication-lag-query":
{
return NoPrintStatusRule, fmt.Errorf("replication-lag-query is deprecated. gh-ost uses an internal, subsecond resolution query")
this.migrationContext.SetReplicationLagQuery(arg)
return ForcePrintStatusAndHintRule, nil
}
case "nice-ratio":
{
if argIsQuestion {
fmt.Fprintf(writer, "%+v\n", this.migrationContext.GetNiceRatio())
return NoPrintStatusRule, nil
}
if niceRatio, err := strconv.ParseFloat(arg, 64); err != nil {
return NoPrintStatusRule, err
} else {
@@ -251,11 +192,6 @@ help # This message
}
case "max-load":
{
if argIsQuestion {
maxLoad := this.migrationContext.GetMaxLoad()
fmt.Fprintf(writer, "%s\n", maxLoad.String())
return NoPrintStatusRule, nil
}
if err := this.migrationContext.ReadMaxLoad(arg); err != nil {
return NoPrintStatusRule, err
}
@@ -263,11 +199,6 @@ help # This message
}
case "critical-load":
{
if argIsQuestion {
criticalLoad := this.migrationContext.GetCriticalLoad()
fmt.Fprintf(writer, "%s\n", criticalLoad.String())
return NoPrintStatusRule, nil
}
if err := this.migrationContext.ReadCriticalLoad(arg); err != nil {
return NoPrintStatusRule, err
}
@@ -275,30 +206,12 @@ help # This message
}
case "throttle-query":
{
if argIsQuestion {
fmt.Fprintf(writer, "%+v\n", this.migrationContext.GetThrottleQuery())
return NoPrintStatusRule, nil
}
this.migrationContext.SetThrottleQuery(arg)
fmt.Fprintln(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "throttle-http":
{
if argIsQuestion {
fmt.Fprintf(writer, "%+v\n", this.migrationContext.GetThrottleHTTP())
return NoPrintStatusRule, nil
}
this.migrationContext.SetThrottleHTTP(arg)
fmt.Fprintln(writer, throttleHint)
fmt.Fprintf(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "throttle-control-replicas":
{
if argIsQuestion {
fmt.Fprintf(writer, "%s\n", this.migrationContext.GetThrottleControlReplicaKeys().ToCommaDelimitedList())
return NoPrintStatusRule, nil
}
if err := this.migrationContext.ReadThrottleControlReplicaKeys(arg); err != nil {
return NoPrintStatusRule, err
}
@@ -307,22 +220,12 @@ help # This message
}
case "throttle", "pause", "suspend":
{
if arg != "" && arg != this.migrationContext.OriginalTableName {
// User explicitly provided table name. This is a courtesy protection mechanism
err := fmt.Errorf("User commanded 'throttle' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
return NoPrintStatusRule, err
}
atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 1)
fmt.Fprintln(writer, throttleHint)
fmt.Fprintf(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "no-throttle", "unthrottle", "resume", "continue":
{
if arg != "" && arg != this.migrationContext.OriginalTableName {
// User explicitly provided table name. This is a courtesy protection mechanism
err := fmt.Errorf("User commanded 'no-throttle' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
return NoPrintStatusRule, err
}
atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 0)
return ForcePrintStatusAndHintRule, nil
}
@@ -333,8 +236,8 @@ help # This message
return NoPrintStatusRule, err
}
if arg != "" && arg != this.migrationContext.OriginalTableName {
// User explicitly provided table name. This is a courtesy protection mechanism
err := fmt.Errorf("User commanded 'unpostpone' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
// User exlpicitly provided table name. This is a courtesy protection mechanism
err := fmt.Errorf("User commanded 'unpostpone' on %s, but migrated table is %s; ingoring request.", arg, this.migrationContext.OriginalTableName)
return NoPrintStatusRule, err
}
if atomic.LoadInt64(&this.migrationContext.IsPostponingCutOver) > 0 {
@@ -347,16 +250,7 @@ help # This message
}
case "panic":
{
if arg == "" && this.migrationContext.ForceNamedPanicCommand {
err := fmt.Errorf("User commanded 'panic' without specifying table name, but --force-named-panic is set")
return NoPrintStatusRule, err
}
if arg != "" && arg != this.migrationContext.OriginalTableName {
// User explicitly provided table name. This is a courtesy protection mechanism
err := fmt.Errorf("User commanded 'panic' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
return NoPrintStatusRule, err
}
err := fmt.Errorf("User commanded 'panic'. The migration will be aborted without cleanup. Please drop the gh-ost tables before trying again.")
err := fmt.Errorf("User commanded 'panic'. I will now panic, without cleanup. PANIC!")
this.migrationContext.PanicAbort <- err
return NoPrintStatusRule, err
}
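For orientation on the interactive interface the switch statement above implements: each client connection delivers one newline-terminated command, optionally with an "=argument", and the reply is written back on the same connection. A hedged usage sketch (the socket path is a placeholder, not taken from this diff):

package main

import (
	"bufio"
	"fmt"
	"net"
)

func main() {
	// Placeholder path; in practice this is whatever --serve-socket-file points at.
	conn, err := net.Dial("unix", "/tmp/gh-ost.example.sock")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// One command per connection; '?' as the argument queries the current value.
	fmt.Fprintln(conn, "max-load=?")

	// Print whatever the server writes back.
	scanner := bufio.NewScanner(conn)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
}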
@@ -1,5 +1,5 @@
/*
Copyright 2022 GitHub Inc.
Copyright 2016 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/

@@ -16,7 +16,8 @@ import (
"github.com/github/gh-ost/go/binlog"
"github.com/github/gh-ost/go/mysql"

"github.com/openark/golib/sqlutils"
"github.com/outbrain/golib/log"
"github.com/outbrain/golib/sqlutils"
)

type BinlogEventListener struct {
@@ -42,23 +43,22 @@ type EventsStreamer struct {
listenersMutex *sync.Mutex
eventsChannel chan *binlog.BinlogEntry
binlogReader *binlog.GoMySQLReader
name string
}

func NewEventsStreamer(migrationContext *base.MigrationContext) *EventsStreamer {
func NewEventsStreamer() *EventsStreamer {
return &EventsStreamer{
connectionConfig: migrationContext.InspectorConnectionConfig,
migrationContext: migrationContext,
connectionConfig: base.GetMigrationContext().InspectorConnectionConfig,
migrationContext: base.GetMigrationContext(),
listeners: [](*BinlogEventListener){},
listenersMutex: &sync.Mutex{},
eventsChannel: make(chan *binlog.BinlogEntry, EventsChannelBufferSize),
name: "streamer",
}
}

// AddListener registers a new listener for binlog events, on a per-table basis
func (this *EventsStreamer) AddListener(
async bool, databaseName string, tableName string, onDmlEvent func(event *binlog.BinlogDMLEvent) error) (err error) {

this.listenersMutex.Lock()
defer this.listenersMutex.Unlock()

@@ -86,10 +86,10 @@ func (this *EventsStreamer) notifyListeners(binlogEvent *binlog.BinlogDMLEvent)

for _, listener := range this.listeners {
listener := listener
if !strings.EqualFold(listener.databaseName, binlogEvent.DatabaseName) {
if strings.ToLower(listener.databaseName) != strings.ToLower(binlogEvent.DatabaseName) {
continue
}
if !strings.EqualFold(listener.tableName, binlogEvent.TableName) {
if strings.ToLower(listener.tableName) != strings.ToLower(binlogEvent.TableName) {
continue
}
if listener.async {
@@ -104,10 +104,10 @@ func (this *EventsStreamer) notifyListeners(binlogEvent *binlog.BinlogDMLEvent)

func (this *EventsStreamer) InitDBConnections() (err error) {
EventsStreamerUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName)
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, EventsStreamerUri); err != nil {
if this.db, _, err = sqlutils.GetDB(EventsStreamerUri); err != nil {
return err
}
if _, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name); err != nil {
if err := this.validateConnection(); err != nil {
return err
}
if err := this.readCurrentBinlogCoordinates(); err != nil {
@@ -122,7 +122,10 @@ func (this *EventsStreamer) InitDBConnections() (err error) {

// initBinlogReader creates and connects the reader: we hook up to a MySQL server as a replica
func (this *EventsStreamer) initBinlogReader(binlogCoordinates *mysql.BinlogCoordinates) error {
goMySQLReader := binlog.NewGoMySQLReader(this.migrationContext)
goMySQLReader, err := binlog.NewGoMySQLReader(this.migrationContext.InspectorConnectionConfig)
if err != nil {
return err
}
if err := goMySQLReader.ConnectBinlogStreamer(*binlogCoordinates); err != nil {
return err
}
@@ -130,6 +133,20 @@ func (this *EventsStreamer) initBinlogReader(binlogCoordinates *mysql.BinlogCoor
return nil
}

// validateConnection issues a simple can-connect to MySQL
func (this *EventsStreamer) validateConnection() error {
query := `select @@global.port`
var port int
if err := this.db.QueryRow(query).Scan(&port); err != nil {
return err
}
if port != this.connectionConfig.Key.Port {
return fmt.Errorf("Unexpected database port reported: %+v", port)
}
log.Infof("connection validated on %+v", this.connectionConfig.Key)
return nil
}

func (this *EventsStreamer) GetCurrentBinlogCoordinates() *mysql.BinlogCoordinates {
return this.binlogReader.GetCurrentBinlogCoordinates()
}
@@ -157,7 +174,7 @@ func (this *EventsStreamer) readCurrentBinlogCoordinates() error {
if !foundMasterStatus {
return fmt.Errorf("Got no results from SHOW MASTER STATUS. Bailing out")
}
this.migrationContext.Log.Debugf("Streamer binlog coordinates: %+v", *this.initialBinlogCoordinates)
log.Debugf("Streamer binlog coordinates: %+v", *this.initialBinlogCoordinates)
return nil
}

@@ -175,15 +192,8 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error {
var successiveFailures int64
var lastAppliedRowsEventHint mysql.BinlogCoordinates
for {
if canStopStreaming() {
return nil
}
if err := this.binlogReader.StreamEvents(canStopStreaming, this.eventsChannel); err != nil {
if canStopStreaming() {
return nil
}

this.migrationContext.Log.Infof("StreamEvents encountered unexpected error: %+v", err)
log.Infof("StreamEvents encountered unexpected error: %+v", err)
this.migrationContext.MarkPointOfInterest()
time.Sleep(ReconnectStreamerSleepSeconds * time.Second)

@@ -199,7 +209,7 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error {

// Reposition at same binlog file.
lastAppliedRowsEventHint = this.binlogReader.LastAppliedRowsEventHint
this.migrationContext.Log.Infof("Reconnecting... Will resume at %+v", lastAppliedRowsEventHint)
log.Infof("Reconnecting... Will resume at %+v", lastAppliedRowsEventHint)
if err := this.initBinlogReader(this.GetReconnectBinlogCoordinates()); err != nil {
return err
}
@@ -210,10 +220,6 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error {

func (this *EventsStreamer) Close() (err error) {
err = this.binlogReader.Close()
this.migrationContext.Log.Infof("Closed streamer connection. err=%+v", err)
log.Infof("Closed streamer connection. err=%+v", err)
return err
}

func (this *EventsStreamer) Teardown() {
this.db.Close()
}
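A usage note for the streamer API in the hunks above: consumers subscribe per database/table and receive each DML event through a callback, with case-insensitive name matching. A hedged fragment against the AddListener signature shown here (it assumes it lives alongside EventsStreamer in gh-ost's logic package; the table names are placeholders):

// registerSampleListener sketches subscribing to row events for one table.
func registerSampleListener(streamer *EventsStreamer) error {
	return streamer.AddListener(false, "mydb", "mytable",
		func(event *binlog.BinlogDMLEvent) error {
			// Invoked once per intercepted DML event on mydb.mytable.
			return nil
		},
	)
}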
@@ -1,98 +1,44 @@
/*
Copyright 2022 GitHub Inc.
Copyright 2016 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/

package logic

import (
"context"
"fmt"
"net/http"
"strings"
"sync/atomic"
"time"

"github.com/github/gh-ost/go/base"
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
)

var (
httpStatusMessages = map[int]string{
200: "OK",
404: "Not found",
417: "Expectation failed",
429: "Too many requests",
500: "Internal server error",
-1: "Connection error",
}
// See https://github.com/github/freno/blob/master/doc/http.md
httpStatusFrenoMessages = map[int]string{
200: "OK",
404: "freno: unknown metric",
417: "freno: access forbidden",
429: "freno: threshold exceeded",
500: "freno: internal error",
-1: "freno: connection error",
}
)

const frenoMagicHint = "freno"

// Throttler collects metrics related to throttling and makes informed decision
// Throttler collects metrics related to throttling and makes informed decisison
// whether throttling should take place.
type Throttler struct {
appVersion string
migrationContext *base.MigrationContext
applier *Applier
httpClient *http.Client
httpClientTimeout time.Duration
inspector *Inspector
finishedMigrating int64
migrationContext *base.MigrationContext
applier *Applier
inspector *Inspector
}

func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector, appVersion string) *Throttler {
func NewThrottler(applier *Applier, inspector *Inspector) *Throttler {
return &Throttler{
appVersion: appVersion,
migrationContext: migrationContext,
applier: applier,
httpClient: &http.Client{},
httpClientTimeout: time.Duration(migrationContext.ThrottleHTTPTimeoutMillis) * time.Millisecond,
inspector: inspector,
finishedMigrating: 0,
migrationContext: base.GetMigrationContext(),
applier: applier,
inspector: inspector,
}
}

func (this *Throttler) throttleHttpMessage(statusCode int) string {
statusCodesMap := httpStatusMessages
if throttleHttp := this.migrationContext.GetThrottleHTTP(); strings.Contains(throttleHttp, frenoMagicHint) {
statusCodesMap = httpStatusFrenoMessages
}
if message, ok := statusCodesMap[statusCode]; ok {
return fmt.Sprintf("%s (http=%d)", message, statusCode)
}
return fmt.Sprintf("http=%d", statusCode)
}

// shouldThrottle performs checks to see whether we should currently be throttling.
// It merely observes the metrics collected by other components, it does not issue
// its own metric collection.
func (this *Throttler) shouldThrottle() (result bool, reason string, reasonHint base.ThrottleReasonHint) {
if hibernateUntil := atomic.LoadInt64(&this.migrationContext.HibernateUntil); hibernateUntil > 0 {
hibernateUntilTime := time.Unix(0, hibernateUntil)
return true, fmt.Sprintf("critical-load-hibernate until %+v", hibernateUntilTime), base.NoThrottleReasonHint
}
generalCheckResult := this.migrationContext.GetThrottleGeneralCheckResult()
if generalCheckResult.ShouldThrottle {
return generalCheckResult.ShouldThrottle, generalCheckResult.Reason, generalCheckResult.ReasonHint
}
// HTTP throttle
statusCode := atomic.LoadInt64(&this.migrationContext.ThrottleHTTPStatusCode)
if statusCode != 0 && statusCode != http.StatusOK {
return true, this.throttleHttpMessage(int(statusCode)), base.NoThrottleReasonHint
}

// Replication lag throttle
maxLagMillisecondsThrottleThreshold := atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold)
lag := atomic.LoadInt64(&this.migrationContext.CurrentLag)
@@ -116,155 +62,69 @@ func (this *Throttler) shouldThrottle() (result bool, reason string, reasonHint
return false, "", base.NoThrottleReasonHint
}

// parseChangelogHeartbeat parses a string timestamp and deduces replication lag
func parseChangelogHeartbeat(heartbeatValue string) (lag time.Duration, err error) {
// parseChangelogHeartbeat is called when a heartbeat event is intercepted
func (this *Throttler) parseChangelogHeartbeat(heartbeatValue string) (err error) {
heartbeatTime, err := time.Parse(time.RFC3339Nano, heartbeatValue)
if err != nil {
return lag, err
return log.Errore(err)
}
lag = time.Since(heartbeatTime)
return lag, nil
lag := time.Since(heartbeatTime)
atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag))
return nil
}

// parseChangelogHeartbeat parses a string timestamp and deduces replication lag
func (this *Throttler) parseChangelogHeartbeat(heartbeatValue string) (err error) {
if lag, err := parseChangelogHeartbeat(heartbeatValue); err != nil {
return this.migrationContext.Log.Errore(err)
} else {
atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag))
return nil
}
}

// collectReplicationLag reads the latest changelog heartbeat value
func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- bool) {
collectFunc := func() error {
if atomic.LoadInt64(&this.migrationContext.CleanupImminentFlag) > 0 {
return nil
}
if atomic.LoadInt64(&this.migrationContext.HibernateUntil) > 0 {
return nil
}

if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica {
// when running on replica, the heartbeat injection is also done on the replica.
// This means we will always get a good heartbeat value.
// When running on replica, we should instead check the `SHOW SLAVE STATUS` output.
if lag, err := mysql.GetReplicationLagFromSlaveStatus(this.inspector.informationSchemaDb); err != nil {
return this.migrationContext.Log.Errore(err)
} else {
atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag))
// collectHeartbeat reads the latest changelog heartbeat value
func (this *Throttler) collectHeartbeat() {
ticker := time.Tick(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
for range ticker {
go func() error {
if atomic.LoadInt64(&this.migrationContext.CleanupImminentFlag) > 0 {
return nil
}
} else {
if heartbeatValue, err := this.inspector.readChangelogState("heartbeat"); err != nil {
return this.migrationContext.Log.Errore(err)
} else {
changelogState, err := this.inspector.readChangelogState()
if err != nil {
return log.Errore(err)
}
if heartbeatValue, ok := changelogState["heartbeat"]; ok {
this.parseChangelogHeartbeat(heartbeatValue)
}
}
return nil
}

collectFunc()
firstThrottlingCollected <- true

ticker := time.NewTicker(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
go collectFunc()
return nil
}()
}
}

// collectControlReplicasLag polls all the control replicas to get maximum lag value
func (this *Throttler) collectControlReplicasLag() {
if atomic.LoadInt64(&this.migrationContext.HibernateUntil) > 0 {
return
}

replicationLagQuery := fmt.Sprintf(`
select value from %s.%s where hint = 'heartbeat' and id <= 255
`,
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetChangelogTableName()),
)

readReplicaLag := func(connectionConfig *mysql.ConnectionConfig) (lag time.Duration, err error) {
dbUri := connectionConfig.GetDBUri("information_schema")

var heartbeatValue string
db, _, err := mysql.GetDB(this.migrationContext.Uuid, dbUri)
if err != nil {
return lag, err
}

if err := db.QueryRow(replicationLagQuery).Scan(&heartbeatValue); err != nil {
return lag, err
}

lag, err = parseChangelogHeartbeat(heartbeatValue)
return lag, err
}

readControlReplicasLag := func() (result *mysql.ReplicationLagResult) {
instanceKeyMap := this.migrationContext.GetThrottleControlReplicaKeys()
if instanceKeyMap.Len() == 0 {
return result
}
lagResults := make(chan *mysql.ReplicationLagResult, instanceKeyMap.Len())
for replicaKey := range *instanceKeyMap {
connectionConfig := this.migrationContext.InspectorConnectionConfig.Duplicate()
connectionConfig.Key = replicaKey

lagResult := &mysql.ReplicationLagResult{Key: connectionConfig.Key}
go func() {
lagResult.Lag, lagResult.Err = readReplicaLag(connectionConfig)
lagResults <- lagResult
}()
}
for range *instanceKeyMap {
lagResult := <-lagResults
if result == nil {
result = lagResult
} else if lagResult.Err != nil {
result = lagResult
} else if lagResult.Lag.Nanoseconds() > result.Lag.Nanoseconds() {
result = lagResult
}
}
return result
}

checkControlReplicasLag := func() {
readControlReplicasLag := func(replicationLagQuery string) error {
if (this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica) && (atomic.LoadInt64(&this.migrationContext.AllEventsUpToLockProcessedInjectedFlag) > 0) {
// No need to read lag
return
return nil
}
this.migrationContext.SetControlReplicasLagResult(readControlReplicasLag())
lagResult := mysql.GetMaxReplicationLag(
this.migrationContext.InspectorConnectionConfig,
this.migrationContext.GetThrottleControlReplicaKeys(),
replicationLagQuery,
)
this.migrationContext.SetControlReplicasLagResult(lagResult)
return nil
}

aggressiveTicker := time.Tick(100 * time.Millisecond)
relaxedFactor := 10
counter := 0
shouldReadLagAggressively := false
replicationLagQuery := ""

ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
for range aggressiveTicker {
if counter%relaxedFactor == 0 {
// we only check if we wish to be aggressive once per second. The parameters for being aggressive
// do not typically change at all throughout the migration, but nonetheless we check them.
counter = 0
maxLagMillisecondsThrottleThreshold := atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold)
shouldReadLagAggressively = (maxLagMillisecondsThrottleThreshold < 1000)
replicationLagQuery = this.migrationContext.GetReplicationLagQuery()
shouldReadLagAggressively = (replicationLagQuery != "" && maxLagMillisecondsThrottleThreshold < 1000)
}
if counter == 0 || shouldReadLagAggressively {
// We check replication lag every so often, or if we wish to be aggressive
checkControlReplicasLag()
readControlReplicasLag(replicationLagQuery)
}
counter++
}
@@ -284,73 +144,8 @@ func (this *Throttler) criticalLoadIsMet() (met bool, variableName string, value
return false, variableName, value, threshold, nil
}

// collectReplicationLag reads the latest changelog heartbeat value
func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<- bool) {
collectFunc := func() (sleep bool, err error) {
if atomic.LoadInt64(&this.migrationContext.HibernateUntil) > 0 {
return true, nil
}
url := this.migrationContext.GetThrottleHTTP()
if url == "" {
return true, nil
}

ctx, cancel := context.WithTimeout(context.Background(), this.httpClientTimeout)
defer cancel()

req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
if err != nil {
return false, err
}
req.Header.Set("User-Agent", fmt.Sprintf("gh-ost/%s", this.appVersion))

resp, err := this.httpClient.Do(req)
if err != nil {
return false, err
}
defer resp.Body.Close()

atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(resp.StatusCode))
return false, nil
}

_, err := collectFunc()
if err != nil {
// If not told to ignore errors, we'll throttle on HTTP connection issues
if !this.migrationContext.IgnoreHTTPErrors {
atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(-1))
}
}

firstThrottlingCollected <- true

collectInterval := time.Duration(this.migrationContext.ThrottleHTTPIntervalMillis) * time.Millisecond
ticker := time.NewTicker(collectInterval)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}

sleep, err := collectFunc()
if err != nil {
// If not told to ignore errors, we'll throttle on HTTP connection issues
if !this.migrationContext.IgnoreHTTPErrors {
atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(-1))
}
}

if sleep {
time.Sleep(1 * time.Second)
}
}
}

// collectGeneralThrottleMetrics reads the once-per-sec metrics, and stores them onto this.migrationContext
func (this *Throttler) collectGeneralThrottleMetrics() error {
if atomic.LoadInt64(&this.migrationContext.HibernateUntil) > 0 {
return nil
}

setThrottle := func(throttle bool, reason string, reasonHint base.ThrottleReasonHint) error {
this.migrationContext.SetThrottleGeneralCheckResult(base.NewThrottleCheckResult(throttle, reason, reasonHint))
@@ -368,25 +163,11 @@ func (this *Throttler) collectGeneralThrottleMetrics() error {
if err != nil {
return setThrottle(true, fmt.Sprintf("%s %s", variableName, err), base.NoThrottleReasonHint)
}

if criticalLoadMet && this.migrationContext.CriticalLoadHibernateSeconds > 0 {
hibernateDuration := time.Duration(this.migrationContext.CriticalLoadHibernateSeconds) * time.Second
hibernateUntilTime := time.Now().Add(hibernateDuration)
atomic.StoreInt64(&this.migrationContext.HibernateUntil, hibernateUntilTime.UnixNano())
this.migrationContext.Log.Errorf("critical-load met: %s=%d, >=%d. Will hibernate for the duration of %+v, until %+v", variableName, value, threshold, hibernateDuration, hibernateUntilTime)
go func() {
time.Sleep(hibernateDuration)
this.migrationContext.SetThrottleGeneralCheckResult(base.NewThrottleCheckResult(true, "leaving hibernation", base.LeavingHibernationThrottleReasonHint))
atomic.StoreInt64(&this.migrationContext.HibernateUntil, 0)
}()
return nil
}

if criticalLoadMet && this.migrationContext.CriticalLoadIntervalMilliseconds == 0 {
this.migrationContext.PanicAbort <- fmt.Errorf("critical-load met: %s=%d, >=%d", variableName, value, threshold)
}
if criticalLoadMet && this.migrationContext.CriticalLoadIntervalMilliseconds > 0 {
this.migrationContext.Log.Errorf("critical-load met once: %s=%d, >=%d. Will check again in %d millis", variableName, value, threshold, this.migrationContext.CriticalLoadIntervalMilliseconds)
log.Errorf("critical-load met once: %s=%d, >=%d. Will check again in %d millis", variableName, value, threshold, this.migrationContext.CriticalLoadIntervalMilliseconds)
go func() {
timer := time.NewTimer(time.Millisecond * time.Duration(this.migrationContext.CriticalLoadIntervalMilliseconds))
<-timer.C
@@ -438,28 +219,23 @@ func (this *Throttler) collectGeneralThrottleMetrics() error {
// that may affect throttling. There are several components, all running independently,
// that collect such metrics.
func (this *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan<- bool) {
go this.collectReplicationLag(firstThrottlingCollected)
go this.collectHeartbeat()
go this.collectControlReplicasLag()
go this.collectThrottleHTTPStatus(firstThrottlingCollected)

go func() {
throttlerMetricsTick := time.Tick(1 * time.Second)
this.collectGeneralThrottleMetrics()
firstThrottlingCollected <- true

ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}

for range throttlerMetricsTick {
this.collectGeneralThrottleMetrics()
}
}()
}

// initiateThrottlerChecks initiates the throttle ticker and sets the basic behavior of throttling.
func (this *Throttler) initiateThrottlerChecks() {
func (this *Throttler) initiateThrottlerChecks() error {
throttlerTick := time.Tick(100 * time.Millisecond)

throttlerFunction := func() {
alreadyThrottling, currentReason, _ := this.migrationContext.IsThrottled()
shouldThrottle, throttleReason, throttleReasonHint := this.shouldThrottle()
@@ -476,15 +252,11 @@ func (this *Throttler) initiateThrottlerChecks() {
this.migrationContext.SetThrottled(shouldThrottle, throttleReason, throttleReasonHint)
}
throttlerFunction()

ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
for range throttlerTick {
throttlerFunction()
}

return nil
}

// throttle sees if throttling needs take place, and if so, continuously sleeps (blocks)
@@ -502,8 +274,3 @@ func (this *Throttler) throttle(onThrottled func()) {
time.Sleep(250 * time.Millisecond)
}
}

func (this *Throttler) Teardown() {
this.migrationContext.Log.Debugf("Tearing down...")
atomic.StoreInt64(&this.finishedMigrating, 1)
}
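The heartbeat-based lag measurement in the hunks above reduces to parsing an RFC3339Nano timestamp that the migration injects into its changelog and subtracting it from the current time. A standalone, hedged sketch of that step (same time format as shown; the sample value is fabricated):

package main

import (
	"fmt"
	"time"
)

// lagFromHeartbeat mirrors the parse-then-subtract step used by the throttler:
// lag is simply "now minus the heartbeat's wall-clock timestamp".
func lagFromHeartbeat(heartbeatValue string) (time.Duration, error) {
	heartbeatTime, err := time.Parse(time.RFC3339Nano, heartbeatValue)
	if err != nil {
		return 0, err
	}
	return time.Since(heartbeatTime), nil
}

func main() {
	sample := time.Now().Add(-1500 * time.Millisecond).Format(time.RFC3339Nano)
	lag, err := lagFromHeartbeat(sample)
	if err != nil {
		panic(err)
	}
	fmt.Printf("replication lag is roughly %v\n", lag)
}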
@@ -1,21 +1,36 @@
/*
Copyright 2015 Shlomi Noach, courtesy Booking.com
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/

package mysql

import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
)

var detachPattern *regexp.Regexp

func init() {
detachPattern, _ = regexp.Compile(`//([^/:]+):([\d]+)`) // e.g. `//binlog.01234:567890`
}

type BinlogType int

const (
BinaryLog BinlogType = iota
RelayLog
)

// BinlogCoordinates described binary log coordinates in the form of log file & log position.
type BinlogCoordinates struct {
LogFile string
LogPos int64
Type BinlogType
}

// ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306
@@ -42,12 +57,12 @@ func (this BinlogCoordinates) String() string {
return this.DisplayString()
}

// Equals tests equality of this coordinate and another one.
// Equals tests equality of this corrdinate and another one.
func (this *BinlogCoordinates) Equals(other *BinlogCoordinates) bool {
if other == nil {
return false
}
return this.LogFile == other.LogFile && this.LogPos == other.LogPos
return this.LogFile == other.LogFile && this.LogPos == other.LogPos && this.Type == other.Type
}

// IsEmpty returns true if the log file is empty, unnamed
@@ -72,5 +87,76 @@ func (this *BinlogCoordinates) SmallerThanOrEquals(other *BinlogCoordinates) boo
if this.SmallerThan(other) {
return true
}
return this.LogFile == other.LogFile && this.LogPos == other.LogPos
return this.LogFile == other.LogFile && this.LogPos == other.LogPos // No Type comparison
}

// FileSmallerThan returns true if this coordinate's file is strictly smaller than the other's.
func (this *BinlogCoordinates) FileSmallerThan(other *BinlogCoordinates) bool {
return this.LogFile < other.LogFile
}

// FileNumberDistance returns the numeric distance between this coordinate's file number and the other's.
// Effectively it means "how many rotates/FLUSHes would make these coordinates's file reach the other's"
func (this *BinlogCoordinates) FileNumberDistance(other *BinlogCoordinates) int {
thisNumber, _ := this.FileNumber()
otherNumber, _ := other.FileNumber()
return otherNumber - thisNumber
}

// FileNumber returns the numeric value of the file, and the length in characters representing the number in the filename.
// Example: FileNumber() of mysqld.log.000789 is (789, 6)
func (this *BinlogCoordinates) FileNumber() (int, int) {
tokens := strings.Split(this.LogFile, ".")
numPart := tokens[len(tokens)-1]
numLen := len(numPart)
fileNum, err := strconv.Atoi(numPart)
if err != nil {
return 0, 0
}
return fileNum, numLen
}

// PreviousFileCoordinatesBy guesses the filename of the previous binlog/relaylog, by given offset (number of files back)
func (this *BinlogCoordinates) PreviousFileCoordinatesBy(offset int) (BinlogCoordinates, error) {
result := BinlogCoordinates{LogPos: 0, Type: this.Type}

fileNum, numLen := this.FileNumber()
if fileNum == 0 {
return result, errors.New("Log file number is zero, cannot detect previous file")
}
newNumStr := fmt.Sprintf("%d", (fileNum - offset))
newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr

tokens := strings.Split(this.LogFile, ".")
tokens[len(tokens)-1] = newNumStr
result.LogFile = strings.Join(tokens, ".")
return result, nil
}

// PreviousFileCoordinates guesses the filename of the previous binlog/relaylog
func (this *BinlogCoordinates) PreviousFileCoordinates() (BinlogCoordinates, error) {
return this.PreviousFileCoordinatesBy(1)
}

// NextFileCoordinates guesses the filename of the next binlog/relaylog
func (this *BinlogCoordinates) NextFileCoordinates() (BinlogCoordinates, error) {
result := BinlogCoordinates{LogPos: 0, Type: this.Type}

fileNum, numLen := this.FileNumber()
newNumStr := fmt.Sprintf("%d", (fileNum + 1))
newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr

tokens := strings.Split(this.LogFile, ".")
tokens[len(tokens)-1] = newNumStr
result.LogFile = strings.Join(tokens, ".")
return result, nil
}

// DetachedCoordinates returns whether this coordinate is in detached form, and if so, its file and position parts.
func (this *BinlogCoordinates) DetachedCoordinates() (isDetached bool, detachedLogFile string, detachedLogPos string) {
detachedCoordinatesSubmatch := detachPattern.FindStringSubmatch(this.LogFile)
if len(detachedCoordinatesSubmatch) == 0 {
return false, "", ""
}
return true, detachedCoordinatesSubmatch[1], detachedCoordinatesSubmatch[2]
}
@@ -8,8 +8,8 @@ package mysql
import (
"testing"

"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
)

func init() {
@@ -37,6 +37,57 @@ func TestBinlogCoordinates(t *testing.T) {
test.S(t).ExpectTrue(c1.SmallerThanOrEquals(&c3))
}

func TestBinlogNext(t *testing.T) {
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
cres, err := c1.NextFileCoordinates()

test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00018")

c2 := BinlogCoordinates{LogFile: "mysql-bin.00099", LogPos: 104}
cres, err = c2.NextFileCoordinates()

test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00100")

c3 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00099", LogPos: 104}
cres, err = c3.NextFileCoordinates()

test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql.00.prod.com.00100")
}

func TestBinlogPrevious(t *testing.T) {
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
cres, err := c1.PreviousFileCoordinates()

test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00016")

c2 := BinlogCoordinates{LogFile: "mysql-bin.00100", LogPos: 104}
cres, err = c2.PreviousFileCoordinates()

test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00099")

c3 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00100", LogPos: 104}
cres, err = c3.PreviousFileCoordinates()

test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql.00.prod.com.00099")

c4 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00000", LogPos: 104}
_, err = c4.PreviousFileCoordinates()

test.S(t).ExpectNotNil(err)
}

func TestBinlogCoordinatesAsKey(t *testing.T) {
m := make(map[BinlogCoordinates]bool)

@@ -52,3 +103,20 @@ func TestBinlogCoordinatesAsKey(t *testing.T) {

test.S(t).ExpectEquals(len(m), 3)
}

func TestBinlogFileNumber(t *testing.T) {
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
c2 := BinlogCoordinates{LogFile: "mysql-bin.00022", LogPos: 104}

test.S(t).ExpectEquals(c1.FileNumberDistance(&c1), 0)
test.S(t).ExpectEquals(c1.FileNumberDistance(&c2), 5)
test.S(t).ExpectEquals(c2.FileNumberDistance(&c1), -5)
}

func TestBinlogFileNumberDistance(t *testing.T) {
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
fileNum, numLen := c1.FileNumber()

test.S(t).ExpectEquals(fileNum, 17)
test.S(t).ExpectEquals(numLen, 5)
}
@@ -1,35 +1,21 @@
/*
Copyright 2022 GitHub Inc.
Copyright 2016 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/

package mysql

import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"net"
"strings"

"github.com/go-sql-driver/mysql"
)

const (
TLS_CONFIG_KEY = "ghost"
)

// ConnectionConfig is the minimal configuration required to connect to a MySQL server
type ConnectionConfig struct {
Key InstanceKey
User string
Password string
ImpliedKey *InstanceKey
tlsConfig *tls.Config
Timeout float64
TransactionIsolation string
Key InstanceKey
User string
Password string
ImpliedKey *InstanceKey
}

func NewConnectionConfig() *ConnectionConfig {
@@ -43,12 +29,9 @@ func NewConnectionConfig() *ConnectionConfig {
// DuplicateCredentials creates a new connection config with given key and with same credentials as this config
func (this *ConnectionConfig) DuplicateCredentials(key InstanceKey) *ConnectionConfig {
config := &ConnectionConfig{
Key: key,
User: this.User,
Password: this.Password,
tlsConfig: this.tlsConfig,
Timeout: this.Timeout,
TransactionIsolation: this.TransactionIsolation,
Key: key,
User: this.User,
Password: this.Password,
}
config.ImpliedKey = &config.Key
return config
@@ -59,55 +42,13 @@ func (this *ConnectionConfig) Duplicate() *ConnectionConfig {
}

func (this *ConnectionConfig) String() string {
return fmt.Sprintf("%s, user=%s, usingTLS=%t", this.Key.DisplayString(), this.User, this.tlsConfig != nil)
return fmt.Sprintf("%s, user=%s", this.Key.DisplayString(), this.User)
}

func (this *ConnectionConfig) Equals(other *ConnectionConfig) bool {
return this.Key.Equals(&other.Key) || this.ImpliedKey.Equals(other.ImpliedKey)
}

func (this *ConnectionConfig) UseTLS(caCertificatePath, clientCertificate, clientKey string, allowInsecure bool) error {
var rootCertPool *x509.CertPool
var certs []tls.Certificate
var err error

if caCertificatePath == "" {
rootCertPool, err = x509.SystemCertPool()
if err != nil {
return err
}
} else {
rootCertPool = x509.NewCertPool()
pem, err := ioutil.ReadFile(caCertificatePath)
if err != nil {
return err
}
if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
return errors.New("could not add ca certificate to cert pool")
}
}
if clientCertificate != "" || clientKey != "" {
cert, err := tls.LoadX509KeyPair(clientCertificate, clientKey)
if err != nil {
return err
}
certs = []tls.Certificate{cert}
}

this.tlsConfig = &tls.Config{
ServerName: this.Key.Hostname,
Certificates: certs,
RootCAs: rootCertPool,
InsecureSkipVerify: allowInsecure,
}

return mysql.RegisterTLSConfig(TLS_CONFIG_KEY, this.tlsConfig)
}

func (this *ConnectionConfig) TLSConfig() *tls.Config {
return this.tlsConfig
}

func (this *ConnectionConfig) GetDBUri(databaseName string) string {
hostname := this.Key.Hostname
var ip = net.ParseIP(hostname)
@@ -115,23 +56,5 @@ func (this *ConnectionConfig) GetDBUri(databaseName string) string {
// Wrap IPv6 literals in square brackets
hostname = fmt.Sprintf("[%s]", hostname)
}

// go-mysql-driver defaults to false if tls param is not provided; explicitly setting here to
// simplify construction of the DSN below.
tlsOption := "false"
if this.tlsConfig != nil {
tlsOption = TLS_CONFIG_KEY
}
connectionParams := []string{
"autocommit=true",
"charset=utf8mb4,utf8,latin1",
"interpolateParams=true",
fmt.Sprintf("tls=%s", tlsOption),
fmt.Sprintf("transaction_isolation=%q", this.TransactionIsolation),
fmt.Sprintf("timeout=%fs", this.Timeout),
fmt.Sprintf("readTimeout=%fs", this.Timeout),
fmt.Sprintf("writeTimeout=%fs", this.Timeout),
}

return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?%s", this.User, this.Password, hostname, this.Key.Port, databaseName, strings.Join(connectionParams, "&"))
return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4,utf8,latin1", this.User, this.Password, hostname, this.Key.Port, databaseName)
}
@@ -1,20 +1,15 @@
/*
Copyright 2022 GitHub Inc.
Copyright 2016 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/

package mysql

import (
"crypto/tls"
"testing"

"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)

const (
transactionIsolation = "REPEATABLE-READ"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
)

func init() {
@@ -29,7 +24,6 @@ func TestNewConnectionConfig(t *testing.T) {
test.S(t).ExpectEquals(c.ImpliedKey.Port, 0)
test.S(t).ExpectEquals(c.User, "")
test.S(t).ExpectEquals(c.Password, "")
test.S(t).ExpectEquals(c.TransactionIsolation, "")
}

func TestDuplicateCredentials(t *testing.T) {
@@ -37,11 +31,6 @@ func TestDuplicateCredentials(t *testing.T) {
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
c.tlsConfig = &tls.Config{
InsecureSkipVerify: true,
ServerName: "feathers",
}
c.TransactionIsolation = transactionIsolation

dup := c.DuplicateCredentials(InstanceKey{Hostname: "otherhost", Port: 3310})
test.S(t).ExpectEquals(dup.Key.Hostname, "otherhost")
@@ -50,8 +39,6 @@ func TestDuplicateCredentials(t *testing.T) {
test.S(t).ExpectEquals(dup.ImpliedKey.Port, 3310)
test.S(t).ExpectEquals(dup.User, "gromit")
test.S(t).ExpectEquals(dup.Password, "penguin")
test.S(t).ExpectEquals(dup.tlsConfig, c.tlsConfig)
test.S(t).ExpectEquals(dup.TransactionIsolation, c.TransactionIsolation)
}

func TestDuplicate(t *testing.T) {
@@ -59,7 +46,6 @@ func TestDuplicate(t *testing.T) {
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
c.TransactionIsolation = transactionIsolation

dup := c.Duplicate()
test.S(t).ExpectEquals(dup.Key.Hostname, "myhost")
@@ -68,30 +54,4 @@ func TestDuplicate(t *testing.T) {
test.S(t).ExpectEquals(dup.ImpliedKey.Port, 3306)
test.S(t).ExpectEquals(dup.User, "gromit")
test.S(t).ExpectEquals(dup.Password, "penguin")
test.S(t).ExpectEquals(dup.TransactionIsolation, transactionIsolation)
}

func TestGetDBUri(t *testing.T) {
c := NewConnectionConfig()
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
c.Timeout = 1.2345
c.TransactionIsolation = transactionIsolation

uri := c.GetDBUri("test")
test.S(t).ExpectEquals(uri, `gromit:penguin@tcp(myhost:3306)/test?autocommit=true&charset=utf8mb4,utf8,latin1&interpolateParams=true&tls=false&transaction_isolation="REPEATABLE-READ"&timeout=1.234500s&readTimeout=1.234500s&writeTimeout=1.234500s`)
}

func TestGetDBUriWithTLSSetup(t *testing.T) {
c := NewConnectionConfig()
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
c.Timeout = 1.2345
c.tlsConfig = &tls.Config{}
c.TransactionIsolation = transactionIsolation

uri := c.GetDBUri("test")
test.S(t).ExpectEquals(uri, `gromit:penguin@tcp(myhost:3306)/test?autocommit=true&charset=utf8mb4,utf8,latin1&interpolateParams=true&tls=ghost&transaction_isolation="REPEATABLE-READ"&timeout=1.234500s&readTimeout=1.234500s&writeTimeout=1.234500s`)
}
@ -1,6 +1,5 @@
|
||||
/*
|
||||
Copyright 2015 Shlomi Noach, courtesy Booking.com
|
||||
Copyright 2022 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
@ -8,24 +7,15 @@ package mysql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const DefaultInstancePort = 3306
|
||||
|
||||
var (
|
||||
ipv4HostPortRegexp = regexp.MustCompile("^([^:]+):([0-9]+)$")
|
||||
ipv4HostRegexp = regexp.MustCompile("^([^:]+)$")
|
||||
|
||||
// e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308
|
||||
ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") //nolint:gosimple
|
||||
// e.g. 2001:db8:1f70::999:de8:7648:6e8
|
||||
ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$")
|
||||
const (
|
||||
DefaultInstancePort = 3306
|
||||
)
|
||||
|
||||
// InstanceKey is an instance indicator, identified by hostname and port
|
||||
// InstanceKey is an instance indicator, identified by hostname and port
|
||||
type InstanceKey struct {
|
||||
Hostname string
|
||||
Port int
|
||||
@ -35,34 +25,25 @@ const detachHint = "//"
|
||||
|
||||
// ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306
|
||||
func NewRawInstanceKey(hostPort string) (*InstanceKey, error) {
|
||||
var hostname, port string
|
||||
if submatch := ipv4HostPortRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
|
||||
hostname = submatch[1]
|
||||
port = submatch[2]
|
||||
} else if submatch := ipv4HostRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
|
||||
hostname = submatch[1]
|
||||
} else if submatch := ipv6HostPortRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
|
||||
hostname = submatch[1]
|
||||
port = submatch[2]
|
||||
} else if submatch := ipv6HostRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
|
||||
hostname = submatch[1]
|
||||
} else {
|
||||
return nil, fmt.Errorf("Cannot parse address: %s", hostPort)
|
||||
tokens := strings.SplitN(hostPort, ":", 2)
|
||||
if len(tokens) != 2 {
|
||||
return nil, fmt.Errorf("Cannot parse InstanceKey from %s. Expected format is host:port", hostPort)
|
||||
}
|
||||
instanceKey := &InstanceKey{Hostname: hostname, Port: DefaultInstancePort}
|
||||
if port != "" {
|
||||
var err error
|
||||
if instanceKey.Port, err = strconv.Atoi(port); err != nil {
|
||||
return instanceKey, fmt.Errorf("Invalid port: %s", port)
|
||||
}
|
||||
instanceKey := &InstanceKey{Hostname: tokens[0]}
|
||||
var err error
|
||||
if instanceKey.Port, err = strconv.Atoi(tokens[1]); err != nil {
|
||||
return instanceKey, fmt.Errorf("Invalid port: %s", tokens[1])
|
||||
}
|
||||
|
||||
return instanceKey, nil
|
||||
}
|
||||
|
||||
// ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306.
// ParseRawInstanceKeyLoose will parse an InstanceKey from a string representation such as 127.0.0.1:3306.
// The port part is optional; no name resolution is performed
func ParseInstanceKey(hostPort string) (*InstanceKey, error) {
func ParseRawInstanceKeyLoose(hostPort string) (*InstanceKey, error) {
if !strings.Contains(hostPort, ":") {
return &InstanceKey{Hostname: hostPort, Port: DefaultInstancePort}, nil
}
return NewRawInstanceKey(hostPort)
}
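As a rough illustration of the regexp-based parsing introduced here, the following standalone sketch reuses the same four patterns to accept plain hosts, host:port pairs, bare IPv6 literals and bracketed IPv6 literals with a port; it is an approximation for illustration, not the gh-ost implementation:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var (
	ipv4HostPort = regexp.MustCompile(`^([^:]+):([0-9]+)$`)
	ipv4Host     = regexp.MustCompile(`^([^:]+)$`)
	ipv6HostPort = regexp.MustCompile(`^\[([:0-9a-fA-F]+)\]:([0-9]+)$`) // e.g. [2001:db8::1]:3308
	ipv6Host     = regexp.MustCompile(`^([:0-9a-fA-F]+)$`)              // e.g. 2001:db8::1
)

func parse(hostPort string) (host string, port int, err error) {
	port = 3306 // default MySQL port when none is given
	var portStr string
	switch {
	case ipv4HostPort.MatchString(hostPort):
		m := ipv4HostPort.FindStringSubmatch(hostPort)
		host, portStr = m[1], m[2]
	case ipv4Host.MatchString(hostPort):
		host = hostPort
	case ipv6HostPort.MatchString(hostPort):
		m := ipv6HostPort.FindStringSubmatch(hostPort)
		host, portStr = m[1], m[2]
	case ipv6Host.MatchString(hostPort):
		host = hostPort
	default:
		return "", 0, fmt.Errorf("cannot parse address: %s", hostPort)
	}
	if portStr != "" {
		if port, err = strconv.Atoi(portStr); err != nil {
			return "", 0, fmt.Errorf("invalid port: %s", portStr)
		}
	}
	return host, port, nil
}

func main() {
	fmt.Println(parse("myhost:1234"))                            // myhost 1234 <nil>
	fmt.Println(parse("[2001:db8:1f70::999:de8:7648:6e8]:3308")) // 2001:db8:1f70::999:de8:7648:6e8 3308 <nil>
	fmt.Println(parse("::1"))                                    // ::1 3306 <nil>
}
```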
|
||||
|
||||
@ -102,7 +83,7 @@ func (this *InstanceKey) IsValid() bool {
|
||||
return len(this.Hostname) > 0 && this.Port > 0
|
||||
}
|
||||
|
||||
// DetachedKey returns an instance key whose hostname is detached: invalid, but recoverable
|
||||
// DetachedKey returns an instance key whose hostname is detached: invalid, but recoverable
|
||||
func (this *InstanceKey) DetachedKey() *InstanceKey {
|
||||
if this.IsDetached() {
|
||||
return this
|
||||
@ -110,7 +91,7 @@ func (this *InstanceKey) DetachedKey() *InstanceKey {
|
||||
return &InstanceKey{Hostname: fmt.Sprintf("%s%s", detachHint, this.Hostname), Port: this.Port}
|
||||
}
|
||||
|
||||
// ReattachedKey returns an instance key whose hostname is detached: invalid, but recoverable
|
||||
// ReattachedKey returns an instance key whose hostname is detached: invalid, but recoverable
|
||||
func (this *InstanceKey) ReattachedKey() *InstanceKey {
|
||||
if !this.IsDetached() {
|
||||
return this
|
||||
|
@ -92,7 +92,7 @@ func (this *InstanceKeyMap) ReadCommaDelimitedList(list string) error {
|
||||
}
|
||||
tokens := strings.Split(list, ",")
|
||||
for _, token := range tokens {
|
||||
key, err := ParseInstanceKey(token)
|
||||
key, err := ParseRawInstanceKeyLoose(token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1,74 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/openark/golib/log"
|
||||
test "github.com/openark/golib/tests"
|
||||
)
|
||||
|
||||
func init() {
|
||||
log.SetLevel(log.ERROR)
|
||||
}
|
||||
|
||||
func TestParseInstanceKey(t *testing.T) {
|
||||
{
|
||||
key, err := ParseInstanceKey("myhost:1234")
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(key.Hostname, "myhost")
|
||||
test.S(t).ExpectEquals(key.Port, 1234)
|
||||
}
|
||||
{
|
||||
key, err := ParseInstanceKey("myhost")
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(key.Hostname, "myhost")
|
||||
test.S(t).ExpectEquals(key.Port, 3306)
|
||||
}
|
||||
{
|
||||
key, err := ParseInstanceKey("10.0.0.3:3307")
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(key.Hostname, "10.0.0.3")
|
||||
test.S(t).ExpectEquals(key.Port, 3307)
|
||||
}
|
||||
{
|
||||
key, err := ParseInstanceKey("10.0.0.3")
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(key.Hostname, "10.0.0.3")
|
||||
test.S(t).ExpectEquals(key.Port, 3306)
|
||||
}
|
||||
{
|
||||
key, err := ParseInstanceKey("[2001:db8:1f70::999:de8:7648:6e8]:3308")
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(key.Hostname, "2001:db8:1f70::999:de8:7648:6e8")
|
||||
test.S(t).ExpectEquals(key.Port, 3308)
|
||||
}
|
||||
{
|
||||
key, err := ParseInstanceKey("::1")
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(key.Hostname, "::1")
|
||||
test.S(t).ExpectEquals(key.Port, 3306)
|
||||
}
|
||||
{
|
||||
key, err := ParseInstanceKey("0:0:0:0:0:0:0:0")
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(key.Hostname, "0:0:0:0:0:0:0:0")
|
||||
test.S(t).ExpectEquals(key.Port, 3306)
|
||||
}
|
||||
{
|
||||
_, err := ParseInstanceKey("[2001:xxxx:1f70::999:de8:7648:6e8]:3308")
|
||||
test.S(t).ExpectNotNil(err)
|
||||
}
|
||||
{
|
||||
_, err := ParseInstanceKey("10.0.0.4:")
|
||||
test.S(t).ExpectNotNil(err)
|
||||
}
|
||||
{
|
||||
_, err := ParseInstanceKey("10.0.0.4:5.6.7")
|
||||
test.S(t).ExpectNotNil(err)
|
||||
}
|
||||
}
|
@ -8,20 +8,12 @@ package mysql
|
||||
import (
|
||||
gosql "database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/github/gh-ost/go/sql"
|
||||
|
||||
"github.com/openark/golib/log"
|
||||
"github.com/openark/golib/sqlutils"
|
||||
)
|
||||
|
||||
const (
|
||||
MaxTableNameLength = 64
|
||||
MaxReplicationPasswordLength = 32
|
||||
MaxDBPoolConnections = 3
|
||||
"github.com/outbrain/golib/log"
|
||||
"github.com/outbrain/golib/sqlutils"
|
||||
)
|
||||
|
||||
type ReplicationLagResult struct {
|
||||
@ -30,88 +22,73 @@ type ReplicationLagResult struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func NewNoReplicationLagResult() *ReplicationLagResult {
|
||||
return &ReplicationLagResult{Lag: 0, Err: nil}
|
||||
}
|
||||
|
||||
func (this *ReplicationLagResult) HasLag() bool {
|
||||
return this.Lag > 0
|
||||
}
|
||||
|
||||
// knownDBs is a DB cache by uri
|
||||
var knownDBs map[string]*gosql.DB = make(map[string]*gosql.DB)
|
||||
var knownDBsMutex = &sync.Mutex{}
|
||||
|
||||
func GetDB(migrationUuid string, mysql_uri string) (db *gosql.DB, exists bool, err error) {
|
||||
cacheKey := migrationUuid + ":" + mysql_uri
|
||||
|
||||
knownDBsMutex.Lock()
|
||||
defer knownDBsMutex.Unlock()
|
||||
|
||||
if db, exists = knownDBs[cacheKey]; !exists {
|
||||
db, err = gosql.Open("mysql", mysql_uri)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
db.SetMaxOpenConns(MaxDBPoolConnections)
|
||||
db.SetMaxIdleConns(MaxDBPoolConnections)
|
||||
knownDBs[cacheKey] = db
|
||||
// GetReplicationLag returns replication lag for a given connection config; either by explicit query
|
||||
// or via SHOW SLAVE STATUS
|
||||
func GetReplicationLag(connectionConfig *ConnectionConfig, replicationLagQuery string) (replicationLag time.Duration, err error) {
|
||||
dbUri := connectionConfig.GetDBUri("information_schema")
|
||||
var db *gosql.DB
|
||||
if db, _, err = sqlutils.GetDB(dbUri); err != nil {
|
||||
return replicationLag, err
|
||||
}
|
||||
return db, exists, nil
|
||||
}
|
||||
|
||||
// GetReplicationLagFromSlaveStatus returns replication lag for a given db; via SHOW SLAVE STATUS
|
||||
func GetReplicationLagFromSlaveStatus(informationSchemaDb *gosql.DB) (replicationLag time.Duration, err error) {
|
||||
err = sqlutils.QueryRowsMap(informationSchemaDb, `show slave status`, func(m sqlutils.RowMap) error {
|
||||
slaveIORunning := m.GetString("Slave_IO_Running")
|
||||
slaveSQLRunning := m.GetString("Slave_SQL_Running")
|
||||
if replicationLagQuery != "" {
|
||||
var floatLag float64
|
||||
err = db.QueryRow(replicationLagQuery).Scan(&floatLag)
|
||||
return time.Duration(int64(floatLag*1000)) * time.Millisecond, err
|
||||
}
|
||||
// No explicit replication lag query.
|
||||
err = sqlutils.QueryRowsMap(db, `show slave status`, func(m sqlutils.RowMap) error {
|
||||
secondsBehindMaster := m.GetNullInt64("Seconds_Behind_Master")
|
||||
if !secondsBehindMaster.Valid {
|
||||
return fmt.Errorf("replication not running; Slave_IO_Running=%+v, Slave_SQL_Running=%+v", slaveIORunning, slaveSQLRunning)
|
||||
return fmt.Errorf("replication not running")
|
||||
}
|
||||
replicationLag = time.Duration(secondsBehindMaster.Int64) * time.Second
|
||||
return nil
|
||||
})
|
||||
|
||||
return replicationLag, err
|
||||
}
|
||||
|
||||
// GetMaxReplicationLag concurrently checks for replication lag on given list of instance keys,
|
||||
// each via GetReplicationLag
|
||||
func GetMaxReplicationLag(baseConnectionConfig *ConnectionConfig, instanceKeyMap *InstanceKeyMap, replicationLagQuery string) (result *ReplicationLagResult) {
|
||||
result = &ReplicationLagResult{Lag: 0}
|
||||
if instanceKeyMap.Len() == 0 {
|
||||
return result
|
||||
}
|
||||
lagResults := make(chan *ReplicationLagResult, instanceKeyMap.Len())
|
||||
for key := range *instanceKeyMap {
|
||||
connectionConfig := baseConnectionConfig.Duplicate()
|
||||
connectionConfig.Key = key
|
||||
result := &ReplicationLagResult{Key: connectionConfig.Key}
|
||||
go func() {
|
||||
result.Lag, result.Err = GetReplicationLag(connectionConfig, replicationLagQuery)
|
||||
lagResults <- result
|
||||
}()
|
||||
}
|
||||
for range *instanceKeyMap {
|
||||
lagResult := <-lagResults
|
||||
if lagResult.Err != nil {
|
||||
result = lagResult
|
||||
} else if lagResult.Lag.Nanoseconds() > result.Lag.Nanoseconds() {
|
||||
result = lagResult
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
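The fan-out/fan-in pattern used by GetMaxReplicationLag can be sketched in isolation as below; checkLag and the replica names are hypothetical stand-ins for GetReplicationLag and the instance key map:

```go
package main

import (
	"fmt"
	"time"
)

// checkLag stands in for a per-replica probe such as GetReplicationLag;
// the fixed values here are made up for illustration.
func checkLag(replica string) (time.Duration, error) {
	fake := map[string]time.Duration{"replica-1": 2 * time.Second, "replica-2": 5 * time.Second}
	return fake[replica], nil
}

func main() {
	replicas := []string{"replica-1", "replica-2"}
	type result struct {
		replica string
		lag     time.Duration
		err     error
	}
	results := make(chan result, len(replicas))
	// Fan out: one goroutine per replica, as GetMaxReplicationLag does per instance key.
	for _, r := range replicas {
		r := r
		go func() {
			lag, err := checkLag(r)
			results <- result{replica: r, lag: lag, err: err}
		}()
	}
	// Fan in: keep the worst (highest) lag, or any error encountered.
	var worst result
	for range replicas {
		res := <-results
		if res.err != nil || res.lag > worst.lag {
			worst = res
		}
	}
	fmt.Printf("max lag %v on %s\n", worst.lag, worst.replica)
}
```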
|
||||
|
||||
func GetMasterKeyFromSlaveStatus(connectionConfig *ConnectionConfig) (masterKey *InstanceKey, err error) {
|
||||
currentUri := connectionConfig.GetDBUri("information_schema")
|
||||
// This function is only called once, okay to not have a cached connection pool
|
||||
db, err := gosql.Open("mysql", currentUri)
|
||||
db, _, err := sqlutils.GetDB(currentUri)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
err = sqlutils.QueryRowsMap(db, `show slave status`, func(rowMap sqlutils.RowMap) error {
|
||||
// We wish to recognize the case where the topology's master actually has replication configuration.
|
||||
// This can happen when a DBA issues a `RESET SLAVE` instead of `RESET SLAVE ALL`.
|
||||
|
||||
// An empty log file indicates this is a master:
|
||||
if rowMap.GetString("Master_Log_File") == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
slaveIORunning := rowMap.GetString("Slave_IO_Running")
|
||||
slaveSQLRunning := rowMap.GetString("Slave_SQL_Running")
|
||||
|
||||
if slaveIORunning != "Yes" || slaveSQLRunning != "Yes" {
|
||||
return fmt.Errorf("Replication on %+v is broken: Slave_IO_Running: %s, Slave_SQL_Running: %s. Please make sure replication runs before using gh-ost.",
|
||||
connectionConfig.Key,
|
||||
slaveIORunning,
|
||||
slaveSQLRunning,
|
||||
)
|
||||
}
|
||||
|
||||
masterKey = &InstanceKey{
|
||||
Hostname: rowMap.GetString("Master_Host"),
|
||||
Port: rowMap.GetInt("Master_Port"),
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return masterKey, err
|
||||
}
|
||||
|
||||
@ -176,7 +153,7 @@ func GetInstanceKey(db *gosql.DB) (instanceKey *InstanceKey, err error) {
|
||||
}
|
||||
|
||||
// GetTableColumns reads column list from given table
|
||||
func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnList, *sql.ColumnList, error) {
|
||||
func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnList, error) {
|
||||
query := fmt.Sprintf(`
|
||||
show columns from %s.%s
|
||||
`,
|
||||
@ -184,30 +161,18 @@ func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnL
|
||||
sql.EscapeName(tableName),
|
||||
)
|
||||
columnNames := []string{}
|
||||
virtualColumnNames := []string{}
|
||||
err := sqlutils.QueryRowsMap(db, query, func(rowMap sqlutils.RowMap) error {
|
||||
columnName := rowMap.GetString("Field")
|
||||
columnNames = append(columnNames, columnName)
|
||||
if strings.Contains(rowMap.GetString("Extra"), " GENERATED") {
|
||||
log.Debugf("%s is a generated column", columnName)
|
||||
virtualColumnNames = append(virtualColumnNames, columnName)
|
||||
}
|
||||
columnNames = append(columnNames, rowMap.GetString("Field"))
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
if len(columnNames) == 0 {
|
||||
return nil, nil, log.Errorf("Found 0 columns on %s.%s. Bailing out",
|
||||
return nil, log.Errorf("Found 0 columns on %s.%s. Bailing out",
|
||||
sql.EscapeName(databaseName),
|
||||
sql.EscapeName(tableName),
|
||||
)
|
||||
}
|
||||
return sql.NewColumnList(columnNames), sql.NewColumnList(virtualColumnNames), nil
|
||||
}
|
||||
|
||||
// Kill executes a KILL QUERY by connection id
|
||||
func Kill(db *gosql.DB, connectionID string) error {
|
||||
_, err := db.Exec(`KILL QUERY %s`, connectionID)
|
||||
return err
|
||||
return sql.NewColumnList(columnNames), nil
|
||||
}
|
||||
|
65
go/os/process.go
Normal file
@ -0,0 +1,65 @@
|
||||
/*
|
||||
Copyright 2014 Outbrain Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package os
|
||||
|
||||
import (
|
||||
"github.com/outbrain/golib/log"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func execCmd(commandText string, arguments ...string) (*exec.Cmd, string, error) {
|
||||
commandBytes := []byte(commandText)
|
||||
tmpFile, err := ioutil.TempFile("", "gh-ost-process-cmd-")
|
||||
if err != nil {
|
||||
return nil, "", log.Errore(err)
|
||||
}
|
||||
ioutil.WriteFile(tmpFile.Name(), commandBytes, 0644)
|
||||
log.Debugf("execCmd: %s", commandText)
|
||||
shellArguments := append([]string{}, tmpFile.Name())
|
||||
shellArguments = append(shellArguments, arguments...)
|
||||
log.Debugf("%+v", shellArguments)
|
||||
return exec.Command("bash", shellArguments...), tmpFile.Name(), nil
|
||||
}
|
||||
|
||||
// CommandRun executes a command
|
||||
func CommandRun(commandText string, arguments ...string) error {
|
||||
cmd, tmpFileName, err := execCmd(commandText, arguments...)
|
||||
defer os.Remove(tmpFileName)
|
||||
if err != nil {
|
||||
return log.Errore(err)
|
||||
}
|
||||
err = cmd.Run()
|
||||
return log.Errore(err)
|
||||
}
|
||||
|
||||
// RunCommandWithOutput executes a command and returns its output bytes
|
||||
func RunCommandWithOutput(commandText string) ([]byte, error) {
|
||||
cmd, tmpFileName, err := execCmd(commandText)
|
||||
defer os.Remove(tmpFileName)
|
||||
if err != nil {
|
||||
return nil, log.Errore(err)
|
||||
}
|
||||
|
||||
outputBytes, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, log.Errore(err)
|
||||
}
|
||||
|
||||
return outputBytes, nil
|
||||
}
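A brief usage sketch for the helpers in this new file; the import path and the command text are assumptions for illustration, and bash must be available on the host, as execCmd requires:

```go
package main

import (
	"fmt"

	// assumed import path for the package added above
	ghostos "github.com/github/gh-ost/go/os"
)

func main() {
	// Runs the command text via a temporary bash script; extra arguments become $1, $2, ...
	if err := ghostos.CommandRun("echo preparing cut-over on $1", "mydb.mytable"); err != nil {
		fmt.Println("hook failed:", err)
	}

	// Same mechanism, but capturing stdout.
	out, err := ghostos.RunCommandWithOutput("hostname")
	if err == nil {
		fmt.Printf("running on %s", out)
	}
}
```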
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2022 GitHub Inc.
|
||||
Copyright 2016 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
@ -15,11 +15,11 @@ type ValueComparisonSign string
|
||||
|
||||
const (
|
||||
LessThanComparisonSign ValueComparisonSign = "<"
|
||||
LessThanOrEqualsComparisonSign ValueComparisonSign = "<="
|
||||
EqualsComparisonSign ValueComparisonSign = "="
|
||||
GreaterThanOrEqualsComparisonSign ValueComparisonSign = ">="
|
||||
GreaterThanComparisonSign ValueComparisonSign = ">"
|
||||
NotEqualsComparisonSign ValueComparisonSign = "!="
|
||||
LessThanOrEqualsComparisonSign = "<="
|
||||
EqualsComparisonSign = "="
|
||||
GreaterThanOrEqualsComparisonSign = ">="
|
||||
GreaterThanComparisonSign = ">"
|
||||
NotEqualsComparisonSign = "!="
|
||||
)
|
||||
|
||||
// EscapeName will escape a db/table/column/... name by wrapping with backticks.
|
||||
@ -33,15 +33,17 @@ func EscapeName(name string) string {
|
||||
}
|
||||
|
||||
func buildColumnsPreparedValues(columns *ColumnList) []string {
|
||||
values := make([]string, columns.Len())
|
||||
values := make([]string, columns.Len(), columns.Len())
|
||||
for i, column := range columns.Columns() {
|
||||
var token string
|
||||
if column.timezoneConversion != nil {
|
||||
token = fmt.Sprintf("convert_tz(?, '%s', '%s')", column.timezoneConversion.ToTimezone, "+00:00")
|
||||
} else if column.enumToTextConversion {
|
||||
token = fmt.Sprintf("ELT(?, %s)", column.EnumValues)
|
||||
} else if column.Type == JSONColumnType {
|
||||
token = "convert(? using utf8mb4)"
|
||||
} else if column.CharsetUnchanged {
|
||||
token = "?"
|
||||
// token = fmt.Sprintf("BINARY ?")
|
||||
//token = fmt.Sprintf("_%s ?", column.Charset)
|
||||
// token = fmt.Sprintf("convert(? using %s)", column.Charset)
|
||||
// token = fmt.Sprintf("convert(?, char character set %s)", column.Charset)
|
||||
} else {
|
||||
token = "?"
|
||||
}
|
||||
@ -51,7 +53,7 @@ func buildColumnsPreparedValues(columns *ColumnList) []string {
|
||||
}
|
||||
|
||||
func buildPreparedValues(length int) []string {
|
||||
values := make([]string, length)
|
||||
values := make([]string, length, length)
|
||||
for i := 0; i < length; i++ {
|
||||
values[i] = "?"
|
||||
}
|
||||
@ -59,7 +61,7 @@ func buildPreparedValues(length int) []string {
|
||||
}
|
||||
|
||||
func duplicateNames(names []string) []string {
duplicate := make([]string, len(names))
duplicate := make([]string, len(names), len(names))
copy(duplicate, names)
return duplicate
}
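The only change in these helpers is dropping the redundant capacity argument: make([]string, n) and make([]string, n, n) allocate the same slice, since capacity defaults to the length. A quick check:

```go
package main

import "fmt"

func main() {
	a := make([]string, 3)    // len 3, cap 3
	b := make([]string, 3, 3) // identical: len 3, cap 3
	fmt.Println(len(a), cap(a), len(b), cap(b)) // 3 3 3 3
}
```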
|
||||
@ -110,10 +112,6 @@ func BuildSetPreparedClause(columns *ColumnList) (result string, err error) {
|
||||
var setToken string
|
||||
if column.timezoneConversion != nil {
|
||||
setToken = fmt.Sprintf("%s=convert_tz(?, '%s', '%s')", EscapeName(column.Name), column.timezoneConversion.ToTimezone, "+00:00")
|
||||
} else if column.enumToTextConversion {
|
||||
setToken = fmt.Sprintf("%s=ELT(?, %s)", EscapeName(column.Name), column.EnumValues)
|
||||
} else if column.Type == JSONColumnType {
|
||||
setToken = fmt.Sprintf("%s=convert(? using utf8mb4)", EscapeName(column.Name))
|
||||
} else {
|
||||
setToken = fmt.Sprintf("%s=?", EscapeName(column.Name))
|
||||
}
|
||||
@ -144,12 +142,13 @@ func BuildRangeComparison(columns []string, values []string, args []interface{},
|
||||
comparisons := []string{}
|
||||
|
||||
for i, column := range columns {
|
||||
//
|
||||
value := values[i]
|
||||
rangeComparison, err := BuildValueComparison(column, value, comparisonSign)
|
||||
if err != nil {
|
||||
return "", explodedArgs, err
|
||||
}
|
||||
if i > 0 {
|
||||
if len(columns[0:i]) > 0 {
|
||||
equalitiesComparison, err := BuildEqualsComparison(columns[0:i], values[0:i])
|
||||
if err != nil {
|
||||
return "", explodedArgs, err
|
||||
@ -167,7 +166,7 @@ func BuildRangeComparison(columns []string, values []string, args []interface{},
|
||||
if includeEquals {
|
||||
comparison, err := BuildEqualsComparison(columns, values)
|
||||
if err != nil {
|
||||
return "", explodedArgs, err
|
||||
return "", explodedArgs, nil
|
||||
}
|
||||
comparisons = append(comparisons, comparison)
|
||||
explodedArgs = append(explodedArgs, args...)
|
||||
@ -238,7 +237,7 @@ func BuildRangeInsertPreparedQuery(databaseName, originalTableName, ghostTableNa
|
||||
return BuildRangeInsertQuery(databaseName, originalTableName, ghostTableName, sharedColumns, mappedSharedColumns, uniqueKey, uniqueKeyColumns, rangeStartValues, rangeEndValues, rangeStartArgs, rangeEndArgs, includeRangeStartValues, transactionalTable)
|
||||
}
|
||||
|
||||
func BuildUniqueKeyRangeEndPreparedQueryViaOffset(databaseName, tableName string, uniqueKeyColumns *ColumnList, rangeStartArgs, rangeEndArgs []interface{}, chunkSize int64, includeRangeStartValues bool, hint string) (result string, explodedArgs []interface{}, err error) {
|
||||
func BuildUniqueKeyRangeEndPreparedQuery(databaseName, tableName string, uniqueKeyColumns *ColumnList, rangeStartArgs, rangeEndArgs []interface{}, chunkSize int64, includeRangeStartValues bool, hint string) (result string, explodedArgs []interface{}, err error) {
|
||||
if uniqueKeyColumns.Len() == 0 {
|
||||
return "", explodedArgs, fmt.Errorf("Got 0 columns in BuildUniqueKeyRangeEndPreparedQuery")
|
||||
}
|
||||
@ -261,66 +260,11 @@ func BuildUniqueKeyRangeEndPreparedQueryViaOffset(databaseName, tableName string
|
||||
explodedArgs = append(explodedArgs, rangeExplodedArgs...)
|
||||
|
||||
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
|
||||
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames))
|
||||
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames))
|
||||
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
|
||||
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
|
||||
for i, column := range uniqueKeyColumns.Columns() {
|
||||
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
|
||||
if column.Type == EnumColumnType {
|
||||
uniqueKeyColumnAscending[i] = fmt.Sprintf("concat(%s) asc", uniqueKeyColumnNames[i])
|
||||
uniqueKeyColumnDescending[i] = fmt.Sprintf("concat(%s) desc", uniqueKeyColumnNames[i])
|
||||
} else {
|
||||
uniqueKeyColumnAscending[i] = fmt.Sprintf("%s asc", uniqueKeyColumnNames[i])
|
||||
uniqueKeyColumnDescending[i] = fmt.Sprintf("%s desc", uniqueKeyColumnNames[i])
|
||||
}
|
||||
}
|
||||
result = fmt.Sprintf(`
|
||||
select /* gh-ost %s.%s %s */
|
||||
%s
|
||||
from
|
||||
%s.%s
|
||||
where %s and %s
|
||||
order by
|
||||
%s
|
||||
limit 1
|
||||
offset %d
|
||||
`, databaseName, tableName, hint,
|
||||
strings.Join(uniqueKeyColumnNames, ", "),
|
||||
databaseName, tableName,
|
||||
rangeStartComparison, rangeEndComparison,
|
||||
strings.Join(uniqueKeyColumnAscending, ", "),
|
||||
(chunkSize - 1),
|
||||
)
|
||||
return result, explodedArgs, nil
|
||||
}
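To make the shape of the generated chunking query concrete, this sketch fills the same template with made-up inputs (a single-column unique key id on mydb.tbl, chunk size 500); the WHERE fragments are illustrative placeholders rather than the exact output of BuildRangePreparedComparison:

```go
package main

import "fmt"

func main() {
	// Made-up inputs for illustration only.
	databaseName, tableName, hint := "`mydb`", "`tbl`", "iteration:42"
	columns := "`id`"
	rangeStart, rangeEnd := "(`id` > ?)", "(`id` <= ?)"
	order := "`id` asc"
	chunkSize := int64(500)

	query := fmt.Sprintf(`
    select /* gh-ost %s.%s %s */
        %s
      from
        %s.%s
      where %s and %s
      order by
        %s
      limit 1
      offset %d`,
		databaseName, tableName, hint,
		columns,
		databaseName, tableName,
		rangeStart, rangeEnd,
		order,
		chunkSize-1,
	)
	fmt.Println(query)
}
```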
|
||||
|
||||
func BuildUniqueKeyRangeEndPreparedQueryViaTemptable(databaseName, tableName string, uniqueKeyColumns *ColumnList, rangeStartArgs, rangeEndArgs []interface{}, chunkSize int64, includeRangeStartValues bool, hint string) (result string, explodedArgs []interface{}, err error) {
|
||||
if uniqueKeyColumns.Len() == 0 {
|
||||
return "", explodedArgs, fmt.Errorf("Got 0 columns in BuildUniqueKeyRangeEndPreparedQuery")
|
||||
}
|
||||
databaseName = EscapeName(databaseName)
|
||||
tableName = EscapeName(tableName)
|
||||
|
||||
var startRangeComparisonSign ValueComparisonSign = GreaterThanComparisonSign
|
||||
if includeRangeStartValues {
|
||||
startRangeComparisonSign = GreaterThanOrEqualsComparisonSign
|
||||
}
|
||||
rangeStartComparison, rangeExplodedArgs, err := BuildRangePreparedComparison(uniqueKeyColumns, rangeStartArgs, startRangeComparisonSign)
|
||||
if err != nil {
|
||||
return "", explodedArgs, err
|
||||
}
|
||||
explodedArgs = append(explodedArgs, rangeExplodedArgs...)
|
||||
rangeEndComparison, rangeExplodedArgs, err := BuildRangePreparedComparison(uniqueKeyColumns, rangeEndArgs, LessThanOrEqualsComparisonSign)
|
||||
if err != nil {
|
||||
return "", explodedArgs, err
|
||||
}
|
||||
explodedArgs = append(explodedArgs, rangeExplodedArgs...)
|
||||
|
||||
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
|
||||
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames))
|
||||
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames))
|
||||
for i, column := range uniqueKeyColumns.Columns() {
|
||||
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
|
||||
if column.Type == EnumColumnType {
|
||||
if column.Type == EnumColumnValue {
|
||||
uniqueKeyColumnAscending[i] = fmt.Sprintf("concat(%s) asc", uniqueKeyColumnNames[i])
|
||||
uniqueKeyColumnDescending[i] = fmt.Sprintf("concat(%s) desc", uniqueKeyColumnNames[i])
|
||||
} else {
|
||||
@ -368,10 +312,10 @@ func buildUniqueKeyMinMaxValuesPreparedQuery(databaseName, tableName string, uni
|
||||
tableName = EscapeName(tableName)
|
||||
|
||||
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
|
||||
uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames))
|
||||
uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
|
||||
for i, column := range uniqueKeyColumns.Columns() {
|
||||
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
|
||||
if column.Type == EnumColumnType {
|
||||
if column.Type == EnumColumnValue {
|
||||
uniqueKeyColumnOrder[i] = fmt.Sprintf("concat(%s) %s", uniqueKeyColumnNames[i], order)
|
||||
} else {
|
||||
uniqueKeyColumnOrder[i] = fmt.Sprintf("%s %s", uniqueKeyColumnNames[i], order)
|
||||
@ -400,7 +344,7 @@ func BuildDMLDeleteQuery(databaseName, tableName string, tableColumns, uniqueKey
|
||||
}
|
||||
for _, column := range uniqueKeyColumns.Columns() {
|
||||
tableOrdinal := tableColumns.Ordinals[column.Name]
|
||||
arg := column.convertArg(args[tableOrdinal], true)
|
||||
arg := column.convertArg(args[tableOrdinal])
|
||||
uniqueKeyArgs = append(uniqueKeyArgs, arg)
|
||||
}
|
||||
databaseName = EscapeName(databaseName)
|
||||
@ -437,7 +381,7 @@ func BuildDMLInsertQuery(databaseName, tableName string, tableColumns, sharedCol
|
||||
|
||||
for _, column := range sharedColumns.Columns() {
|
||||
tableOrdinal := tableColumns.Ordinals[column.Name]
|
||||
arg := column.convertArg(args[tableOrdinal], false)
|
||||
arg := column.convertArg(args[tableOrdinal])
|
||||
sharedArgs = append(sharedArgs, arg)
|
||||
}
|
||||
|
||||
@ -485,33 +429,27 @@ func BuildDMLUpdateQuery(databaseName, tableName string, tableColumns, sharedCol
|
||||
|
||||
for _, column := range sharedColumns.Columns() {
|
||||
tableOrdinal := tableColumns.Ordinals[column.Name]
|
||||
arg := column.convertArg(valueArgs[tableOrdinal], false)
|
||||
arg := column.convertArg(valueArgs[tableOrdinal])
|
||||
sharedArgs = append(sharedArgs, arg)
|
||||
}
|
||||
|
||||
for _, column := range uniqueKeyColumns.Columns() {
|
||||
tableOrdinal := tableColumns.Ordinals[column.Name]
|
||||
arg := column.convertArg(whereArgs[tableOrdinal], true)
|
||||
arg := column.convertArg(whereArgs[tableOrdinal])
|
||||
uniqueKeyArgs = append(uniqueKeyArgs, arg)
|
||||
}
|
||||
|
||||
setClause, err := BuildSetPreparedClause(mappedSharedColumns)
|
||||
if err != nil {
|
||||
return "", sharedArgs, uniqueKeyArgs, err
|
||||
}
|
||||
|
||||
equalsComparison, err := BuildEqualsPreparedComparison(uniqueKeyColumns.Names())
|
||||
if err != nil {
|
||||
return "", sharedArgs, uniqueKeyArgs, err
|
||||
}
|
||||
result = fmt.Sprintf(`
|
||||
update /* gh-ost %s.%s */
|
||||
%s.%s
|
||||
update /* gh-ost %s.%s */
|
||||
%s.%s
|
||||
set
|
||||
%s
|
||||
where
|
||||
%s
|
||||
`, databaseName, tableName,
|
||||
%s
|
||||
`, databaseName, tableName,
|
||||
databaseName, tableName,
|
||||
setClause,
|
||||
equalsComparison,
|
||||
|
@ -12,8 +12,8 @@ import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/openark/golib/log"
|
||||
test "github.com/openark/golib/tests"
|
||||
"github.com/outbrain/golib/log"
|
||||
test "github.com/outbrain/golib/tests"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -283,7 +283,7 @@ func TestBuildUniqueKeyRangeEndPreparedQuery(t *testing.T) {
|
||||
rangeStartArgs := []interface{}{3, 17}
|
||||
rangeEndArgs := []interface{}{103, 117}
|
||||
|
||||
query, explodedArgs, err := BuildUniqueKeyRangeEndPreparedQueryViaTemptable(databaseName, originalTableName, uniqueKeyColumns, rangeStartArgs, rangeEndArgs, chunkSize, false, "test")
|
||||
query, explodedArgs, err := BuildUniqueKeyRangeEndPreparedQuery(databaseName, originalTableName, uniqueKeyColumns, rangeStartArgs, rangeEndArgs, chunkSize, false, "test")
|
||||
test.S(t).ExpectNil(err)
|
||||
expected := `
|
||||
select /* gh-ost mydb.tbl test */ name, position
|
||||
|
@ -8,7 +8,6 @@ package sql
|
||||
import (
|
||||
"golang.org/x/text/encoding"
|
||||
"golang.org/x/text/encoding/charmap"
|
||||
"golang.org/x/text/encoding/simplifiedchinese"
|
||||
)
|
||||
|
||||
type charsetEncoding map[string]encoding.Encoding
@ -19,5 +18,4 @@ func init() {
charsetEncodingMap = make(map[string]encoding.Encoding)
// Begin mappings
charsetEncodingMap["latin1"] = charmap.Windows1252
charsetEncodingMap["gbk"] = simplifiedchinese.GBK
}
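Since this branch is about carrying utf8 data stored in latin1-declared columns, the mapping above is the key detail: MySQL's latin1 is effectively Windows-1252. A small sketch of the decoding step that convertArg relies on, with an arbitrary sample byte:

```go
package main

import (
	"fmt"

	"golang.org/x/text/encoding/charmap"
)

func main() {
	// 0xE9 is "é" in latin1/Windows-1252; as a raw byte string it is not valid UTF-8.
	raw := string([]byte{0xE9})
	decoded, err := charmap.Windows1252.NewDecoder().String(raw)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q -> %q\n", raw, decoded) // "\xe9" -> "é"
}
```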
|
||||
|
194
go/sql/parser.go
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2022 GitHub Inc.
|
||||
Copyright 2016 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
@ -8,157 +8,38 @@ package sql
|
||||
import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
sanitizeQuotesRegexp = regexp.MustCompile("('[^']*')")
|
||||
renameColumnRegexp = regexp.MustCompile(`(?i)\bchange\s+(column\s+|)([\S]+)\s+([\S]+)\s+`)
|
||||
dropColumnRegexp = regexp.MustCompile(`(?i)\bdrop\s+(column\s+|)([\S]+)$`)
|
||||
renameTableRegexp = regexp.MustCompile(`(?i)\brename\s+(to|as)\s+`)
|
||||
autoIncrementRegexp = regexp.MustCompile(`(?i)\bauto_increment[\s]*=[\s]*([0-9]+)`)
|
||||
alterTableExplicitSchemaTableRegexps = []*regexp.Regexp{
|
||||
// ALTER TABLE `scm`.`tbl` something
|
||||
regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `[.]` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
|
||||
// ALTER TABLE `scm`.tbl something
|
||||
regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `[.]([\S]+)\s+(.*$)`),
|
||||
// ALTER TABLE scm.`tbl` something
|
||||
regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)[.]` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
|
||||
// ALTER TABLE scm.tbl something
|
||||
regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)[.]([\S]+)\s+(.*$)`),
|
||||
}
|
||||
alterTableExplicitTableRegexps = []*regexp.Regexp{
|
||||
// ALTER TABLE `tbl` something
|
||||
regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
|
||||
// ALTER TABLE tbl something
|
||||
regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)\s+(.*$)`),
|
||||
}
|
||||
enumValuesRegexp = regexp.MustCompile("^enum[(](.*)[)]$")
|
||||
renameColumnRegexp = regexp.MustCompile(`(?i)change\s+(column\s+|)([\S]+)\s+([\S]+)\s+`)
|
||||
)
|
||||
|
||||
type AlterTableParser struct {
|
||||
columnRenameMap map[string]string
|
||||
droppedColumns map[string]bool
|
||||
isRenameTable bool
|
||||
isAutoIncrementDefined bool
|
||||
|
||||
alterStatementOptions string
|
||||
alterTokens []string
|
||||
|
||||
explicitSchema string
|
||||
explicitTable string
|
||||
type Parser struct {
|
||||
columnRenameMap map[string]string
|
||||
}
|
||||
|
||||
func NewAlterTableParser() *AlterTableParser {
|
||||
return &AlterTableParser{
|
||||
func NewParser() *Parser {
|
||||
return &Parser{
|
||||
columnRenameMap: make(map[string]string),
|
||||
droppedColumns: make(map[string]bool),
|
||||
}
|
||||
}
|
||||
|
||||
func NewParserFromAlterStatement(alterStatement string) *AlterTableParser {
|
||||
parser := NewAlterTableParser()
|
||||
parser.ParseAlterStatement(alterStatement)
|
||||
return parser
|
||||
}
|
||||
func (this *Parser) ParseAlterStatement(alterStatement string) (err error) {
|
||||
allStringSubmatch := renameColumnRegexp.FindAllStringSubmatch(alterStatement, -1)
|
||||
for _, submatch := range allStringSubmatch {
|
||||
if unquoted, err := strconv.Unquote(submatch[2]); err == nil {
|
||||
submatch[2] = unquoted
|
||||
}
|
||||
if unquoted, err := strconv.Unquote(submatch[3]); err == nil {
|
||||
submatch[3] = unquoted
|
||||
}
|
||||
|
||||
func (this *AlterTableParser) tokenizeAlterStatement(alterStatement string) (tokens []string) {
|
||||
terminatingQuote := rune(0)
|
||||
f := func(c rune) bool {
|
||||
switch {
|
||||
case c == terminatingQuote:
|
||||
terminatingQuote = rune(0)
|
||||
return false
|
||||
case terminatingQuote != rune(0):
|
||||
return false
|
||||
case c == '\'':
|
||||
terminatingQuote = c
|
||||
return false
|
||||
case c == '(':
|
||||
terminatingQuote = ')'
|
||||
return false
|
||||
default:
|
||||
return c == ','
|
||||
}
|
||||
}
|
||||
|
||||
tokens = strings.FieldsFunc(alterStatement, f)
|
||||
for i := range tokens {
|
||||
tokens[i] = strings.TrimSpace(tokens[i])
|
||||
}
|
||||
return tokens
|
||||
}
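A condensed, standalone version of the tokenizer above, showing that commas inside single quotes or parentheses do not split tokens (sample input chosen arbitrarily):

```go
package main

import (
	"fmt"
	"strings"
)

// tokenize splits an ALTER spec on commas, but not on commas inside
// single quotes or parentheses -- the same idea as tokenizeAlterStatement.
func tokenize(alter string) []string {
	terminating := rune(0)
	split := func(c rune) bool {
		switch {
		case c == terminating:
			terminating = rune(0)
			return false
		case terminating != rune(0):
			return false
		case c == '\'':
			terminating = c
			return false
		case c == '(':
			terminating = ')'
			return false
		default:
			return c == ','
		}
	}
	tokens := strings.FieldsFunc(alter, split)
	for i := range tokens {
		tokens[i] = strings.TrimSpace(tokens[i])
	}
	return tokens
}

func main() {
	fmt.Printf("%q\n", tokenize("add column d decimal(10,2), add column e enum('a','b','c')"))
	// ["add column d decimal(10,2)" "add column e enum('a','b','c')"]
}
```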
|
||||
|
||||
func (this *AlterTableParser) sanitizeQuotesFromAlterStatement(alterStatement string) (strippedStatement string) {
|
||||
strippedStatement = alterStatement
|
||||
strippedStatement = sanitizeQuotesRegexp.ReplaceAllString(strippedStatement, "''")
|
||||
return strippedStatement
|
||||
}
|
||||
|
||||
func (this *AlterTableParser) parseAlterToken(alterToken string) {
|
||||
{
|
||||
// rename
|
||||
allStringSubmatch := renameColumnRegexp.FindAllStringSubmatch(alterToken, -1)
|
||||
for _, submatch := range allStringSubmatch {
|
||||
if unquoted, err := strconv.Unquote(submatch[2]); err == nil {
|
||||
submatch[2] = unquoted
|
||||
}
|
||||
if unquoted, err := strconv.Unquote(submatch[3]); err == nil {
|
||||
submatch[3] = unquoted
|
||||
}
|
||||
this.columnRenameMap[submatch[2]] = submatch[3]
|
||||
}
|
||||
}
|
||||
{
|
||||
// drop
|
||||
allStringSubmatch := dropColumnRegexp.FindAllStringSubmatch(alterToken, -1)
|
||||
for _, submatch := range allStringSubmatch {
|
||||
if unquoted, err := strconv.Unquote(submatch[2]); err == nil {
|
||||
submatch[2] = unquoted
|
||||
}
|
||||
this.droppedColumns[submatch[2]] = true
|
||||
}
|
||||
}
|
||||
{
|
||||
// rename table
|
||||
if renameTableRegexp.MatchString(alterToken) {
|
||||
this.isRenameTable = true
|
||||
}
|
||||
}
|
||||
{
|
||||
// auto_increment
|
||||
if autoIncrementRegexp.MatchString(alterToken) {
|
||||
this.isAutoIncrementDefined = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (this *AlterTableParser) ParseAlterStatement(alterStatement string) (err error) {
|
||||
this.alterStatementOptions = alterStatement
|
||||
for _, alterTableRegexp := range alterTableExplicitSchemaTableRegexps {
|
||||
if submatch := alterTableRegexp.FindStringSubmatch(this.alterStatementOptions); len(submatch) > 0 {
|
||||
this.explicitSchema = submatch[1]
|
||||
this.explicitTable = submatch[2]
|
||||
this.alterStatementOptions = submatch[3]
|
||||
break
|
||||
}
|
||||
}
|
||||
for _, alterTableRegexp := range alterTableExplicitTableRegexps {
|
||||
if submatch := alterTableRegexp.FindStringSubmatch(this.alterStatementOptions); len(submatch) > 0 {
|
||||
this.explicitTable = submatch[1]
|
||||
this.alterStatementOptions = submatch[2]
|
||||
break
|
||||
}
|
||||
}
|
||||
for _, alterToken := range this.tokenizeAlterStatement(this.alterStatementOptions) {
|
||||
alterToken = this.sanitizeQuotesFromAlterStatement(alterToken)
|
||||
this.parseAlterToken(alterToken)
|
||||
this.alterTokens = append(this.alterTokens, alterToken)
|
||||
this.columnRenameMap[submatch[2]] = submatch[3]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *AlterTableParser) GetNonTrivialRenames() map[string]string {
|
||||
func (this *Parser) GetNonTrivialRenames() map[string]string {
|
||||
result := make(map[string]string)
|
||||
for column, renamed := range this.columnRenameMap {
|
||||
if column != renamed {
|
||||
@ -168,45 +49,6 @@ func (this *AlterTableParser) GetNonTrivialRenames() map[string]string {
|
||||
return result
|
||||
}
|
||||
|
||||
func (this *AlterTableParser) HasNonTrivialRenames() bool {
|
||||
func (this *Parser) HasNonTrivialRenames() bool {
|
||||
return len(this.GetNonTrivialRenames()) > 0
|
||||
}
|
||||
|
||||
func (this *AlterTableParser) DroppedColumnsMap() map[string]bool {
|
||||
return this.droppedColumns
|
||||
}
|
||||
|
||||
func (this *AlterTableParser) IsRenameTable() bool {
|
||||
return this.isRenameTable
|
||||
}
|
||||
|
||||
func (this *AlterTableParser) IsAutoIncrementDefined() bool {
|
||||
return this.isAutoIncrementDefined
|
||||
}
|
||||
|
||||
func (this *AlterTableParser) GetExplicitSchema() string {
|
||||
return this.explicitSchema
|
||||
}
|
||||
|
||||
func (this *AlterTableParser) HasExplicitSchema() bool {
|
||||
return this.GetExplicitSchema() != ""
|
||||
}
|
||||
|
||||
func (this *AlterTableParser) GetExplicitTable() string {
|
||||
return this.explicitTable
|
||||
}
|
||||
|
||||
func (this *AlterTableParser) HasExplicitTable() bool {
|
||||
return this.GetExplicitTable() != ""
|
||||
}
|
||||
|
||||
func (this *AlterTableParser) GetAlterStatementOptions() string {
|
||||
return this.alterStatementOptions
|
||||
}
|
||||
|
||||
func ParseEnumValues(enumColumnType string) string {
|
||||
if submatch := enumValuesRegexp.FindStringSubmatch(enumColumnType); len(submatch) > 0 {
|
||||
return submatch[1]
|
||||
}
|
||||
return enumColumnType
|
||||
}
|
||||
|
@ -1,16 +1,15 @@
|
||||
/*
|
||||
Copyright 2022 GitHub Inc.
|
||||
Copyright 2016 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package sql
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/openark/golib/log"
|
||||
test "github.com/openark/golib/tests"
|
||||
"github.com/outbrain/golib/log"
|
||||
test "github.com/outbrain/golib/tests"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -19,53 +18,28 @@ func init() {
|
||||
|
||||
func TestParseAlterStatement(t *testing.T) {
|
||||
statement := "add column t int, engine=innodb"
|
||||
parser := NewAlterTableParser()
|
||||
parser := NewParser()
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
|
||||
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
|
||||
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
|
||||
}
|
||||
|
||||
func TestParseAlterStatementTrivialRename(t *testing.T) {
|
||||
statement := "add column t int, change ts ts timestamp, engine=innodb"
|
||||
parser := NewAlterTableParser()
|
||||
parser := NewParser()
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
|
||||
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
|
||||
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
|
||||
test.S(t).ExpectEquals(len(parser.columnRenameMap), 1)
|
||||
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
|
||||
}
|
||||
|
||||
func TestParseAlterStatementWithAutoIncrement(t *testing.T) {
|
||||
statements := []string{
|
||||
"auto_increment=7",
|
||||
"auto_increment = 7",
|
||||
"AUTO_INCREMENT = 71",
|
||||
"add column t int, change ts ts timestamp, auto_increment=7 engine=innodb",
|
||||
"add column t int, change ts ts timestamp, auto_increment =7 engine=innodb",
|
||||
"add column t int, change ts ts timestamp, AUTO_INCREMENT = 7 engine=innodb",
|
||||
"add column t int, change ts ts timestamp, engine=innodb auto_increment=73425",
|
||||
}
|
||||
for _, statement := range statements {
|
||||
parser := NewAlterTableParser()
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
|
||||
test.S(t).ExpectTrue(parser.IsAutoIncrementDefined())
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAlterStatementTrivialRenames(t *testing.T) {
|
||||
statement := "add column t int, change ts ts timestamp, CHANGE f `f` float, engine=innodb"
|
||||
parser := NewAlterTableParser()
|
||||
parser := NewParser()
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
|
||||
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
|
||||
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
|
||||
test.S(t).ExpectEquals(len(parser.columnRenameMap), 2)
|
||||
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
|
||||
test.S(t).ExpectEquals(parser.columnRenameMap["f"], "f")
|
||||
@ -83,256 +57,12 @@ func TestParseAlterStatementNonTrivial(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, statement := range statements {
|
||||
parser := NewAlterTableParser()
|
||||
parser := NewParser()
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
|
||||
renames := parser.GetNonTrivialRenames()
|
||||
test.S(t).ExpectEquals(len(renames), 2)
|
||||
test.S(t).ExpectEquals(renames["i"], "count")
|
||||
test.S(t).ExpectEquals(renames["f"], "fl")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTokenizeAlterStatement(t *testing.T) {
|
||||
parser := NewAlterTableParser()
|
||||
{
|
||||
alterStatement := "add column t int"
|
||||
tokens := parser.tokenizeAlterStatement(alterStatement)
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int"}))
|
||||
}
|
||||
{
|
||||
alterStatement := "add column t int, change column i int"
|
||||
tokens := parser.tokenizeAlterStatement(alterStatement)
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "change column i int"}))
|
||||
}
|
||||
{
|
||||
alterStatement := "add column t int, change column i int 'some comment'"
|
||||
tokens := parser.tokenizeAlterStatement(alterStatement)
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "change column i int 'some comment'"}))
|
||||
}
|
||||
{
|
||||
alterStatement := "add column t int, change column i int 'some comment, with comma'"
|
||||
tokens := parser.tokenizeAlterStatement(alterStatement)
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "change column i int 'some comment, with comma'"}))
|
||||
}
|
||||
{
|
||||
alterStatement := "add column t int, add column d decimal(10,2)"
|
||||
tokens := parser.tokenizeAlterStatement(alterStatement)
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "add column d decimal(10,2)"}))
|
||||
}
|
||||
{
|
||||
alterStatement := "add column t int, add column e enum('a','b','c')"
|
||||
tokens := parser.tokenizeAlterStatement(alterStatement)
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "add column e enum('a','b','c')"}))
|
||||
}
|
||||
{
|
||||
alterStatement := "add column t int(11), add column e enum('a','b','c')"
|
||||
tokens := parser.tokenizeAlterStatement(alterStatement)
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int(11)", "add column e enum('a','b','c')"}))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitizeQuotesFromAlterStatement(t *testing.T) {
|
||||
parser := NewAlterTableParser()
|
||||
{
|
||||
alterStatement := "add column e enum('a','b','c')"
|
||||
strippedStatement := parser.sanitizeQuotesFromAlterStatement(alterStatement)
|
||||
test.S(t).ExpectEquals(strippedStatement, "add column e enum('','','')")
|
||||
}
|
||||
{
|
||||
alterStatement := "change column i int 'some comment, with comma'"
|
||||
strippedStatement := parser.sanitizeQuotesFromAlterStatement(alterStatement)
|
||||
test.S(t).ExpectEquals(strippedStatement, "change column i int ''")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAlterStatementDroppedColumns(t *testing.T) {
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "drop column b"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(len(parser.droppedColumns), 1)
|
||||
test.S(t).ExpectTrue(parser.droppedColumns["b"])
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "drop column b, drop key c_idx, drop column `d`"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
|
||||
test.S(t).ExpectEquals(len(parser.droppedColumns), 2)
|
||||
test.S(t).ExpectTrue(parser.droppedColumns["b"])
|
||||
test.S(t).ExpectTrue(parser.droppedColumns["d"])
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "drop column b, drop key c_idx, drop column `d`, drop `e`, drop primary key, drop foreign key fk_1"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(len(parser.droppedColumns), 3)
|
||||
test.S(t).ExpectTrue(parser.droppedColumns["b"])
|
||||
test.S(t).ExpectTrue(parser.droppedColumns["d"])
|
||||
test.S(t).ExpectTrue(parser.droppedColumns["e"])
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "drop column b, drop bad statement, add column i int"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(len(parser.droppedColumns), 1)
|
||||
test.S(t).ExpectTrue(parser.droppedColumns["b"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAlterStatementRenameTable(t *testing.T) {
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "drop column b"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectFalse(parser.isRenameTable)
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "rename as something_else"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectTrue(parser.isRenameTable)
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "drop column b, rename as something_else"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
|
||||
test.S(t).ExpectTrue(parser.isRenameTable)
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "engine=innodb rename as something_else"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectTrue(parser.isRenameTable)
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "rename as something_else, engine=innodb"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectTrue(parser.isRenameTable)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAlterStatementExplicitTable(t *testing.T) {
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "drop column b"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.explicitSchema, "")
|
||||
test.S(t).ExpectEquals(parser.explicitTable, "")
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "alter table tbl drop column b"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.explicitSchema, "")
|
||||
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "alter table `tbl` drop column b"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.explicitSchema, "")
|
||||
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "alter table `scm with spaces`.`tbl` drop column b"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.explicitSchema, "scm with spaces")
|
||||
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "alter table `scm`.`tbl with spaces` drop column b"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
|
||||
test.S(t).ExpectEquals(parser.explicitTable, "tbl with spaces")
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "alter table `scm`.tbl drop column b"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
|
||||
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "alter table scm.`tbl` drop column b"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
|
||||
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "alter table scm.tbl drop column b"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
|
||||
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
|
||||
}
|
||||
{
|
||||
parser := NewAlterTableParser()
|
||||
statement := "alter table scm.tbl drop column b, add index idx(i)"
|
||||
err := parser.ParseAlterStatement(statement)
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
|
||||
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
|
||||
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b, add index idx(i)")
|
||||
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b", "add index idx(i)"}))
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseEnumValues(t *testing.T) {
|
||||
{
|
||||
s := "enum('red','green','blue','orange')"
|
||||
values := ParseEnumValues(s)
|
||||
test.S(t).ExpectEquals(values, "'red','green','blue','orange'")
|
||||
}
|
||||
{
|
||||
s := "('red','green','blue','orange')"
|
||||
values := ParseEnumValues(s)
|
||||
test.S(t).ExpectEquals(values, "('red','green','blue','orange')")
|
||||
}
|
||||
{
|
||||
s := "zzz"
|
||||
values := ParseEnumValues(s)
|
||||
test.S(t).ExpectEquals(values, "zzz")
|
||||
}
|
||||
}
|
||||
|
101
go/sql/types.go
@ -6,74 +6,49 @@
package sql

import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"

"github.com/outbrain/golib/log"
)

type ColumnType int

const (
UnknownColumnType ColumnType = iota
TimestampColumnType
DateTimeColumnType
EnumColumnType
MediumIntColumnType
JSONColumnType
FloatColumnType
BinaryColumnType
UnknownColumnType ColumnType = iota
TimestampColumnType = iota
DateTimeColumnType = iota
EnumColumnValue = iota
)

const maxMediumintUnsigned int32 = 16777215

type TimezoneConversion struct {
type TimezoneConvertion struct {
ToTimezone string
}

type CharacterSetConversion struct {
ToCharset string
FromCharset string
}

type Column struct {
Name string
IsUnsigned bool
Charset string
Type ColumnType
EnumValues string
timezoneConversion *TimezoneConversion
enumToTextConversion bool
// add Octet length for binary type, fix bytes with suffix "00" get clipped in mysql binlog.
// https://github.com/github/gh-ost/issues/909
BinaryOctetLength uint
charsetConversion *CharacterSetConversion
Name string
IsUnsigned bool
Charset string
CharsetUnchanged bool
Type ColumnType
timezoneConversion *TimezoneConvertion
}

func (this *Column) convertArg(arg interface{}, isUniqueKeyColumn bool) interface{} {
func (this *Column) convertArg(arg interface{}) interface{} {
if s, ok := arg.(string); ok {
arg2Bytes := []byte(s)
// convert to bytes if character string without charsetConversion.
if this.Charset != "" && this.charsetConversion == nil {
arg = arg2Bytes
} else {
if encoding, ok := charsetEncodingMap[this.Charset]; ok {
arg, _ = encoding.NewDecoder().String(s)
}
if this.CharsetUnchanged {
log.Errorf("============== charset unchanged for %+v: %+v, <%+v>", this.Name, this.Charset, arg)
// return []byte(s)
//return s
}

if this.Type == BinaryColumnType && isUniqueKeyColumn {
size := len(arg2Bytes)
if uint(size) < this.BinaryOctetLength {
buf := bytes.NewBuffer(arg2Bytes)
for i := uint(0); i < (this.BinaryOctetLength - uint(size)); i++ {
buf.Write([]byte{0})
}
arg = buf.String()
}
log.Errorf("============== charset for %+v: %+v, <%+v>", this.Name, this.Charset, arg)
// string, charset conversion
if encoding, ok := charsetEncodingMap[this.Charset]; ok {
arg, _ = encoding.NewDecoder().String(s)
log.Errorf("============== charset converted for %+v: %+v, <%+v>", this.Name, this.Charset, arg)
}

return arg
}

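One side of the convertArg diff above right-pads BINARY unique-key values with trailing 0x00 bytes up to the column's declared octet length (BinaryOctetLength), because fixed-length BINARY values can come out of the binlog with their trailing zero bytes clipped. A self-contained sketch of just that padding step, with made-up names and under the assumption that the value arrives as a Go string:

package main

import (
	"bytes"
	"fmt"
)

// padBinaryValue right-pads value with 0x00 bytes up to octetLength, the way a
// BINARY(N) column stores short values. Names here are illustrative only.
func padBinaryValue(value string, octetLength uint) string {
	raw := []byte(value)
	if uint(len(raw)) >= octetLength {
		return value
	}
	buf := bytes.NewBuffer(raw)
	for i := uint(len(raw)); i < octetLength; i++ {
		buf.Write([]byte{0})
	}
	return buf.String()
}

func main() {
	padded := padBinaryValue("ab", 4)
	fmt.Printf("%q -> length %d\n", padded, len(padded)) // "ab\x00\x00" -> length 4
}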
@ -85,14 +60,6 @@ func (this *Column) convertArg(arg interface{}, isUniqueKeyColumn bool) interfac
return uint16(i)
}
if i, ok := arg.(int32); ok {
if this.Type == MediumIntColumnType {
// problem with mediumint is that it's a 3-byte type. There is no compatible golang type to match that.
// So to convert from negative to positive we'd need to convert the value manually
if i >= 0 {
return i
}
return uint32(maxMediumintUnsigned + i + 1)
}
return uint32(i)
}
if i, ok := arg.(int64); ok {
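The comment in this hunk carries the whole reasoning: MEDIUMINT is a 3-byte MySQL type with no matching Go integer, so an unsigned MEDIUMINT that the binlog layer delivers as a negative int32 has to be mapped back onto the unsigned range by hand. A tiny worked example of that arithmetic (the helper name is made up for illustration):

package main

import "fmt"

const maxMediumintUnsigned int32 = 16777215

// unsignedMediumint re-maps a signed 3-byte value onto the unsigned range,
// e.g. -1 becomes 16777215, exactly as the hunk above does for unsigned
// MediumIntColumnType columns.
func unsignedMediumint(i int32) uint32 {
	if i >= 0 {
		return uint32(i)
	}
	return uint32(maxMediumintUnsigned + i + 1)
}

func main() {
	fmt.Println(unsignedMediumint(-1))   // 16777215
	fmt.Println(unsignedMediumint(-100)) // 16777116
}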
@ -185,6 +152,10 @@ func (this *ColumnList) IsUnsigned(columnName string) bool {
return this.GetColumn(columnName).IsUnsigned
}

func (this *ColumnList) SetCharsetUnchanged(columnName string) {
this.GetColumn(columnName).CharsetUnchanged = true
}

func (this *ColumnList) SetCharset(columnName string, charset string) {
this.GetColumn(columnName).Charset = charset
}
@ -202,25 +173,13 @@ func (this *ColumnList) GetColumnType(columnName string) ColumnType {
}

func (this *ColumnList) SetConvertDatetimeToTimestamp(columnName string, toTimezone string) {
this.GetColumn(columnName).timezoneConversion = &TimezoneConversion{ToTimezone: toTimezone}
this.GetColumn(columnName).timezoneConversion = &TimezoneConvertion{ToTimezone: toTimezone}
}

func (this *ColumnList) HasTimezoneConversion(columnName string) bool {
return this.GetColumn(columnName).timezoneConversion != nil
}

func (this *ColumnList) SetEnumToTextConversion(columnName string) {
this.GetColumn(columnName).enumToTextConversion = true
}

func (this *ColumnList) IsEnumToTextConversion(columnName string) bool {
return this.GetColumn(columnName).enumToTextConversion
}

func (this *ColumnList) SetEnumValues(columnName string, enumValues string) {
this.GetColumn(columnName).EnumValues = enumValues
}

func (this *ColumnList) String() string {
return strings.Join(this.Names(), ",")
}
@ -248,10 +207,6 @@ func (this *ColumnList) Len() int {
return len(this.columns)
}

func (this *ColumnList) SetCharsetConversion(columnName string, fromCharset string, toCharset string) {
this.GetColumn(columnName).charsetConversion = &CharacterSetConversion{FromCharset: fromCharset, ToCharset: toCharset}
}

// UniqueKey is the combination of a key's name and columns
type UniqueKey struct {
Name string

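The branch side of convertArg funnels string values through charsetEncodingMap and encoding.NewDecoder().String(s), i.e. it re-decodes bytes that arrived in the column's own character set into UTF-8 before they are written to the ghost table. The map itself is not part of this diff; the sketch below shows the same decode step against golang.org/x/text directly, with a single latin1 entry as an assumed example:

package main

import (
	"fmt"

	"golang.org/x/text/encoding"
	"golang.org/x/text/encoding/charmap"
)

// A minimal stand-in for charsetEncodingMap: MySQL charset name -> encoding.
// Only latin1 is listed here; a real table would cover more charsets.
var charsetEncodingMap = map[string]encoding.Encoding{
	"latin1": charmap.ISO8859_1,
}

func main() {
	// "átesting" as raw latin1 bytes: 0xE1 is 'á' in ISO 8859-1.
	raw := string([]byte{0xE1, 't', 'e', 's', 't', 'i', 'n', 'g'})
	if enc, ok := charsetEncodingMap["latin1"]; ok {
		decoded, _ := enc.NewDecoder().String(raw)
		fmt.Println(decoded) // átesting, now valid UTF-8
	}
}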
@ -10,8 +10,8 @@ import (
"reflect"

"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
)

func init() {
@ -1,13 +0,0 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
color varchar(32),
primary key(id)
) auto_increment=1;

drop event if exists gh_ost_test;

insert into gh_ost_test values (null, 11, 'red');
insert into gh_ost_test values (null, 13, 'green');
insert into gh_ost_test values (null, 17, 'blue');
@ -1 +0,0 @@
--attempt-instant-ddl
@ -1,17 +0,0 @@
drop event if exists gh_ost_test;

drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
primary key(id)
) auto_increment=1;

insert into gh_ost_test values (NULL, 11);
insert into gh_ost_test values (NULL, 13);
insert into gh_ost_test values (NULL, 17);
insert into gh_ost_test values (NULL, 23);
insert into gh_ost_test values (NULL, 29);
insert into gh_ost_test values (NULL, 31);
insert into gh_ost_test values (NULL, 37);
delete from gh_ost_test where id>=5;
@ -1 +0,0 @@
AUTO_INCREMENT=8
@ -1,13 +0,0 @@
drop event if exists gh_ost_test;

drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
primary key(id)
) auto_increment=1;

insert into gh_ost_test values (NULL, 11);
insert into gh_ost_test values (NULL, 13);
insert into gh_ost_test values (NULL, 17);
insert into gh_ost_test values (NULL, 23);
@ -1 +0,0 @@
AUTO_INCREMENT=5
@ -1,9 +0,0 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
primary key(id)
) auto_increment=1;

set session sql_mode='NO_AUTO_VALUE_ON_ZERO';
insert into gh_ost_test values (0, 23);
@ -1,21 +0,0 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id bigint auto_increment,
val bigint not null,
primary key(id)
) auto_increment=1;

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 18446744073709551615);
insert into gh_ost_test values (null, 18446744073709551614);
insert into gh_ost_test values (null, 18446744073709551613);
end ;;
@ -1 +0,0 @@
--alter="change val val bigint"
@ -1 +0,0 @@
--alter="add column is_good bit null default 0"
@ -1 +0,0 @@
id, i
@ -1 +0,0 @@
id, i
@ -1,24 +0,0 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
is_good bit null default 0,
primary key(id)
) auto_increment=1;

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 11, 0);
insert into gh_ost_test values (null, 13, 1);
insert into gh_ost_test values (null, 17, 1);

update gh_ost_test set is_good=0 where i=13 order by id desc limit 1;
end ;;
@ -1 +0,0 @@
--alter="modify column is_good bit not null default 0" --approve-renamed-columns
@ -1,40 +0,0 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
ts0 timestamp(6) default current_timestamp(6),
updated tinyint unsigned default 0,
primary key(id, ts0)
) auto_increment=1;

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 11, sysdate(6), 0);
update gh_ost_test set updated = 1 where i = 11 order by id desc limit 1;

insert into gh_ost_test values (null, 13, sysdate(6), 0);
update gh_ost_test set updated = 1 where i = 13 order by id desc limit 1;

insert into gh_ost_test values (null, 17, sysdate(6), 0);
update gh_ost_test set updated = 1 where i = 17 order by id desc limit 1;

insert into gh_ost_test values (null, 19, sysdate(6), 0);
update gh_ost_test set updated = 1 where i = 19 order by id desc limit 1;

insert into gh_ost_test values (null, 23, sysdate(6), 0);
update gh_ost_test set updated = 1 where i = 23 order by id desc limit 1;

insert into gh_ost_test values (null, 29, sysdate(6), 0);
insert into gh_ost_test values (null, 31, sysdate(6), 0);
insert into gh_ost_test values (null, 37, sysdate(6), 0);
insert into gh_ost_test values (null, 41, sysdate(6), 0);
delete from gh_ost_test where i = 31 order by id desc limit 1;
end ;;
@ -1,40 +0,0 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
v varchar(128),
updated tinyint unsigned default 0,
primary key(id, v)
) auto_increment=1;

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 11, 'eleven', 0);
update gh_ost_test set updated = 1 where i = 11 order by id desc limit 1;

insert into gh_ost_test values (null, 13, 'thirteen', 0);
update gh_ost_test set updated = 1 where i = 13 order by id desc limit 1;

insert into gh_ost_test values (null, 17, 'seventeen', 0);
update gh_ost_test set updated = 1 where i = 17 order by id desc limit 1;

insert into gh_ost_test values (null, 19, 'nineteen', 0);
update gh_ost_test set updated = 1 where i = 19 order by id desc limit 1;

insert into gh_ost_test values (null, 23, 'twenty three', 0);
update gh_ost_test set updated = 1 where i = 23 order by id desc limit 1;

insert into gh_ost_test values (null, 29, 'twenty nine', 0);
insert into gh_ost_test values (null, 31, 'thirty one', 0);
insert into gh_ost_test values (null, 37, 'thirty seven', 0);
insert into gh_ost_test values (null, 41, 'forty one', 0);
delete from gh_ost_test where i = 31 order by id desc limit 1;
end ;;
@ -1,28 +0,0 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
t varchar(128) charset utf8 collate utf8_general_ci,
tl varchar(128) charset latin1 not null,
ta varchar(128) charset ascii not null,
primary key(id)
) auto_increment=1;

insert into gh_ost_test values (null, 'Hello world, Καλημέρα κόσμε, コンニチハ', 'átesting0', 'initial');

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, md5(rand()), 'átesting-a', 'a');
insert into gh_ost_test values (null, 'novo proprietário', 'átesting-b', 'b');
insert into gh_ost_test values (null, '2H₂ + O₂ ⇌ 2H₂O, R = 4.7 kΩ, ⌀ 200 mm', 'átesting-c', 'c');
insert into gh_ost_test values (null, 'usuário', 'átesting-x', 'x');

delete from gh_ost_test where ta='x' order by id desc limit 1;
end ;;
@ -1 +0,0 @@
--alter='convert to character set utf8mb4'
@ -1,27 +0,0 @@
set session time_zone='+00:00';

drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
create_time timestamp NULL DEFAULT '0000-00-00 00:00:00',
update_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
counter int(10) unsigned DEFAULT NULL,
primary key(id)
) auto_increment=1;

set session time_zone='+00:00';
insert into gh_ost_test values (1, '0000-00-00 00:00:00', now(), 0);

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
set session time_zone='+00:00';
update gh_ost_test set counter = counter + 1 where id = 1;
end ;;
@ -1 +0,0 @@
--alter='add column name varchar(1)'
@ -1 +0,0 @@
id, create_time, update_time, counter
@ -1 +0,0 @@
id, create_time, update_time, counter
@ -1,27 +0,0 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
dt0 datetime(6),
dt1 datetime(6),
ts2 timestamp(6),
updated tinyint unsigned default 0,
primary key(id),
key i_idx(i)
) auto_increment=1;

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 11, '2016-10-31 11:22:33.0123', now(), '2016-10-31 11:22:33.0369', 0);
update gh_ost_test set dt1='2016-10-31 11:22:33.0246', updated = 1 where i = 11 order by id desc limit 1;

insert into gh_ost_test values (null, 13, '2016-10-31 11:22:33.0123', '2016-10-31 11:22:33.789', '2016-10-31 11:22:33.0369', 0);
end ;;
@ -17,7 +17,7 @@ create event gh_ost_test
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
disable on slave
enable
do
begin
insert into gh_ost_test values (null, 11, now(), now(), now(), 0);
@ -3,9 +3,9 @@ create table gh_ost_test (
id int unsigned auto_increment,
i int not null,
ts0 timestamp default current_timestamp,
ts1 timestamp null,
ts1 timestamp,
dt2 datetime,
t datetime default current_timestamp,
t datetime,
updated tinyint unsigned default 0,
primary key(id, t),
key i_idx(i)
@ -1 +1 @@
--alter="change column t t timestamp default current_timestamp"
--alter="change column t t timestamp not null"
@ -3,9 +3,9 @@ create table gh_ost_test (
id int unsigned auto_increment,
i int not null,
ts0 timestamp default current_timestamp,
ts1 timestamp null,
ts1 timestamp,
dt2 datetime,
t datetime null,
t datetime,
updated tinyint unsigned default 0,
primary key(id),
key i_idx(i)
@ -1 +1 @@
--alter="change column t t timestamp null"
--alter="change column t t timestamp not null"
Some files were not shown because too many files have changed in this diff.