Merge branch 'master' into tests-updates

commit c37ea5dea0 by Shlomi Noach, 2019-02-10 11:24:19 +02:00, committed by GitHub (GPG key ID: 4AEE18F83AFDEB23)
268 changed files with 14276 additions and 4617 deletions

View File

@ -11,7 +11,7 @@ Related issue: https://github.com/github/gh-ost/issues/0123456789
### Description
This PR [briefly explain what is does]
This PR [briefly explain what it does]
> In case this PR introduced Go code changes:

View File

@ -1,20 +1,31 @@
# http://docs.travis-ci.com/user/languages/go/
language: go
go: 1.8
go:
- "1.9"
- "1.10"
os:
- linux
env:
- MYSQL_USER=root
- CURRENT_CI_ENV=travis
addons:
apt:
packages:
- git
- numactl
- libaio1
before_install:
- mysql -e 'CREATE DATABASE IF NOT EXISTS test;'
install: true
script: script/cibuild
script:
- script/cibuild
notifications:
email: false

View File

@ -94,7 +94,7 @@ Please see [Coding gh-ost](doc/coding-ghost.md) for a guide to getting started d
[Download latest release here](https://github.com/github/gh-ost/releases/latest)
`gh-ost` is a Go project; it is built with Go `1.8` (though `1.7` should work as well). To build on your own, use either:
`gh-ost` is a Go project; it is built with Go `1.9` and above. To build on your own, use either:
- [script/build](https://github.com/github/gh-ost/blob/master/script/build) - this is the same build script used by CI and hence the authoritative one; the artifact is the `./bin/gh-ost` binary.
- [build.sh](https://github.com/github/gh-ost/blob/master/build.sh) for building `tar.gz` artifacts in `/tmp/gh-ost`
@ -107,3 +107,5 @@ Generally speaking, `master` branch is stable, but only [releases](https://githu
- [@ggunson](https://github.com/ggunson)
- [@tomkrouper](https://github.com/tomkrouper)
- [@shlomi-noach](https://github.com/shlomi-noach)
- [@jessbreckenridge](https://github.com/jessbreckenridge)
- [@gtowey](https://github.com/gtowey)

View File

@ -1 +1 @@
1.0.42
1.0.47

View File

@ -2,36 +2,72 @@
#
#
RELEASE_VERSION=$(cat RELEASE_VERSION)
RELEASE_VERSION=
buildpath=
function build {
osname=$1
osshort=$2
GOOS=$3
GOARCH=$4
echo "Building ${osname} binary"
export GOOS
export GOARCH
go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
if [ $? -ne 0 ]; then
echo "Build failed for ${osname}"
exit 1
fi
(cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${timestamp}.tar.gz $target)
function setuptree() {
b=$( mktemp -d $buildpath/gh-ostXXXXXX ) || return 1
mkdir -p $b/gh-ost
mkdir -p $b/gh-ost/usr/bin
echo $b
}
buildpath=/tmp/gh-ost
target=gh-ost
timestamp=$(date "+%Y%m%d%H%M%S")
ldflags="-X main.AppVersion=${RELEASE_VERSION}"
export GO15VENDOREXPERIMENT=1
function build {
osname=$1
osshort=$2
GOOS=$3
GOARCH=$4
mkdir -p ${buildpath}
build macOS osx darwin amd64
build GNU/Linux linux linux amd64
echo "Binaries found in:"
ls -1 $buildpath/gh-ost-binary*${timestamp}.tar.gz
if ! go version | egrep -q 'go(1[.]9|1[.]1[0-9])' ; then
echo "go version is too low. Must use 1.9 or above"
exit 1
fi
echo "Building ${osname} binary"
export GOOS
export GOARCH
go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
if [ $? -ne 0 ]; then
echo "Build failed for ${osname}"
exit 1
fi
(cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${timestamp}.tar.gz $target)
if [ "$GOOS" == "linux" ] ; then
echo "Creating Distro full packages"
builddir=$(setuptree)
cp $buildpath/$target $builddir/gh-ost/usr/bin
cd $buildpath
fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m shlomi-noach --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm .
fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m shlomi-noach --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
fi
}
main() {
if [ -z "${RELEASE_VERSION}" ] ; then
RELEASE_VERSION=$(git describe --abbrev=0 --tags | tr -d 'v')
fi
if [ -z "${RELEASE_VERSION}" ] ; then
RELEASE_VERSION=$(cat RELEASE_VERSION)
fi
buildpath=/tmp/gh-ost-release
target=gh-ost
timestamp=$(date "+%Y%m%d%H%M%S")
ldflags="-X main.AppVersion=${RELEASE_VERSION}"
mkdir -p ${buildpath}
rm -rf ${buildpath:?}/*
build macOS osx darwin amd64
build GNU/Linux linux linux amd64
echo "Binaries found in:"
ls -1 $buildpath/gh-ost-binary*${timestamp}.tar.gz
}
main "$@"

View File

@ -2,6 +2,10 @@
A more in-depth discussion of various `gh-ost` command line flags: implementation, implication, use cases.
### aliyun-rds
Add this flag when executing on Aliyun RDS.
### allow-master-master
See [`--assume-master-host`](#assume-master-host).
@ -65,6 +69,10 @@ This is somewhat similar to a Nagios `n`-times test, where `n` in our case is al
Optional. Default is `safe`. See more discussion in [`cut-over`](cut-over.md)
### cut-over-lock-timeout-seconds
Default `3`. Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout).
### discard-foreign-keys
**Danger**: this flag will _silently_ discard any foreign keys existing on your table.
@ -82,7 +90,7 @@ The `--dml-batch-size` flag controls the size of the batched write. Allowed valu
Why is this behavior configurable? Different workloads have different characteristics. Some workloads have very large writes, such that aggregating even `50` writes into a transaction makes for a significant transaction size. On other workloads write rate is high such that one just can't allow for a hundred more syncs to disk per second. The default value of `10` is a modest compromise that should probably work very well for most workloads. Your mileage may vary.
Noteworthy is that setting `--dml-batch-size` to higher value _does not_ mean `gh-ost` blocks or waits on writes. The batch size is an upper limit on transaction size, not a minimal one. If `gh-ost` doesn't have "enough" events in the pipe, it does not wait on the binary log, it just writes what it already has. This conveniently suggests that if write load is light enough for `gh-ost` to only see a few events in the binary log at a given time, then it is also light neough for `gh-ost` to apply a fraction of the batch size.
Noteworthy is that setting `--dml-batch-size` to higher value _does not_ mean `gh-ost` blocks or waits on writes. The batch size is an upper limit on transaction size, not a minimal one. If `gh-ost` doesn't have "enough" events in the pipe, it does not wait on the binary log, it just writes what it already has. This conveniently suggests that if write load is light enough for `gh-ost` to only see a few events in the binary log at a given time, then it is also light enough for `gh-ost` to apply a fraction of the batch size.
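To illustrate that the batch size is an upper bound rather than a minimum, here is a minimal Go sketch (not `gh-ost`'s actual code; `binlogEvent` and `drainBatch` are hypothetical names) of draining only what is already in the pipe:
```go
package main

import "fmt"

type binlogEvent struct{ seq int }

// drainBatch collects up to batchSize events that are already buffered in
// the channel, but never blocks waiting for the batch to fill up.
func drainBatch(events <-chan binlogEvent, batchSize int) []binlogEvent {
	batch := make([]binlogEvent, 0, batchSize)
	batch = append(batch, <-events) // block only for the first event
	for len(batch) < batchSize {
		select {
		case ev := <-events:
			batch = append(batch, ev)
		default:
			return batch // pipe is empty; apply what we have rather than wait
		}
	}
	return batch
}

func main() {
	events := make(chan binlogEvent, 100)
	for i := 0; i < 3; i++ {
		events <- binlogEvent{seq: i}
	}
	// Even with --dml-batch-size=10, a light write load yields a batch of 3.
	fmt.Println(len(drainBatch(events, 10)))
}
```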
### exact-rowcount
@ -103,6 +111,14 @@ While the ongoing estimated number of rows is still heuristic, it's almost exact
Without this parameter, migration is a _noop_: testing table creation and validity of migration, but not touching data.
### force-table-names
Table name prefix to be used on the temporary tables.
### gcp
Add this flag when executing on a 1st generation Google Cloud Platform (GCP).
### heartbeat-interval-millis
Default 100. See [`subsecond-lag`](subsecond-lag.md) for details.
@ -117,6 +133,10 @@ We think `gh-ost` should not take chances or make assumptions about the user's t
See [`initially-drop-ghost-table`](#initially-drop-ghost-table)
### initially-drop-socket-file
Default False. Should `gh-ost` forcibly delete an existing socket file. Be careful: this might drop the socket file of a running migration!
### max-lag-millis
On a replication topology, this is perhaps the most important migration throttling factor: the maximum lag allowed for migration to work. If lag exceeds this value, migration throttles.
@ -151,7 +171,7 @@ See also: [`concurrent-migrations`](cheatsheet.md#concurrent-migrations) on the
### skip-foreign-key-checks
By default `gh-ost` verifies no foreign keys exist on the migrated table. On servers with large number of tables this check can take a long time. If you're absolutely certain no foreign keys exist (table does not referenece other table nor is referenced by other tables) and wish to save the check time, provide with `--skip-foreign-key-checks`.
By default `gh-ost` verifies no foreign keys exist on the migrated table. On servers with large number of tables this check can take a long time. If you're absolutely certain no foreign keys exist (table does not reference other table nor is referenced by other tables) and wish to save the check time, provide with `--skip-foreign-key-checks`.
### skip-renamed-columns
@ -161,6 +181,10 @@ See [`approve-renamed-columns`](#approve-renamed-columns)
Issue the migration on a replica; do not modify data on master. Useful for validating, testing and benchmarking. See [`testing-on-replica`](testing-on-replica.md)
### test-on-replica-skip-replica-stop
Default `False`. When `--test-on-replica` is enabled, do not issue commands to stop replication (requires `--test-on-replica`).
### throttle-control-replicas
Provide a comma delimited list of replicas; `gh-ost` will throttle when any of the given replicas lag beyond [`--max-lag-millis`](#max-lag-millis). The list can be queried and updated dynamically via [interactive commands](interactive-commands.md)

View File

@ -69,6 +69,7 @@ The following variables are available on all hooks:
- `GH_OST_INSPECTED_HOST`
- `GH_OST_EXECUTING_HOST`
- `GH_OST_HOOKS_HINT` - copy of `--hooks-hint` value
- `GH_OST_DRY_RUN` - whether or not the `gh-ost` run is a dry run
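Hooks are plain executables that receive these variables via the environment. A minimal hypothetical hook, sketched here in Go for illustration (hooks may be any executable; `GH_OST_DRY_RUN` is formatted as `true`/`false`):
```go
package main

import (
	"fmt"
	"os"
)

// A hypothetical gh-ost hook: skips its side effects on dry runs.
func main() {
	if os.Getenv("GH_OST_DRY_RUN") == "true" {
		fmt.Println("dry run; skipping notification")
		return
	}
	fmt.Printf("migrating %s.%s on %s\n",
		os.Getenv("GH_OST_DATABASE_NAME"),
		os.Getenv("GH_OST_TABLE_NAME"),
		os.Getenv("GH_OST_EXECUTING_HOST"))
}
```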
The following variables are available on particular hooks:

View File

@ -43,7 +43,7 @@ Both interfaces may serve at the same time. Both respond to simple text command,
### Querying for data
For commands that accept an argumetn as value, pass `?` (question mark) to _get_ current value rather than _set_ a new one.
For commands that accept an argument as value, pass `?` (question mark) to _get_ current value rather than _set_ a new one.
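For instance, a minimal Go sketch of issuing such a query over the migration's unix socket (the socket path and reply format below are assumptions; `nc -U` works just as well):
```go
package main

import (
	"fmt"
	"io"
	"net"
	"os"
)

// Query a running gh-ost migration for its current chunk-size value.
func main() {
	conn, err := net.Dial("unix", "/tmp/gh-ost.mydb.mytable.sock")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer conn.Close()
	fmt.Fprintln(conn, "chunk-size=?") // '?' reads the current value
	io.Copy(os.Stdout, conn)           // prints e.g. "1000"
}
```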
### Examples

View File

@ -28,3 +28,9 @@ It is therefore unlikely that `gh-ost` will support this behavior.
Yes. TL;DR if running all on same replica/master, make sure to provide `--replica-server-id`. [Read more](cheatsheet.md#concurrent-migrations)
# Why
### Why Is the "Connect to Replica" mode preferred?
To avoid placing extra load on the master. `gh-ost` connects as a replication client. Each additional replica adds some load to the master.
To monitor replication lag from a replica. This makes the replication lag throttle, `--max-lag-millis`, more representative of the lag experienced by other replicas following the master (perhaps N levels deep in a tree of replicas).

View File

@ -1,4 +1,4 @@
`gh-ost` has been updated to work with Amazon RDS however due to GitHub not relying using AWS for databases, this documentation is community driven so if you find a bug please [open an issue][new_issue]!
`gh-ost` has been updated to work with Amazon RDS however due to GitHub not using AWS for databases, this documentation is community driven so if you find a bug please [open an issue][new_issue]!
# Amazon RDS

View File

@ -26,10 +26,6 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
- Triggers are not supported. They may be supported in the future.
- MySQL 5.7 generated columns are not supported. They may be supported in the future.
- MySQL 5.7 `POINT` column type is not supported.
- MySQL 5.7 `JSON` columns are supported but not as part of `PRIMARY KEY`
- The two _before_ & _after_ tables must share a `PRIMARY KEY` or other `UNIQUE KEY`. This key will be used by `gh-ost` to iterate through the table rows when copying. [Read more](shared-key.md)
@ -43,12 +39,15 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
- For example, you may not migrate `MyTable` if another table called `MYtable` exists in the same schema.
- Amazon RDS works, but has its own [limitations](rds.md).
- Google Cloud SQL is currently not supported
- Google Cloud SQL works, `--gcp` flag required.
- Aliyun RDS works, `--aliyun-rds` flag required.
- Multisource is not supported when migrating via replica. It _should_ work (but never tested) when connecting directly to master (`--allow-on-master`)
- Master-master setup is only supported in active-passive setup. Active-active (where table is being written to on both masters concurrently) is unsupported. It may be supported in the future.
- If you have en `enum` field as part of your migration key (typically the `PRIMARY KEY`), migration performance will be degraded and potentially bad. [Read more](https://github.com/github/gh-ost/pull/277#issuecomment-254811520)
- If you have an `enum` field as part of your migration key (typically the `PRIMARY KEY`), migration performance will be degraded and potentially bad. [Read more](https://github.com/github/gh-ost/pull/277#issuecomment-254811520)
- Migrating a `FEDERATED` table is unsupported and is irrelevant to the problem `gh-ost` tackles.
- `ALTER TABLE ... RENAME TO some_other_name` is not supported (and you shouldn't use `gh-ost` for such a trivial operation).

View File

@ -1,12 +1,12 @@
# Shared key
A requirement for a migration to run is that the two _before_ and _after_ tables have a shared unique key. This is to elaborate and illustrate on the matter.
gh-ost requires for every migration that both the _before_ and _after_ versions of the table share the same unique not-null key columns. This page illustrates this rule.
### Introduction
Consider a classic, simple migration. The table is any normal:
Consider a simple migration, with a normal table,
```
```sql
CREATE TABLE tbl (
id bigint unsigned not null auto_increment,
data varchar(255),
@ -15,54 +15,72 @@ CREATE TABLE tbl (
)
```
And the migration is a simple `add column ts timestamp`.
In such migration there is no change in indexes, and in particular no change to any unique key, and specifically no change to the `PRIMARY KEY`. To run this migration, `gh-ost` would iterate the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` by order of `id`, and then apply binlog events onto `_tbl_gho`.
Applying the binlog events assumes the existence of a shared unique key. For example, an `UPDATE` statement in the binary log translate to a `REPLACE` statement which `gh-ost` applies to the _ghost_ table. Such statement expects to add or replace an existing row based on given row data. In particular, it would _replace_ an existing row if a unique key violation is met.
So `gh-ost` correlates `tbl` and `_tbl_gho` rows using a unique key. In the above example that would be the `PRIMARY KEY`.
### Rules
There must be a shared set of not-null columns for which there is a unique constraint in both the original table and the migration (_ghost_) table.
### Interpreting the rules
The same columns must be covered by a unique key in both tables. This doesn't have to be the `PRIMARY KEY`. This doesn't have to be a key of the same name.
Upon migration, `gh-ost` inspects both the original and _ghost_ table and attempts to find at least one such unique key (or rather, a set of columns) that is shared between the two. Typically this would just be the `PRIMARY KEY`, but sometimes you may change the `PRIMARY KEY` itself, in which case `gh-ost` will look for other options.
`gh-ost` expects unique keys where no `NULL` values are found, i.e. all columns covered by the unique key are defined as `NOT NULL`. This is implicitly true for `PRIMARY KEY`s. If no such key can be found, `gh-ost` bails out. In the event there is no such key, but you happen to _know_ your columns have no `NULL` values even though they're `NULL`-able, you may take responsibility and pass the `--allow-nullable-unique-key`. The migration will run well as long as no `NULL` values are found in the unique key's columns. Any actual `NULL`s may corrupt the migration.
### Examples: allowed and not allowed
and the migration `add column ts timestamp`. The _after_ table version would be:
```sql
CREATE TABLE tbl (
id bigint unsigned not null auto_increment,
data varchar(255),
more_data int,
ts timestamp,
PRIMARY KEY(id)
)
```
(This is also the definition of the _ghost_ table, except that that table would be called `_tbl_gho`).
In this migration, the _before_ and _after_ versions contain the same unique not-null key (the PRIMARY KEY). To run this migration, `gh-ost` would iterate through the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` in primary key order, while also applying the binlog event writes from `tbl` onto `_tbl_gho`.
Applying the binlog events is what requires the shared unique key. For example, an `UPDATE` statement to `tbl` translates to a `REPLACE` statement which `gh-ost` applies to `_tbl_gho`. A `REPLACE` statement expects to insert or replace an existing row based on the row's values and the table's unique key constraints. In particular, if inserting that row would result in a unique key violation (e.g., a row with that primary key already exists), it would _replace_ that existing row with the new values.
So `gh-ost` correlates `tbl` and `_tbl_gho` rows one to one using a unique key. In the above example that would be the `PRIMARY KEY`.
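For illustration, a simplified Go sketch (not `gh-ost`'s actual query builder; `buildReplace` is a hypothetical name) of mapping an intercepted `UPDATE` onto a `REPLACE` against the _ghost_ table:
```go
package main

import (
	"fmt"
	"strings"
)

// buildReplace maps a binlog row image onto a REPLACE against the ghost
// table. The REPLACE relies on the shared unique key (here, id) to
// overwrite the right row.
func buildReplace(ghostTable string, columns []string, values []interface{}) (string, []interface{}) {
	placeholders := strings.TrimSuffix(strings.Repeat("?, ", len(columns)), ", ")
	query := fmt.Sprintf("replace into %s (%s) values (%s)",
		ghostTable, strings.Join(columns, ", "), placeholders)
	return query, values
}

func main() {
	// binlog: UPDATE tbl SET data='x' WHERE id=17; row image (17, 'x', 3, ts)
	query, args := buildReplace("_tbl_gho",
		[]string{"id", "data", "more_data", "ts"},
		[]interface{}{17, "x", 3, "2019-02-10 11:24:19"})
	fmt.Println(query, args)
}
```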
### Interpreting the rule
The _before_ and _after_ versions of the table share the same unique not-null key, but:
- the key doesn't have to be the PRIMARY KEY
- the key can have a different name between the _before_ and _after_ versions (e.g., renamed via DROP INDEX and ADD INDEX) so long as it contains the exact same column(s)
At the start of the migration, `gh-ost` inspects both the original and _ghost_ table it created, and attempts to find at least one such unique key (or rather, a set of columns) that is shared between the two. Typically this would just be the `PRIMARY KEY`, but some tables don't have primary keys, or sometimes it is the primary key that is being modified by the migration. In these cases `gh-ost` will look for other options.
`gh-ost` expects unique keys where no `NULL` values are found, i.e. all columns contained in the unique key are defined as `NOT NULL`. This is implicitly true for primary keys. If no such key can be found, `gh-ost` bails out.
If the table contains a unique key with nullable columns, but you know your columns contain no `NULL` values, use the `--allow-nullable-unique-key` option. The migration will run well as long as no `NULL` values are found in the unique key's columns. **Any actual `NULL`s may corrupt the migration.**
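If you do go the `--allow-nullable-unique-key` route, a hypothetical pre-flight check (the table and column names below are illustrative) can reduce the risk:
```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

// Confirm the nullable unique key columns hold no actual NULLs before
// running the migration with --allow-nullable-unique-key.
func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/mydb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var nulls int
	err = db.QueryRow(
		"select count(*) from tbl where owner_id is null or loc_id is null",
	).Scan(&nulls)
	if err != nil {
		log.Fatal(err)
	}
	if nulls > 0 {
		log.Fatalf("%d NULLs found; --allow-nullable-unique-key would risk corruption", nulls)
	}
	fmt.Println("no NULLs in candidate key columns")
}
```
Note this is a point-in-time check only; writes performed during the migration must also avoid `NULL`s in these columns.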
### Examples: Allowed and Not Allowed
```sql
create table some_table (
id int auto_increment,
id int not null auto_increment,
ts timestamp,
name varchar(128) not null,
owner_id int not null,
loc_id int,
loc_id int not null,
primary key(id),
unique key name_uidx(name)
)
```
Following are examples of migrations that are _good to run_:
Note the two unique, not-null indexes: the primary key and `name_uidx`.
Allowed migrations:
- `add column i int`
- `add key owner_idx(owner_id)`
- `add unique key owner_name_idx(owner_id, name)` - though you need to make sure to not write conflicting rows while this migration runs
- `add key owner_idx (owner_id)`
- `add unique key owner_name_idx (owner_id, name)` - **be careful not to write conflicting rows while this migration runs**
- `drop key name_uidx` - `primary key` is shared between the tables
- `drop primary key, add primary key(owner_id, loc_id)` - `name_uidx` is shared between the tables and is used for migration
- `change id bigint unsigned` - the `'primary key` is used. The change of type still makes the `primary key` workable.
- `drop primary key, drop key name_uidx, create primary key(name), create unique key id_uidx(id)` - swapping the two keys. `gh-ost` is still happy because `id` is still unique in both tables. So is `name`.
- `drop primary key, add primary key(owner_id, loc_id)` - `name_uidx` is shared between the tables
- `change id bigint unsigned not null auto_increment` - the `primary key` changes datatype but not value, and can be used
- `drop primary key, drop key name_uidx, add primary key(name), add unique key id_uidx(id)` - swapping the two keys. Either `id` or `name` could be used
Not allowed:
- `drop primary key, drop key name_uidx` - the _ghost_ table has no unique key
- `drop primary key, drop key name_uidx, create primary key(name, owner_id)` - no shared columns to the unique keys on both tables. Even though `name` exists in the _ghost_ table's `primary key`, it is only part of the key and in itself does not guarantee uniqueness in the _ghost_ table.
Following are examples of migrations that _cannot run_:
### Workarounds
- `drop primary key, drop key name_uidx` - no unique key to _ghost_ table, so clearly cannot run
- `drop primary key, drop key name_uidx, create primary key(name, owner_id)` - no shared columns to both tables. Even though `name` exists in the _ghost_ table's `primary key`, it is only part of the key and in itself does not guarantee uniqueness in the _ghost_ table.
Also, you cannot run a migration on a table that doesn't have some form of `unique key` in the first place, such as `some_table (id int, ts timestamp)`
If you need to change your primary key or only not-null unique index to use different columns, you will want to do it as two separate migrations:
1. `ADD UNIQUE KEY temp_pk (temp_pk_column,...)`
1. `DROP PRIMARY KEY, DROP KEY temp_pk, ADD PRIMARY KEY (temp_pk_column,...)`

View File

@ -46,6 +46,14 @@ Note that you may dynamically change both `--max-lag-millis` and the `throttle-c
An example query could be: `--throttle-query="select hour(now()) between 8 and 17"` which implies throttling auto-starts at `8:00am` and migration auto-resumes at `6:00pm`.
#### HTTP Throttle
The `--throttle-http` flag allows for throttling via HTTP. Every 100ms `gh-ost` issues a `HEAD` request to the provided URL. If the response status code is not `200` throttling will kick in until a `200` response status code is returned.
If no URL is provided or the URL provided doesn't contain the scheme then the HTTP check will be disabled. For example `--throttle-http="http://1.2.3.4:6789/throttle"` will enable the HTTP check/throttling, but `--throttle-http="1.2.3.4:6789/throttle"` will not.
The URL can be queried and updated dynamically via [interactive interface](interactive-commands.md).
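For illustration, a minimal Go sketch of an endpoint you could point `--throttle-http` at (the port, path and toggle mechanism are assumptions). `gh-ost` only inspects the status code of its `HEAD` request: `200` means proceed, anything else means throttle.
```go
package main

import (
	"net/http"
	"sync/atomic"
)

var throttled int32 // flip to 1 (e.g. from another goroutine) to throttle

func main() {
	http.HandleFunc("/throttle", func(w http.ResponseWriter, r *http.Request) {
		if atomic.LoadInt32(&throttled) == 1 {
			w.WriteHeader(http.StatusServiceUnavailable) // throttle
			return
		}
		w.WriteHeader(http.StatusOK) // proceed
	})
	http.ListenAndServe(":6789", nil)
}
```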
#### Manual control
In addition to the above, you are able to take control and throttle the operation any time you like.

View File

@ -24,15 +24,15 @@ Initial output lines may look like this:
2016-05-19 17:57:11 INFO connection validated on 127.0.0.1:3306
2016-05-19 17:57:11 INFO rotate to next log name: mysql-bin.002587
2016-05-19 17:57:11 INFO connection validated on 127.0.0.1:3306
2016-05-19 17:57:11 INFO Droppping table `mydb`.`_mytable_gst`
2016-05-19 17:57:11 INFO Dropping table `mydb`.`_mytable_gst`
2016-05-19 17:57:11 INFO Table dropped
2016-05-19 17:57:11 INFO Droppping table `mydb`.`_mytable_old`
2016-05-19 17:57:11 INFO Dropping table `mydb`.`_mytable_old`
2016-05-19 17:57:11 INFO Table dropped
2016-05-19 17:57:11 INFO Creating ghost table `mydb`.`_mytable_gst`
2016-05-19 17:57:11 INFO Ghost table created
2016-05-19 17:57:11 INFO Altering ghost table `mydb`.`_mytable_gst`
2016-05-19 17:57:11 INFO Ghost table altered
2016-05-19 17:57:11 INFO Droppping table `mydb`.`_mytable_osc`
2016-05-19 17:57:11 INFO Dropping table `mydb`.`_mytable_osc`
2016-05-19 17:57:11 INFO Table dropped
2016-05-19 17:57:11 INFO Creating changelog table `mydb`.`_mytable_osc`
2016-05-19 17:57:11 INFO Changelog table created

View File

@ -16,7 +16,7 @@ Use of triggers simplifies a lot of the flow in doing a live table migration, bu
Triggers are stored routines which are invoked on a per-row operation upon `INSERT`, `DELETE`, `UPDATE` on a table.
They were introduced in MySQL `5.0`.
A trigger may contain a set of queries, and these queries run in the same transaction space as the query that manipulates the table. This makes for an atomicy of both the original operation on the table and the trigger-invoked operations.
A trigger may contain a set of queries, and these queries run in the same transaction space as the query that manipulates the table. This makes for an atomicity of both the original operation on the table and the trigger-invoked operations.
### Triggers, overhead

View File

@ -14,6 +14,8 @@ import (
"sync/atomic"
"time"
"github.com/satori/go.uuid"
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
@ -26,23 +28,23 @@ type RowsEstimateMethod string
const (
TableStatusRowsEstimate RowsEstimateMethod = "TableStatusRowsEstimate"
ExplainRowsEstimate = "ExplainRowsEstimate"
CountRowsEstimate = "CountRowsEstimate"
ExplainRowsEstimate RowsEstimateMethod = "ExplainRowsEstimate"
CountRowsEstimate RowsEstimateMethod = "CountRowsEstimate"
)
type CutOver int
const (
CutOverAtomic CutOver = iota
CutOverTwoStep = iota
CutOverAtomic CutOver = iota
CutOverTwoStep
)
type ThrottleReasonHint string
const (
NoThrottleReasonHint ThrottleReasonHint = "NoThrottleReasonHint"
UserCommandThrottleReasonHint = "UserCommandThrottleReasonHint"
LeavingHibernationThrottleReasonHint = "LeavingHibernationThrottleReasonHint"
UserCommandThrottleReasonHint ThrottleReasonHint = "UserCommandThrottleReasonHint"
LeavingHibernationThrottleReasonHint ThrottleReasonHint = "LeavingHibernationThrottleReasonHint"
)
const (
@ -71,6 +73,8 @@ func NewThrottleCheckResult(throttle bool, reason string, reasonHint ThrottleRea
// MigrationContext has the general, global state of migration. It is used by
// all components throughout the migration process.
type MigrationContext struct {
Uuid string
DatabaseName string
OriginalTableName string
AlterStatement string
@ -87,6 +91,8 @@ type MigrationContext struct {
SkipRenamedColumns bool
IsTungsten bool
DiscardForeignKeys bool
AliyunRDS bool
GoogleCloudPlatform bool
config ContextConfig
configMutex *sync.Mutex
@ -114,6 +120,8 @@ type MigrationContext struct {
CriticalLoadHibernateSeconds int64
PostponeCutOverFlagFile string
CutOverLockTimeoutSeconds int64
CutOverExponentialBackoff bool
ExponentialBackoffMaxInterval int64
ForceNamedCutOverCommand bool
PanicFlagFile string
HooksPath string
@ -179,8 +187,10 @@ type MigrationContext struct {
OriginalTableColumnsOnApplier *sql.ColumnList
OriginalTableColumns *sql.ColumnList
OriginalTableVirtualColumns *sql.ColumnList
OriginalTableUniqueKeys [](*sql.UniqueKey)
GhostTableColumns *sql.ColumnList
GhostTableVirtualColumns *sql.ColumnList
GhostTableUniqueKeys [](*sql.UniqueKey)
UniqueKey *sql.UniqueKey
SharedColumns *sql.ColumnList
@ -195,8 +205,6 @@ type MigrationContext struct {
ForceTmpTableName string
recentBinlogCoordinates mysql.BinlogCoordinates
CanStopStreaming func() bool
}
type ContextConfig struct {
@ -212,14 +220,9 @@ type ContextConfig struct {
}
}
var context *MigrationContext
func init() {
context = newMigrationContext()
}
func newMigrationContext() *MigrationContext {
func NewMigrationContext() *MigrationContext {
return &MigrationContext{
Uuid: uuid.NewV4().String(),
defaultNumRetries: 60,
ChunkSize: 1000,
InspectorConnectionConfig: mysql.NewConnectionConfig(),
@ -239,11 +242,6 @@ func newMigrationContext() *MigrationContext {
}
}
// GetMigrationContext
func GetMigrationContext() *MigrationContext {
return context
}
func getSafeTableName(baseName string, suffix string) string {
name := fmt.Sprintf("_%s_%s", baseName, suffix)
if len(name) <= mysql.MaxTableNameLength {
@ -349,6 +347,14 @@ func (this *MigrationContext) SetCutOverLockTimeoutSeconds(timeoutSeconds int64)
return nil
}
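// SetExponentialBackoffMaxInterval sets the longest wait, in seconds, between
// failed cut-over attempts when exponential backoff is enabled; values under
// 2 seconds are rejected.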
func (this *MigrationContext) SetExponentialBackoffMaxInterval(intervalSeconds int64) error {
if intervalSeconds < 2 {
return fmt.Errorf("Minimal maximum interval is 2sec. Timeout remains at %d", this.ExponentialBackoffMaxInterval)
}
this.ExponentialBackoffMaxInterval = intervalSeconds
return nil
}
func (this *MigrationContext) SetDefaultNumRetries(retries int64) {
this.throttleMutex.Lock()
defer this.throttleMutex.Unlock()

View File

@ -19,27 +19,27 @@ func init() {
func TestGetTableNames(t *testing.T) {
{
context = newMigrationContext()
context := NewMigrationContext()
context.OriginalTableName = "some_table"
test.S(t).ExpectEquals(context.GetOldTableName(), "_some_table_del")
test.S(t).ExpectEquals(context.GetGhostTableName(), "_some_table_gho")
test.S(t).ExpectEquals(context.GetChangelogTableName(), "_some_table_ghc")
}
{
context = newMigrationContext()
context := NewMigrationContext()
context.OriginalTableName = "a123456789012345678901234567890123456789012345678901234567890"
test.S(t).ExpectEquals(context.GetOldTableName(), "_a1234567890123456789012345678901234567890123456789012345678_del")
test.S(t).ExpectEquals(context.GetGhostTableName(), "_a1234567890123456789012345678901234567890123456789012345678_gho")
test.S(t).ExpectEquals(context.GetChangelogTableName(), "_a1234567890123456789012345678901234567890123456789012345678_ghc")
}
{
context = newMigrationContext()
context := NewMigrationContext()
context.OriginalTableName = "a123456789012345678901234567890123456789012345678901234567890123"
oldTableName := context.GetOldTableName()
test.S(t).ExpectEquals(oldTableName, "_a1234567890123456789012345678901234567890123456789012345678_del")
}
{
context = newMigrationContext()
context := NewMigrationContext()
context.OriginalTableName = "a123456789012345678901234567890123456789012345678901234567890123"
context.TimestampOldTable = true
longForm := "Jan 2, 2006 at 3:04pm (MST)"
@ -48,7 +48,7 @@ func TestGetTableNames(t *testing.T) {
test.S(t).ExpectEquals(oldTableName, "_a1234567890123456789012345678901234567890123_20130203195400_del")
}
{
context = newMigrationContext()
context := NewMigrationContext()
context.OriginalTableName = "foo_bar_baz"
context.ForceTmpTableName = "tmp"
test.S(t).ExpectEquals(context.GetOldTableName(), "_tmp_del")

View File

@ -40,10 +40,9 @@ func FileExists(fileName string) bool {
func TouchFile(fileName string) error {
f, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE, 0755)
if err != nil {
return (err)
return err
}
defer f.Close()
return nil
return f.Close()
}
// StringContainsAll returns true if `s` contains all non empty given `substrings`
@ -64,17 +63,27 @@ func StringContainsAll(s string, substrings ...string) bool {
return nonEmptyStringsFound
}
func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig) (string, error) {
query := `select @@global.port, @@global.version`
func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext *MigrationContext) (string, error) {
versionQuery := `select @@global.version`
var port, extraPort int
var version string
if err := db.QueryRow(query).Scan(&port, &version); err != nil {
if err := db.QueryRow(versionQuery).Scan(&version); err != nil {
return "", err
}
extraPortQuery := `select @@global.extra_port`
if err := db.QueryRow(extraPortQuery).Scan(&extraPort); err != nil {
// swallow this error. not all servers support extra_port
}
// AliyunRDS sets the user's port to "NULL"; replace it with the gh-ost param
// GCP sets the user's port to "NULL"; replace it with the gh-ost param
if migrationContext.AliyunRDS || migrationContext.GoogleCloudPlatform {
port = connectionConfig.Key.Port
} else {
portQuery := `select @@global.port`
if err := db.QueryRow(portQuery).Scan(&port); err != nil {
return "", err
}
}
if connectionConfig.Key.Port == port || (extraPort > 0 && connectionConfig.Key.Port == extraPort) {
log.Infof("connection validated on %+v", connectionConfig.Key)

View File

@ -7,17 +7,18 @@ package binlog
import (
"fmt"
"github.com/github/gh-ost/go/sql"
"strings"
"github.com/github/gh-ost/go/sql"
)
type EventDML string
const (
NotDML EventDML = "NoDML"
InsertDML = "Insert"
UpdateDML = "Update"
DeleteDML = "Delete"
InsertDML EventDML = "Insert"
UpdateDML EventDML = "Update"
DeleteDML EventDML = "Delete"
)
func ToEventDML(description string) EventDML {

View File

@ -26,7 +26,7 @@ func NewBinlogEntry(logFile string, logPos uint64) *BinlogEntry {
return binlogEntry
}
// NewBinlogEntry creates an empty, ready to go BinlogEntry object
// NewBinlogEntryAt creates an empty, ready to go BinlogEntry object
func NewBinlogEntryAt(coordinates mysql.BinlogCoordinates) *BinlogEntry {
binlogEntry := &BinlogEntry{
Coordinates: coordinates,
@ -41,7 +41,7 @@ func (this *BinlogEntry) Duplicate() *BinlogEntry {
return binlogEntry
}
// Duplicate creates and returns a new binlog entry, with some of the attributes pre-assigned
// String() returns a string representation of this binlog entry
func (this *BinlogEntry) String() string {
return fmt.Sprintf("[BinlogEntry at %+v; dml:%+v]", this.Coordinates, this.DmlEvent)
}

View File

@ -26,28 +26,27 @@ type GoMySQLReader struct {
currentCoordinates mysql.BinlogCoordinates
currentCoordinatesMutex *sync.Mutex
LastAppliedRowsEventHint mysql.BinlogCoordinates
MigrationContext *base.MigrationContext
}
func NewGoMySQLReader(connectionConfig *mysql.ConnectionConfig) (binlogReader *GoMySQLReader, err error) {
func NewGoMySQLReader(migrationContext *base.MigrationContext) (binlogReader *GoMySQLReader, err error) {
binlogReader = &GoMySQLReader{
connectionConfig: connectionConfig,
connectionConfig: migrationContext.InspectorConnectionConfig,
currentCoordinates: mysql.BinlogCoordinates{},
currentCoordinatesMutex: &sync.Mutex{},
binlogSyncer: nil,
binlogStreamer: nil,
MigrationContext: base.GetMigrationContext(),
}
serverId := uint32(binlogReader.MigrationContext.ReplicaServerId)
serverId := uint32(migrationContext.ReplicaServerId)
binlogSyncerConfig := &replication.BinlogSyncerConfig{
ServerID: serverId,
Flavor: "mysql",
Host: connectionConfig.Key.Hostname,
Port: uint16(connectionConfig.Key.Port),
User: connectionConfig.User,
Password: connectionConfig.Password,
binlogSyncerConfig := replication.BinlogSyncerConfig{
ServerID: serverId,
Flavor: "mysql",
Host: binlogReader.connectionConfig.Key.Hostname,
Port: uint16(binlogReader.connectionConfig.Key.Port),
User: binlogReader.connectionConfig.User,
Password: binlogReader.connectionConfig.Password,
UseDecimal: true,
}
binlogReader.binlogSyncer = replication.NewBinlogSyncer(binlogSyncerConfig)
@ -57,12 +56,12 @@ func NewGoMySQLReader(connectionConfig *mysql.ConnectionConfig) (binlogReader *G
// ConnectBinlogStreamer
func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordinates) (err error) {
if coordinates.IsEmpty() {
return log.Errorf("Emptry coordinates at ConnectBinlogStreamer()")
return log.Errorf("Empty coordinates at ConnectBinlogStreamer()")
}
this.currentCoordinates = coordinates
log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates)
// Start sync with sepcified binlog file and position
// Start sync with specified binlog file and position
this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{this.currentCoordinates.LogFile, uint32(this.currentCoordinates.LogPos)})
return err
@ -113,8 +112,8 @@ func (this *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEven
binlogEntry.DmlEvent.WhereColumnValues = sql.ToColumnValues(row)
}
}
// The channel will do the throttling. Whoever is reding from the channel
// decides whether action is taken sycnhronously (meaning we wait before
// The channel will do the throttling. Whoever is reading from the channel
// decides whether action is taken synchronously (meaning we wait before
// next iteration) or asynchronously (we keep pushing more events)
// In reality, reads will be synchronous
entriesChannel <- binlogEntry
@ -147,7 +146,7 @@ func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesCha
defer this.currentCoordinatesMutex.Unlock()
this.currentCoordinates.LogFile = string(rotateEvent.NextLogName)
}()
log.Infof("rotate to next log name: %s", rotateEvent.NextLogName)
log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), rotateEvent.NextLogName)
} else if rowsEvent, ok := ev.Event.(*replication.RowsEvent); ok {
if err := this.handleRowsEvent(ev, rowsEvent, entriesChannel); err != nil {
return err
@ -160,10 +159,6 @@ func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesCha
}
func (this *GoMySQLReader) Close() error {
// Historically there was a:
// this.binlogSyncer.Close()
// here. A new go-mysql version closes the binlog syncer connection independently.
// I will go against the sacred rules of comments and just leave this here.
// This is the year 2017. Let's see what year these comments get deleted.
this.binlogSyncer.Close()
return nil
}

View File

@ -43,10 +43,9 @@ func acceptSignals(migrationContext *base.MigrationContext) {
// main is the application's entry point. It will either spawn a CLI or HTTP interface.
func main() {
migrationContext := base.GetMigrationContext()
migrationContext := base.NewMigrationContext()
flag.StringVar(&migrationContext.InspectorConnectionConfig.Key.Hostname, "host", "127.0.0.1", "MySQL hostname (preferably a replica, not the master)")
flag.StringVar(&migrationContext.AssumeMasterHostname, "assume-master-host", "", "(optional) explicitly tell gh-ost the identity of the master. Format: some.host.com[:port] This is useful in master-master setups where you wish to pick an explicit master, or in a tungsten-replicator where gh-ost is unabel to determine the master")
flag.StringVar(&migrationContext.AssumeMasterHostname, "assume-master-host", "", "(optional) explicitly tell gh-ost the identity of the master. Format: some.host.com[:port] This is useful in master-master setups where you wish to pick an explicit master, or in a tungsten-replicator where gh-ost is unable to determine the master")
flag.IntVar(&migrationContext.InspectorConnectionConfig.Key.Port, "port", 3306, "MySQL port (preferably a replica, not the master)")
flag.StringVar(&migrationContext.CliUser, "user", "", "MySQL user")
flag.StringVar(&migrationContext.CliPassword, "password", "", "MySQL password")
@ -68,6 +67,8 @@ func main() {
flag.BoolVar(&migrationContext.IsTungsten, "tungsten", false, "explicitly let gh-ost know that you are running on a tungsten-replication based topology (you are likely to also provide --assume-master-host)")
flag.BoolVar(&migrationContext.DiscardForeignKeys, "discard-foreign-keys", false, "DANGER! This flag will migrate a table that has foreign keys and will NOT create foreign keys on the ghost table, thus your altered table will have NO foreign keys. This is useful for intentional dropping of foreign keys")
flag.BoolVar(&migrationContext.SkipForeignKeyChecks, "skip-foreign-key-checks", false, "set to 'true' when you know for certain there are no foreign keys on your table, and wish to skip the time it takes for gh-ost to verify that")
flag.BoolVar(&migrationContext.AliyunRDS, "aliyun-rds", false, "set to 'true' when you execute on Aliyun RDS.")
flag.BoolVar(&migrationContext.GoogleCloudPlatform, "gcp", false, "set to 'true' when you execute on a 1st generation Google Cloud Platform (GCP).")
executeFlag := flag.Bool("execute", false, "actually execute the alter & migrate the table. Default is noop: do some tests and exit")
flag.BoolVar(&migrationContext.TestOnReplica, "test-on-replica", false, "Have the migration run on a replica, not on the master. At the end of migration replication is stopped, and tables are swapped and immediately swap-revert. Replication remains stopped and you can compare the two tables for building trust")
@ -83,6 +84,8 @@ func main() {
flag.BoolVar(&migrationContext.SwitchToRowBinlogFormat, "switch-to-rbr", false, "let this tool automatically switch binary log format to 'ROW' on the replica, if needed. The format will NOT be switched back. I'm too scared to do that, and wish to protect you if you happen to execute another migration while this one is running")
flag.BoolVar(&migrationContext.AssumeRBR, "assume-rbr", false, "set to 'true' when you know for certain your server uses 'ROW' binlog_format. gh-ost is unable to tell, even after reading binlog_format, whether the replication process does indeed use 'ROW', and restarts replication to be certain RBR setting is applied. Such operation requires SUPER privileges which you might not have. Setting this flag avoids restarting replication and you can proceed to use gh-ost without SUPER privileges")
flag.BoolVar(&migrationContext.CutOverExponentialBackoff, "cut-over-exponential-backoff", false, "Wait exponentially longer intervals between failed cut-over attempts. Wait intervals obey a maximum configurable with 'exponential-backoff-max-interval'.")
exponentialBackoffMaxInterval := flag.Int64("exponential-backoff-max-interval", 64, "Maximum number of seconds to wait between attempts when performing various operations with exponential backoff.")
chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 100-100,000)")
dmlBatchSize := flag.Int64("dml-batch-size", 10, "batch size for DML events to apply in a single transaction (range 1-100)")
defaultRetries := flag.Int64("default-retries", 60, "Default number of retries for various operations before panicking")
@ -238,11 +241,14 @@ func main() {
if err := migrationContext.SetCutOverLockTimeoutSeconds(*cutOverLockTimeoutSeconds); err != nil {
log.Errore(err)
}
if err := migrationContext.SetExponentialBackoffMaxInterval(*exponentialBackoffMaxInterval); err != nil {
log.Errore(err)
}
log.Infof("starting gh-ost %+v", AppVersion)
acceptSignals(migrationContext)
migrator := logic.NewMigrator()
migrator := logic.NewMigrator(migrationContext)
err := migrator.Migrate()
if err != nil {
migrator.ExecOnFailureHook()

View File

@ -24,50 +24,77 @@ const (
atomicCutOverMagicHint = "ghost-cut-over-sentry"
)
type dmlBuildResult struct {
query string
args []interface{}
rowsDelta int64
err error
}
func newDmlBuildResult(query string, args []interface{}, rowsDelta int64, err error) *dmlBuildResult {
return &dmlBuildResult{
query: query,
args: args,
rowsDelta: rowsDelta,
err: err,
}
}
func newDmlBuildResultError(err error) *dmlBuildResult {
return &dmlBuildResult{
err: err,
}
}
// Applier connects and writes the the applier-server, which is the server where migration
// happens. This is typically the master, but could be a replica when `--test-on-replica` or
// `--execute-on-replica` are given.
// Applier is the one to actually write row data and apply binlog events onto the ghost table.
// It is where the ghost & changelog tables get created. It is where the cut-over phase happens.
type Applier struct {
connectionConfig *mysql.ConnectionConfig
db *gosql.DB
singletonDB *gosql.DB
migrationContext *base.MigrationContext
connectionConfig *mysql.ConnectionConfig
db *gosql.DB
singletonDB *gosql.DB
migrationContext *base.MigrationContext
finishedMigrating int64
}
func NewApplier() *Applier {
func NewApplier(migrationContext *base.MigrationContext) *Applier {
return &Applier{
connectionConfig: base.GetMigrationContext().ApplierConnectionConfig,
migrationContext: base.GetMigrationContext(),
connectionConfig: migrationContext.ApplierConnectionConfig,
migrationContext: migrationContext,
finishedMigrating: 0,
}
}
func (this *Applier) InitDBConnections() (err error) {
applierUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName)
if this.db, _, err = sqlutils.GetDB(applierUri); err != nil {
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, applierUri); err != nil {
return err
}
singletonApplierUri := fmt.Sprintf("%s?timeout=0", applierUri)
if this.singletonDB, _, err = sqlutils.GetDB(singletonApplierUri); err != nil {
if this.singletonDB, _, err = mysql.GetDB(this.migrationContext.Uuid, singletonApplierUri); err != nil {
return err
}
this.singletonDB.SetMaxOpenConns(1)
version, err := base.ValidateConnection(this.db, this.connectionConfig)
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
if err != nil {
return err
}
if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig); err != nil {
if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig, this.migrationContext); err != nil {
return err
}
this.migrationContext.ApplierMySQLVersion = version
if err := this.validateAndReadTimeZone(); err != nil {
return err
}
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
this.connectionConfig.ImpliedKey = impliedKey
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
this.connectionConfig.ImpliedKey = impliedKey
}
}
if err := this.readTableColumns(); err != nil {
return err
@ -90,7 +117,7 @@ func (this *Applier) validateAndReadTimeZone() error {
// readTableColumns reads table columns on applier
func (this *Applier) readTableColumns() (err error) {
log.Infof("Examining table structure on applier")
this.migrationContext.OriginalTableColumnsOnApplier, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName)
this.migrationContext.OriginalTableColumnsOnApplier, _, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName)
if err != nil {
return err
}
@ -99,7 +126,6 @@ func (this *Applier) readTableColumns() (err error) {
// showTableStatus returns the output of `show table status like '...'` command
func (this *Applier) showTableStatus(tableName string) (rowMap sqlutils.RowMap) {
rowMap = nil
query := fmt.Sprintf(`show /* gh-ost */ table status from %s like '%s'`, sql.EscapeName(this.migrationContext.DatabaseName), tableName)
sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
rowMap = m
@ -213,7 +239,7 @@ func (this *Applier) dropTable(tableName string) error {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(tableName),
)
log.Infof("Droppping table %s.%s",
log.Infof("Dropping table %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(tableName),
)
@ -263,7 +289,7 @@ func (this *Applier) WriteChangelog(hint, value string) (string, error) {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetChangelogTableName()),
)
_, err := sqlutils.Exec(this.db, query, explicitId, hint, value)
_, err := sqlutils.ExecNoPrepare(this.db, query, explicitId, hint, value)
return hint, err
}
@ -298,6 +324,9 @@ func (this *Applier) InitiateHeartbeat() {
heartbeatTick := time.Tick(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
for range heartbeatTick {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
// Generally speaking, we would issue a goroutine, but I'd actually rather
// have this block the loop rather than spam the master in the event something
// goes wrong
@ -380,7 +409,7 @@ func (this *Applier) ReadMigrationRangeValues() error {
// CalculateNextIterationRangeEndValues reads the next-iteration-range-end unique key values,
// which will be used for copying the next chunk of rows. It returns "false" if there is
// no further chunk to work through, i.e. we're past the last chunk and are done with
// itrating the range (and this done with copying row chunks)
// iterating the range (and this done with copying row chunks)
func (this *Applier) CalculateNextIterationRangeEndValues() (hasFurtherRange bool, err error) {
this.migrationContext.MigrationIterationRangeMinValues = this.migrationContext.MigrationIterationRangeMaxValues
if this.migrationContext.MigrationIterationRangeMinValues == nil {
@ -452,6 +481,7 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected
if err != nil {
return nil, err
}
defer tx.Rollback()
sessionQuery := fmt.Sprintf(`SET
SESSION time_zone = '%s',
sql_mode = CONCAT(@@session.sql_mode, ',STRICT_ALL_TABLES')
@ -899,79 +929,107 @@ func (this *Applier) ShowStatusVariable(variableName string) (result int64, err
return result, nil
}
// updateModifiesUniqueKeyColumns checks whether an UPDATE DML event actually
// modifies values of the migration's unique key (the iterated key). This will call
// for special handling.
func (this *Applier) updateModifiesUniqueKeyColumns(dmlEvent *binlog.BinlogDMLEvent) (modifiedColumn string, isModified bool) {
for _, column := range this.migrationContext.UniqueKey.Columns.Columns() {
tableOrdinal := this.migrationContext.OriginalTableColumns.Ordinals[column.Name]
whereColumnValue := dmlEvent.WhereColumnValues.AbstractValues()[tableOrdinal]
newColumnValue := dmlEvent.NewColumnValues.AbstractValues()[tableOrdinal]
if newColumnValue != whereColumnValue {
return column.Name, true
}
}
return "", false
}
// buildDMLEventQuery creates a query to operate on the ghost table, based on an intercepted binlog
// event entry on the original table.
func (this *Applier) buildDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) (query string, args []interface{}, rowsDelta int64, err error) {
func (this *Applier) buildDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) (results [](*dmlBuildResult)) {
switch dmlEvent.DML {
case binlog.DeleteDML:
{
query, uniqueKeyArgs, err := sql.BuildDMLDeleteQuery(dmlEvent.DatabaseName, this.migrationContext.GetGhostTableName(), this.migrationContext.OriginalTableColumns, &this.migrationContext.UniqueKey.Columns, dmlEvent.WhereColumnValues.AbstractValues())
return query, uniqueKeyArgs, -1, err
return append(results, newDmlBuildResult(query, uniqueKeyArgs, -1, err))
}
case binlog.InsertDML:
{
query, sharedArgs, err := sql.BuildDMLInsertQuery(dmlEvent.DatabaseName, this.migrationContext.GetGhostTableName(), this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns, dmlEvent.NewColumnValues.AbstractValues())
return query, sharedArgs, 1, err
return append(results, newDmlBuildResult(query, sharedArgs, 1, err))
}
case binlog.UpdateDML:
{
if _, isModified := this.updateModifiesUniqueKeyColumns(dmlEvent); isModified {
dmlEvent.DML = binlog.DeleteDML
results = append(results, this.buildDMLEventQuery(dmlEvent)...)
dmlEvent.DML = binlog.InsertDML
results = append(results, this.buildDMLEventQuery(dmlEvent)...)
return results
}
query, sharedArgs, uniqueKeyArgs, err := sql.BuildDMLUpdateQuery(dmlEvent.DatabaseName, this.migrationContext.GetGhostTableName(), this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns, &this.migrationContext.UniqueKey.Columns, dmlEvent.NewColumnValues.AbstractValues(), dmlEvent.WhereColumnValues.AbstractValues())
args := sqlutils.Args()
args = append(args, sharedArgs...)
args = append(args, uniqueKeyArgs...)
return query, args, 0, err
return append(results, newDmlBuildResult(query, args, 0, err))
}
}
return "", args, 0, fmt.Errorf("Unknown dml event type: %+v", dmlEvent.DML)
return append(results, newDmlBuildResultError(fmt.Errorf("Unknown dml event type: %+v", dmlEvent.DML)))
}
// ApplyDMLEventQuery writes an entry to the ghost table, in response to an intercepted
// original-table binlog event
func (this *Applier) ApplyDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) error {
query, args, rowDelta, err := this.buildDMLEventQuery(dmlEvent)
if err != nil {
return err
}
// TODO The below is in preparation for transactional writes on the ghost tables.
// Such writes would be, for example:
// - prepended with sql_mode setup
// - prepended with time zone setup
// - prepended with SET SQL_LOG_BIN=0
// - prepended with SET FK_CHECKS=0
// etc.
//
// a known problem: https://github.com/golang/go/issues/9373 -- bigint unsigned values, not supported in database/sql
// is solved by silently converting unsigned bigints to string values.
//
err = func() error {
tx, err := this.db.Begin()
if err != nil {
return err
for _, buildResult := range this.buildDMLEventQuery(dmlEvent) {
if buildResult.err != nil {
return buildResult.err
}
sessionQuery := `SET
// TODO The below is in preparation for transactional writes on the ghost tables.
// Such writes would be, for example:
// - prepended with sql_mode setup
// - prepended with time zone setup
// - prepended with SET SQL_LOG_BIN=0
// - prepended with SET FK_CHECKS=0
// etc.
//
// a known problem: https://github.com/golang/go/issues/9373 -- bigint unsigned values, not supported in database/sql
// is solved by silently converting unsigned bigints to string values.
//
err := func() error {
tx, err := this.db.Begin()
if err != nil {
return err
}
rollback := func(err error) error {
tx.Rollback()
return err
}
sessionQuery := `SET
SESSION time_zone = '+00:00',
sql_mode = CONCAT(@@session.sql_mode, ',STRICT_ALL_TABLES')
`
if _, err := tx.Exec(sessionQuery); err != nil {
return err
}
if _, err := tx.Exec(query, args...); err != nil {
return err
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}()
if _, err := tx.Exec(sessionQuery); err != nil {
return rollback(err)
}
if _, err := tx.Exec(buildResult.query, buildResult.args...); err != nil {
return rollback(err)
}
if err := tx.Commit(); err != nil {
return err
}
return nil
}()
if err != nil {
err = fmt.Errorf("%s; query=%s; args=%+v", err.Error(), query, args)
return log.Errore(err)
}
// no error
atomic.AddInt64(&this.migrationContext.TotalDMLEventsApplied, 1)
if this.migrationContext.CountTableRows {
atomic.AddInt64(&this.migrationContext.RowsDeltaEstimate, rowDelta)
if err != nil {
err = fmt.Errorf("%s; query=%s; args=%+v", err.Error(), buildResult.query, buildResult.args)
return log.Errore(err)
}
// no error
atomic.AddInt64(&this.migrationContext.TotalDMLEventsApplied, 1)
if this.migrationContext.CountTableRows {
atomic.AddInt64(&this.migrationContext.RowsDeltaEstimate, buildResult.rowsDelta)
}
}
return nil
}
@ -1000,15 +1058,16 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
return rollback(err)
}
for _, dmlEvent := range dmlEvents {
query, args, rowDelta, err := this.buildDMLEventQuery(dmlEvent)
if err != nil {
return rollback(err)
for _, buildResult := range this.buildDMLEventQuery(dmlEvent) {
if buildResult.err != nil {
return rollback(buildResult.err)
}
if _, err := tx.Exec(buildResult.query, buildResult.args...); err != nil {
err = fmt.Errorf("%s; query=%s; args=%+v", err.Error(), buildResult.query, buildResult.args)
return rollback(err)
}
totalDelta += buildResult.rowsDelta
}
if _, err := tx.Exec(query, args...); err != nil {
err = fmt.Errorf("%s; query=%s; args=%+v", err.Error(), query, args)
return rollback(err)
}
totalDelta += rowDelta
}
if err := tx.Commit(); err != nil {
return err
@ -1027,3 +1086,10 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
log.Debugf("ApplyDMLEventQueries() applied %d events in one transaction", len(dmlEvents))
return nil
}
func (this *Applier) Teardown() {
log.Debugf("Tearing down...")
this.db.Close()
this.singletonDB.Close()
atomic.StoreInt64(&this.finishedMigrating, 1)
}

View File

@ -37,9 +37,9 @@ type HooksExecutor struct {
migrationContext *base.MigrationContext
}
func NewHooksExecutor() *HooksExecutor {
func NewHooksExecutor(migrationContext *base.MigrationContext) *HooksExecutor {
return &HooksExecutor{
migrationContext: base.GetMigrationContext(),
migrationContext: migrationContext,
}
}
@ -64,6 +64,7 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [
env = append(env, fmt.Sprintf("GH_OST_INSPECTED_HOST=%s", this.migrationContext.GetInspectorHostname()))
env = append(env, fmt.Sprintf("GH_OST_EXECUTING_HOST=%s", this.migrationContext.Hostname))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT=%s", this.migrationContext.HooksHintMessage))
env = append(env, fmt.Sprintf("GH_OST_DRY_RUN=%t", this.migrationContext.Noop))
for _, variable := range extraVariables {
env = append(env, variable)

View File

@ -26,30 +26,39 @@ const startSlavePostWaitMilliseconds = 500 * time.Millisecond
// Inspector reads data from the read-MySQL-server (typically a replica, but can be the master)
// It is used for gaining initial status and structure, and later also follow up on progress and changelog
type Inspector struct {
connectionConfig *mysql.ConnectionConfig
db *gosql.DB
migrationContext *base.MigrationContext
connectionConfig *mysql.ConnectionConfig
db *gosql.DB
informationSchemaDb *gosql.DB
migrationContext *base.MigrationContext
}
func NewInspector() *Inspector {
func NewInspector(migrationContext *base.MigrationContext) *Inspector {
return &Inspector{
connectionConfig: base.GetMigrationContext().InspectorConnectionConfig,
migrationContext: base.GetMigrationContext(),
connectionConfig: migrationContext.InspectorConnectionConfig,
migrationContext: migrationContext,
}
}
func (this *Inspector) InitDBConnections() (err error) {
inspectorUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName)
if this.db, _, err = sqlutils.GetDB(inspectorUri); err != nil {
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, inspectorUri); err != nil {
return err
}
informationSchemaUri := this.connectionConfig.GetDBUri("information_schema")
if this.informationSchemaDb, _, err = mysql.GetDB(this.migrationContext.Uuid, informationSchemaUri); err != nil {
return err
}
if err := this.validateConnection(); err != nil {
return err
}
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
this.connectionConfig.ImpliedKey = impliedKey
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
this.connectionConfig.ImpliedKey = impliedKey
}
}
if err := this.validateGrants(); err != nil {
return err
@ -80,24 +89,24 @@ func (this *Inspector) ValidateOriginalTable() (err error) {
return nil
}
func (this *Inspector) InspectTableColumnsAndUniqueKeys(tableName string) (columns *sql.ColumnList, uniqueKeys [](*sql.UniqueKey), err error) {
func (this *Inspector) InspectTableColumnsAndUniqueKeys(tableName string) (columns *sql.ColumnList, virtualColumns *sql.ColumnList, uniqueKeys [](*sql.UniqueKey), err error) {
uniqueKeys, err = this.getCandidateUniqueKeys(tableName)
if err != nil {
return columns, uniqueKeys, err
return columns, virtualColumns, uniqueKeys, err
}
if len(uniqueKeys) == 0 {
return columns, uniqueKeys, fmt.Errorf("No PRIMARY nor UNIQUE key found in table! Bailing out")
return columns, virtualColumns, uniqueKeys, fmt.Errorf("No PRIMARY nor UNIQUE key found in table! Bailing out")
}
columns, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, tableName)
columns, virtualColumns, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, tableName)
if err != nil {
return columns, uniqueKeys, err
return columns, virtualColumns, uniqueKeys, err
}
return columns, uniqueKeys, nil
return columns, virtualColumns, uniqueKeys, nil
}
func (this *Inspector) InspectOriginalTable() (err error) {
this.migrationContext.OriginalTableColumns, this.migrationContext.OriginalTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.OriginalTableName)
this.migrationContext.OriginalTableColumns, this.migrationContext.OriginalTableVirtualColumns, this.migrationContext.OriginalTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.OriginalTableName)
if err != nil {
return err
}
@ -113,7 +122,7 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
return fmt.Errorf("It seems like table structure is not identical between master and replica. This scenario is not supported.")
}
this.migrationContext.GhostTableColumns, this.migrationContext.GhostTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.GetGhostTableName())
this.migrationContext.GhostTableColumns, this.migrationContext.GhostTableVirtualColumns, this.migrationContext.GhostTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.GetGhostTableName())
if err != nil {
return err
}
@ -156,21 +165,15 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
return fmt.Errorf("Chosen key (%s) has nullable columns. Bailing out. To force this operation to continue, supply --allow-nullable-unique-key flag. Only do so if you are certain there are no actual NULL values in this key. As long as there aren't, migration should be fine. NULL values in columns of this key will corrupt migration's data", this.migrationContext.UniqueKey)
}
}
if !this.migrationContext.UniqueKey.IsPrimary() {
if this.migrationContext.OriginalBinlogRowImage != "FULL" {
return fmt.Errorf("binlog_row_image is '%s' and chosen key is %s, which is not the primary key. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.migrationContext.OriginalBinlogRowImage, this.migrationContext.UniqueKey)
}
}
this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns = this.getSharedColumns(this.migrationContext.OriginalTableColumns, this.migrationContext.GhostTableColumns, this.migrationContext.ColumnRenameMap)
this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns = this.getSharedColumns(this.migrationContext.OriginalTableColumns, this.migrationContext.GhostTableColumns, this.migrationContext.OriginalTableVirtualColumns, this.migrationContext.GhostTableVirtualColumns, this.migrationContext.ColumnRenameMap)
log.Infof("Shared columns are %s", this.migrationContext.SharedColumns)
// By fact that a non-empty unique key exists we also know the shared columns are non-empty
// This additional step looks at which columns are unsigned. We could have merged this within
// the `getTableColumns()` function, but it's a later patch and introduces some complexity; I feel
// comfortable in doing this as a separate step.
this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns)
this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &this.migrationContext.UniqueKey.Columns)
this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns, &this.migrationContext.UniqueKey.Columns)
this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.GetGhostTableName(), this.migrationContext.GhostTableColumns, this.migrationContext.MappedSharedColumns)
for i := range this.migrationContext.SharedColumns.Columns() {
@ -196,13 +199,13 @@ func (this *Inspector) validateConnection() error {
return fmt.Errorf("MySQL replication length limited to 32 characters. See https://dev.mysql.com/doc/refman/5.7/en/assigning-passwords.html")
}
version, err := base.ValidateConnection(this.db, this.connectionConfig)
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
this.migrationContext.InspectorMySQLVersion = version
return err
}
// validateGrants verifies the user by which we're executing has necessary grants
// to do its thang.
// to do its thing.
func (this *Inspector) validateGrants() error {
query := `show /* gh-ost */ grants for current_user()`
foundAll := false
@ -229,6 +232,9 @@ func (this *Inspector) validateGrants() error {
if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", this.migrationContext.DatabaseName)) {
foundDBAll = true
}
if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", strings.Replace(this.migrationContext.DatabaseName, "_", "\\_", -1))) {
foundDBAll = true
}
if base.StringContainsAll(grant, `ALTER`, `CREATE`, `DELETE`, `DROP`, `INDEX`, `INSERT`, `LOCK TABLES`, `SELECT`, `TRIGGER`, `UPDATE`, ` ON *.*`) {
foundDBAll = true
}
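The extra check above matters because MySQL lets grants be written against the wildcard-escaped form of a database name (underscore is a wildcard in GRANT database patterns). A quick sketch of the string being matched; my_db is a hypothetical database name:

package main

import (
	"fmt"
	"strings"
)

func main() {
	databaseName := "my_db" // hypothetical name containing the `_` wildcard
	fmt.Printf("GRANT ALL PRIVILEGES ON `%s`.*\n", strings.Replace(databaseName, "_", "\\_", -1))
	// Output: GRANT ALL PRIVILEGES ON `my\_db`.*
}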
@ -261,7 +267,7 @@ func (this *Inspector) validateGrants() error {
// restartReplication is required so that we are _certain_ the binlog format and
// row image settings have actually been applied to the replication thread.
// It is entriely possible, for example, that the replication is using 'STATEMENT'
// It is entirely possible, for example, that the replication is using 'STATEMENT'
// binlog format even as the variable says 'ROW'
func (this *Inspector) restartReplication() error {
log.Infof("Restarting replication on %s:%d to make sure binlog settings apply to replication thread", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
@ -349,6 +355,9 @@ func (this *Inspector) validateBinlogs() error {
this.migrationContext.OriginalBinlogRowImage = "FULL"
}
this.migrationContext.OriginalBinlogRowImage = strings.ToUpper(this.migrationContext.OriginalBinlogRowImage)
if this.migrationContext.OriginalBinlogRowImage != "FULL" {
return fmt.Errorf("%s:%d has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogRowImage)
}
log.Infof("binary logs validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
return nil
@ -377,7 +386,7 @@ func (this *Inspector) validateLogSlaveUpdates() error {
}
if this.migrationContext.InspectorIsAlsoApplier() {
log.Warningf("log_slave_updates not found on %s:%d, but executing directly on master, so I'm proceeeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
log.Warningf("log_slave_updates not found on %s:%d, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
return nil
}
@ -545,44 +554,35 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
columnName := m.GetString("COLUMN_NAME")
columnType := m.GetString("COLUMN_TYPE")
if strings.Contains(columnType, "unsigned") {
for _, columnsList := range columnsLists {
columnsList.SetUnsigned(columnName)
for _, columnsList := range columnsLists {
column := columnsList.GetColumn(columnName)
if column == nil {
continue
}
}
if strings.Contains(columnType, "mediumint") {
for _, columnsList := range columnsLists {
columnsList.GetColumn(columnName).Type = sql.MediumIntColumnType
if strings.Contains(columnType, "unsigned") {
column.IsUnsigned = true
}
}
if strings.Contains(columnType, "timestamp") {
for _, columnsList := range columnsLists {
columnsList.GetColumn(columnName).Type = sql.TimestampColumnType
if strings.Contains(columnType, "mediumint") {
column.Type = sql.MediumIntColumnType
}
}
if strings.Contains(columnType, "datetime") {
for _, columnsList := range columnsLists {
columnsList.GetColumn(columnName).Type = sql.DateTimeColumnType
if strings.Contains(columnType, "timestamp") {
column.Type = sql.TimestampColumnType
}
}
if strings.Contains(columnType, "json") {
for _, columnsList := range columnsLists {
columnsList.GetColumn(columnName).Type = sql.JSONColumnType
if strings.Contains(columnType, "datetime") {
column.Type = sql.DateTimeColumnType
}
}
if strings.Contains(columnType, "float") {
for _, columnsList := range columnsLists {
columnsList.GetColumn(columnName).Type = sql.FloatColumnType
if strings.Contains(columnType, "json") {
column.Type = sql.JSONColumnType
}
}
if strings.HasPrefix(columnType, "enum") {
for _, columnsList := range columnsLists {
columnsList.GetColumn(columnName).Type = sql.EnumColumnType
if strings.Contains(columnType, "float") {
column.Type = sql.FloatColumnType
}
}
if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
for _, columnsList := range columnsLists {
columnsList.SetCharset(columnName, charset)
if strings.HasPrefix(columnType, "enum") {
column.Type = sql.EnumColumnType
}
if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
column.Charset = charset
}
}
return nil
@ -622,8 +622,6 @@ func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*
GROUP BY TABLE_SCHEMA, TABLE_NAME, INDEX_NAME
) AS UNIQUES
ON (
COLUMNS.TABLE_SCHEMA = UNIQUES.TABLE_SCHEMA AND
COLUMNS.TABLE_NAME = UNIQUES.TABLE_NAME AND
COLUMNS.COLUMN_NAME = UNIQUES.FIRST_COLUMN_NAME
)
WHERE
@ -685,21 +683,34 @@ func (this *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys [
}
// getSharedColumns returns the intersection of two lists of columns in same order as the first list
func (this *Inspector) getSharedColumns(originalColumns, ghostColumns *sql.ColumnList, columnRenameMap map[string]string) (*sql.ColumnList, *sql.ColumnList) {
func (this *Inspector) getSharedColumns(originalColumns, ghostColumns *sql.ColumnList, originalVirtualColumns, ghostVirtualColumns *sql.ColumnList, columnRenameMap map[string]string) (*sql.ColumnList, *sql.ColumnList) {
sharedColumnNames := []string{}
for _, originalColumn := range originalColumns.Names() {
isSharedColumn := false
for _, ghostColumn := range ghostColumns.Names() {
if strings.EqualFold(originalColumn, ghostColumn) {
isSharedColumn = true
break
}
if strings.EqualFold(columnRenameMap[originalColumn], ghostColumn) {
isSharedColumn = true
break
}
}
for droppedColumn := range this.migrationContext.DroppedColumnsMap {
if strings.EqualFold(originalColumn, droppedColumn) {
isSharedColumn = false
break
}
}
for _, virtualColumn := range originalVirtualColumns.Names() {
if strings.EqualFold(originalColumn, virtualColumn) {
isSharedColumn = false
}
}
for _, virtualColumn := range ghostVirtualColumns.Names() {
if strings.EqualFold(originalColumn, virtualColumn) {
isSharedColumn = false
}
}
if isSharedColumn {
@ -748,8 +759,14 @@ func (this *Inspector) getMasterConnectionConfig() (applierConfig *mysql.Connect
}
func (this *Inspector) getReplicationLag() (replicationLag time.Duration, err error) {
replicationLag, err = mysql.GetReplicationLag(
this.migrationContext.InspectorConnectionConfig,
replicationLag, err = mysql.GetReplicationLagFromSlaveStatus(
this.informationSchemaDb,
)
return replicationLag, err
}
func (this *Inspector) Teardown() {
this.db.Close()
this.informationSchemaDb.Close()
return
}
View File
@ -78,16 +78,18 @@ type Migrator struct {
rowCopyCompleteFlag int64
// copyRowsQueue should not be buffered; if buffered some non-damaging but
// excessive work happens at the end of the iteration as new copy-jobs arrive befroe realizing the copy is complete
// excessive work happens at the end of the iteration as new copy-jobs arrive before realizing the copy is complete
copyRowsQueue chan tableWriteFunc
applyEventsQueue chan *applyEventStruct
handledChangelogStates map[string]bool
finishedMigrating int64
}
func NewMigrator() *Migrator {
func NewMigrator(context *base.MigrationContext) *Migrator {
migrator := &Migrator{
migrationContext: base.GetMigrationContext(),
migrationContext: context,
parser: sql.NewParser(),
ghostTableMigrated: make(chan bool),
firstThrottlingCollected: make(chan bool, 3),
@ -97,13 +99,14 @@ func NewMigrator() *Migrator {
copyRowsQueue: make(chan tableWriteFunc),
applyEventsQueue: make(chan *applyEventStruct, base.MaxEventsBatchSize),
handledChangelogStates: make(map[string]bool),
finishedMigrating: 0,
}
return migrator
}
// initiateHooksExecutor
func (this *Migrator) initiateHooksExecutor() (err error) {
this.hooksExecutor = NewHooksExecutor()
this.hooksExecutor = NewHooksExecutor(this.migrationContext)
if err := this.hooksExecutor.initHooks(); err != nil {
return err
}
@ -146,6 +149,34 @@ func (this *Migrator) retryOperation(operation func() error, notFatalHint ...boo
return err
}
// `retryOperationWithExponentialBackoff` attempts to run the given function, waiting 2^(n-1)
// seconds between attempts, where `n` is the number of attempts made so far. It exits
// as soon as the function returns without error, or as soon as `MaxRetries`
// attempts have been made. Wait intervals between attempts are capped at
// `ExponentialBackoffMaxInterval` seconds.
func (this *Migrator) retryOperationWithExponentialBackoff(operation func() error, notFatalHint ...bool) (err error) {
var interval int64
maxRetries := int(this.migrationContext.MaxRetries())
maxInterval := this.migrationContext.ExponentialBackoffMaxInterval
for i := 0; i < maxRetries; i++ {
newInterval := int64(math.Exp2(float64(i - 1)))
if newInterval <= maxInterval {
interval = newInterval
}
if i != 0 {
time.Sleep(time.Duration(interval) * time.Second)
}
err = operation()
if err == nil {
return nil
}
}
if len(notFatalHint) == 0 {
this.migrationContext.PanicAbort <- err
}
return err
}
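As a standalone illustration of the schedule this implements, here is a minimal sketch with assumed values (maxRetries=5, a 4-second cap); the waits before attempts come out as 0s, 1s, 2s, 4s, 4s:

package main

import (
	"errors"
	"fmt"
	"math"
	"time"
)

func retryWithBackoff(maxRetries int, maxInterval int64, op func() error) (err error) {
	var interval int64
	for i := 0; i < maxRetries; i++ {
		// 2^(i-1) truncates to 0 for i==0, then grows 1, 2, 4, ... up to the cap.
		if newInterval := int64(math.Exp2(float64(i - 1))); newInterval <= maxInterval {
			interval = newInterval
		}
		if i != 0 {
			time.Sleep(time.Duration(interval) * time.Second)
		}
		if err = op(); err == nil {
			return nil
		}
	}
	return err
}

func main() {
	err := retryWithBackoff(5, 4, func() error { return errors.New("still failing") })
	fmt.Println(err) // still failing, after ~11s of capped exponential waits
}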
// executeAndThrottleOnError executes a given function. If it errors, it
// throttles.
func (this *Migrator) executeAndThrottleOnError(operation func() error) (err error) {
@ -179,7 +210,7 @@ func (this *Migrator) canStopStreaming() bool {
// onChangelogStateEvent is called when a binlog event operation on the changelog table is intercepted.
func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (err error) {
// Hey, I created the changlog table, I know the type of columns it has!
// Hey, I created the changelog table, I know the type of columns it has!
if hint := dmlEvent.NewColumnValues.StringColumn(2); hint != "state" {
return nil
}
@ -224,7 +255,11 @@ func (this *Migrator) listenOnPanicAbort() {
// validateStatement validates the `alter` statement meets criteria.
// At this time this means:
// - column renames are approved
// - no table rename allowed
func (this *Migrator) validateStatement() (err error) {
if this.parser.IsRenameTable() {
return fmt.Errorf("ALTER statement seems to RENAME the table. This is not supported, and you should run your RENAME outside gh-ost.")
}
if this.parser.HasNonTrivialRenames() && !this.migrationContext.SkipRenamedColumns {
this.migrationContext.ColumnRenameMap = this.parser.GetNonTrivialRenames()
if !this.migrationContext.ApproveRenamedColumns {
@ -299,6 +334,11 @@ func (this *Migrator) Migrate() (err error) {
if err := this.validateStatement(); err != nil {
return err
}
// After this point, we'll need to tear down anything that's been started,
// so that we don't leave things hanging around
defer this.teardown()
if err := this.initiateInspector(); err != nil {
return err
}
@ -364,7 +404,13 @@ func (this *Migrator) Migrate() (err error) {
if err := this.hooksExecutor.onBeforeCutOver(); err != nil {
return err
}
if err := this.retryOperation(this.cutOver); err != nil {
var retrier func(func() error, ...bool) error
if this.migrationContext.CutOverExponentialBackoff {
retrier = this.retryOperationWithExponentialBackoff
} else {
retrier = this.retryOperation
}
if err := retrier(this.cutOver); err != nil {
return err
}
atomic.StoreInt64(&this.migrationContext.CutOverCompleteFlag, 1)
@ -387,7 +433,7 @@ func (this *Migrator) ExecOnFailureHook() (err error) {
func (this *Migrator) handleCutOverResult(cutOverError error) (err error) {
if this.migrationContext.TestOnReplica {
// We're merly testing, we don't want to keep this state. Rollback the renames as possible
// We're merely testing, we don't want to keep this state. Rollback the renames as possible
this.applier.RenameTablesRollback()
}
if cutOverError == nil {
@ -653,7 +699,7 @@ func (this *Migrator) initiateServer() (err error) {
var f printStatusFunc = func(rule PrintStatusRule, writer io.Writer) {
this.printStatus(rule, writer)
}
this.server = NewServer(this.hooksExecutor, f)
this.server = NewServer(this.migrationContext, this.hooksExecutor, f)
if err := this.server.BindSocketFile(); err != nil {
return err
}
@ -673,7 +719,7 @@ func (this *Migrator) initiateServer() (err error) {
// - heartbeat
// When `--allow-on-master` is supplied, the inspector is actually the master.
func (this *Migrator) initiateInspector() (err error) {
this.inspector = NewInspector()
this.inspector = NewInspector(this.migrationContext)
if err := this.inspector.InitDBConnections(); err != nil {
return err
}
@ -733,6 +779,9 @@ func (this *Migrator) initiateStatus() error {
this.printStatus(ForcePrintStatusAndHintRule)
statusTick := time.Tick(1 * time.Second)
for range statusTick {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return nil
}
go this.printStatus(HeuristicPrintStatusRule)
}
@ -742,7 +791,7 @@ func (this *Migrator) initiateStatus() error {
// printMigrationStatusHint prints a detailed configuration dump, that is useful
// to keep in mind; such as the name of migrated table, throttle params etc.
// This gets printed at beginning and end of migration, every 10 minutes throughout
// migration, and as reponse to the "status" interactive command.
// migration, and as response to the "status" interactive command.
func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) {
w := io.MultiWriter(writers...)
fmt.Fprintln(w, fmt.Sprintf("# Migrating %s.%s; Ghost table is %s.%s",
@ -820,7 +869,7 @@ func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) {
}
}
// printStatus prints the prgoress status, and optionally additionally detailed
// printStatus prints the progress status, and optionally additionally detailed
// dump of configuration.
// `rule` indicates the type of output expected.
// By default the status is written to standard output, but other writers can
@ -932,7 +981,7 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
// initiateStreaming begins streaming of binary log events and registers listeners for such events
func (this *Migrator) initiateStreaming() error {
this.eventsStreamer = NewEventsStreamer()
this.eventsStreamer = NewEventsStreamer(this.migrationContext)
if err := this.eventsStreamer.InitDBConnections(); err != nil {
return err
}
@ -957,6 +1006,9 @@ func (this *Migrator) initiateStreaming() error {
go func() {
ticker := time.Tick(1 * time.Second)
for range ticker {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
this.migrationContext.SetRecentBinlogCoordinates(*this.eventsStreamer.GetCurrentBinlogCoordinates())
}
}()
@ -980,7 +1032,7 @@ func (this *Migrator) addDMLEventsListener() error {
// initiateThrottler kicks in the throttling collection and the throttling checks.
func (this *Migrator) initiateThrottler() error {
this.throttler = NewThrottler(this.applier, this.inspector)
this.throttler = NewThrottler(this.migrationContext, this.applier, this.inspector)
go this.throttler.initiateThrottlerCollection(this.firstThrottlingCollected)
log.Infof("Waiting for first throttle metrics to be collected")
@ -994,7 +1046,7 @@ func (this *Migrator) initiateThrottler() error {
}
func (this *Migrator) initiateApplier() error {
this.applier = NewApplier()
this.applier = NewApplier(this.migrationContext)
if err := this.applier.InitDBConnections(); err != nil {
return err
}
@ -1035,24 +1087,33 @@ func (this *Migrator) iterateChunks() error {
log.Debugf("No rows found in table. Rowcopy will be implicitly empty")
return terminateRowIteration(nil)
}
var hasNoFurtherRangeFlag int64
// Iterate per chunk:
for {
if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 {
if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 || atomic.LoadInt64(&hasNoFurtherRangeFlag) == 1 {
// Done
// There's another such check down the line
return nil
}
copyRowsFunc := func() error {
if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 {
if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 || atomic.LoadInt64(&hasNoFurtherRangeFlag) == 1 {
// Done.
// There's another such check down the line
return nil
}
hasFurtherRange, err := this.applier.CalculateNextIterationRangeEndValues()
if err != nil {
// When hasFurtherRange is false, the original table might be write-locked and CalculateNextIterationRangeEndValues would hang forever
hasFurtherRange := false
if err := this.retryOperation(func() (e error) {
hasFurtherRange, e = this.applier.CalculateNextIterationRangeEndValues()
return e
}); err != nil {
return terminateRowIteration(err)
}
if !hasFurtherRange {
atomic.StoreInt64(&hasNoFurtherRangeFlag, 1)
return terminateRowIteration(nil)
}
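The wrapper above uses a small closure-capture trick so that retryOperation, which only sees errors, can still yield a value. Isolated, the pattern looks like this (the retry helper and the transient error are illustrative, not gh-ost API):

package main

import (
	"errors"
	"fmt"
)

func retry(attempts int, op func() error) (err error) {
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil {
			return nil
		}
	}
	return err
}

func main() {
	calls := 0
	hasFurtherRange := false
	err := retry(3, func() (e error) {
		calls++
		if calls < 2 {
			return errors.New("transient failure")
		}
		hasFurtherRange = true // the captured result survives the retry wrapper
		return nil
	})
	fmt.Println(hasFurtherRange, err) // true <nil>
}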
// Copy task:
@ -1070,7 +1131,7 @@ func (this *Migrator) iterateChunks() error {
}
_, rowsAffected, _, err := this.applier.ApplyIterationInsertQuery()
if err != nil {
return terminateRowIteration(err)
return err // wrapping call will retry
}
atomic.AddInt64(&this.migrationContext.TotalRowsCopied, rowsAffected)
atomic.AddInt64(&this.migrationContext.Iteration, 1)
@ -1147,6 +1208,10 @@ func (this *Migrator) executeWriteFuncs() error {
return nil
}
for {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return nil
}
this.throttler.throttle(nil)
// We give higher priority to event processing, then secondary priority to
@ -1226,3 +1291,27 @@ func (this *Migrator) finalCleanup() error {
return nil
}
func (this *Migrator) teardown() {
atomic.StoreInt64(&this.finishedMigrating, 1)
if this.inspector != nil {
log.Infof("Tearing down inspector")
this.inspector.Teardown()
}
if this.applier != nil {
log.Infof("Tearing down applier")
this.applier.Teardown()
}
if this.eventsStreamer != nil {
log.Infof("Tearing down streamer")
this.eventsStreamer.Teardown()
}
if this.throttler != nil {
log.Infof("Tearing down throttler")
this.throttler.Teardown()
}
}
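The finishedMigrating checks threaded through the ticker loops above all follow one shutdown pattern: worker goroutines poll an atomic flag and exit once teardown sets it. Reduced to its essentials (names here are illustrative):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

func main() {
	var finishedMigrating int64
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		ticker := time.Tick(10 * time.Millisecond)
		for range ticker {
			if atomic.LoadInt64(&finishedMigrating) > 0 {
				return // the same early exit added to each collection loop
			}
			fmt.Println("collecting metrics")
		}
	}()
	time.Sleep(25 * time.Millisecond)
	atomic.StoreInt64(&finishedMigrating, 1) // what teardown() does first
	wg.Wait()
}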
View File
@ -30,9 +30,9 @@ type Server struct {
printStatus printStatusFunc
}
func NewServer(hooksExecutor *HooksExecutor, printStatus printStatusFunc) *Server {
func NewServer(migrationContext *base.MigrationContext, hooksExecutor *HooksExecutor, printStatus printStatusFunc) *Server {
return &Server{
migrationContext: base.GetMigrationContext(),
migrationContext: migrationContext,
hooksExecutor: hooksExecutor,
printStatus: printStatus,
}
@ -130,6 +130,9 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
arg := ""
if len(tokens) > 1 {
arg = strings.TrimSpace(tokens[1])
if unquoted, err := strconv.Unquote(arg); err == nil {
arg = unquoted
}
}
argIsQuestion := (arg == "?")
throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged\n"
@ -147,7 +150,7 @@ sup # Print a short status message
coordinates # Print the currently inspected coordinates
chunk-size=<newsize> # Set a new chunk-size
dml-batch-size=<newsize> # Set a new dml-batch-size
nice-ratio=<ratio> # Set a new nice-ratio, immediate sleep after each row-copy operation, float (examples: 0 is agrressive, 0.7 adds 70% runtime, 1.0 doubles runtime, 2.0 triples runtime, ...)
nice-ratio=<ratio> # Set a new nice-ratio, immediate sleep after each row-copy operation, float (examples: 0 is aggressive, 0.7 adds 70% runtime, 1.0 doubles runtime, 2.0 triples runtime, ...)
critical-load=<load> # Set a new set of max-load thresholds
max-lag-millis=<max-lag> # Set a new replication lag threshold
replication-lag-query=<query> # Set a new query that determines replication lag (no quotes)
@ -305,8 +308,8 @@ help # This message
return NoPrintStatusRule, err
}
if arg != "" && arg != this.migrationContext.OriginalTableName {
// User exlpicitly provided table name. This is a courtesy protection mechanism
err := fmt.Errorf("User commanded 'unpostpone' on %s, but migrated table is %s; ingoring request.", arg, this.migrationContext.OriginalTableName)
// User explicitly provided table name. This is a courtesy protection mechanism
err := fmt.Errorf("User commanded 'unpostpone' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName)
return NoPrintStatusRule, err
}
if atomic.LoadInt64(&this.migrationContext.IsPostponingCutOver) > 0 {
View File
@ -45,10 +45,10 @@ type EventsStreamer struct {
binlogReader *binlog.GoMySQLReader
}
func NewEventsStreamer() *EventsStreamer {
func NewEventsStreamer(migrationContext *base.MigrationContext) *EventsStreamer {
return &EventsStreamer{
connectionConfig: base.GetMigrationContext().InspectorConnectionConfig,
migrationContext: base.GetMigrationContext(),
connectionConfig: migrationContext.InspectorConnectionConfig,
migrationContext: migrationContext,
listeners: [](*BinlogEventListener){},
listenersMutex: &sync.Mutex{},
eventsChannel: make(chan *binlog.BinlogEntry, EventsChannelBufferSize),
@ -104,10 +104,10 @@ func (this *EventsStreamer) notifyListeners(binlogEvent *binlog.BinlogDMLEvent)
func (this *EventsStreamer) InitDBConnections() (err error) {
EventsStreamerUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName)
if this.db, _, err = sqlutils.GetDB(EventsStreamerUri); err != nil {
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, EventsStreamerUri); err != nil {
return err
}
if _, err := base.ValidateConnection(this.db, this.connectionConfig); err != nil {
if _, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext); err != nil {
return err
}
if err := this.readCurrentBinlogCoordinates(); err != nil {
@ -122,7 +122,7 @@ func (this *EventsStreamer) InitDBConnections() (err error) {
// initBinlogReader creates and connects the reader: we hook up to a MySQL server as a replica
func (this *EventsStreamer) initBinlogReader(binlogCoordinates *mysql.BinlogCoordinates) error {
goMySQLReader, err := binlog.NewGoMySQLReader(this.migrationContext.InspectorConnectionConfig)
goMySQLReader, err := binlog.NewGoMySQLReader(this.migrationContext)
if err != nil {
return err
}
@ -178,7 +178,14 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error {
var successiveFailures int64
var lastAppliedRowsEventHint mysql.BinlogCoordinates
for {
if canStopStreaming() {
return nil
}
if err := this.binlogReader.StreamEvents(canStopStreaming, this.eventsChannel); err != nil {
if canStopStreaming() {
return nil
}
log.Infof("StreamEvents encountered unexpected error: %+v", err)
this.migrationContext.MarkPointOfInterest()
time.Sleep(ReconnectStreamerSleepSeconds * time.Second)
@ -209,3 +216,8 @@ func (this *EventsStreamer) Close() (err error) {
log.Infof("Closed streamer connection. err=%+v", err)
return err
}
func (this *EventsStreamer) Teardown() {
this.db.Close()
return
}
View File
@ -16,7 +16,6 @@ import (
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
"github.com/outbrain/golib/sqlutils"
)
var (
@ -39,19 +38,21 @@ var (
const frenoMagicHint = "freno"
// Throttler collects metrics related to throttling and makes informed decisison
// Throttler collects metrics related to throttling and makes informed decision
// whether throttling should take place.
type Throttler struct {
migrationContext *base.MigrationContext
applier *Applier
inspector *Inspector
migrationContext *base.MigrationContext
applier *Applier
inspector *Inspector
finishedMigrating int64
}
func NewThrottler(applier *Applier, inspector *Inspector) *Throttler {
func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector) *Throttler {
return &Throttler{
migrationContext: base.GetMigrationContext(),
applier: applier,
inspector: inspector,
migrationContext: migrationContext,
applier: applier,
inspector: inspector,
finishedMigrating: 0,
}
}
@ -139,8 +140,8 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica {
// when running on replica, the heartbeat injection is also done on the replica.
// This means we will always get a good heartbeat value.
// When runnign on replica, we should instead check the `SHOW SLAVE STATUS` output.
if lag, err := mysql.GetReplicationLag(this.inspector.connectionConfig); err != nil {
// When running on replica, we should instead check the `SHOW SLAVE STATUS` output.
if lag, err := mysql.GetReplicationLagFromSlaveStatus(this.inspector.informationSchemaDb); err != nil {
return log.Errore(err)
} else {
atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag))
@ -160,6 +161,9 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
ticker := time.Tick(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
for range ticker {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
go collectFunc()
}
}
@ -182,11 +186,12 @@ func (this *Throttler) collectControlReplicasLag() {
dbUri := connectionConfig.GetDBUri("information_schema")
var heartbeatValue string
if db, _, err := sqlutils.GetDB(dbUri); err != nil {
if db, _, err := mysql.GetDB(this.migrationContext.Uuid, dbUri); err != nil {
return lag, err
} else if err = db.QueryRow(replicationLagQuery).Scan(&heartbeatValue); err != nil {
return lag, err
}
lag, err = parseChangelogHeartbeat(heartbeatValue)
return lag, err
}
@ -233,6 +238,9 @@ func (this *Throttler) collectControlReplicasLag() {
shouldReadLagAggressively := false
for range aggressiveTicker {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
if counter%relaxedFactor == 0 {
// we only check if we wish to be aggressive once per second. The parameters for being aggressive
// do not typically change at all throughout the migration, but nonetheless we check them.
@ -285,6 +293,10 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<-
ticker := time.Tick(100 * time.Millisecond)
for range ticker {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
if sleep, _ := collectFunc(); sleep {
time.Sleep(1 * time.Second)
}
@ -393,6 +405,10 @@ func (this *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan
throttlerMetricsTick := time.Tick(1 * time.Second)
for range throttlerMetricsTick {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
this.collectGeneralThrottleMetrics()
}
}()
@ -419,6 +435,9 @@ func (this *Throttler) initiateThrottlerChecks() error {
}
throttlerFunction()
for range throttlerTick {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return nil
}
throttlerFunction()
}
@ -440,3 +459,8 @@ func (this *Throttler) throttle(onThrottled func()) {
time.Sleep(250 * time.Millisecond)
}
}
func (this *Throttler) Teardown() {
log.Debugf("Tearing down...")
atomic.StoreInt64(&this.finishedMigrating, 1)
}
View File
@ -57,7 +57,7 @@ func (this BinlogCoordinates) String() string {
return this.DisplayString()
}
// Equals tests equality of this corrdinate and another one.
// Equals tests equality of this coordinate and another one.
func (this *BinlogCoordinates) Equals(other *BinlogCoordinates) bool {
if other == nil {
return false
@ -95,8 +95,8 @@ func (this *BinlogCoordinates) FileSmallerThan(other *BinlogCoordinates) bool {
return this.LogFile < other.LogFile
}
// FileNumberDistance returns the numeric distance between this corrdinate's file number and the other's.
// Effectively it means "how many roatets/FLUSHes would make these coordinates's file reach the other's"
// FileNumberDistance returns the numeric distance between this coordinate's file number and the other's.
// Effectively it means "how many rotates/FLUSHes would make these coordinates's file reach the other's"
func (this *BinlogCoordinates) FileNumberDistance(other *BinlogCoordinates) int {
thisNumber, _ := this.FileNumber()
otherNumber, _ := other.FileNumber()
View File
@ -56,5 +56,6 @@ func (this *ConnectionConfig) GetDBUri(databaseName string) string {
// Wrap IPv6 literals in square brackets
hostname = fmt.Sprintf("[%s]", hostname)
}
return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1", this.User, this.Password, hostname, this.Key.Port, databaseName)
interpolateParams := true
return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?interpolateParams=%t&autocommit=true&charset=utf8mb4,utf8,latin1", this.User, this.Password, hostname, this.Key.Port, databaseName, interpolateParams)
}
View File
@ -55,3 +55,13 @@ func TestDuplicate(t *testing.T) {
test.S(t).ExpectEquals(dup.User, "gromit")
test.S(t).ExpectEquals(dup.Password, "penguin")
}
func TestGetDBUri(t *testing.T) {
c := NewConnectionConfig()
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
uri := c.GetDBUri("test")
test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1")
}
View File
@ -15,7 +15,7 @@ const (
DefaultInstancePort = 3306
)
// InstanceKey is an instance indicator, identifued by hostname and port
// InstanceKey is an instance indicator, identified by hostname and port
type InstanceKey struct {
Hostname string
Port int
@ -83,7 +83,7 @@ func (this *InstanceKey) IsValid() bool {
return len(this.Hostname) > 0 && this.Port > 0
}
// DetachedKey returns an instance key whose hostname is detahced: invalid, but recoverable
// DetachedKey returns an instance key whose hostname is detached: invalid, but recoverable
func (this *InstanceKey) DetachedKey() *InstanceKey {
if this.IsDetached() {
return this
@ -91,7 +91,7 @@ func (this *InstanceKey) DetachedKey() *InstanceKey {
return &InstanceKey{Hostname: fmt.Sprintf("%s%s", detachHint, this.Hostname), Port: this.Port}
}
// ReattachedKey returns an instance key whose hostname is detahced: invalid, but recoverable
// ReattachedKey returns an instance key whose hostname is detached: invalid, but recoverable
func (this *InstanceKey) ReattachedKey() *InstanceKey {
if !this.IsDetached() {
return this
View File
@ -8,6 +8,8 @@ package mysql
import (
gosql "database/sql"
"fmt"
"strings"
"sync"
"time"
"github.com/github/gh-ost/go/sql"
@ -33,16 +35,32 @@ func (this *ReplicationLagResult) HasLag() bool {
return this.Lag > 0
}
// GetReplicationLag returns replication lag for a given connection config; either by explicit query
// or via SHOW SLAVE STATUS
func GetReplicationLag(connectionConfig *ConnectionConfig) (replicationLag time.Duration, err error) {
dbUri := connectionConfig.GetDBUri("information_schema")
var db *gosql.DB
if db, _, err = sqlutils.GetDB(dbUri); err != nil {
return replicationLag, err
}
// knownDBs is a DB cache by uri
var knownDBs map[string]*gosql.DB = make(map[string]*gosql.DB)
var knownDBsMutex = &sync.Mutex{}
err = sqlutils.QueryRowsMap(db, `show slave status`, func(m sqlutils.RowMap) error {
func GetDB(migrationUuid string, mysql_uri string) (*gosql.DB, bool, error) {
cacheKey := migrationUuid + ":" + mysql_uri
knownDBsMutex.Lock()
defer func() {
knownDBsMutex.Unlock()
}()
var exists bool
if _, exists = knownDBs[cacheKey]; !exists {
if db, err := gosql.Open("mysql", mysql_uri); err == nil {
knownDBs[cacheKey] = db
} else {
return db, exists, err
}
}
return knownDBs[cacheKey], exists, nil
}
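A usage sketch of the cache semantics (the UUID and DSN are made up; sql.Open is lazy, so no live server is needed for this illustration):

package main

import (
	"fmt"

	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver, as gh-ost's main does

	"github.com/github/gh-ost/go/mysql"
)

func main() {
	uri := "user:pass@tcp(127.0.0.1:3306)/test" // hypothetical DSN
	db1, existed1, _ := mysql.GetDB("some-migration-uuid", uri)
	db2, existed2, _ := mysql.GetDB("some-migration-uuid", uri)
	// First call opens and caches the pool; second call hits the cache.
	fmt.Println(existed1, existed2, db1 == db2) // false true true
}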
// GetReplicationLagFromSlaveStatus returns replication lag for a given db; via SHOW SLAVE STATUS
func GetReplicationLagFromSlaveStatus(informationSchemaDb *gosql.DB) (replicationLag time.Duration, err error) {
err = sqlutils.QueryRowsMap(informationSchemaDb, `show slave status`, func(m sqlutils.RowMap) error {
slaveIORunning := m.GetString("Slave_IO_Running")
slaveSQLRunning := m.GetString("Slave_SQL_Running")
secondsBehindMaster := m.GetNullInt64("Seconds_Behind_Master")
@ -52,15 +70,19 @@ func GetReplicationLag(connectionConfig *ConnectionConfig) (replicationLag time.
replicationLag = time.Duration(secondsBehindMaster.Int64) * time.Second
return nil
})
return replicationLag, err
}
func GetMasterKeyFromSlaveStatus(connectionConfig *ConnectionConfig) (masterKey *InstanceKey, err error) {
currentUri := connectionConfig.GetDBUri("information_schema")
db, _, err := sqlutils.GetDB(currentUri)
// This function is only called once, okay to not have a cached connection pool
db, err := gosql.Open("mysql", currentUri)
if err != nil {
return nil, err
}
defer db.Close()
err = sqlutils.QueryRowsMap(db, `show slave status`, func(rowMap sqlutils.RowMap) error {
// We wish to recognize the case where the topology's master actually has replication configuration.
// This can happen when a DBA issues a `RESET SLAVE` instead of `RESET SLAVE ALL`.
@ -73,7 +95,6 @@ func GetMasterKeyFromSlaveStatus(connectionConfig *ConnectionConfig) (masterKey
slaveIORunning := rowMap.GetString("Slave_IO_Running")
slaveSQLRunning := rowMap.GetString("Slave_SQL_Running")
//
if slaveIORunning != "Yes" || slaveSQLRunning != "Yes" {
return fmt.Errorf("Replication on %+v is broken: Slave_IO_Running: %s, Slave_SQL_Running: %s. Please make sure replication runs before using gh-ost.",
connectionConfig.Key,
@ -153,7 +174,7 @@ func GetInstanceKey(db *gosql.DB) (instanceKey *InstanceKey, err error) {
}
// GetTableColumns reads column list from given table
func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnList, error) {
func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnList, *sql.ColumnList, error) {
query := fmt.Sprintf(`
show columns from %s.%s
`,
@ -161,18 +182,24 @@ func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnL
sql.EscapeName(tableName),
)
columnNames := []string{}
virtualColumnNames := []string{}
err := sqlutils.QueryRowsMap(db, query, func(rowMap sqlutils.RowMap) error {
columnNames = append(columnNames, rowMap.GetString("Field"))
columnName := rowMap.GetString("Field")
columnNames = append(columnNames, columnName)
if strings.Contains(rowMap.GetString("Extra"), " GENERATED") {
log.Debugf("%s is a generated column", columnName)
virtualColumnNames = append(virtualColumnNames, columnName)
}
return nil
})
if err != nil {
return nil, err
return nil, nil, err
}
if len(columnNames) == 0 {
return nil, log.Errorf("Found 0 columns on %s.%s. Bailing out",
return nil, nil, log.Errorf("Found 0 columns on %s.%s. Bailing out",
sql.EscapeName(databaseName),
sql.EscapeName(tableName),
)
}
return sql.NewColumnList(columnNames), nil
return sql.NewColumnList(columnNames), sql.NewColumnList(virtualColumnNames), nil
}
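The Extra check keys on how `show columns` reports generated columns; MySQL uses values such as "VIRTUAL GENERATED" and "STORED GENERATED" there, both of which contain the leading-space " GENERATED" substring being matched. A quick demonstration against typical Extra values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Typical `show columns` Extra values; only the generated ones match.
	for _, extra := range []string{"", "auto_increment", "VIRTUAL GENERATED", "STORED GENERATED"} {
		fmt.Printf("%-20q generated=%v\n", extra, strings.Contains(extra, " GENERATED"))
	}
}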
View File
@ -15,11 +15,11 @@ type ValueComparisonSign string
const (
LessThanComparisonSign ValueComparisonSign = "<"
LessThanOrEqualsComparisonSign = "<="
EqualsComparisonSign = "="
GreaterThanOrEqualsComparisonSign = ">="
GreaterThanComparisonSign = ">"
NotEqualsComparisonSign = "!="
LessThanOrEqualsComparisonSign ValueComparisonSign = "<="
EqualsComparisonSign ValueComparisonSign = "="
GreaterThanOrEqualsComparisonSign ValueComparisonSign = ">="
GreaterThanComparisonSign ValueComparisonSign = ">"
NotEqualsComparisonSign ValueComparisonSign = "!="
)
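The explicit types are the whole point of this change: in a Go const block, a constant only inherits the previous declaration's type (and value) when its expression is omitted entirely; supplying a new string literal yields an untyped constant whose default type is plain string. A minimal demonstration, with Sign standing in for ValueComparisonSign:

package main

import "fmt"

type Sign string

const (
	A Sign = "<"
	B      = "<=" // new expression, no type: untyped constant, defaults to string
	C Sign = "="  // explicitly typed, as in the corrected block above
)

func main() {
	fmt.Printf("%T %T %T\n", A, B, C) // main.Sign string main.Sign
}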
// EscapeName will escape a db/table/column/... name by wrapping with backticks.
@ -140,13 +140,12 @@ func BuildRangeComparison(columns []string, values []string, args []interface{},
comparisons := []string{}
for i, column := range columns {
//
value := values[i]
rangeComparison, err := BuildValueComparison(column, value, comparisonSign)
if err != nil {
return "", explodedArgs, err
}
if len(columns[0:i]) > 0 {
if i > 0 {
equalitiesComparison, err := BuildEqualsComparison(columns[0:i], values[0:i])
if err != nil {
return "", explodedArgs, err
View File
@ -8,6 +8,7 @@ package sql
import (
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/encoding/simplifiedchinese"
)
type charsetEncoding map[string]encoding.Encoding
@ -18,4 +19,5 @@ func init() {
charsetEncodingMap = make(map[string]encoding.Encoding)
// Begin mappings
charsetEncodingMap["latin1"] = charmap.Windows1252
charsetEncodingMap["gbk"] = simplifiedchinese.GBK
}
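What the new mapping enables, sketched with golang.org/x/text directly (the sample bytes are "你好" encoded as GBK):

package main

import (
	"fmt"

	"golang.org/x/text/encoding/simplifiedchinese"
)

func main() {
	gbkBytes := []byte{0xc4, 0xe3, 0xba, 0xc3} // "你好" in GBK
	utf8Bytes, err := simplifiedchinese.GBK.NewDecoder().Bytes(gbkBytes)
	fmt.Println(string(utf8Bytes), err) // 你好 <nil>
}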
View File
@ -15,11 +15,13 @@ var (
sanitizeQuotesRegexp = regexp.MustCompile("('[^']*')")
renameColumnRegexp = regexp.MustCompile(`(?i)\bchange\s+(column\s+|)([\S]+)\s+([\S]+)\s+`)
dropColumnRegexp = regexp.MustCompile(`(?i)\bdrop\s+(column\s+|)([\S]+)$`)
renameTableRegexp = regexp.MustCompile(`(?i)\brename\s+(to|as)\s+`)
)
type Parser struct {
columnRenameMap map[string]string
droppedColumns map[string]bool
isRenameTable bool
}
func NewParser() *Parser {
@ -86,6 +88,12 @@ func (this *Parser) parseAlterToken(alterToken string) (err error) {
this.droppedColumns[submatch[2]] = true
}
}
{
// rename table
if renameTableRegexp.MatchString(alterToken) {
this.isRenameTable = true
}
}
return nil
}
@ -115,3 +123,7 @@ func (this *Parser) HasNonTrivialRenames() bool {
func (this *Parser) DroppedColumnsMap() map[string]bool {
return this.droppedColumns
}
func (this *Parser) IsRenameTable() bool {
return this.isRenameTable
}
View File
@ -159,3 +159,42 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) {
test.S(t).ExpectTrue(parser.droppedColumns["b"])
}
}
func TestParseAlterStatementRenameTable(t *testing.T) {
{
parser := NewParser()
statement := "drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectFalse(parser.isRenameTable)
}
{
parser := NewParser()
statement := "rename as something_else"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectTrue(parser.isRenameTable)
}
{
parser := NewParser()
statement := "drop column b, rename as something_else"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectTrue(parser.isRenameTable)
}
{
parser := NewParser()
statement := "engine=innodb rename as something_else"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectTrue(parser.isRenameTable)
}
{
parser := NewParser()
statement := "rename as something_else, engine=innodb"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectTrue(parser.isRenameTable)
}
}
View File
@ -15,18 +15,18 @@ import (
type ColumnType int
const (
UnknownColumnType ColumnType = iota
TimestampColumnType = iota
DateTimeColumnType = iota
EnumColumnType = iota
MediumIntColumnType = iota
JSONColumnType = iota
FloatColumnType = iota
UnknownColumnType ColumnType = iota
TimestampColumnType
DateTimeColumnType
EnumColumnType
MediumIntColumnType
JSONColumnType
FloatColumnType
)
const maxMediumintUnsigned int32 = 16777215
type TimezoneConvertion struct {
type TimezoneConversion struct {
ToTimezone string
}
@ -35,7 +35,7 @@ type Column struct {
IsUnsigned bool
Charset string
Type ColumnType
timezoneConversion *TimezoneConvertion
timezoneConversion *TimezoneConversion
}
func (this *Column) convertArg(arg interface{}) interface{} {
@ -172,7 +172,7 @@ func (this *ColumnList) GetColumnType(columnName string) ColumnType {
}
func (this *ColumnList) SetConvertDatetimeToTimestamp(columnName string, toTimezone string) {
this.GetColumn(columnName).timezoneConversion = &TimezoneConvertion{ToTimezone: toTimezone}
this.GetColumn(columnName).timezoneConversion = &TimezoneConversion{ToTimezone: toTimezone}
}
func (this *ColumnList) HasTimezoneConversion(columnName string) bool {
View File
@ -0,0 +1,20 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 11);
insert into gh_ost_test values (null, 13);
end ;;
View File
@ -0,0 +1 @@
--alter="add column is_good bit null default 0"
View File
@ -0,0 +1 @@
id, i
View File
@ -0,0 +1 @@
id, i
View File
@ -0,0 +1,24 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
is_good bit null default 0,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 11, 0);
insert into gh_ost_test values (null, 13, 1);
insert into gh_ost_test values (null, 17, 1);
update gh_ost_test set is_good=0 where i=13 order by id desc limit 1;
end ;;
View File
@ -0,0 +1 @@
--alter="modify column is_good bit not null default 0" --approve-renamed-columns
View File
@ -0,0 +1,31 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
t varchar(128) charset utf8 collate utf8_general_ci,
tl varchar(128) charset latin1 not null,
ta varchar(128) charset ascii not null,
primary key(id)
) auto_increment=1;
insert into gh_ost_test values (null, 'átesting', 'átesting-seed', 'seed');
insert into gh_ost_test values (null, 'Hello world, Καλημέρα κόσμε, コンニチハ', 'átesting0', 'initial');
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, md5(rand()), 'átesting-a', 'a');
insert into gh_ost_test values (null, 'novo proprietário', 'átesting-b', 'b');
insert into gh_ost_test values (null, '2H₂ + O₂ ⇌ 2H₂O, R = 4.7 kΩ, ⌀ 200 mm', 'átesting-c', 'c');
insert into gh_ost_test values (null, 'usuário', 'átesting-x', 'x');
delete from gh_ost_test where ta='x' order by id desc limit 1;
end ;;
View File
@ -0,0 +1 @@
--alter='convert to character set utf8mb4'
View File
@ -0,0 +1,27 @@
set session time_zone='+00:00';
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
create_time timestamp NULL DEFAULT '0000-00-00 00:00:00',
update_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
counter int(10) unsigned DEFAULT NULL,
primary key(id)
) auto_increment=1;
set session time_zone='+00:00';
insert into gh_ost_test values (1, '0000-00-00 00:00:00', now(), 0);
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
set session time_zone='+00:00';
update gh_ost_test set counter = counter + 1 where id = 1;
end ;;
View File
@ -0,0 +1 @@
--alter='add column name varchar(1)'
View File
@ -0,0 +1 @@
id, create_time, update_time, counter
View File
@ -0,0 +1 @@
id, create_time, update_time, counter
View File

View File
@ -0,0 +1 @@
(5.5)
View File
@ -0,0 +1 @@
(5.5)
View File
@ -0,0 +1 @@
(5.5)
View File
@ -0,0 +1 @@
(5.5)
View File
@ -0,0 +1,23 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
dec0 decimal(65,30) unsigned NOT NULL DEFAULT '0.000000000000000000000000000000',
dec1 decimal(65,30) unsigned NOT NULL DEFAULT '1.000000000000000000000000000000',
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 0.0, 0.0);
insert into gh_ost_test values (null, 2.0, 4.0);
insert into gh_ost_test values (null, 99999999999999999999999999999999999.000, 6.0);
update gh_ost_test set dec1=4.5 where dec1=4.0 order by id desc limit 1;
end ;;
View File
@ -0,0 +1,22 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
ts timestamp,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 11, now());
insert into gh_ost_test values (null, 13, now());
insert into gh_ost_test values (null, 17, now());
end ;;
View File
@ -0,0 +1 @@
ALTER statement seems to RENAME the table
View File
@ -0,0 +1 @@
--alter="rename as something_else"
View File
@ -0,0 +1,52 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
primary key(id)
) auto_increment=1;
insert into gh_ost_test values (null, 101);
insert into gh_ost_test values (null, 102);
insert into gh_ost_test values (null, 103);
insert into gh_ost_test values (null, 104);
insert into gh_ost_test values (null, 105);
insert into gh_ost_test values (null, 106);
insert into gh_ost_test values (null, 107);
insert into gh_ost_test values (null, 108);
insert into gh_ost_test values (null, 109);
insert into gh_ost_test values (null, 110);
insert into gh_ost_test values (null, 111);
insert into gh_ost_test values (null, 112);
insert into gh_ost_test values (null, 113);
insert into gh_ost_test values (null, 114);
insert into gh_ost_test values (null, 115);
insert into gh_ost_test values (null, 116);
insert into gh_ost_test values (null, 117);
insert into gh_ost_test values (null, 118);
insert into gh_ost_test values (null, 119);
insert into gh_ost_test values (null, 120);
insert into gh_ost_test values (null, 121);
insert into gh_ost_test values (null, 122);
insert into gh_ost_test values (null, 123);
insert into gh_ost_test values (null, 124);
insert into gh_ost_test values (null, 125);
insert into gh_ost_test values (null, 126);
insert into gh_ost_test values (null, 127);
insert into gh_ost_test values (null, 128);
insert into gh_ost_test values (null, 129);
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp + interval 3 second
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
update gh_ost_test set id=-2 where id=21;
update gh_ost_test set id=55 where id=22;
update gh_ost_test set id=23 where id=23;
update gh_ost_test set i=5024 where id=24;
end ;;
View File
@ -0,0 +1,25 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int(11) NOT NULL AUTO_INCREMENT,
name varchar(512) DEFAULT NULL,
v varchar(255) DEFAULT NULL COMMENT '添加普通列测试',
PRIMARY KEY (id)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=gbk;
insert into gh_ost_test values (null, 'gbk-test-initial', '添加普通列测试-添加普通列测试');
insert into gh_ost_test values (null, 'gbk-test-initial', '添加普通列测试-添加普通列测试');
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test (name) values ('gbk-test-default');
insert into gh_ost_test values (null, 'gbk-test', '添加普通列测试-添加普通列测试');
update gh_ost_test set v='添加普通列测试' where v='添加普通列测试-添加普通列测试' order by id desc limit 1;
end ;;
View File

View File
@ -0,0 +1,29 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
a int not null,
b int not null,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test (id, a, b) values (null, 2,3);
insert into gh_ost_test (id, a, b) values (null, 2,4);
insert into gh_ost_test (id, a, b) values (null, 2,5);
insert into gh_ost_test (id, a, b) values (null, 2,6);
insert into gh_ost_test (id, a, b) values (null, 2,7);
insert into gh_ost_test (id, a, b) values (null, 2,8);
insert into gh_ost_test (id, a, b) values (null, 2,9);
insert into gh_ost_test (id, a, b) values (null, 2,0);
insert into gh_ost_test (id, a, b) values (null, 2,1);
insert into gh_ost_test (id, a, b) values (null, 2,2);
end ;;
View File
@ -0,0 +1 @@
--alter="add column sum_ab int as (a + b) virtual not null"
View File
@ -0,0 +1 @@
id, a, b
View File
@ -0,0 +1 @@
(5.5|5.6)
View File
@ -0,0 +1 @@
id
View File
@ -0,0 +1 @@
id, a, b
View File
@ -0,0 +1,30 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
a int not null,
b int not null,
sum_ab int as (a + b) virtual not null,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test (id, a, b) values (null, 2,3);
insert into gh_ost_test (id, a, b) values (null, 2,4);
insert into gh_ost_test (id, a, b) values (null, 2,5);
insert into gh_ost_test (id, a, b) values (null, 2,6);
insert into gh_ost_test (id, a, b) values (null, 2,7);
insert into gh_ost_test (id, a, b) values (null, 2,8);
insert into gh_ost_test (id, a, b) values (null, 2,9);
insert into gh_ost_test (id, a, b) values (null, 2,0);
insert into gh_ost_test (id, a, b) values (null, 2,1);
insert into gh_ost_test (id, a, b) values (null, 2,2);
end ;;
View File
@ -0,0 +1 @@
--alter="change sum_ab total_ab int as (a + b) virtual not null" --approve-renamed-columns
View File
@ -0,0 +1 @@
(5.5|5.6)
View File
@ -0,0 +1,30 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
a int not null,
b int not null,
sum_ab int as (a + b) virtual not null,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test (id, a, b) values (null, 2,3);
insert into gh_ost_test (id, a, b) values (null, 2,4);
insert into gh_ost_test (id, a, b) values (null, 2,5);
insert into gh_ost_test (id, a, b) values (null, 2,6);
insert into gh_ost_test (id, a, b) values (null, 2,7);
insert into gh_ost_test (id, a, b) values (null, 2,8);
insert into gh_ost_test (id, a, b) values (null, 2,9);
insert into gh_ost_test (id, a, b) values (null, 2,0);
insert into gh_ost_test (id, a, b) values (null, 2,1);
insert into gh_ost_test (id, a, b) values (null, 2,2);
end ;;
View File
@ -0,0 +1 @@
(5.5|5.6)
View File
@ -0,0 +1,21 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
g geometry,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, ST_GeomFromText('POINT(1 1)'));
insert into gh_ost_test values (null, ST_GeomFromText('POINT(2 2)'));
insert into gh_ost_test values (null, ST_GeomFromText('POINT(3 3)'));
end ;;

View File

@ -0,0 +1 @@
(5.5|5.6)

View File

@ -0,0 +1 @@
(5.5|5.6)

View File

@ -20,6 +20,7 @@ begin
insert into gh_ost_test (id, i, j) values (null, 11, '"sometext"');
insert into gh_ost_test (id, i, j) values (null, 13, '{"key":"val"}');
insert into gh_ost_test (id, i, j) values (null, 17, '{"is-it": true, "count": 3, "elements": []}');
insert into gh_ost_test (id, i, j) values (null, 19, '{"text":"Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu. In enim justo, rhoncus ut, imperdiet a, venenatis vitae, justo. Nullam dictum felis eu pede mollis pretium. Integer tincidunt. Cras dapibus. Vivamus elementum semper nisi. Aenean vulputate eleifend tellus. Aenean leo ligula, porttitor eu, consequat vitae, eleifend ac, enim. Aliquam lorem ante, dapibus in, viverra quis, feugiat a, tellus. Phasellus viverra nulla ut metus varius laoreet. Quisque rutrum. Aenean imperdiet. Etiam ultricies nisi vel augue. Curabitur ullamcorper ultricies nisi. Nam eget dui. Etiam rhoncus. Maecenas tempus, tellus eget condimentum rhoncus, sem quam semper libero, sit amet adipiscing sem neque sed ipsum. Nam quam nunc, blandit vel, luctus pulvinar, hendrerit id, lorem. Maecenas nec odio et ante tincidunt tempus. Donec vitae sapien ut libero venenatis faucibus. Nullam quis ante. Etiam sit amet orci eget eros faucibus tincidunt. Duis leo. Sed fringilla mauris sit amet nibh. Donec sodales sagittis magna. Sed consequat, leo eget bibendum sodales, augue velit cursus nunc, quis gravida magna mi a libero. Fusce vulputate eleifend sapien. Vestibulum purus quam, scelerisque ut, mollis sed, nonummy id, metus. Nullam accumsan lorem in dui. Cras ultricies mi eu turpis hendrerit fringilla. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; In ac dui quis mi consectetuer lacinia. Nam pretium turpis et arcu. Duis arcu tortor, suscipit eget, imperdiet nec, imperdiet iaculis, ipsum. Sed aliquam ultrices mauris. Integer ante arcu, accumsan a, consectetuer eget, posuere ut, mauris. Praesent adipiscing. Phasellus ullamcorper ipsum rutrum nunc. Nunc nonummy metus. Vestibulum volutpat pretium libero. Cras id dui. Aenean ut eros et nisl sagittis vestibulum. Nullam nulla eros, ultricies sit amet, nonummy id, imperdiet feugiat, pede. Sed lectus. Donec mollis hendrerit risus. Phasellus nec sem in justo pellentesque facilisis. Etiam imperdiet imperdiet orci. Nunc nec neque. Phasellus leo dolor, tempus non, auctor et, hendrerit quis, nisi. Curabitur ligula sapien, tincidunt non, euismod vitae, posuere imperdiet, leo. Maecenas malesuada. Praesent congue erat at massa. Sed cursus turpis vitae tortor. Donec posuere vulputate arcu. Phasellus accumsan cursus velit. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Sed aliquam, nisi quis porttitor congue, elit erat euismod orci, ac"}');
update gh_ost_test set j = '{"updated": 11}', updated = 1 where i = 11 and updated = 0;
update gh_ost_test set j = json_set(j, '$.count', 13, '$.id', id), updated = 1 where i = 13 and updated = 0;

View File

@ -0,0 +1 @@
(5.5|5.6)

View File

@ -0,0 +1,13 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
color varchar(32),
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
insert into gh_ost_test values (null, 11, 'red');
insert into gh_ost_test values (null, 13, 'green');
insert into gh_ost_test values (null, 17, 'blue');

View File

@ -0,0 +1 @@
--alter='add column `index` int unsigned' \

View File

@ -0,0 +1 @@
id, i, color

View File

@ -0,0 +1 @@
id, i, color

View File

@ -0,0 +1,22 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
g geometry,
pt point,
primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, ST_GeomFromText('POINT(1 1)'), POINT(10,10));
insert into gh_ost_test values (null, ST_GeomFromText('POINT(2 2)'), POINT(20,20));
insert into gh_ost_test values (null, ST_GeomFromText('POINT(3 3)'), POINT(30,30));
end ;;

View File

@ -0,0 +1 @@
(5.5|5.6)

View File

@ -0,0 +1 @@
(5.5)

View File

@ -0,0 +1 @@
(5.5)

View File

@ -9,16 +9,31 @@
tests_path=$(dirname $0)
test_logfile=/tmp/gh-ost-test.log
ghost_binary=/tmp/gh-ost-test
default_ghost_binary=/tmp/gh-ost-test
ghost_binary=""
exec_command_file=/tmp/gh-ost-test.bash
orig_content_output_file=/gh-ost-test.orig.content.csv
ghost_content_output_file=/gh-ost-test.ghost.content.csv
test_pattern="${1:-.}"
orig_content_output_file=/tmp/gh-ost-test.orig.content.csv
ghost_content_output_file=/tmp/gh-ost-test.ghost.content.csv
throttle_flag_file=/tmp/gh-ost-test.ghost.throttle.flag
master_host=
master_port=
replica_host=
replica_port=
original_sql_mode=
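# -b <path>: run the tests against a prebuilt gh-ost binary instead of building one from source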
OPTIND=1
while getopts "b:" OPTION
do
case $OPTION in
b)
ghost_binary="$OPTARG"
;;
esac
done
shift $((OPTIND-1))
test_pattern="${1:-.}"
verify_master_and_replica() {
if [ "$(gh-ost-test-mysql-master -e "select 1" -ss)" != "1" ] ; then
@ -26,6 +41,15 @@ verify_master_and_replica() {
exit 1
fi
read master_host master_port <<< $(gh-ost-test-mysql-master -e "select @@hostname, @@port" -ss)
[ "$master_host" == "$(hostname)" ] && master_host="127.0.0.1"
echo "# master verified at $master_host:$master_port"
if ! gh-ost-test-mysql-master -e "set global event_scheduler := 1" ; then
echo "Cannot enable event_scheduler on master"
exit 1
fi
original_sql_mode="$(gh-ost-test-mysql-master -e "select @@global.sql_mode" -s -s)"
echo "sql_mode on master is ${original_sql_mode}"
if [ "$(gh-ost-test-mysql-replica -e "select 1" -ss)" != "1" ] ; then
echo "Cannot verify gh-ost-test-mysql-replica"
exit 1
@ -35,6 +59,8 @@ verify_master_and_replica() {
exit 1
fi
read replica_host replica_port <<< $(gh-ost-test-mysql-replica -e "select @@hostname, @@port" -ss)
[ "$replica_host" == "$(hostname)" ] && replica_host="127.0.0.1"
echo "# replica verified at $replica_host:$replica_port"
}
exec_cmd() {
@ -66,11 +92,26 @@ test_single() {
local test_name
test_name="$1"
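# a test may declare server versions it does not apply to via an ignore_versions file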
if [ -f $tests_path/$test_name/ignore_versions ] ; then
ignore_versions=$(cat $tests_path/$test_name/ignore_versions)
mysql_version=$(gh-ost-test-mysql-master -s -s -e "select @@version")
if echo "$mysql_version" | egrep -q "^${ignore_versions}" ; then
echo -n "Skipping: $test_name"
return 0
fi
fi
echo -n "Testing: $test_name"
echo_dot
start_replication
echo_dot
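# apply the test's sql_mode override, if any; the original sql_mode is restored after execution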
if [ -f $tests_path/$test_name/sql_mode ] ; then
gh-ost-test-mysql-master --default-character-set=utf8mb4 test -e "set @@global.sql_mode='$(cat $tests_path/$test_name/sql_mode)'"
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "set @@global.sql_mode='$(cat $tests_path/$test_name/sql_mode)'"
fi
gh-ost-test-mysql-master --default-character-set=utf8mb4 test < $tests_path/$test_name/create.sql
extra_args=""
@ -98,6 +139,7 @@ test_single() {
--password=gh-ost \
--host=$replica_host \
--port=$replica_port \
--assume-master-host=${master_host}:${master_port} \
--database=test \
--table=gh_ost_test \
--alter='engine=innodb' \
@ -106,6 +148,7 @@ test_single() {
--initially-drop-old-table \
--initially-drop-ghost-table \
--throttle-query='select timestampdiff(second, min(last_update), now()) < 5 from _gh_ost_test_ghc' \
--throttle-flag-file=$throttle_flag_file \
--serve-socket-file=/tmp/gh-ost.test.sock \
--initially-drop-socket-file \
--test-on-replica \
@ -122,6 +165,11 @@ test_single() {
execution_result=$?
if [ -f $tests_path/$test_name/sql_mode ] ; then
gh-ost-test-mysql-master --default-character-set=utf8mb4 test -e "set @@global.sql_mode='${original_sql_mode}'"
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "set @@global.sql_mode='${original_sql_mode}'"
fi
if [ -f $tests_path/$test_name/destroy.sql ] ; then
gh-ost-test-mysql-master --default-character-set=utf8mb4 test < $tests_path/$test_name/destroy.sql
fi
@ -148,7 +196,8 @@ test_single() {
if [ $execution_result -ne 0 ] ; then
echo
echo "ERROR $test_name execution failure. cat $test_logfile"
echo "ERROR $test_name execution failure. cat $test_logfile:"
cat $test_logfile
return 1
fi
@ -171,7 +220,17 @@ test_single() {
build_binary() {
echo "Building"
rm -f $default_ghost_binary
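# honor a binary supplied via -b; otherwise build into the default location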
[ "$ghost_binary" == "" ] && ghost_binary="$default_ghost_binary"
if [ -f "$ghost_binary" ] ; then
echo "Using binary: $ghost_binary"
return 0
fi
go build -o $ghost_binary go/cmd/gh-ost/main.go
if [ $? -ne 0 ] ; then
echo "Build failure"
exit 1
fi
}
test_all() {

View File

@ -0,0 +1 @@
(5.5)

View File

@ -0,0 +1 @@
(5.5)

View File

@ -0,0 +1 @@
(5.5)

View File

@ -0,0 +1 @@
(5.5)

View File

@ -0,0 +1,66 @@
#!/bin/bash
set -e
whoami
# Clone gh-ost-ci-env
# Only clone if not already running locally at latest commit
remote_commit=$(git ls-remote https://github.com/github/gh-ost-ci-env.git HEAD | cut -f1)
local_commit="unknown"
[ -d "gh-ost-ci-env" ] && local_commit=$(cd gh-ost-ci-env && git log --format="%H" -n 1)
echo "remote commit is: $remote_commit"
echo "local commit is: $local_commit"
if [ "$remote_commit" != "$local_commit" ] ; then
rm -rf ./gh-ost-ci-env
git clone https://github.com/github/gh-ost-ci-env.git
fi
test_mysql_version() {
local mysql_version
mysql_version="$1"
echo "##### Testing $mysql_version"
echo "### Setting up sandbox for $mysql_version"
find sandboxes -name "stop_all" | bash
mkdir -p sandbox/binary
rm -rf sandbox/binary/*
gh-ost-ci-env/bin/linux/dbdeployer unpack gh-ost-ci-env/mysql-tarballs/"$mysql_version".tar.gz --unpack-version="$mysql_version" --sandbox-binary ${PWD}/sandbox/binary
mkdir -p sandboxes
rm -rf sandboxes/*
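# MySQL 5.5 does not support GTID; enable it only for 5.6 and later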
if echo "$mysql_version" | egrep "5[.]5[.]" ; then
gtid=""
else
gtid="--gtid"
fi
gh-ost-ci-env/bin/linux/dbdeployer deploy replication "$mysql_version" --nodes 2 --sandbox-binary ${PWD}/sandbox/binary --sandbox-home ${PWD}/sandboxes ${gtid} --my-cnf-options log_slave_updates --my-cnf-options log_bin --my-cnf-options binlog_format=ROW --sandbox-directory rsandbox
sed '/sandboxes/d' -i gh-ost-ci-env/bin/gh-ost-test-mysql-master
echo 'sandboxes/rsandbox/m "$@"' >> gh-ost-ci-env/bin/gh-ost-test-mysql-master
sed '/sandboxes/d' -i gh-ost-ci-env/bin/gh-ost-test-mysql-replica
echo 'sandboxes/rsandbox/s1 "$@"' >> gh-ost-ci-env/bin/gh-ost-test-mysql-replica
export PATH="${PWD}/gh-ost-ci-env/bin/:${PATH}"
gh-ost-test-mysql-master -uroot -e "grant all on *.* to 'gh-ost'@'%' identified by 'gh-ost'"
echo "### Running gh-ost tests for $mysql_version"
./localtests/test.sh -b bin/gh-ost
find sandboxes -name "stop_all" | bash
}
echo "Building..."
. script/build
# Test all versions:
find gh-ost-ci-env/mysql-tarballs/ -name "*.tar.gz" | while read f ; do basename $f ".tar.gz" ; done | sort -r | while read mysql_version ; do
test_mysql_version "$mysql_version"
done

View File

@ -1,19 +1,20 @@
#!/bin/bash
GO_VERSION=go1.7
PREFERRED_GO_VERSION=go1.9.2
SUPPORTED_GO_VERSIONS='go1.[89]'
GO_PKG_DARWIN=${GO_VERSION}.darwin-amd64.pkg
GO_PKG_DARWIN_SHA=e7089843bc7148ffcc147759985b213604d22bb9fd19bd930b515aa981bf1b22
GO_PKG_DARWIN=${PREFERRED_GO_VERSION}.darwin-amd64.pkg
GO_PKG_DARWIN_SHA=8b4f6ae6deae1150d2e341d02c247fd18a99af387516540eeb84702ffd76d3a1
GO_PKG_LINUX=${GO_VERSION}.linux-amd64.tar.gz
GO_PKG_LINUX_SHA=702ad90f705365227e902b42d91dd1a40e48ca7f67a2f4b2fd052aaa4295cd95
GO_PKG_LINUX=${PREFERRED_GO_VERSION}.linux-amd64.tar.gz
GO_PKG_LINUX_SHA=de874549d9a8d8d8062be05808509c09a88a248e77ec14eb77453530829ac02b
export ROOTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
cd $ROOTDIR
# If Go isn't installed globally, setup environment variables for local install.
if [ -z "$(which go)" ] || [ -z "$(go version | grep $GO_VERSION)" ]; then
GODIR="$ROOTDIR/.vendor/go17"
if [ -z "$(which go)" ] || [ -z "$(go version | grep "$SUPPORTED_GO_VERSIONS")" ]; then
GODIR="$ROOTDIR/.vendor/go19"
if [ $(uname -s) = "Darwin" ]; then
export GOROOT="$GODIR/usr/local/go"
@ -25,7 +26,7 @@ if [ -z "$(which go)" ] || [ -z "$(go version | grep $GO_VERSION)" ]; then
fi
# Check if local install exists, and install otherwise.
if [ -z "$(which go)" ] || [ -z "$(go version | grep $GO_VERSION)" ]; then
if [ -z "$(which go)" ] || [ -z "$(go version | grep "$SUPPORTED_GO_VERSIONS")" ]; then
[ -d "$GODIR" ] && rm -rf $GODIR
mkdir -p "$GODIR"
cd "$GODIR";
@ -42,7 +43,9 @@ if [ -z "$(which go)" ] || [ -z "$(go version | grep $GO_VERSION)" ]; then
fi
# Prove we did something right
echo "$GO_VERSION installed in $GODIR: Go Binary: $(which go)"
echo "$(go version) installed in $GODIR: Go Binary: $(which go)"
else
echo "$(go version) found in $GODIR: Go Binary: $(which go)"
fi
cd $ROOTDIR

View File

@ -1,32 +1,34 @@
language: go
go:
- 1.6
- 1.7
- "1.9"
- "1.10"
dist: trusty
sudo: required
addons:
apt:
sources:
- mysql-5.7-trusty
packages:
- mysql-server-5.6
- mysql-client-core-5.6
- mysql-client-5.6
- mysql-server
- mysql-client
before_install:
- sudo mysql -e "use mysql; update user set authentication_string=PASSWORD('') where User='root'; update user set plugin='mysql_native_password';FLUSH PRIVILEGES;"
- sudo mysql_upgrade
before_script:
# stop mysql and use row-based format binlog
- "sudo /etc/init.d/mysql stop || true"
- "sudo service mysql stop || true"
- "echo '[mysqld]' | sudo tee /etc/mysql/conf.d/replication.cnf"
- "echo 'server-id=1' | sudo tee -a /etc/mysql/conf.d/replication.cnf"
- "echo 'log-bin=mysql' | sudo tee -a /etc/mysql/conf.d/replication.cnf"
- "echo 'log-bin=mysql' | sudo tee -a /etc/mysql/conf.d/replication.cnf"
- "echo 'binlog-format = row' | sudo tee -a /etc/mysql/conf.d/replication.cnf"
# Start mysql (avoid errors to have logs)
- "sudo /etc/init.d/mysql start || true"
- "sudo service mysql start || true"
- "sudo tail -1000 /var/log/syslog"
- mysql -e "CREATE DATABASE IF NOT EXISTS test;" -uroot
script:
- make test

vendor/github.com/siddontang/go-mysql/Gopkg.lock generated vendored Normal file
View File

@ -0,0 +1,78 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/BurntSushi/toml"
packages = ["."]
revision = "b26d9c308763d68093482582cea63d69be07a0f0"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "github.com/go-sql-driver/mysql"
packages = ["."]
revision = "99ff426eb706cffe92ff3d058e168b278cabf7c7"
[[projects]]
branch = "master"
name = "github.com/jmoiron/sqlx"
packages = [
".",
"reflectx"
]
revision = "2aeb6a910c2b94f2d5eb53d9895d80e27264ec41"
[[projects]]
branch = "master"
name = "github.com/juju/errors"
packages = ["."]
revision = "c7d06af17c68cd34c835053720b21f6549d9b0ee"
[[projects]]
branch = "master"
name = "github.com/pingcap/check"
packages = ["."]
revision = "1c287c953996ab3a0bf535dba9d53d809d3dc0b6"
[[projects]]
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"
[[projects]]
name = "github.com/shopspring/decimal"
packages = ["."]
revision = "cd690d0c9e2447b1ef2a129a6b7b49077da89b8e"
version = "1.1.0"
[[projects]]
branch = "master"
name = "github.com/siddontang/go"
packages = [
"hack",
"sync2"
]
revision = "2b7082d296ba89ae7ead0f977816bddefb65df9d"
[[projects]]
branch = "master"
name = "github.com/siddontang/go-log"
packages = [
"log",
"loggers"
]
revision = "a4d157e46fa3e08b7e7ff329af341fa3ff86c02c"
[[projects]]
name = "google.golang.org/appengine"
packages = ["cloudsql"]
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
version = "v1.1.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "a1f9939938a58551bbb3f19411c9d1386995d36296de6f6fb5d858f5923db85e"
solver-name = "gps-cdcl"
solver-version = 1

vendor/github.com/siddontang/go-mysql/Gopkg.toml generated vendored Normal file
View File

@ -0,0 +1,56 @@
# Gopkg.toml example
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
[[constraint]]
name = "github.com/BurntSushi/toml"
version = "v0.3.0"
[[constraint]]
name = "github.com/go-sql-driver/mysql"
branch = "master"
[[constraint]]
branch = "master"
name = "github.com/juju/errors"
[[constraint]]
name = "github.com/satori/go.uuid"
version = "v1.2.0"
[[constraint]]
name = "github.com/shopspring/decimal"
version = "v1.1.0"
[[constraint]]
branch = "master"
name = "github.com/siddontang/go"
[prune]
go-tests = true
unused-packages = true
non-go = true

View File

@ -1,33 +1,14 @@
all: build
build:
rm -rf vendor && ln -s _vendor/vendor vendor
go build -o bin/go-mysqlbinlog cmd/go-mysqlbinlog/main.go
go build -o bin/go-mysqldump cmd/go-mysqldump/main.go
go build -o bin/go-canal cmd/go-canal/main.go
go build -o bin/go-binlogparser cmd/go-binlogparser/main.go
rm -rf vendor
test:
rm -rf vendor && ln -s _vendor/vendor vendor
go test --race -timeout 2m ./...
rm -rf vendor
clean:
go clean -i ./...
@rm -rf ./bin
update_vendor:
which glide >/dev/null || curl https://glide.sh/get | sh
which glide-vc || go get -v -u github.com/sgotti/glide-vc
rm -r vendor && mv _vendor/vendor vendor || true
rm -rf _vendor
ifdef PKG
glide get --strip-vendor --skip-test ${PKG}
else
glide update --strip-vendor --skip-test
endif
@echo "removing test files"
glide vc --only-code --no-tests
mkdir -p _vendor
mv vendor _vendor/vendor
@rm -rf ./bin

View File

@ -25,9 +25,9 @@ cfg := replication.BinlogSyncerConfig {
User: "root",
Password: "",
}
syncer := replication.NewBinlogSyncer(&cfg)
syncer := replication.NewBinlogSyncer(cfg)
// Start sync with sepcified binlog file and position
// Start sync with specified binlog file and position
streamer, _ := syncer.StartSync(mysql.Position{binlogFile, binlogPos})
// or you can start a gtid replication like
@ -44,7 +44,7 @@ for {
// or we can use a timeout context
for {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
e, _ := s.GetEvent(ctx)
ev, err := s.GetEvent(ctx)
cancel()
if err == context.DeadlineExceeded {
@ -105,20 +105,21 @@ cfg.Dump.Tables = []string{"canal_test"}
c, err := NewCanal(cfg)
type myRowsEventHandler struct {
type MyEventHandler struct {
DummyEventHandler
}
func (h *myRowsEventHandler) Do(e *RowsEvent) error {
func (h *MyEventHandler) OnRow(e *RowsEvent) error {
log.Infof("%s %v\n", e.Action, e.Rows)
return nil
}
func (h *myRowsEventHandler) String() string {
return "myRowsEventHandler"
func (h *MyEventHandler) String() string {
return "MyEventHandler"
}
// Register a handler to handle RowsEvent
c.RegRowsEventHandler(&MyRowsEventHandler{})
c.SetEventHandler(&MyEventHandler{})
// Start canal
c.Start()
@ -137,9 +138,16 @@ import (
"github.com/siddontang/go-mysql/client"
)
// Connect MySQL at 127.0.0.1:3306, with user root, an empty passowrd and database test
// Connect MySQL at 127.0.0.1:3306, with user root, an empty password and database test
conn, _ := client.Connect("127.0.0.1:3306", "root", "", "test")
// Or to use SSL/TLS connection if MySQL server supports TLS
//conn, _ := client.Connect("127.0.0.1:3306", "root", "", "test", func(c *Conn) {c.UseSSL(true)})
// or to set your own client-side certificates for identity verification for security
//tlsConfig := NewClientTLSConfig(caPem, certPem, keyPem, false, "your-server-name")
//conn, _ := client.Connect("127.0.0.1:3306", "root", "", "test", func(c *Conn) {c.SetTLSConfig(tlsConfig)})
conn.Ping()
// Insert
@ -156,10 +164,17 @@ v, _ := r.GetInt(0, 0)
v, _ = r.GetIntByName(0, "id")
```
Tested MySQL versions for the client include:
- 5.5.x
- 5.6.x
- 5.7.x
- 8.0.x
## Server
The server package supplies a framework for implementing a simple MySQL server which can handle the packets from a MySQL client.
You can use it to build your own MySQL proxy.
You can use it to build your own MySQL proxy. The server connection is compatible with MySQL 5.5, 5.6, 5.7, and 8.0,
so most MySQL clients should be able to connect to it without modification.
### Example
@ -173,14 +188,14 @@ l, _ := net.Listen("tcp", "127.0.0.1:4000")
c, _ := l.Accept()
// Create a connection with user root and an empty passowrd
// We only an empty handler to handle command too
// Create a connection with user root and an empty password.
// You can use your own handler to handle commands here.
conn, _ := server.NewConn(c, "root", "", server.EmptyHandler{})
for {
conn.HandleCommand()
}
```
Another shell
@ -189,6 +204,15 @@ mysql -h127.0.0.1 -P4000 -uroot -p
//Because the empty handler does nothing, the MySQL client can only connect to the proxy server here. :-)
```
> ```NewConn()``` will use default server configurations:
> 1. automatically generate default server certificates and enable TLS/SSL support.
> 2. support three mainstream authentication methods **'mysql_native_password'**, **'caching_sha2_password'**, and **'sha256_password'**
> and use **'mysql_native_password'** as default.
> 3. use an in-memory user credential provider to store user and password.
>
> To customize server configurations, use ```NewServer()``` and create connection via ```NewCustomizedConn()```.
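A minimal sketch of such a customized connection might look as follows; it assumes the `NewServer`, `NewInMemoryProvider`, and `NewCustomizedConn` signatures present at this revision of the server package, and elides error handling like the examples above:

```
import (
    "net"

    "github.com/siddontang/go-mysql/mysql"
    "github.com/siddontang/go-mysql/server"
)

l, _ := net.Listen("tcp", "127.0.0.1:4000")
c, _ := l.Accept()

// In-memory credential provider holding a single user.
p := server.NewInMemoryProvider()
p.AddUser("root", "secret")

// Server configuration: advertise version 8.0.11, the default collation, and
// mysql_native_password; nil pubKey/tlsConfig keep this sketch plain (no TLS).
svr := server.NewServer("8.0.11", mysql.DEFAULT_COLLATION_ID, mysql.AUTH_NATIVE_PASSWORD, nil, nil)

// Create the customized connection and serve commands, as with NewConn().
conn, _ := server.NewCustomizedConn(c, svr, p, server.EmptyHandler{})
for {
    conn.HandleCommand()
}
```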
## Failover
Failover supports promoting a new master and letting the other slaves replicate from it automatically when the old master goes down.
@ -205,10 +229,12 @@ Although there are many companies use MySQL 5.0 - 5.5, I think upgrade MySQL to
Driver is the package that you can use go-mysql with go database/sql like other drivers. A simple example:
```
package main
import (
"database/sql"
- "github.com/siddontang/go-mysql/driver"
_ "github.com/siddontang/go-mysql/driver"
)
func main() {
@ -221,6 +247,14 @@ func main() {
We pass all tests in https://github.com/bradfitz/go-sql-test using the go-mysql driver. :-)
## Donate
If you like the project and want to buy me a cola, you can do so through:
|PayPal|WeChat|
|------|---|
|[![](https://www.paypalobjects.com/webstatic/paypalme/images/pp_logo_small.png)](https://paypal.me/siddontang)|![](https://github.com/siddontang/blog/blob/master/donate/weixin.png)|
## Feedback
go-mysql is still in development; your feedback is very welcome.

View File

@ -1,112 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"database/sql/driver"
"io"
)
type mysqlField struct {
tableName string
name string
flags fieldFlag
fieldType byte
decimals byte
}
type mysqlRows struct {
mc *mysqlConn
columns []mysqlField
}
type binaryRows struct {
mysqlRows
}
type textRows struct {
mysqlRows
}
type emptyRows struct{}
func (rows *mysqlRows) Columns() []string {
columns := make([]string, len(rows.columns))
if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
for i := range columns {
if tableName := rows.columns[i].tableName; len(tableName) > 0 {
columns[i] = tableName + "." + rows.columns[i].name
} else {
columns[i] = rows.columns[i].name
}
}
} else {
for i := range columns {
columns[i] = rows.columns[i].name
}
}
return columns
}
func (rows *mysqlRows) Close() error {
mc := rows.mc
if mc == nil {
return nil
}
if mc.netConn == nil {
return ErrInvalidConn
}
// Remove unread packets from stream
err := mc.readUntilEOF()
if err == nil {
if err = mc.discardResults(); err != nil {
return err
}
}
rows.mc = nil
return err
}
func (rows *binaryRows) Next(dest []driver.Value) error {
if mc := rows.mc; mc != nil {
if mc.netConn == nil {
return ErrInvalidConn
}
// Fetch next row from stream
return rows.readRow(dest)
}
return io.EOF
}
func (rows *textRows) Next(dest []driver.Value) error {
if mc := rows.mc; mc != nil {
if mc.netConn == nil {
return ErrInvalidConn
}
// Fetch next row from stream
return rows.readRow(dest)
}
return io.EOF
}
func (rows emptyRows) Columns() []string {
return nil
}
func (rows emptyRows) Close() error {
return nil
}
func (rows emptyRows) Next(dest []driver.Value) error {
return io.EOF
}

Some files were not shown because too many files have changed in this diff.