Merge branch 'master' into hidden-password-cmdline

commit 7b3d688450
Tim Vaillancourt, 2021-06-21 05:03:01 +02:00, committed by GitHub
No known key found for this signature in database. GPG Key ID: 4AEE18F83AFDEB23
50 changed files with 698 additions and 118 deletions

View File

@ -10,10 +10,10 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Set up Go 1.14
- name: Set up Go 1.16
uses: actions/setup-go@v1
with:
go-version: 1.14
go-version: 1.16
- name: Build
run: script/cibuild

View File

@ -6,14 +6,19 @@ jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
version: [mysql-5.5.62,mysql-5.6.43,mysql-5.7.25,mysql-8.0.16]
steps:
- uses: actions/checkout@v2
- name: Set up Go 1.14
- name: Set up Go 1.16
uses: actions/setup-go@v1
with:
go-version: 1.14
go-version: 1.16
- name: migration tests
env:
TEST_MYSQL_VERSION: ${{ matrix.version }}
run: script/cibuild-gh-ost-replica-tests

View File

@ -1,6 +1,4 @@
#
FROM golang:1.14.7
FROM golang:1.16.4
RUN apt-get update
RUN apt-get install -y ruby ruby-dev rubygems build-essential

View File

@ -1,4 +1,4 @@
FROM golang:1.14.7
FROM golang:1.16.4
LABEL maintainer="github@github.com"
RUN apt-get update

View File

@ -1,6 +1,6 @@
# gh-ost
[![build status](https://travis-ci.org/github/gh-ost.svg)](https://travis-ci.org/github/gh-ost) [![downloads](https://img.shields.io/github/downloads/github/gh-ost/total.svg)](https://github.com/github/gh-ost/releases) [![release](https://img.shields.io/github/release/github/gh-ost.svg)](https://github.com/github/gh-ost/releases)
[![ci](https://github.com/github/gh-ost/actions/workflows/ci.yml/badge.svg)](https://github.com/github/gh-ost/actions/workflows/ci.yml) [![replica-tests](https://github.com/github/gh-ost/actions/workflows/replica-tests.yml/badge.svg)](https://github.com/github/gh-ost/actions/workflows/replica-tests.yml) [![downloads](https://img.shields.io/github/downloads/github/gh-ost/total.svg)](https://github.com/github/gh-ost/releases) [![release](https://img.shields.io/github/release/github/gh-ost.svg)](https://github.com/github/gh-ost/releases)
#### GitHub's online schema migration for MySQL <img src="doc/images/gh-ost-logo-light-160.png" align="right">
@ -65,6 +65,7 @@ Also see:
- [the fine print](doc/the-fine-print.md)
- [Community questions](https://github.com/github/gh-ost/issues?q=label%3Aquestion)
- [Using `gh-ost` on AWS RDS](doc/rds.md)
- [Using `gh-ost` on Azure Database for MySQL](doc/azure.md)
## What's in a name?
@ -94,7 +95,7 @@ Please see [Coding gh-ost](doc/coding-ghost.md) for a guide to getting started d
[Download latest release here](https://github.com/github/gh-ost/releases/latest)
`gh-ost` is a Go project; it is built with Go `1.14` and above. To build on your own, use either:
`gh-ost` is a Go project; it is built with Go `1.15` and above. To build on your own, use either:
- [script/build](https://github.com/github/gh-ost/blob/master/script/build) - this is the same build script used by CI hence the authoritative; artifact is `./bin/gh-ost` binary.
- [build.sh](https://github.com/github/gh-ost/blob/master/build.sh) for building `tar.gz` artifacts in `/tmp/gh-ost`

View File

@ -1 +1 @@
1.1.0
1.1.2

View File

@ -18,15 +18,16 @@ function build {
GOOS=$3
GOARCH=$4
if ! go version | egrep -q 'go(1\.1[456])' ; then
echo "go version must be 1.14 or above"
if ! go version | egrep -q 'go(1\.1[56])' ; then
echo "go version must be 1.15 or above"
exit 1
fi
# TODO: remove GO111MODULE once gh-ost uses Go modules
echo "Building ${osname} binary"
export GOOS
export GOARCH
go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
GO111MODULE=off go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
if [ $? -ne 0 ]; then
echo "Build failed for ${osname}"
@ -40,7 +41,7 @@ function build {
builddir=$(setuptree)
cp $buildpath/$target $builddir/gh-ost/usr/bin
cd $buildpath
fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm .
fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm --rpm-rpmbuild-define "_build_id_links none" .
fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
fi
}

doc/azure.md Normal file (26 additions)
View File

@ -0,0 +1,26 @@
`gh-ost` has been updated to work with Azure Database for MySQL. However, because GitHub does not use it, this documentation is community-driven, so if you find a bug please [open an issue][new_issue]!
# Azure Database for MySQL
## Limitations
- `gh-ost` runs should be set up to use [`--assume-rbr`][assume_rbr_docs] and `binlog_row_image=FULL`.
- Azure Database for MySQL does not use the same user name suffix for master and replica, so the master host, user and password need to be provided explicitly.
## Steps
1. Change the replica server's `binlog_row_image` from `MINIMAL` to `FULL`. See the [guide](https://docs.microsoft.com/en-us/azure/mysql/howto-server-parameters) in the Azure documentation.
2. Always run `gh-ost` with these 5 additional parameters:
```{bash}
gh-ost \
--azure \
--assume-master-host=master-server-dns-name \
--master-user="master-user-name" \
--master-password="master-password" \
--assume-rbr \
[-- other parameters you need]
```
[new_issue]: https://github.com/github/gh-ost/issues/new
[assume_rbr_docs]: https://github.com/github/gh-ost/blob/master/doc/command-line-flags.md#assume-rbr
[migrate_test_on_replica_docs]: https://github.com/github/gh-ost/blob/master/doc/cheatsheet.md#c-migratetest-on-replica

View File

@ -6,6 +6,10 @@ A more in-depth discussion of various `gh-ost` command line flags: implementatio
Add this flag when executing on Aliyun RDS.
### azure
Add this flag when executing on Azure Database for MySQL.
### allow-master-master
See [`--assume-master-host`](#assume-master-host).
@ -177,6 +181,9 @@ Optionally involve the process ID, for example: `--replica-server-id=$((10000000
It's on you to choose a number that does not collide with another `gh-ost` or another running replica.
See also: [`concurrent-migrations`](cheatsheet.md#concurrent-migrations) on the cheatsheet.
### serve-socket-file
Defines the Unix socket file to serve on. Defaults to an auto-determined file whose path is advertised upon startup.
### skip-foreign-key-checks
By default `gh-ost` verifies no foreign keys exist on the migrated table. On servers with a large number of tables this check can take a long time. If you're absolutely certain no foreign keys exist (the table neither references nor is referenced by other tables) and wish to save the check time, pass `--skip-foreign-key-checks`.

View File

@ -66,6 +66,7 @@ The following variables are available on all hooks:
- `GH_OST_ESTIMATED_ROWS` - estimated total rows in table
- `GH_OST_COPIED_ROWS` - number of rows copied by `gh-ost`
- `GH_OST_INSPECTED_LAG` - lag in seconds (floating point) of inspected server
- `GH_OST_HEARTBEAT_LAG` - lag in seconds (floating point) of heartbeat
- `GH_OST_PROGRESS` - progress pct ([0..100], floating point) of migration
- `GH_OST_MIGRATED_HOST`
- `GH_OST_INSPECTED_HOST`
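
These are plain environment variables, so any hook executable can consume them. A minimal sketch of a status hook in Go, using the `GH_OST_HEARTBEAT_LAG` variable added in this commit plus `GH_OST_ETA_SECONDS` (exported by the hooks executor change further down); the 5-second alert threshold is an illustrative assumption, not gh-ost behavior:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// gh-ost exports these variables into every hook's environment.
	heartbeatLag, _ := strconv.ParseFloat(os.Getenv("GH_OST_HEARTBEAT_LAG"), 64)
	progress, _ := strconv.ParseFloat(os.Getenv("GH_OST_PROGRESS"), 64)
	etaSeconds, _ := strconv.ParseInt(os.Getenv("GH_OST_ETA_SECONDS"), 10, 64)

	fmt.Printf("migration %.1f%% complete; heartbeat lag %.2fs; ETA %ds\n",
		progress, heartbeatLag, etaSeconds)

	// Illustrative threshold, not part of gh-ost:
	if heartbeatLag > 5.0 {
		fmt.Fprintln(os.Stderr, "heartbeat lag is high; consider investigating")
	}
}
```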

View File

@ -18,6 +18,8 @@ Both interfaces may serve at the same time. Both respond to simple text command,
- `status`: returns a detailed status summary of migration progress and configuration
- `sup`: returns a brief status summary of migration progress
- `coordinates`: returns recent (though not exactly up to date) binary log coordinates of the inspected server
- `applier`: returns the hostname of the applier
- `inspector`: returns the hostname of the inspector
- `chunk-size=<newsize>`: modify the `chunk-size`; applies on next running copy-iteration
- `dml-batch-size=<newsize>`: modify the `dml-batch-size`; applies on next applying of binary log events
- `max-lag-millis=<max-lag>`: modify the maximum replication lag threshold (milliseconds, minimum value is `100`, i.e. `0.1` second)
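
The new `applier` and `inspector` commands answer over the same Unix socket as the existing ones, one command per connection (matching the documented `echo status | nc -U <socket>` usage). A minimal Go client sketch; the socket path is a placeholder, as gh-ost advertises the real one on startup:

```go
package main

import (
	"fmt"
	"io"
	"net"
)

// queryGhostSocket sends one interactive command to a running gh-ost
// and returns the reply.
func queryGhostSocket(socketPath, command string) (string, error) {
	conn, err := net.Dial("unix", socketPath)
	if err != nil {
		return "", err
	}
	defer conn.Close()
	if _, err := fmt.Fprintln(conn, command); err != nil {
		return "", err
	}
	reply, err := io.ReadAll(conn) // server closes the connection after replying
	return string(reply), err
}

func main() {
	for _, command := range []string{"applier", "inspector"} {
		reply, err := queryGhostSocket("/tmp/gh-ost.test.sample.sock", command)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s: %s", command, reply)
	}
}
```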

View File

@ -41,6 +41,7 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
- Amazon RDS works, but has its own [limitations](rds.md).
- Google Cloud SQL works, `--gcp` flag required.
- Aliyun RDS works, `--aliyun-rds` flag required.
- Azure Database for MySQL works, `--azure` flag required; a detailed document is available ([azure.md](azure.md)).
- Multisource is not supported when migrating via replica. It _should_ work (but never tested) when connecting directly to master (`--allow-on-master`)

View File

@ -52,6 +52,7 @@ const (
const (
HTTPStatusOK = 200
MaxEventsBatchSize = 1000
ETAUnknown = math.MinInt64
)
var (
@ -97,6 +98,7 @@ type MigrationContext struct {
DiscardForeignKeys bool
AliyunRDS bool
GoogleCloudPlatform bool
AzureMySQL bool
config ContextConfig
configMutex *sync.Mutex
@ -177,8 +179,11 @@ type MigrationContext struct {
RenameTablesEndTime time.Time
pointOfInterestTime time.Time
pointOfInterestTimeMutex *sync.Mutex
lastHeartbeatOnChangelogTime time.Time
lastHeartbeatOnChangelogMutex *sync.Mutex
CurrentLag int64
currentProgress uint64
etaNanoseonds int64
ThrottleHTTPStatusCode int64
controlReplicasLagResult mysql.ReplicationLagResult
TotalRowsCopied int64
@ -203,6 +208,7 @@ type MigrationContext struct {
OriginalTableColumns *sql.ColumnList
OriginalTableVirtualColumns *sql.ColumnList
OriginalTableUniqueKeys [](*sql.UniqueKey)
OriginalTableAutoIncrement uint64
GhostTableColumns *sql.ColumnList
GhostTableVirtualColumns *sql.ColumnList
GhostTableUniqueKeys [](*sql.UniqueKey)
@ -263,6 +269,7 @@ func NewMigrationContext() *MigrationContext {
MaxLagMillisecondsThrottleThreshold: 1500,
CutOverLockTimeoutSeconds: 3,
DMLBatchSize: 10,
etaNanoseonds: ETAUnknown,
maxLoad: NewLoadMap(),
criticalLoad: NewLoadMap(),
throttleMutex: &sync.Mutex{},
@ -270,6 +277,7 @@ func NewMigrationContext() *MigrationContext {
throttleControlReplicaKeys: mysql.NewInstanceKeyMap(),
configMutex: &sync.Mutex{},
pointOfInterestTimeMutex: &sync.Mutex{},
lastHeartbeatOnChangelogMutex: &sync.Mutex{},
ColumnRenameMap: make(map[string]string),
PanicAbort: make(chan error),
Log: NewDefaultLogger(),
@ -453,6 +461,10 @@ func (this *MigrationContext) MarkRowCopyEndTime() {
this.RowCopyEndTime = time.Now()
}
func (this *MigrationContext) TimeSinceLastHeartbeatOnChangelog() time.Duration {
return time.Since(this.GetLastHeartbeatOnChangelogTime())
}
func (this *MigrationContext) GetCurrentLagDuration() time.Duration {
return time.Duration(atomic.LoadInt64(&this.CurrentLag))
}
@ -465,6 +477,22 @@ func (this *MigrationContext) SetProgressPct(progressPct float64) {
atomic.StoreUint64(&this.currentProgress, math.Float64bits(progressPct))
}
func (this *MigrationContext) GetETADuration() time.Duration {
return time.Duration(atomic.LoadInt64(&this.etaNanoseonds))
}
func (this *MigrationContext) SetETADuration(etaDuration time.Duration) {
atomic.StoreInt64(&this.etaNanoseonds, etaDuration.Nanoseconds())
}
func (this *MigrationContext) GetETASeconds() int64 {
nano := atomic.LoadInt64(&this.etaNanoseonds)
if nano < 0 {
return ETAUnknown
}
return nano / int64(time.Second)
}
// math.Float64bits([f=0..100])
// GetTotalRowsCopied returns the accurate number of rows being copied (affected)
@ -492,6 +520,20 @@ func (this *MigrationContext) TimeSincePointOfInterest() time.Duration {
return time.Since(this.pointOfInterestTime)
}
func (this *MigrationContext) SetLastHeartbeatOnChangelogTime(t time.Time) {
this.lastHeartbeatOnChangelogMutex.Lock()
defer this.lastHeartbeatOnChangelogMutex.Unlock()
this.lastHeartbeatOnChangelogTime = t
}
func (this *MigrationContext) GetLastHeartbeatOnChangelogTime() time.Time {
this.lastHeartbeatOnChangelogMutex.Lock()
defer this.lastHeartbeatOnChangelogMutex.Unlock()
return this.lastHeartbeatOnChangelogTime
}
func (this *MigrationContext) SetHeartbeatIntervalMilliseconds(heartbeatIntervalMilliseconds int64) {
if heartbeatIntervalMilliseconds < 100 {
heartbeatIntervalMilliseconds = 100
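
The new ETA fields store the estimate as atomic nanoseconds, with `math.MinInt64` (`ETAUnknown`) as a sentinel so consumers can distinguish "no estimate yet" from a zero ("due") ETA. A self-contained sketch of that pattern, with names simplified from the diff:

```go
package main

import (
	"fmt"
	"math"
	"sync/atomic"
	"time"
)

const etaUnknown = math.MinInt64

type progress struct {
	etaNanoseconds int64
}

func (p *progress) SetETADuration(d time.Duration) {
	atomic.StoreInt64(&p.etaNanoseconds, d.Nanoseconds())
}

func (p *progress) GetETASeconds() int64 {
	nanos := atomic.LoadInt64(&p.etaNanoseconds)
	if nanos < 0 {
		return etaUnknown // negative (including the sentinel) means "not yet known"
	}
	return nanos / int64(time.Second)
}

func main() {
	p := &progress{etaNanoseconds: etaUnknown}
	fmt.Println(p.GetETASeconds() == etaUnknown) // true: no estimate yet
	p.SetETADuration(90 * time.Second)
	fmt.Println(p.GetETASeconds()) // 90
}
```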

View File

@ -15,6 +15,7 @@ import (
gosql "database/sql"
"github.com/ErikDubbelboer/gspt"
"github.com/github/gh-ost/go/mysql"
)
@ -64,7 +65,7 @@ func StringContainsAll(s string, substrings ...string) bool {
return nonEmptyStringsFound
}
func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext *MigrationContext) (string, error) {
func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext *MigrationContext, name string) (string, error) {
versionQuery := `select @@global.version`
var port, extraPort int
var version string
@ -77,7 +78,8 @@ func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig,
}
// AliyunRDS set users port to "NULL", replace it by gh-ost param
// GCP set users port to "NULL", replace it by gh-ost param
if migrationContext.AliyunRDS || migrationContext.GoogleCloudPlatform {
// Azure MySQL sets the user's port to a different value by design, replace it with the gh-ost param
if migrationContext.AliyunRDS || migrationContext.GoogleCloudPlatform || migrationContext.AzureMySQL {
port = connectionConfig.Key.Port
} else {
portQuery := `select @@global.port`
@ -87,7 +89,7 @@ func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig,
}
if connectionConfig.Key.Port == port || (extraPort > 0 && connectionConfig.Key.Port == extraPort) {
migrationContext.Log.Infof("connection validated on %+v", connectionConfig.Key)
migrationContext.Log.Infof("%s connection validated on %+v", name, connectionConfig.Key)
return version, nil
} else if extraPort == 0 {
return "", fmt.Errorf("Unexpected database port reported: %+v", port)

View File

@ -79,6 +79,7 @@ func main() {
flag.BoolVar(&migrationContext.SkipStrictMode, "skip-strict-mode", false, "explicitly tell gh-ost binlog applier not to enforce strict sql mode")
flag.BoolVar(&migrationContext.AliyunRDS, "aliyun-rds", false, "set to 'true' when you execute on Aliyun RDS.")
flag.BoolVar(&migrationContext.GoogleCloudPlatform, "gcp", false, "set to 'true' when you execute on a 1st generation Google Cloud Platform (GCP).")
flag.BoolVar(&migrationContext.AzureMySQL, "azure", false, "set to 'true' when you execute on Azure Database for MySQL.")
executeFlag := flag.Bool("execute", false, "actually execute the alter & migrate the table. Default is noop: do some tests and exit")
flag.BoolVar(&migrationContext.TestOnReplica, "test-on-replica", false, "Have the migration run on a replica, not on the master. At the end of migration replication is stopped, and tables are swapped and immediately swap-revert. Replication remains stopped and you can compare the two tables for building trust")

View File

@ -17,6 +17,7 @@ import (
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/sqlutils"
"sync"
)
const (
@ -56,6 +57,7 @@ type Applier struct {
singletonDB *gosql.DB
migrationContext *base.MigrationContext
finishedMigrating int64
name string
}
func NewApplier(migrationContext *base.MigrationContext) *Applier {
@ -63,6 +65,7 @@ func NewApplier(migrationContext *base.MigrationContext) *Applier {
connectionConfig: migrationContext.ApplierConnectionConfig,
migrationContext: migrationContext,
finishedMigrating: 0,
name: "applier",
}
}
@ -77,18 +80,18 @@ func (this *Applier) InitDBConnections() (err error) {
return err
}
this.singletonDB.SetMaxOpenConns(1)
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name)
if err != nil {
return err
}
if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig, this.migrationContext); err != nil {
if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig, this.migrationContext, this.name); err != nil {
return err
}
this.migrationContext.ApplierMySQLVersion = version
if err := this.validateAndReadTimeZone(); err != nil {
return err
}
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform && !this.migrationContext.AzureMySQL {
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
@ -204,6 +207,25 @@ func (this *Applier) AlterGhost() error {
return nil
}
// AlterGhostAutoIncrement applies an ALTER statement to set the ghost table's AUTO_INCREMENT value
func (this *Applier) AlterGhostAutoIncrement() error {
query := fmt.Sprintf(`alter /* gh-ost */ table %s.%s AUTO_INCREMENT=%d`,
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
this.migrationContext.OriginalTableAutoIncrement,
)
this.migrationContext.Log.Infof("Altering ghost table AUTO_INCREMENT value %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
)
this.migrationContext.Log.Debugf("AUTO_INCREMENT ALTER statement: %s", query)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
this.migrationContext.Log.Infof("Ghost table AUTO_INCREMENT altered")
return nil
}
// CreateChangelogTable creates the changelog table on the applier host
func (this *Applier) CreateChangelogTable() error {
if err := this.DropChangelogTable(); err != nil {
@ -787,7 +809,7 @@ func (this *Applier) CreateAtomicCutOverSentryTable() error {
}
// AtomicCutOverMagicLock
func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocked chan<- error, okToUnlockTable <-chan bool, tableUnlocked chan<- error) error {
func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocked chan<- error, okToUnlockTable <-chan bool, tableUnlocked chan<- error, dropCutOverSentryTableOnce *sync.Once) error {
tx, err := this.db.Begin()
if err != nil {
tableLocked <- err
@ -865,10 +887,13 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetOldTableName()),
)
if _, err := tx.Exec(query); err != nil {
this.migrationContext.Log.Errore(err)
// We DO NOT return here because we must `UNLOCK TABLES`!
}
dropCutOverSentryTableOnce.Do(func() {
if _, err := tx.Exec(query); err != nil {
this.migrationContext.Log.Errore(err)
// We DO NOT return here because we must `UNLOCK TABLES`!
}
})
// Tables still locked
this.migrationContext.Log.Infof("Releasing lock from %s.%s, %s.%s",

View File

@ -64,7 +64,9 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [
env = append(env, fmt.Sprintf("GH_OST_INSPECTED_HOST=%s", this.migrationContext.GetInspectorHostname()))
env = append(env, fmt.Sprintf("GH_OST_EXECUTING_HOST=%s", this.migrationContext.Hostname))
env = append(env, fmt.Sprintf("GH_OST_INSPECTED_LAG=%f", this.migrationContext.GetCurrentLagDuration().Seconds()))
env = append(env, fmt.Sprintf("GH_OST_HEARTBEAT_LAG=%f", this.migrationContext.TimeSinceLastHeartbeatOnChangelog().Seconds()))
env = append(env, fmt.Sprintf("GH_OST_PROGRESS=%f", this.migrationContext.GetProgressPct()))
env = append(env, fmt.Sprintf("GH_OST_ETA_SECONDS=%d", this.migrationContext.GetETASeconds()))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT=%s", this.migrationContext.HooksHintMessage))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_OWNER=%s", this.migrationContext.HooksHintOwner))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_TOKEN=%s", this.migrationContext.HooksHintToken))

View File

@ -29,12 +29,14 @@ type Inspector struct {
db *gosql.DB
informationSchemaDb *gosql.DB
migrationContext *base.MigrationContext
name string
}
func NewInspector(migrationContext *base.MigrationContext) *Inspector {
return &Inspector{
connectionConfig: migrationContext.InspectorConnectionConfig,
migrationContext: migrationContext,
name: "inspector",
}
}
@ -52,7 +54,7 @@ func (this *Inspector) InitDBConnections() (err error) {
if err := this.validateConnection(); err != nil {
return err
}
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform && !this.migrationContext.AzureMySQL {
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
@ -109,6 +111,10 @@ func (this *Inspector) InspectOriginalTable() (err error) {
if err != nil {
return err
}
this.migrationContext.OriginalTableAutoIncrement, err = this.getAutoIncrementValue(this.migrationContext.OriginalTableName)
if err != nil {
return err
}
return nil
}
@ -181,9 +187,17 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
if column.Name == mappedColumn.Name && column.Type == sql.DateTimeColumnType && mappedColumn.Type == sql.TimestampColumnType {
this.migrationContext.MappedSharedColumns.SetConvertDatetimeToTimestamp(column.Name, this.migrationContext.ApplierTimeZone)
}
if column.Name == mappedColumn.Name && column.Type == sql.EnumColumnType && mappedColumn.Charset != "" {
this.migrationContext.MappedSharedColumns.SetEnumToTextConversion(column.Name)
this.migrationContext.MappedSharedColumns.SetEnumValues(column.Name, column.EnumValues)
}
}
for _, column := range this.migrationContext.UniqueKey.Columns.Columns() {
if this.migrationContext.GhostTableVirtualColumns.GetColumn(column.Name) != nil {
// this is a virtual column
continue
}
if this.migrationContext.MappedSharedColumns.HasTimezoneConversion(column.Name) {
return fmt.Errorf("No support at this time for converting a column from DATETIME to TIMESTAMP that is also part of the chosen unique key. Column: %s, key: %s", column.Name, this.migrationContext.UniqueKey.Name)
}
@ -198,7 +212,7 @@ func (this *Inspector) validateConnection() error {
return fmt.Errorf("MySQL replication length limited to 32 characters. See https://dev.mysql.com/doc/refman/5.7/en/assigning-passwords.html")
}
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name)
this.migrationContext.InspectorMySQLVersion = version
return err
}
@ -526,7 +540,7 @@ func (this *Inspector) CountTableRows() error {
this.migrationContext.Log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while")
query := fmt.Sprintf(`select /* gh-ost */ count(*) as rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
query := fmt.Sprintf(`select /* gh-ost */ count(*) as count_rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
var rowsEstimate int64
if err := this.db.QueryRow(query).Scan(&rowsEstimate); err != nil {
return err
@ -553,6 +567,7 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
columnName := m.GetString("COLUMN_NAME")
columnType := m.GetString("COLUMN_TYPE")
columnOctetLength := m.GetUint("CHARACTER_OCTET_LENGTH")
for _, columnsList := range columnsLists {
column := columnsList.GetColumn(columnName)
if column == nil {
@ -579,6 +594,11 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
}
if strings.HasPrefix(columnType, "enum") {
column.Type = sql.EnumColumnType
column.EnumValues = sql.ParseEnumValues(m.GetString("COLUMN_TYPE"))
}
if strings.HasPrefix(columnType, "binary") {
column.Type = sql.BinaryColumnType
column.BinaryOctetLength = columnOctetLength
}
if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
column.Charset = charset
@ -589,6 +609,24 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
return err
}
// getAutoIncrementValue gets the original table's AUTO_INCREMENT value, if it exists (0 if it does not)
func (this *Inspector) getAutoIncrementValue(tableName string) (autoIncrement uint64, err error) {
query := `
SELECT
AUTO_INCREMENT
FROM INFORMATION_SCHEMA.TABLES
WHERE
TABLES.TABLE_SCHEMA = ?
AND TABLES.TABLE_NAME = ?
AND AUTO_INCREMENT IS NOT NULL
`
err = sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
autoIncrement = m.GetUint64("AUTO_INCREMENT")
return nil
}, this.migrationContext.DatabaseName, tableName)
return autoIncrement, err
}
// getCandidateUniqueKeys investigates a table and returns the list of unique keys
// candidate for chunking
func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*sql.UniqueKey), err error) {
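
For reference, the same lookup can be reproduced standalone with `database/sql`. A hedged sketch (placeholder DSN and names; unlike the inspector's version, this one surfaces `sql.ErrNoRows` for a missing table rather than silently returning 0):

```go
package main

import (
	gosql "database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql"
)

// fetchAutoIncrement issues the same information_schema query the
// inspector uses, tolerating tables with no AUTO_INCREMENT column.
func fetchAutoIncrement(db *gosql.DB, schema, table string) (uint64, error) {
	var autoIncrement gosql.NullInt64
	query := `
		SELECT AUTO_INCREMENT
		FROM INFORMATION_SCHEMA.TABLES
		WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ?
	`
	if err := db.QueryRow(query, schema, table).Scan(&autoIncrement); err != nil {
		return 0, err
	}
	if !autoIncrement.Valid {
		return 0, nil // table has no AUTO_INCREMENT value
	}
	return uint64(autoIncrement.Int64), nil
}

func main() {
	db, err := gosql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/") // placeholder DSN
	if err != nil {
		panic(err)
	}
	defer db.Close()

	value, err := fetchAutoIncrement(db, "test", "gh_ost_test")
	fmt.Println(value, err)
}
```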

View File

@ -11,6 +11,7 @@ import (
"math"
"os"
"strings"
"sync"
"sync/atomic"
"time"
@ -206,12 +207,20 @@ func (this *Migrator) canStopStreaming() bool {
return atomic.LoadInt64(&this.migrationContext.CutOverCompleteFlag) != 0
}
// onChangelogStateEvent is called when a binlog event operation on the changelog table is intercepted.
func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (err error) {
// onChangelogEvent is called when a binlog event operation on the changelog table is intercepted.
func (this *Migrator) onChangelogEvent(dmlEvent *binlog.BinlogDMLEvent) (err error) {
// Hey, I created the changelog table, I know the type of columns it has!
if hint := dmlEvent.NewColumnValues.StringColumn(2); hint != "state" {
switch hint := dmlEvent.NewColumnValues.StringColumn(2); hint {
case "state":
return this.onChangelogStateEvent(dmlEvent)
case "heartbeat":
return this.onChangelogHeartbeatEvent(dmlEvent)
default:
return nil
}
}
func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (err error) {
changelogStateString := dmlEvent.NewColumnValues.StringColumn(3)
changelogState := ReadChangelogState(changelogStateString)
this.migrationContext.Log.Infof("Intercepted changelog state %s", changelogState)
@ -244,6 +253,18 @@ func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (er
return nil
}
func (this *Migrator) onChangelogHeartbeatEvent(dmlEvent *binlog.BinlogDMLEvent) (err error) {
changelogHeartbeatString := dmlEvent.NewColumnValues.StringColumn(3)
heartbeatTime, err := time.Parse(time.RFC3339Nano, changelogHeartbeatString)
if err != nil {
return this.migrationContext.Log.Errore(err)
} else {
this.migrationContext.SetLastHeartbeatOnChangelogTime(heartbeatTime)
return nil
}
}
// listenOnPanicAbort aborts on abort request
func (this *Migrator) listenOnPanicAbort() {
err := <-this.migrationContext.PanicAbort
@ -475,6 +496,13 @@ func (this *Migrator) cutOver() (err error) {
this.migrationContext.Log.Debugf("checking for cut-over postpone")
this.sleepWhileTrue(
func() (bool, error) {
heartbeatLag := this.migrationContext.TimeSinceLastHeartbeatOnChangelog()
maxLagMillisecondsThrottle := time.Duration(atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold)) * time.Millisecond
cutOverLockTimeout := time.Duration(this.migrationContext.CutOverLockTimeoutSeconds) * time.Second
if heartbeatLag > maxLagMillisecondsThrottle || heartbeatLag > cutOverLockTimeout {
this.migrationContext.Log.Debugf("current HeartbeatLag (%.2fs) is too high, it needs to be less than both --max-lag-millis (%.2fs) and --cut-over-lock-timeout-seconds (%.2fs) to continue", heartbeatLag.Seconds(), maxLagMillisecondsThrottle.Seconds(), cutOverLockTimeout.Seconds())
return true, nil
}
if this.migrationContext.PostponeCutOverFlagFile == "" {
return false, nil
}
@ -606,9 +634,12 @@ func (this *Migrator) atomicCutOver() (err error) {
defer atomic.StoreInt64(&this.migrationContext.InCutOverCriticalSectionFlag, 0)
okToUnlockTable := make(chan bool, 4)
var dropCutOverSentryTableOnce sync.Once
defer func() {
okToUnlockTable <- true
this.applier.DropAtomicCutOverSentryTableIfExists()
dropCutOverSentryTableOnce.Do(func() {
this.applier.DropAtomicCutOverSentryTableIfExists()
})
}()
atomic.StoreInt64(&this.migrationContext.AllEventsUpToLockProcessedInjectedFlag, 0)
@ -617,7 +648,7 @@ func (this *Migrator) atomicCutOver() (err error) {
tableLocked := make(chan error, 2)
tableUnlocked := make(chan error, 2)
go func() {
if err := this.applier.AtomicCutOverMagicLock(lockOriginalSessionIdChan, tableLocked, okToUnlockTable, tableUnlocked); err != nil {
if err := this.applier.AtomicCutOverMagicLock(lockOriginalSessionIdChan, tableLocked, okToUnlockTable, tableUnlocked, &dropCutOverSentryTableOnce); err != nil {
this.migrationContext.Log.Errore(err)
}
}()
@ -737,7 +768,7 @@ func (this *Migrator) initiateInspector() (err error) {
this.migrationContext.Log.Infof("Master found to be %+v", *this.migrationContext.ApplierConnectionConfig.ImpliedKey)
} else {
// Forced master host.
key, err := mysql.ParseRawInstanceKeyLoose(this.migrationContext.AssumeMasterHostname)
key, err := mysql.ParseInstanceKey(this.migrationContext.AssumeMasterHostname)
if err != nil {
return err
}
@ -908,20 +939,29 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
}
var etaSeconds float64 = math.MaxFloat64
eta := "N/A"
var etaDuration = time.Duration(base.ETAUnknown)
if progressPct >= 100.0 {
eta = "due"
etaDuration = 0
} else if progressPct >= 0.1 {
elapsedRowCopySeconds := this.migrationContext.ElapsedRowCopyTime().Seconds()
totalExpectedSeconds := elapsedRowCopySeconds * float64(rowsEstimate) / float64(totalRowsCopied)
etaSeconds = totalExpectedSeconds - elapsedRowCopySeconds
if etaSeconds >= 0 {
etaDuration := time.Duration(etaSeconds) * time.Second
eta = base.PrettifyDurationOutput(etaDuration)
etaDuration = time.Duration(etaSeconds) * time.Second
} else {
eta = "due"
etaDuration = 0
}
}
this.migrationContext.SetETADuration(etaDuration)
var eta string
switch etaDuration {
case 0:
eta = "due"
case time.Duration(base.ETAUnknown):
eta = "N/A"
default:
eta = base.PrettifyDurationOutput(etaDuration)
}
state := "migrating"
if atomic.LoadInt64(&this.migrationContext.CountingRowsFlag) > 0 && !this.migrationContext.ConcurrentCountTableRows {
@ -958,13 +998,14 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
currentBinlogCoordinates := *this.eventsStreamer.GetCurrentBinlogCoordinates()
status := fmt.Sprintf("Copy: %d/%d %.1f%%; Applied: %d; Backlog: %d/%d; Time: %+v(total), %+v(copy); streamer: %+v; Lag: %.2fs, State: %s; ETA: %s",
status := fmt.Sprintf("Copy: %d/%d %.1f%%; Applied: %d; Backlog: %d/%d; Time: %+v(total), %+v(copy); streamer: %+v; Lag: %.2fs, HeartbeatLag: %.2fs, State: %s; ETA: %s",
totalRowsCopied, rowsEstimate, progressPct,
atomic.LoadInt64(&this.migrationContext.TotalDMLEventsApplied),
len(this.applyEventsQueue), cap(this.applyEventsQueue),
base.PrettifyDurationOutput(elapsedTime), base.PrettifyDurationOutput(this.migrationContext.ElapsedRowCopyTime()),
currentBinlogCoordinates,
this.migrationContext.GetCurrentLagDuration().Seconds(),
this.migrationContext.TimeSinceLastHeartbeatOnChangelog().Seconds(),
state,
eta,
)
@ -991,7 +1032,7 @@ func (this *Migrator) initiateStreaming() error {
this.migrationContext.DatabaseName,
this.migrationContext.GetChangelogTableName(),
func(dmlEvent *binlog.BinlogDMLEvent) error {
return this.onChangelogStateEvent(dmlEvent)
return this.onChangelogEvent(dmlEvent)
},
)
@ -1068,6 +1109,14 @@ func (this *Migrator) initiateApplier() error {
return err
}
if this.migrationContext.OriginalTableAutoIncrement > 0 && !this.parser.IsAutoIncrementDefined() {
// Original table has AUTO_INCREMENT value and the -alter statement does not indicate any override,
// so we should copy AUTO_INCREMENT value onto our ghost table.
if err := this.applier.AlterGhostAutoIncrement(); err != nil {
this.migrationContext.Log.Errorf("Unable to ALTER ghost table AUTO_INCREMENT value, see further error details. Bailing out")
return err
}
}
this.applier.WriteChangelogState(string(GhostTableMigrated))
go this.applier.InitiateHeartbeat()
return nil
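
The new check in `cutOver()` above gates the critical section on the changelog heartbeat lag: cut-over proceeds only while that lag is below both `--max-lag-millis` and `--cut-over-lock-timeout-seconds`. A small sketch mirroring the condition (the values are the defaults seen in the context diff):

```go
package main

import (
	"fmt"
	"time"
)

// okToCutOver mirrors the gate added in cutOver(): proceed only while the
// changelog heartbeat lag is below BOTH thresholds.
func okToCutOver(heartbeatLag, maxLagMillis, cutOverLockTimeout time.Duration) bool {
	return heartbeatLag <= maxLagMillis && heartbeatLag <= cutOverLockTimeout
}

func main() {
	maxLag := 1500 * time.Millisecond // default MaxLagMillisecondsThrottleThreshold
	lockTimeout := 3 * time.Second    // default CutOverLockTimeoutSeconds

	fmt.Println(okToCutOver(800*time.Millisecond, maxLag, lockTimeout)) // true
	fmt.Println(okToCutOver(2*time.Second, maxLag, lockTimeout))        // false: lag exceeds --max-lag-millis
}
```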

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2021 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -146,7 +146,9 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
fmt.Fprint(writer, `available commands:
status # Print a detailed status message
sup # Print a short status message
coordinates # Print the currently inspected coordinates
coordinates # Print the currently inspected coordinates
applier # Print the hostname of the applier
inspector # Print the hostname of the inspector
chunk-size=<newsize> # Set a new chunk-size
dml-batch-size=<newsize> # Set a new dml-batch-size
nice-ratio=<ratio> # Set a new nice-ratio, immediate sleep after each row-copy operation, float (examples: 0 is aggressive, 0.7 adds 70% runtime, 1.0 doubles runtime, 2.0 triples runtime, ...)
@ -177,6 +179,22 @@ help # This message
}
return NoPrintStatusRule, fmt.Errorf("coordinates are read-only")
}
case "applier":
if this.migrationContext.ApplierConnectionConfig != nil && this.migrationContext.ApplierConnectionConfig.ImpliedKey != nil {
fmt.Fprintf(writer, "Host: %s, Version: %s\n",
this.migrationContext.ApplierConnectionConfig.ImpliedKey.String(),
this.migrationContext.ApplierMySQLVersion,
)
}
return NoPrintStatusRule, nil
case "inspector":
if this.migrationContext.InspectorConnectionConfig != nil && this.migrationContext.InspectorConnectionConfig.ImpliedKey != nil {
fmt.Fprintf(writer, "Host: %s, Version: %s\n",
this.migrationContext.InspectorConnectionConfig.ImpliedKey.String(),
this.migrationContext.InspectorMySQLVersion,
)
}
return NoPrintStatusRule, nil
case "chunk-size":
{
if argIsQuestion {

View File

@ -42,6 +42,7 @@ type EventsStreamer struct {
listenersMutex *sync.Mutex
eventsChannel chan *binlog.BinlogEntry
binlogReader *binlog.GoMySQLReader
name string
}
func NewEventsStreamer(migrationContext *base.MigrationContext) *EventsStreamer {
@ -51,6 +52,7 @@ func NewEventsStreamer(migrationContext *base.MigrationContext) *EventsStreamer
listeners: [](*BinlogEventListener){},
listenersMutex: &sync.Mutex{},
eventsChannel: make(chan *binlog.BinlogEntry, EventsChannelBufferSize),
name: "streamer",
}
}
@ -106,7 +108,7 @@ func (this *EventsStreamer) InitDBConnections() (err error) {
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, EventsStreamerUri); err != nil {
return err
}
if _, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext); err != nil {
if _, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name); err != nil {
return err
}
if err := this.readCurrentBinlogCoordinates(); err != nil {

View File

@ -188,9 +188,12 @@ func (this *Throttler) collectControlReplicasLag() {
dbUri := connectionConfig.GetDBUri("information_schema")
var heartbeatValue string
if db, _, err := mysql.GetDB(this.migrationContext.Uuid, dbUri); err != nil {
db, _, err := mysql.GetDB(this.migrationContext.Uuid, dbUri)
if err != nil {
return lag, err
} else if err = db.QueryRow(replicationLagQuery).Scan(&heartbeatValue); err != nil {
}
if err := db.QueryRow(replicationLagQuery).Scan(&heartbeatValue); err != nil {
return lag, err
}

View File

@ -92,6 +92,7 @@ func (this *ConnectionConfig) UseTLS(caCertificatePath, clientCertificate, clien
}
this.tlsConfig = &tls.Config{
ServerName: this.Key.Hostname,
Certificates: certs,
RootCAs: rootCertPool,
InsecureSkipVerify: allowInsecure,

View File

@ -7,6 +7,7 @@ package mysql
import (
"fmt"
"regexp"
"strconv"
"strings"
)
@ -15,6 +16,13 @@ const (
DefaultInstancePort = 3306
)
var (
ipv4HostPortRegexp = regexp.MustCompile("^([^:]+):([0-9]+)$")
ipv4HostRegexp = regexp.MustCompile("^([^:]+)$")
ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") // e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308
ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$") // e.g. 2001:db8:1f70::999:de8:7648:6e8
)
// InstanceKey is an instance indicator, identified by hostname and port
type InstanceKey struct {
Hostname string
@ -25,25 +33,35 @@ const detachHint = "//"
// ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306
func NewRawInstanceKey(hostPort string) (*InstanceKey, error) {
tokens := strings.SplitN(hostPort, ":", 2)
if len(tokens) != 2 {
return nil, fmt.Errorf("Cannot parse InstanceKey from %s. Expected format is host:port", hostPort)
hostname := ""
port := ""
if submatch := ipv4HostPortRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
hostname = submatch[1]
port = submatch[2]
} else if submatch := ipv4HostRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
hostname = submatch[1]
} else if submatch := ipv6HostPortRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
hostname = submatch[1]
port = submatch[2]
} else if submatch := ipv6HostRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
hostname = submatch[1]
} else {
return nil, fmt.Errorf("Cannot parse address: %s", hostPort)
}
instanceKey := &InstanceKey{Hostname: tokens[0]}
var err error
if instanceKey.Port, err = strconv.Atoi(tokens[1]); err != nil {
return instanceKey, fmt.Errorf("Invalid port: %s", tokens[1])
instanceKey := &InstanceKey{Hostname: hostname, Port: DefaultInstancePort}
if port != "" {
var err error
if instanceKey.Port, err = strconv.Atoi(port); err != nil {
return instanceKey, fmt.Errorf("Invalid port: %s", port)
}
}
return instanceKey, nil
}
// ParseRawInstanceKeyLoose will parse an InstanceKey from a string representation such as 127.0.0.1:3306.
// ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306.
// The port part is optional; there will be no name resolve
func ParseRawInstanceKeyLoose(hostPort string) (*InstanceKey, error) {
if !strings.Contains(hostPort, ":") {
return &InstanceKey{Hostname: hostPort, Port: DefaultInstancePort}, nil
}
func ParseInstanceKey(hostPort string) (*InstanceKey, error) {
return NewRawInstanceKey(hostPort)
}

View File

@ -92,7 +92,7 @@ func (this *InstanceKeyMap) ReadCommaDelimitedList(list string) error {
}
tokens := strings.Split(list, ",")
for _, token := range tokens {
key, err := ParseRawInstanceKeyLoose(token)
key, err := ParseInstanceKey(token)
if err != nil {
return err
}

View File

@ -0,0 +1,74 @@
/*
Copyright 2016 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package mysql
import (
"testing"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
)
func init() {
log.SetLevel(log.ERROR)
}
func TestParseInstanceKey(t *testing.T) {
{
key, err := ParseInstanceKey("myhost:1234")
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(key.Hostname, "myhost")
test.S(t).ExpectEquals(key.Port, 1234)
}
{
key, err := ParseInstanceKey("myhost")
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(key.Hostname, "myhost")
test.S(t).ExpectEquals(key.Port, 3306)
}
{
key, err := ParseInstanceKey("10.0.0.3:3307")
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(key.Hostname, "10.0.0.3")
test.S(t).ExpectEquals(key.Port, 3307)
}
{
key, err := ParseInstanceKey("10.0.0.3")
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(key.Hostname, "10.0.0.3")
test.S(t).ExpectEquals(key.Port, 3306)
}
{
key, err := ParseInstanceKey("[2001:db8:1f70::999:de8:7648:6e8]:3308")
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(key.Hostname, "2001:db8:1f70::999:de8:7648:6e8")
test.S(t).ExpectEquals(key.Port, 3308)
}
{
key, err := ParseInstanceKey("::1")
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(key.Hostname, "::1")
test.S(t).ExpectEquals(key.Port, 3306)
}
{
key, err := ParseInstanceKey("0:0:0:0:0:0:0:0")
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(key.Hostname, "0:0:0:0:0:0:0:0")
test.S(t).ExpectEquals(key.Port, 3306)
}
{
_, err := ParseInstanceKey("[2001:xxxx:1f70::999:de8:7648:6e8]:3308")
test.S(t).ExpectNotNil(err)
}
{
_, err := ParseInstanceKey("10.0.0.4:")
test.S(t).ExpectNotNil(err)
}
{
_, err := ParseInstanceKey("10.0.0.4:5.6.7")
test.S(t).ExpectNotNil(err)
}
}

View File

@ -18,8 +18,11 @@ import (
"github.com/outbrain/golib/sqlutils"
)
const MaxTableNameLength = 64
const MaxReplicationPasswordLength = 32
const (
MaxTableNameLength = 64
MaxReplicationPasswordLength = 32
MaxDBPoolConnections = 3
)
type ReplicationLagResult struct {
Key InstanceKey
@ -39,23 +42,22 @@ func (this *ReplicationLagResult) HasLag() bool {
var knownDBs map[string]*gosql.DB = make(map[string]*gosql.DB)
var knownDBsMutex = &sync.Mutex{}
func GetDB(migrationUuid string, mysql_uri string) (*gosql.DB, bool, error) {
func GetDB(migrationUuid string, mysql_uri string) (db *gosql.DB, exists bool, err error) {
cacheKey := migrationUuid + ":" + mysql_uri
knownDBsMutex.Lock()
defer func() {
knownDBsMutex.Unlock()
}()
defer knownDBsMutex.Unlock()
var exists bool
if _, exists = knownDBs[cacheKey]; !exists {
if db, err := gosql.Open("mysql", mysql_uri); err == nil {
knownDBs[cacheKey] = db
} else {
return db, exists, err
if db, exists = knownDBs[cacheKey]; !exists {
db, err = gosql.Open("mysql", mysql_uri)
if err != nil {
return nil, false, err
}
db.SetMaxOpenConns(MaxDBPoolConnections)
db.SetMaxIdleConns(MaxDBPoolConnections)
knownDBs[cacheKey] = db
}
return knownDBs[cacheKey], exists, nil
return db, exists, nil
}
// GetReplicationLagFromSlaveStatus returns replication lag for a given db; via SHOW SLAVE STATUS
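
The reworked `GetDB` memoizes one connection pool per (migration UUID, URI) pair and caps each at `MaxDBPoolConnections`. A stripped-down sketch of the caching shape, with a stub struct standing in for `*gosql.DB`:

```go
package main

import (
	"fmt"
	"sync"
)

type pool struct{ uri string } // stands in for *gosql.DB

var (
	knownDBs      = make(map[string]*pool)
	knownDBsMutex sync.Mutex
)

func getDB(migrationUuid, uri string) (*pool, bool) {
	cacheKey := migrationUuid + ":" + uri
	knownDBsMutex.Lock()
	defer knownDBsMutex.Unlock()

	db, exists := knownDBs[cacheKey]
	if !exists {
		db = &pool{uri: uri} // real code: gosql.Open + SetMaxOpenConns/SetMaxIdleConns
		knownDBs[cacheKey] = db
	}
	return db, exists
}

func main() {
	_, existed := getDB("uuid-1", "mysql://replica")
	fmt.Println(existed) // false: freshly opened
	_, existed = getDB("uuid-1", "mysql://replica")
	fmt.Println(existed) // true: served from cache
}
```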

View File

@ -38,6 +38,8 @@ func buildColumnsPreparedValues(columns *ColumnList) []string {
var token string
if column.timezoneConversion != nil {
token = fmt.Sprintf("convert_tz(?, '%s', '%s')", column.timezoneConversion.ToTimezone, "+00:00")
} else if column.enumToTextConversion {
token = fmt.Sprintf("ELT(?, %s)", column.EnumValues)
} else if column.Type == JSONColumnType {
token = "convert(? using utf8mb4)"
} else {
@ -108,6 +110,8 @@ func BuildSetPreparedClause(columns *ColumnList) (result string, err error) {
var setToken string
if column.timezoneConversion != nil {
setToken = fmt.Sprintf("%s=convert_tz(?, '%s', '%s')", EscapeName(column.Name), column.timezoneConversion.ToTimezone, "+00:00")
} else if column.enumToTextConversion {
setToken = fmt.Sprintf("%s=ELT(?, %s)", EscapeName(column.Name), column.EnumValues)
} else if column.Type == JSONColumnType {
setToken = fmt.Sprintf("%s=convert(? using utf8mb4)", EscapeName(column.Name))
} else {
@ -396,7 +400,7 @@ func BuildDMLDeleteQuery(databaseName, tableName string, tableColumns, uniqueKey
}
for _, column := range uniqueKeyColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
arg := column.convertArg(args[tableOrdinal])
arg := column.convertArg(args[tableOrdinal], true)
uniqueKeyArgs = append(uniqueKeyArgs, arg)
}
databaseName = EscapeName(databaseName)
@ -433,7 +437,7 @@ func BuildDMLInsertQuery(databaseName, tableName string, tableColumns, sharedCol
for _, column := range sharedColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
arg := column.convertArg(args[tableOrdinal])
arg := column.convertArg(args[tableOrdinal], false)
sharedArgs = append(sharedArgs, arg)
}
@ -481,13 +485,13 @@ func BuildDMLUpdateQuery(databaseName, tableName string, tableColumns, sharedCol
for _, column := range sharedColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
arg := column.convertArg(valueArgs[tableOrdinal])
arg := column.convertArg(valueArgs[tableOrdinal], false)
sharedArgs = append(sharedArgs, arg)
}
for _, column := range uniqueKeyColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
arg := column.convertArg(whereArgs[tableOrdinal])
arg := column.convertArg(whereArgs[tableOrdinal], true)
uniqueKeyArgs = append(uniqueKeyArgs, arg)
}
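
The `ELT(?, …)` tokens above rely on MySQL's `ELT()` function to map the 1-based enum index carried in the binlog back to its textual value. A small sketch of the generated clause; the column name `e` and the value list are illustrative:

```go
package main

import "fmt"

func main() {
	// For an enum column converted to text, the binlog carries the numeric
	// enum index; the generated SET clause turns it back into text via ELT():
	enumValues := "'red','green','blue','orange'" // as returned by ParseEnumValues
	setToken := fmt.Sprintf("%s=ELT(?, %s)", "`e`", enumValues)
	fmt.Println(setToken) // `e`=ELT(?, 'red','green','blue','orange')
	// Binding index 2 to the placeholder makes MySQL evaluate
	// ELT(2, 'red','green','blue','orange') => 'green'.
}
```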

View File

@ -16,6 +16,7 @@ var (
renameColumnRegexp = regexp.MustCompile(`(?i)\bchange\s+(column\s+|)([\S]+)\s+([\S]+)\s+`)
dropColumnRegexp = regexp.MustCompile(`(?i)\bdrop\s+(column\s+|)([\S]+)$`)
renameTableRegexp = regexp.MustCompile(`(?i)\brename\s+(to|as)\s+`)
autoIncrementRegexp = regexp.MustCompile(`(?i)\bauto_increment[\s]*=[\s]*([0-9]+)`)
alterTableExplicitSchemaTableRegexps = []*regexp.Regexp{
// ALTER TABLE `scm`.`tbl` something
regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `[.]` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
@ -32,12 +33,14 @@ var (
// ALTER TABLE tbl something
regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)\s+(.*$)`),
}
enumValuesRegexp = regexp.MustCompile("^enum[(](.*)[)]$")
)
type AlterTableParser struct {
columnRenameMap map[string]string
droppedColumns map[string]bool
isRenameTable bool
columnRenameMap map[string]string
droppedColumns map[string]bool
isRenameTable bool
isAutoIncrementDefined bool
alterStatementOptions string
alterTokens []string
@ -122,6 +125,12 @@ func (this *AlterTableParser) parseAlterToken(alterToken string) (err error) {
this.isRenameTable = true
}
}
{
// auto_increment
if autoIncrementRegexp.MatchString(alterToken) {
this.isAutoIncrementDefined = true
}
}
return nil
}
@ -173,6 +182,11 @@ func (this *AlterTableParser) DroppedColumnsMap() map[string]bool {
func (this *AlterTableParser) IsRenameTable() bool {
return this.isRenameTable
}
func (this *AlterTableParser) IsAutoIncrementDefined() bool {
return this.isAutoIncrementDefined
}
func (this *AlterTableParser) GetExplicitSchema() string {
return this.explicitSchema
}
@ -192,3 +206,10 @@ func (this *AlterTableParser) HasExplicitTable() bool {
func (this *AlterTableParser) GetAlterStatementOptions() string {
return this.alterStatementOptions
}
func ParseEnumValues(enumColumnType string) string {
if submatch := enumValuesRegexp.FindStringSubmatch(enumColumnType); len(submatch) > 0 {
return submatch[1]
}
return enumColumnType
}

View File

@ -24,6 +24,7 @@ func TestParseAlterStatement(t *testing.T) {
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
}
func TestParseAlterStatementTrivialRename(t *testing.T) {
@ -33,10 +34,31 @@ func TestParseAlterStatementTrivialRename(t *testing.T) {
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
test.S(t).ExpectEquals(len(parser.columnRenameMap), 1)
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
}
func TestParseAlterStatementWithAutoIncrement(t *testing.T) {
statements := []string{
"auto_increment=7",
"auto_increment = 7",
"AUTO_INCREMENT = 71",
"add column t int, change ts ts timestamp, auto_increment=7 engine=innodb",
"add column t int, change ts ts timestamp, auto_increment =7 engine=innodb",
"add column t int, change ts ts timestamp, AUTO_INCREMENT = 7 engine=innodb",
"add column t int, change ts ts timestamp, engine=innodb auto_increment=73425",
}
for _, statement := range statements {
parser := NewAlterTableParser()
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectTrue(parser.IsAutoIncrementDefined())
}
}
func TestParseAlterStatementTrivialRenames(t *testing.T) {
statement := "add column t int, change ts ts timestamp, CHANGE f `f` float, engine=innodb"
parser := NewAlterTableParser()
@ -44,6 +66,7 @@ func TestParseAlterStatementTrivialRenames(t *testing.T) {
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
test.S(t).ExpectEquals(len(parser.columnRenameMap), 2)
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
test.S(t).ExpectEquals(parser.columnRenameMap["f"], "f")
@ -64,6 +87,7 @@ func TestParseAlterStatementNonTrivial(t *testing.T) {
parser := NewAlterTableParser()
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
renames := parser.GetNonTrivialRenames()
test.S(t).ExpectEquals(len(renames), 2)
@ -298,3 +322,21 @@ func TestParseAlterStatementExplicitTable(t *testing.T) {
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b", "add index idx(i)"}))
}
}
func TestParseEnumValues(t *testing.T) {
{
s := "enum('red','green','blue','orange')"
values := ParseEnumValues(s)
test.S(t).ExpectEquals(values, "'red','green','blue','orange'")
}
{
s := "('red','green','blue','orange')"
values := ParseEnumValues(s)
test.S(t).ExpectEquals(values, "('red','green','blue','orange')")
}
{
s := "zzz"
values := ParseEnumValues(s)
test.S(t).ExpectEquals(values, "zzz")
}
}

View File

@ -6,6 +6,7 @@
package sql
import (
"bytes"
"fmt"
"reflect"
"strconv"
@ -22,6 +23,7 @@ const (
MediumIntColumnType
JSONColumnType
FloatColumnType
BinaryColumnType
)
const maxMediumintUnsigned int32 = 16777215
@ -31,19 +33,37 @@ type TimezoneConversion struct {
}
type Column struct {
Name string
IsUnsigned bool
Charset string
Type ColumnType
timezoneConversion *TimezoneConversion
Name string
IsUnsigned bool
Charset string
Type ColumnType
EnumValues string
timezoneConversion *TimezoneConversion
enumToTextConversion bool
// add octet length for binary type; fixes binary values whose trailing 0x00 bytes get clipped in the MySQL binlog.
// https://github.com/github/gh-ost/issues/909
BinaryOctetLength uint
}
func (this *Column) convertArg(arg interface{}) interface{} {
func (this *Column) convertArg(arg interface{}, isUniqueKeyColumn bool) interface{} {
if s, ok := arg.(string); ok {
// string, charset conversion
if encoding, ok := charsetEncodingMap[this.Charset]; ok {
arg, _ = encoding.NewDecoder().String(s)
}
if this.Type == BinaryColumnType && isUniqueKeyColumn {
arg2Bytes := []byte(arg.(string))
size := len(arg2Bytes)
if uint(size) < this.BinaryOctetLength {
buf := bytes.NewBuffer(arg2Bytes)
for i := uint(0); i < (this.BinaryOctetLength - uint(size)); i++ {
buf.Write([]byte{0})
}
arg = buf.String()
}
}
return arg
}
@ -179,6 +199,18 @@ func (this *ColumnList) HasTimezoneConversion(columnName string) bool {
return this.GetColumn(columnName).timezoneConversion != nil
}
func (this *ColumnList) SetEnumToTextConversion(columnName string) {
this.GetColumn(columnName).enumToTextConversion = true
}
func (this *ColumnList) IsEnumToTextConversion(columnName string) bool {
return this.GetColumn(columnName).enumToTextConversion
}
func (this *ColumnList) SetEnumValues(columnName string, enumValues string) {
this.GetColumn(columnName).EnumValues = enumValues
}
func (this *ColumnList) String() string {
return strings.Join(this.Names(), ",")
}
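
The `convertArg` change above addresses issue #909: when a `BINARY(n)` column is part of the unique key, a value decoded from the binlog may arrive with trailing `0x00` bytes stripped, so it is right-padded back to the column's octet length before being used in a WHERE clause. A standalone sketch of just the padding step:

```go
package main

import (
	"bytes"
	"fmt"
)

// padBinary right-pads a BINARY(n) value back to its declared octet length,
// mirroring the unique-key branch of convertArg.
func padBinary(arg string, octetLength uint) string {
	raw := []byte(arg)
	if uint(len(raw)) < octetLength {
		raw = append(raw, bytes.Repeat([]byte{0}, int(octetLength)-len(raw))...)
	}
	return string(raw)
}

func main() {
	padded := padBinary("ab", 4)
	fmt.Printf("%q (length %d)\n", padded, len(padded)) // "ab\x00\x00" (length 4)
}
```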

View File

@ -0,0 +1,17 @@
drop event if exists gh_ost_test;
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
primary key(id)
) auto_increment=1;
insert into gh_ost_test values (NULL, 11);
insert into gh_ost_test values (NULL, 13);
insert into gh_ost_test values (NULL, 17);
insert into gh_ost_test values (NULL, 23);
insert into gh_ost_test values (NULL, 29);
insert into gh_ost_test values (NULL, 31);
insert into gh_ost_test values (NULL, 37);
delete from gh_ost_test where id>=5;

View File

@ -0,0 +1 @@
AUTO_INCREMENT=7

View File

@ -0,0 +1 @@
--alter='AUTO_INCREMENT=7'

View File

@ -0,0 +1,17 @@
drop event if exists gh_ost_test;
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
primary key(id)
) auto_increment=1;
insert into gh_ost_test values (NULL, 11);
insert into gh_ost_test values (NULL, 13);
insert into gh_ost_test values (NULL, 17);
insert into gh_ost_test values (NULL, 23);
insert into gh_ost_test values (NULL, 29);
insert into gh_ost_test values (NULL, 31);
insert into gh_ost_test values (NULL, 37);
delete from gh_ost_test where id>=5;

View File

@ -0,0 +1 @@
AUTO_INCREMENT=8

View File

@ -0,0 +1,13 @@
drop event if exists gh_ost_test;
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
primary key(id)
) auto_increment=1;
insert into gh_ost_test values (NULL, 11);
insert into gh_ost_test values (NULL, 13);
insert into gh_ost_test values (NULL, 17);
insert into gh_ost_test values (NULL, 23);

View File

@ -0,0 +1 @@
AUTO_INCREMENT=5

View File

@ -0,0 +1,26 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
i int not null,
e enum('red', 'green', 'blue', 'orange') null default null collate 'utf8_bin',
primary key(id)
) auto_increment=1;
insert into gh_ost_test values (null, 7, 'red');
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test values (null, 11, 'red');
insert into gh_ost_test values (null, 13, 'green');
insert into gh_ost_test values (null, 17, 'blue');
set @last_insert_id := last_insert_id();
update gh_ost_test set e='orange' where id = @last_insert_id;
end ;;

View File

@ -0,0 +1 @@
--alter="change e e varchar(32) not null default ''"

View File

@ -0,0 +1,30 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
id int auto_increment,
`idb` varchar(36) CHARACTER SET utf8mb4 GENERATED ALWAYS AS (json_unquote(json_extract(`jsonobj`,_utf8mb4'$._id'))) STORED NOT NULL,
`jsonobj` json NOT NULL,
PRIMARY KEY (`id`,`idb`)
) auto_increment=1;
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":2}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":3}');
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
on schedule every 1 second
starts current_timestamp
ends current_timestamp + interval 60 second
on completion not preserve
enable
do
begin
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":5}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":7}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":11}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":13}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":17}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":19}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":23}');
insert into gh_ost_test (id, jsonobj) values (null, '{"_id":27}');
end ;;

View File

@ -0,0 +1 @@
(5.5|5.6)

View File

@ -12,6 +12,7 @@ test_logfile=/tmp/gh-ost-test.log
default_ghost_binary=/tmp/gh-ost-test
ghost_binary=""
exec_command_file=/tmp/gh-ost-test.bash
ghost_structure_output_file=/tmp/gh-ost-test.ghost.structure.sql
orig_content_output_file=/tmp/gh-ost-test.orig.content.csv
ghost_content_output_file=/tmp/gh-ost-test.ghost.content.csv
throttle_flag_file=/tmp/gh-ost-test.ghost.throttle.flag
@ -204,6 +205,18 @@ test_single() {
return 1
fi
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "show create table _gh_ost_test_gho\G" -ss > $ghost_structure_output_file
if [ -f $tests_path/$test_name/expect_table_structure ] ; then
expected_table_structure="$(cat $tests_path/$test_name/expect_table_structure)"
if ! grep -q "$expected_table_structure" $ghost_structure_output_file ; then
echo
echo "ERROR $test_name: table structure was expected to include ${expected_table_structure} but did not. cat $ghost_structure_output_file:"
cat $ghost_structure_output_file
return 1
fi
fi
echo_dot
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${orig_columns} from gh_ost_test ${order_by}" -ss > $orig_content_output_file
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${ghost_columns} from _gh_ost_test_gho ${order_by}" -ss > $ghost_content_output_file
@ -229,7 +242,10 @@ build_binary() {
echo "Using binary: $ghost_binary"
return 0
fi
go build -o $ghost_binary go/cmd/gh-ost/main.go
# TODO: remove GO111MODULE once gh-ost uses Go modules
GO111MODULE=off go build -o $ghost_binary go/cmd/gh-ost/main.go
if [ $? -ne 0 ] ; then
echo "Build failure"
exit 1

View File

@ -17,4 +17,5 @@ export GOPATH="$PWD/.gopath"
cd .gopath/src/github.com/github/gh-ost
# We put the binaries directly into the bindir, because we have no need for shim wrappers
go build -o "$bindir/gh-ost" -ldflags "-X main.AppVersion=${version} -X main.BuildDescribe=${describe}" ./go/cmd/gh-ost/main.go
# TODO: remove GO111MODULE once gh-ost uses Go modules
GO111MODULE=off go build -o "$bindir/gh-ost" -ldflags "-X main.AppVersion=${version} -X main.BuildDescribe=${describe}" ./go/cmd/gh-ost/main.go

View File

@ -30,8 +30,6 @@ cp ${tarball}.gz "$BUILD_ARTIFACT_DIR"/gh-ost/
### HACK HACK HACK HACK ###
# blame @carlosmn, @mattr and @timvaillancourt-
# Allow builds on buster to also be used for stretch + jessie
# Allow builds on buster to also be used for stretch
stretch_tarball_name=$(echo $(basename "${tarball}") | sed s/-buster-/-stretch-/)
jessie_tarball_name=$(echo $(basename "${stretch_tarball_name}") | sed s/-stretch-/-jessie-/)
cp ${tarball}.gz "$BUILD_ARTIFACT_DIR/gh-ost/${stretch_tarball_name}.gz"
cp ${tarball}.gz "$BUILD_ARTIFACT_DIR/gh-ost/${jessie_tarball_name}.gz"

View File

@ -4,19 +4,25 @@ set -e
whoami
# Clone gh-ost-ci-env
# Only clone if not already running locally at latest commit
remote_commit=$(git ls-remote https://github.com/github/gh-ost-ci-env.git HEAD | cut -f1)
local_commit="unknown"
[ -d "gh-ost-ci-env" ] && local_commit=$(cd gh-ost-ci-env && git log --format="%H" -n 1)
fetch_ci_env() {
# Clone gh-ost-ci-env
# Only clone if not already running locally at latest commit
remote_commit=$(git ls-remote https://github.com/github/gh-ost-ci-env.git HEAD | cut -f1)
local_commit="unknown"
[ -d "gh-ost-ci-env" ] && local_commit=$(cd gh-ost-ci-env && git log --format="%H" -n 1)
echo "remote commit is: $remote_commit"
echo "local commit is: $local_commit"
echo "remote commit is: $remote_commit"
echo "local commit is: $local_commit"
if [ "$remote_commit" != "$local_commit" ] ; then
rm -rf ./gh-ost-ci-env
git clone https://github.com/github/gh-ost-ci-env.git
fi
if [ "$remote_commit" != "$local_commit" ] ; then
rm -rf ./gh-ost-ci-env
git clone https://github.com/github/gh-ost-ci-env.git
fi
}
test_dbdeployer() {
gh-ost-ci-env/bin/linux/dbdeployer --version
}
test_mysql_version() {
local mysql_version
@ -30,17 +36,18 @@ test_mysql_version() {
mkdir -p sandbox/binary
rm -rf sandbox/binary/*
gh-ost-ci-env/bin/linux/dbdeployer unpack gh-ost-ci-env/mysql-tarballs/"$mysql_version".tar.gz --unpack-version="$mysql_version" --sandbox-binary ${PWD}/sandbox/binary
gh-ost-ci-env/bin/linux/dbdeployer unpack gh-ost-ci-env/mysql-tarballs/"$mysql_version".tar.xz --sandbox-binary ${PWD}/sandbox/binary
mkdir -p sandboxes
rm -rf sandboxes/*
if echo "$mysql_version" | egrep "5[.]5[.]" ; then
local mysql_version_num=${mysql_version#*-}
if echo "$mysql_version_num" | egrep "5[.]5[.]" ; then
gtid=""
else
gtid="--gtid"
fi
gh-ost-ci-env/bin/linux/dbdeployer deploy replication "$mysql_version" --nodes 2 --sandbox-binary ${PWD}/sandbox/binary --sandbox-home ${PWD}/sandboxes ${gtid} --my-cnf-options log_slave_updates --my-cnf-options log_bin --my-cnf-options binlog_format=ROW --sandbox-directory rsandbox
gh-ost-ci-env/bin/linux/dbdeployer deploy replication "$mysql_version_num" --nodes 2 --sandbox-binary ${PWD}/sandbox/binary --sandbox-home ${PWD}/sandboxes ${gtid} --my-cnf-options log_slave_updates --my-cnf-options log_bin --my-cnf-options binlog_format=ROW --sandbox-directory rsandbox
sed '/sandboxes/d' -i gh-ost-ci-env/bin/gh-ost-test-mysql-master
echo 'sandboxes/rsandbox/m "$@"' >> gh-ost-ci-env/bin/gh-ost-test-mysql-master
@ -59,12 +66,26 @@ test_mysql_version() {
find sandboxes -name "stop_all" | bash
}
echo "Building..."
. script/build
# Test all versions:
find gh-ost-ci-env/mysql-tarballs/ -name "*.tar.gz" | while read f ; do basename $f ".tar.gz" ; done | sort -r | while read mysql_version ; do
echo "found MySQL version: $mysql_version"
done
find gh-ost-ci-env/mysql-tarballs/ -name "*.tar.gz" | while read f ; do basename $f ".tar.gz" ; done | sort -r | while read mysql_version ; do
test_mysql_version "$mysql_version"
done
main() {
fetch_ci_env
test_dbdeployer
echo "Building..."
. script/build
# TEST_MYSQL_VERSION is set by the replica-tests CI job
if [ -z "$TEST_MYSQL_VERSION" ]; then
# Test all versions:
find gh-ost-ci-env/mysql-tarballs/ -name "*.tar.xz" | while read f ; do basename $f ".tar.xz" ; done | sort -r | while read mysql_version ; do
echo "found MySQL version: $mysql_version"
done
find gh-ost-ci-env/mysql-tarballs/ -name "*.tar.xz" | while read f ; do basename $f ".tar.xz" ; done | sort -r | while read mysql_version ; do
test_mysql_version "$mysql_version"
done
else
echo "found MySQL version: $TEST_MYSQL_VERSION"
test_mysql_version "$TEST_MYSQL_VERSION"
fi
}
main

View File

@ -1,7 +1,7 @@
#!/bin/bash
PREFERRED_GO_VERSION=go1.14.7
SUPPORTED_GO_VERSIONS='go1.1[456]'
PREFERRED_GO_VERSION=go1.16.4
SUPPORTED_GO_VERSIONS='go1.1[56]'
GO_PKG_DARWIN=${PREFERRED_GO_VERSION}.darwin-amd64.pkg
GO_PKG_DARWIN_SHA=0f215de06019a054a3da46a0722989986c956d719c7a0a8fc38a5f3c216d6f6b

View File

@ -13,5 +13,6 @@ script/build
cd .gopath/src/github.com/github/gh-ost
# TODO: remove GO111MODULE once gh-ost uses Go modules
echo "Running unit tests"
go test ./go/...
GO111MODULE=off go test ./go/...

View File

@ -5,7 +5,10 @@ retval=0
for testsuite in base mysql sql
do
pushd go/${testsuite} > /dev/null;
go test $*;
# TODO: remove GO111MODULE once gh-ost uses Go modules
GO111MODULE=off go test $*;
[ $? -ne 0 ] && retval=1
popd > /dev/null;
done

View File

@ -117,6 +117,19 @@ func (this *RowMap) GetUintD(key string, def uint) uint {
return uint(res)
}
func (this *RowMap) GetUint64(key string) uint64 {
res, _ := strconv.ParseUint(this.GetString(key), 10, 0)
return res
}
func (this *RowMap) GetUint64D(key string, def uint64) uint64 {
res, err := strconv.ParseUint(this.GetString(key), 10, 0)
if err != nil {
return def
}
return uint64(res)
}
func (this *RowMap) GetBool(key string) bool {
return this.GetInt(key) != 0
}