diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 0000000..685afbe --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,21 @@ +name: golangci-lint +on: + push: + branches: + - master + pull_request: +permissions: + contents: read + # Optional: allow read access to pull request. Use with `only-new-issues` option. + # pull-requests: read +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v3 + with: + go-version: 1.16 + - uses: actions/checkout@v3 + - name: golangci-lint + uses: golangci/golangci-lint-action@v3 diff --git a/.github/workflows/replica-tests.yml b/.github/workflows/replica-tests.yml index f1c887c..360e13b 100644 --- a/.github/workflows/replica-tests.yml +++ b/.github/workflows/replica-tests.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - version: [mysql-5.5.62,mysql-5.6.43,mysql-5.7.25,mysql-8.0.16] + version: [mysql-5.7.25,mysql-8.0.16] steps: - uses: actions/checkout@v2 diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..4a487bd --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,12 @@ +run: + timeout: 5m + modules-download-mode: readonly + +linters: + disable: + - errcheck + - staticcheck + enable: + - gosimple + - govet + - unused diff --git a/build.sh b/build.sh index 6efa2d4..068bdfb 100755 --- a/build.sh +++ b/build.sh @@ -18,30 +18,31 @@ function build { GOOS=$3 GOARCH=$4 - if ! go version | egrep -q 'go(1\.1[56])' ; then + if ! go version | egrep -q 'go1\.(1[5-9]|[2-9][0-9]{1})' ; then echo "go version must be 1.15 or above" exit 1 fi - echo "Building ${osname} binary" + echo "Building ${osname}-${GOARCH} binary" export GOOS export GOARCH go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go if [ $? -ne 0 ]; then - echo "Build failed for ${osname}" + echo "Build failed for ${osname} ${GOARCH}." exit 1 fi - (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${timestamp}.tar.gz $target) + (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${GOARCH}-${timestamp}.tar.gz $target) if [ "$GOOS" == "linux" ] ; then echo "Creating Distro full packages" builddir=$(setuptree) cp $buildpath/$target $builddir/gh-ost/usr/bin cd $buildpath - fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach ' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm --rpm-rpmbuild-define "_build_id_links none" . - fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach ' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files . + fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm --rpm-rpmbuild-define "_build_id_links none" --rpm-os linux . + fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files . 
+ cd - fi } @@ -62,10 +63,15 @@ main() { mkdir -p ${buildpath} rm -rf ${buildpath:?}/* build GNU/Linux linux linux amd64 - # build macOS osx darwin amd64 + build GNU/Linux linux linux arm64 + build macOS osx darwin amd64 + build macOS osx darwin arm64 echo "Binaries found in:" find $buildpath/gh-ost* -type f -maxdepth 1 + + echo "Checksums:" + (cd $buildpath && shasum -a256 gh-ost* 2>/dev/null) } main "$@" diff --git a/doc/command-line-flags.md b/doc/command-line-flags.md index 62d3d11..417255a 100644 --- a/doc/command-line-flags.md +++ b/doc/command-line-flags.md @@ -22,7 +22,7 @@ If, for some reason, you do not wish `gh-ost` to connect to a replica, you may c ### approve-renamed-columns -When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise the new structure may also look like some column was dropped and another was added. +When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise, the new structure may also look like some column was dropped and another was added. `gh-ost` will print out what it thinks the _rename_ implied, but will not issue the migration unless you provide with `--approve-renamed-columns`. @@ -32,7 +32,7 @@ If you think `gh-ost` is mistaken and that there's actually no _rename_ involved `gh-ost` infers the identity of the master server by crawling up the replication topology. You may explicitly tell `gh-ost` the identity of the master host via `--assume-master-host=the.master.com`. This is useful in: -- _master-master_ topologies (together with [`--allow-master-master`](#allow-master-master)), where `gh-ost` can arbitrarily pick one of the co-masters and you prefer that it picks a specific one +- _master-master_ topologies (together with [`--allow-master-master`](#allow-master-master)), where `gh-ost` can arbitrarily pick one of the co-masters, and you prefer that it picks a specific one - _tungsten replicator_ topologies (together with [`--tungsten`](#tungsten)), where `gh-ost` is unable to crawl and detect the master ### assume-rbr @@ -61,7 +61,13 @@ Comma delimited status-name=threshold, same format as [`--max-load`](#max-load). `--critical-load` defines a threshold that, when met, `gh-ost` panics and bails out. The default behavior is to bail out immediately when meeting this threshold. -This may sometimes lead to migrations bailing out on a very short spike, that, while in itself is impacting production and is worth investigating, isn't reason enough to kill a 10 hour migration. +This may sometimes lead to migrations bailing out on a very short spike, that, while in itself is impacting production and is worth investigating, isn't reason enough to kill a 10-hour migration. + +### critical-load-hibernate-seconds + +When `--critical-load-hibernate-seconds` is non-zero (e.g. `--critical-load-hibernate-seconds=300`), `critical-load` does not panic and bail out; instead, `gh-ost` goes into hibernation for the specified duration. It will not read/write anything from/to any server during this time. Execution then continues upon waking from hibernation. + +If `critical-load` is met again, `gh-ost` will repeat this cycle, and never panic and bail out. 
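Below is a minimal sketch of the hibernation behavior described above. It is not taken from the gh-ost source; the `migrationContext` struct, the `onCriticalLoad` handler and its arguments are hypothetical names used only to illustrate how a zero vs. non-zero `--critical-load-hibernate-seconds` value changes the outcome of a critical-load hit.

```go
package main

import (
	"fmt"
	"time"
)

// migrationContext carries only the single setting discussed above; the real
// gh-ost MigrationContext has many more fields.
type migrationContext struct {
	CriticalLoadHibernateSeconds int64
}

// onCriticalLoad is a hypothetical handler showing the documented behavior:
// with a zero hibernate value the migration bails out immediately; otherwise
// it "hibernates" (does nothing) for the configured duration and then resumes.
func (ctx *migrationContext) onCriticalLoad(variable string, value, threshold int64) error {
	if ctx.CriticalLoadHibernateSeconds == 0 {
		// Default behavior: panic and bail out as soon as critical-load is met.
		return fmt.Errorf("critical load met: %s=%d, threshold=%d", variable, value, threshold)
	}
	// Hibernate: stop all reads/writes for the configured duration, then let
	// execution continue. If critical-load is met again later, this cycle
	// simply repeats and the migration never bails out.
	hibernation := time.Duration(ctx.CriticalLoadHibernateSeconds) * time.Second
	fmt.Printf("critical load met (%s=%d >= %d); hibernating for %s\n", variable, value, threshold, hibernation)
	time.Sleep(hibernation)
	return nil
}

func main() {
	// Shortened to 2s for the demo; a realistic value would be e.g.
	// --critical-load-hibernate-seconds=300.
	ctx := &migrationContext{CriticalLoadHibernateSeconds: 2}
	if err := ctx.onCriticalLoad("Threads_running", 120, 100); err != nil {
		fmt.Println("bailing out:", err)
	}
	fmt.Println("woke from hibernation; execution continues")
}
```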
### critical-load-interval-millis @@ -98,7 +104,7 @@ Noteworthy is that setting `--dml-batch-size` to higher value _does not_ mean `g ### exact-rowcount -A `gh-ost` execution need to copy whatever rows you have in your existing table onto the ghost table. This can, and often be, a large number. Exactly what that number is? +A `gh-ost` execution needs to copy whatever rows you have in your existing table onto the ghost table. This can, and often will, be a large number. Exactly what is that number? `gh-ost` initially estimates the number of rows in your table by issuing an `explain select * from your_table`. This will use statistics on your table and return with a rough estimate. How rough? It might go as low as half or as high as double the actual number of rows in your table. This is the same method as used in [`pt-online-schema-change`](https://www.percona.com/doc/percona-toolkit/2.2/pt-online-schema-change.html). `gh-ost` also supports the `--exact-rowcount` flag. When this flag is given, two things happen: @@ -135,6 +141,10 @@ Add this flag when executing on a 1st generation Google Cloud Platform (GCP). Default 100. See [`subsecond-lag`](subsecond-lag.md) for details. +### hooks-status-interval + +Defaults to 60 seconds. Configures how often the `gh-ost-on-status` hook is called; see [`hooks`](hooks.md) for full details on how to use hooks. + ### initially-drop-ghost-table `gh-ost` maintains two tables while migrating: the _ghost_ table (which is synced from your original table and finally replaces it) and a changelog table, which is used internally for bookkeeping. By default, it panics and aborts if it sees those tables upon startup. Provide `--initially-drop-ghost-table` and `--initially-drop-old-table` to let `gh-ost` know it's OK to drop them beforehand. @@ -230,7 +240,7 @@ Provide a command delimited list of replicas; `gh-ost` will throttle when any of ### throttle-http -Provide a HTTP endpoint; `gh-ost` will issue `HEAD` requests on given URL and throttle whenever response status code is not `200`. The URL can be queried and updated dynamically via [interactive commands](interactive-commands.md). Empty URL disables the HTTP check. +Provide an HTTP endpoint; `gh-ost` will issue `HEAD` requests on given URL and throttle whenever response status code is not `200`. The URL can be queried and updated dynamically via [interactive commands](interactive-commands.md). Empty URL disables the HTTP check. ### timestamp-old-table diff --git a/doc/hooks.md b/doc/hooks.md index 91e1311..c1fe594 100644 --- a/doc/hooks.md +++ b/doc/hooks.md @@ -68,6 +68,7 @@ The following variables are available on all hooks: - `GH_OST_INSPECTED_LAG` - lag in seconds (floating point) of inspected server - `GH_OST_HEARTBEAT_LAG` - lag in seconds (floating point) of heartbeat - `GH_OST_PROGRESS` - progress pct ([0..100], floating point) of migration +- `GH_OST_ETA_SECONDS` - estimated duration until migration finishes in seconds - `GH_OST_MIGRATED_HOST` - `GH_OST_INSPECTED_HOST` - `GH_OST_EXECUTING_HOST` diff --git a/doc/requirements-and-limitations.md b/doc/requirements-and-limitations.md index e09ae4f..0521028 100644 --- a/doc/requirements-and-limitations.md +++ b/doc/requirements-and-limitations.md @@ -2,6 +2,8 @@ ### Requirements +- `gh-ost` currently requires MySQL versions 5.7 and greater. + - You will need to have one server serving Row Based Replication (RBR) format binary logs. Right now `FULL` row image is supported. `MINIMAL` to be supported in the near future. `gh-ost` prefers to work with replicas.
You may [still have your master configured with Statement Based Replication](migrating-with-sbr.md) (SBR). - If you are using a replica, the table must have an identical schema between the master and replica. diff --git a/doc/throttle.md b/doc/throttle.md index 2ebc2ee..8f06b2a 100644 --- a/doc/throttle.md +++ b/doc/throttle.md @@ -38,7 +38,7 @@ Note that you may dynamically change both `--max-lag-millis` and the `throttle-c `--max-load='Threads_running=100,Threads_connected=500'` - Metrics must be valid, numeric [status variables](http://dev.mysql.com/doc/refman/5.6/en/server-status-variables.html) + Metrics must be valid, numeric [status variables](https://dev.mysql.com/doc/refman/5.7/en/server-status-variables.html) #### Throttle query @@ -97,7 +97,7 @@ Copy: 0/2915 0.0%; Applied: 0; Backlog: 0/100; Elapsed: 42s(copy), 42s(total); s Throttling time is limited by the availability of the binary logs. When throttling begins, `gh-ost` suspends reading the binary logs, and expects to resume reading from same binary log where it paused. -Your availability of binary logs is typically determined by the [expire_logs_days](https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_expire_logs_days) variable. If you have `expire_logs_days = 10` (or check `select @@global.expire_logs_days`), then you should be able to throttle for up to `10` days. +Your availability of binary logs is typically determined by the [expire_logs_days](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_expire_logs_days) variable. If you have `expire_logs_days = 10` (or check `select @@global.expire_logs_days`), then you should be able to throttle for up to `10` days. Having said that, throttling for so long is far fetching, in that the `gh-ost` process itself must be kept alive during that time; and the amount of binary logs to process once it resumes will potentially take days to replay. diff --git a/doc/why-triggerless.md b/doc/why-triggerless.md index 2ea8c81..1e7d97a 100644 --- a/doc/why-triggerless.md +++ b/doc/why-triggerless.md @@ -7,7 +7,7 @@ Existing MySQL schema migration tools: - [LHM](https://github.com/soundcloud/lhm) - [oak-online-alter-table](https://github.com/shlomi-noach/openarkkit) -are all using [triggers](http://dev.mysql.com/doc/refman/5.6/en/triggers.html) to propagate live changes on your table onto a ghost/shadow table that is slowly being synchronized. The tools not all work the same: while most use a synchronous approach (all changes applied on the ghost table), the Facebook tool uses an asynchronous approach (changes are appended to a changelog table, later reviewed and applied on ghost table). +are all using [triggers](https://dev.mysql.com/doc/refman/5.7/en/triggers.html) to propagate live changes on your table onto a ghost/shadow table that is slowly being synchronized. The tools not all work the same: while most use a synchronous approach (all changes applied on the ghost table), the Facebook tool uses an asynchronous approach (changes are appended to a changelog table, later reviewed and applied on ghost table). Use of triggers simplifies a lot of the flow in doing a live table migration, but also poses some limitations or difficulties. Here are reasons why we choose to [design a triggerless solution](triggerless-design.md) to schema migrations. diff --git a/go/base/context.go b/go/base/context.go index f8054d6..93f84ce 100644 --- a/go/base/context.go +++ b/go/base/context.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. 
+ Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -15,7 +15,7 @@ import ( "sync/atomic" "time" - "github.com/satori/go.uuid" + uuid "github.com/satori/go.uuid" "github.com/github/gh-ost/go/mysql" "github.com/github/gh-ost/go/sql" @@ -140,6 +140,7 @@ type MigrationContext struct { HooksHintMessage string HooksHintOwner string HooksHintToken string + HooksStatusIntervalSec int64 DropServeSocket bool ServeSocketFile string @@ -551,8 +552,8 @@ func (this *MigrationContext) SetMaxLagMillisecondsThrottleThreshold(maxLagMilli } func (this *MigrationContext) SetChunkSize(chunkSize int64) { - if chunkSize < 100 { - chunkSize = 100 + if chunkSize < 10 { + chunkSize = 10 } if chunkSize > 100000 { chunkSize = 100000 diff --git a/go/base/context_test.go b/go/base/context_test.go index 07e00ce..de208ba 100644 --- a/go/base/context_test.go +++ b/go/base/context_test.go @@ -1,12 +1,11 @@ /* - Copyright 2021 GitHub Inc. + Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ package base import ( - "fmt" "io/ioutil" "os" "testing" @@ -89,7 +88,7 @@ func TestReadConfigFile(t *testing.T) { } defer os.Remove(f.Name()) - f.Write([]byte(fmt.Sprintf("[client]\nuser=test\npassword=123456"))) + f.Write([]byte("[client]\nuser=test\npassword=123456")) context := NewMigrationContext() context.ConfigFile = f.Name() if err := context.ReadConfigFile(); err != nil { @@ -109,7 +108,7 @@ func TestReadConfigFile(t *testing.T) { } defer os.Remove(f.Name()) - f.Write([]byte(fmt.Sprintf("[osc]\nmax_load=10"))) + f.Write([]byte("[osc]\nmax_load=10")) context := NewMigrationContext() context.ConfigFile = f.Name() if err := context.ReadConfigFile(); err != nil { diff --git a/go/base/default_logger.go b/go/base/default_logger.go index 0bea419..59563ff 100644 --- a/go/base/default_logger.go +++ b/go/base/default_logger.go @@ -1,3 +1,8 @@ +/* + Copyright 2022 GitHub Inc. + See https://github.com/github/gh-ost/blob/master/LICENSE +*/ + package base import ( @@ -12,22 +17,18 @@ func NewDefaultLogger() *simpleLogger { func (*simpleLogger) Debug(args ...interface{}) { log.Debug(args[0].(string), args[1:]) - return } func (*simpleLogger) Debugf(format string, args ...interface{}) { log.Debugf(format, args...) - return } func (*simpleLogger) Info(args ...interface{}) { log.Info(args[0].(string), args[1:]) - return } func (*simpleLogger) Infof(format string, args ...interface{}) { log.Infof(format, args...) - return } func (*simpleLogger) Warning(args ...interface{}) error { @@ -64,10 +65,8 @@ func (*simpleLogger) Fatale(err error) error { func (*simpleLogger) SetLevel(level log.LogLevel) { log.SetLevel(level) - return } func (*simpleLogger) SetPrintStackTrace(printStackTraceFlag bool) { log.SetPrintStackTrace(printStackTraceFlag) - return } diff --git a/go/base/utils.go b/go/base/utils.go index ed14514..c0e3293 100644 --- a/go/base/utils.go +++ b/go/base/utils.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. 
See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -25,9 +25,7 @@ func PrettifyDurationOutput(d time.Duration) string { if d < time.Second { return "0s" } - result := fmt.Sprintf("%s", d) - result = prettifyDurationRegexp.ReplaceAllString(result, "") - return result + return prettifyDurationRegexp.ReplaceAllString(d.String(), "") } func FileExists(fileName string) bool { diff --git a/go/binlog/gomysql_reader.go b/go/binlog/gomysql_reader.go index 454b27a..1e936d2 100644 --- a/go/binlog/gomysql_reader.go +++ b/go/binlog/gomysql_reader.go @@ -1,5 +1,5 @@ /* - Copyright 2021 GitHub Inc. + Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -64,7 +64,10 @@ func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordin this.currentCoordinates = coordinates this.migrationContext.Log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates) // Start sync with specified binlog file and position - this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{this.currentCoordinates.LogFile, uint32(this.currentCoordinates.LogPos)}) + this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{ + Name: this.currentCoordinates.LogFile, + Pos: uint32(this.currentCoordinates.LogPos), + }) return err } diff --git a/go/cmd/gh-ost/main.go b/go/cmd/gh-ost/main.go index 7b5af54..93d8fb9 100644 --- a/go/cmd/gh-ost/main.go +++ b/go/cmd/gh-ost/main.go @@ -8,6 +8,7 @@ package main import ( "flag" "fmt" + "net/url" "os" "os/signal" "syscall" @@ -98,7 +99,7 @@ func main() { flag.BoolVar(&migrationContext.AssumeRBR, "assume-rbr", false, "set to 'true' when you know for certain your server uses 'ROW' binlog_format. gh-ost is unable to tell, event after reading binlog_format, whether the replication process does indeed use 'ROW', and restarts replication to be certain RBR setting is applied. Such operation requires SUPER privileges which you might not have. Setting this flag avoids restarting replication and you can proceed to use gh-ost without SUPER privileges") flag.BoolVar(&migrationContext.CutOverExponentialBackoff, "cut-over-exponential-backoff", false, "Wait exponentially longer intervals between failed cut-over attempts. 
Wait intervals obey a maximum configurable with 'exponential-backoff-max-interval').") exponentialBackoffMaxInterval := flag.Int64("exponential-backoff-max-interval", 64, "Maximum number of seconds to wait between attempts when performing various operations with exponential backoff.") - chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 100-100,000)") + chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 10-100,000)") dmlBatchSize := flag.Int64("dml-batch-size", 10, "batch size for DML events to apply in a single transaction (range 1-100)") defaultRetries := flag.Int64("default-retries", 60, "Default number of retries for various operations before panicking") cutOverLockTimeoutSeconds := flag.Int64("cut-over-lock-timeout-seconds", 3, "Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout)") @@ -124,13 +125,14 @@ func main() { flag.StringVar(&migrationContext.HooksHintMessage, "hooks-hint", "", "arbitrary message to be injected to hooks via GH_OST_HOOKS_HINT, for your convenience") flag.StringVar(&migrationContext.HooksHintOwner, "hooks-hint-owner", "", "arbitrary name of owner to be injected to hooks via GH_OST_HOOKS_HINT_OWNER, for your convenience") flag.StringVar(&migrationContext.HooksHintToken, "hooks-hint-token", "", "arbitrary token to be injected to hooks via GH_OST_HOOKS_HINT_TOKEN, for your convenience") + flag.Int64Var(&migrationContext.HooksStatusIntervalSec, "hooks-status-interval", 60, "how many seconds to wait between calling onStatus hook") flag.UintVar(&migrationContext.ReplicaServerId, "replica-server-id", 99999, "server id used by gh-ost process. Default: 99999") maxLoad := flag.String("max-load", "", "Comma delimited status-name=threshold. e.g: 'Threads_running=100,Threads_connected=500'. When status exceeds threshold, app throttles writes") criticalLoad := flag.String("critical-load", "", "Comma delimited status-name=threshold, same format as --max-load. When status exceeds threshold, app panics and quits") flag.Int64Var(&migrationContext.CriticalLoadIntervalMilliseconds, "critical-load-interval-millis", 0, "When 0, migration immediately bails out upon meeting critical-load. When non-zero, a second check is done after given interval, and migration only bails out if 2nd check still meets critical load") - flag.Int64Var(&migrationContext.CriticalLoadHibernateSeconds, "critical-load-hibernate-seconds", 0, "When nonzero, critical-load does not panic and bail out; instead, gh-ost goes into hibernate for the specified duration. It will not read/write anything to from/to any server") + flag.Int64Var(&migrationContext.CriticalLoadHibernateSeconds, "critical-load-hibernate-seconds", 0, "When non-zero, critical-load does not panic and bail out; instead, gh-ost goes into hibernation for the specified duration. 
It will not read/write anything from/to any server") quiet := flag.Bool("quiet", false, "quiet") verbose := flag.Bool("verbose", false, "verbose") debug := flag.Bool("debug", false, "debug mode (very verbose)") @@ -188,6 +190,11 @@ func main() { log.Fatalf("--database must be provided and database name must not be empty, or --alter must specify database name") } } + + if err := flag.Set("database", url.QueryEscape(migrationContext.DatabaseName)); err != nil { + migrationContext.Log.Fatale(err) + } + if migrationContext.OriginalTableName == "" { if parser.HasExplicitTable() { migrationContext.OriginalTableName = parser.GetExplicitTable() diff --git a/go/logic/hooks.go b/go/logic/hooks.go index 00672ba..0ff296d 100644 --- a/go/logic/hooks.go +++ b/go/logic/hooks.go @@ -1,6 +1,5 @@ /* -/* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -72,9 +71,7 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [ env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_TOKEN=%s", this.migrationContext.HooksHintToken)) env = append(env, fmt.Sprintf("GH_OST_DRY_RUN=%t", this.migrationContext.Noop)) - for _, variable := range extraVariables { - env = append(env, variable) - } + env = append(env, extraVariables...) return env } diff --git a/go/logic/inspect.go b/go/logic/inspect.go index 0e0c2a3..e66d673 100644 --- a/go/logic/inspect.go +++ b/go/logic/inspect.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -283,7 +283,7 @@ func (this *Inspector) validateGrants() error { // It is entirely possible, for example, that the replication is using 'STATEMENT' // binlog format even as the variable says 'ROW' func (this *Inspector) restartReplication() error { - this.migrationContext.Log.Infof("Restarting replication on %s:%d to make sure binlog settings apply to replication thread", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + this.migrationContext.Log.Infof("Restarting replication on %s to make sure binlog settings apply to replication thread", this.connectionConfig.Key.String()) masterKey, _ := mysql.GetMasterKeyFromSlaveStatus(this.connectionConfig) if masterKey == nil { @@ -342,13 +342,13 @@ func (this *Inspector) validateBinlogs() error { return err } if !hasBinaryLogs { - return fmt.Errorf("%s:%d must have binary logs enabled", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + return fmt.Errorf("%s must have binary logs enabled", this.connectionConfig.Key.String()) } if this.migrationContext.RequiresBinlogFormatChange() { if !this.migrationContext.SwitchToRowBinlogFormat { - return fmt.Errorf("You must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s:%d doesn't have replicas", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + return fmt.Errorf("You must be using ROW binlog format. 
I can switch it for you, provided --switch-to-rbr and that %s doesn't have replicas", this.connectionConfig.Key.String()) } - query := fmt.Sprintf(`show /* gh-ost */ slave hosts`) + query := `show /* gh-ost */ slave hosts` countReplicas := 0 err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error { countReplicas++ @@ -358,21 +358,20 @@ func (this *Inspector) validateBinlogs() error { return err } if countReplicas > 0 { - return fmt.Errorf("%s:%d has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat) + return fmt.Errorf("%s has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat) } - this.migrationContext.Log.Infof("%s:%d has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat) + this.migrationContext.Log.Infof("%s has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat) } query = `select @@global.binlog_row_image` if err := this.db.QueryRow(query).Scan(&this.migrationContext.OriginalBinlogRowImage); err != nil { - // Only as of 5.6. We wish to support 5.5 as well - this.migrationContext.OriginalBinlogRowImage = "FULL" + return err } this.migrationContext.OriginalBinlogRowImage = strings.ToUpper(this.migrationContext.OriginalBinlogRowImage) if this.migrationContext.OriginalBinlogRowImage != "FULL" { - return fmt.Errorf("%s:%d has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogRowImage) + return fmt.Errorf("%s has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. 
You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogRowImage) } - this.migrationContext.Log.Infof("binary logs validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + this.migrationContext.Log.Infof("binary logs validated on %s", this.connectionConfig.Key.String()) return nil } @@ -385,25 +384,25 @@ func (this *Inspector) validateLogSlaveUpdates() error { } if logSlaveUpdates { - this.migrationContext.Log.Infof("log_slave_updates validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + this.migrationContext.Log.Infof("log_slave_updates validated on %s", this.connectionConfig.Key.String()) return nil } if this.migrationContext.IsTungsten { - this.migrationContext.Log.Warningf("log_slave_updates not found on %s:%d, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.String()) return nil } if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica { - return fmt.Errorf("%s:%d must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + return fmt.Errorf("%s must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.String()) } if this.migrationContext.InspectorIsAlsoApplier() { - this.migrationContext.Log.Warningf("log_slave_updates not found on %s:%d, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.String()) return nil } - return fmt.Errorf("%s:%d must have log_slave_updates enabled for executing migration", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + return fmt.Errorf("%s must have log_slave_updates enabled for executing migration", this.connectionConfig.Key.String()) } // validateTable makes sure the table we need to operate on actually exists @@ -805,5 +804,4 @@ func (this *Inspector) getReplicationLag() (replicationLag time.Duration, err er func (this *Inspector) Teardown() { this.db.Close() this.informationSchemaDb.Close() - return } diff --git a/go/logic/migrator.go b/go/logic/migrator.go index 75f934f..308eb31 100644 --- a/go/logic/migrator.go +++ b/go/logic/migrator.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -177,16 +177,6 @@ func (this *Migrator) retryOperationWithExponentialBackoff(operation func() erro return err } -// executeAndThrottleOnError executes a given function. If it errors, it -// throttles. -func (this *Migrator) executeAndThrottleOnError(operation func() error) (err error) { - if err := operation(); err != nil { - this.throttler.throttle(nil) - return err - } - return nil -} - // consumeRowCopyComplete blocks on the rowCopyComplete channel once, and then // consumes and drops any further incoming events that may be left hanging. func (this *Migrator) consumeRowCopyComplete() { @@ -826,57 +816,57 @@ func (this *Migrator) initiateStatus() error { // migration, and as response to the "status" interactive command. 
func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) { w := io.MultiWriter(writers...) - fmt.Fprintln(w, fmt.Sprintf("# Migrating %s.%s; Ghost table is %s.%s", + fmt.Fprintf(w, "# Migrating %s.%s; Ghost table is %s.%s\n", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName), sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.GetGhostTableName()), - )) - fmt.Fprintln(w, fmt.Sprintf("# Migrating %+v; inspecting %+v; executing on %+v", + ) + fmt.Fprintf(w, "# Migrating %+v; inspecting %+v; executing on %+v\n", *this.applier.connectionConfig.ImpliedKey, *this.inspector.connectionConfig.ImpliedKey, this.migrationContext.Hostname, - )) - fmt.Fprintln(w, fmt.Sprintf("# Migration started at %+v", + ) + fmt.Fprintf(w, "# Migration started at %+v\n", this.migrationContext.StartTime.Format(time.RubyDate), - )) + ) maxLoad := this.migrationContext.GetMaxLoad() criticalLoad := this.migrationContext.GetCriticalLoad() - fmt.Fprintln(w, fmt.Sprintf("# chunk-size: %+v; max-lag-millis: %+vms; dml-batch-size: %+v; max-load: %s; critical-load: %s; nice-ratio: %f", + fmt.Fprintf(w, "# chunk-size: %+v; max-lag-millis: %+vms; dml-batch-size: %+v; max-load: %s; critical-load: %s; nice-ratio: %f\n", atomic.LoadInt64(&this.migrationContext.ChunkSize), atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold), atomic.LoadInt64(&this.migrationContext.DMLBatchSize), maxLoad.String(), criticalLoad.String(), this.migrationContext.GetNiceRatio(), - )) + ) if this.migrationContext.ThrottleFlagFile != "" { setIndicator := "" if base.FileExists(this.migrationContext.ThrottleFlagFile) { setIndicator = "[set]" } - fmt.Fprintln(w, fmt.Sprintf("# throttle-flag-file: %+v %+v", + fmt.Fprintf(w, "# throttle-flag-file: %+v %+v\n", this.migrationContext.ThrottleFlagFile, setIndicator, - )) + ) } if this.migrationContext.ThrottleAdditionalFlagFile != "" { setIndicator := "" if base.FileExists(this.migrationContext.ThrottleAdditionalFlagFile) { setIndicator = "[set]" } - fmt.Fprintln(w, fmt.Sprintf("# throttle-additional-flag-file: %+v %+v", + fmt.Fprintf(w, "# throttle-additional-flag-file: %+v %+v\n", this.migrationContext.ThrottleAdditionalFlagFile, setIndicator, - )) + ) } if throttleQuery := this.migrationContext.GetThrottleQuery(); throttleQuery != "" { - fmt.Fprintln(w, fmt.Sprintf("# throttle-query: %+v", + fmt.Fprintf(w, "# throttle-query: %+v\n", throttleQuery, - )) + ) } if throttleControlReplicaKeys := this.migrationContext.GetThrottleControlReplicaKeys(); throttleControlReplicaKeys.Len() > 0 { - fmt.Fprintln(w, fmt.Sprintf("# throttle-control-replicas count: %+v", + fmt.Fprintf(w, "# throttle-control-replicas count: %+v\n", throttleControlReplicaKeys.Len(), - )) + ) } if this.migrationContext.PostponeCutOverFlagFile != "" { @@ -884,20 +874,20 @@ func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) { if base.FileExists(this.migrationContext.PostponeCutOverFlagFile) { setIndicator = "[set]" } - fmt.Fprintln(w, fmt.Sprintf("# postpone-cut-over-flag-file: %+v %+v", + fmt.Fprintf(w, "# postpone-cut-over-flag-file: %+v %+v\n", this.migrationContext.PostponeCutOverFlagFile, setIndicator, - )) + ) } if this.migrationContext.PanicFlagFile != "" { - fmt.Fprintln(w, fmt.Sprintf("# panic-flag-file: %+v", + fmt.Fprintf(w, "# panic-flag-file: %+v\n", this.migrationContext.PanicFlagFile, - )) + ) } - fmt.Fprintln(w, fmt.Sprintf("# Serving on unix socket: %+v", + fmt.Fprintf(w, "# Serving on unix 
socket: %+v\n", this.migrationContext.ServeSocketFile, - )) + ) if this.migrationContext.ServeTCPPort != 0 { - fmt.Fprintln(w, fmt.Sprintf("# Serving on TCP port: %+v", this.migrationContext.ServeTCPPort)) + fmt.Fprintf(w, "# Serving on TCP port: %+v\n", this.migrationContext.ServeTCPPort) } } @@ -1019,7 +1009,7 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) { w := io.MultiWriter(writers...) fmt.Fprintln(w, status) - if elapsedSeconds%60 == 0 { + if elapsedSeconds%this.migrationContext.HooksStatusIntervalSec == 0 { this.hooksExecutor.onStatus(status) } } @@ -1198,7 +1188,6 @@ func (this *Migrator) iterateChunks() error { // Enqueue copy operation; to be executed by executeWriteFuncs() this.copyRowsQueue <- copyRowsFunc } - return nil } func (this *Migrator) onApplyEventStruct(eventStruct *applyEventStruct) error { @@ -1304,7 +1293,6 @@ func (this *Migrator) executeWriteFuncs() error { } } } - return nil } // finalCleanup takes actions at very end of migration, dropping tables etc. diff --git a/go/logic/streamer.go b/go/logic/streamer.go index 22158b9..604f289 100644 --- a/go/logic/streamer.go +++ b/go/logic/streamer.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -220,5 +220,4 @@ func (this *EventsStreamer) Close() (err error) { func (this *EventsStreamer) Teardown() { this.db.Close() - return } diff --git a/go/logic/throttler.go b/go/logic/throttler.go index abe8669..9c120b3 100644 --- a/go/logic/throttler.go +++ b/go/logic/throttler.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ diff --git a/go/mysql/instance_key.go b/go/mysql/instance_key.go index eb108d8..679bdc9 100644 --- a/go/mysql/instance_key.go +++ b/go/mysql/instance_key.go @@ -1,5 +1,6 @@ /* Copyright 2015 Shlomi Noach, courtesy Booking.com + Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -12,15 +13,16 @@ import ( "strings" ) -const ( - DefaultInstancePort = 3306 -) +const DefaultInstancePort = 3306 var ( ipv4HostPortRegexp = regexp.MustCompile("^([^:]+):([0-9]+)$") ipv4HostRegexp = regexp.MustCompile("^([^:]+)$") - ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") // e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308 - ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$") // e.g. 2001:db8:1f70::999:de8:7648:6e8 + + // e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308 + ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") //nolint:gosimple + // e.g. 2001:db8:1f70::999:de8:7648:6e8 + ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$") ) // InstanceKey is an instance indicator, identified by hostname and port diff --git a/go/sql/builder.go b/go/sql/builder.go index 7fe366c..15199ff 100644 --- a/go/sql/builder.go +++ b/go/sql/builder.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. 
See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -33,7 +33,7 @@ func EscapeName(name string) string { } func buildColumnsPreparedValues(columns *ColumnList) []string { - values := make([]string, columns.Len(), columns.Len()) + values := make([]string, columns.Len()) for i, column := range columns.Columns() { var token string if column.timezoneConversion != nil { @@ -51,7 +51,7 @@ func buildColumnsPreparedValues(columns *ColumnList) []string { } func buildPreparedValues(length int) []string { - values := make([]string, length, length) + values := make([]string, length) for i := 0; i < length; i++ { values[i] = "?" } @@ -59,7 +59,7 @@ func buildPreparedValues(length int) []string { } func duplicateNames(names []string) []string { - duplicate := make([]string, len(names), len(names)) + duplicate := make([]string, len(names)) copy(duplicate, names) return duplicate } @@ -261,8 +261,8 @@ func BuildUniqueKeyRangeEndPreparedQueryViaOffset(databaseName, tableName string explodedArgs = append(explodedArgs, rangeExplodedArgs...) uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names()) - uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames)) - uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames)) + uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames)) + uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames)) for i, column := range uniqueKeyColumns.Columns() { uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i]) if column.Type == EnumColumnType { @@ -316,8 +316,8 @@ func BuildUniqueKeyRangeEndPreparedQueryViaTemptable(databaseName, tableName str explodedArgs = append(explodedArgs, rangeExplodedArgs...) uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names()) - uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames)) - uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames)) + uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames)) + uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames)) for i, column := range uniqueKeyColumns.Columns() { uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i]) if column.Type == EnumColumnType { @@ -368,7 +368,7 @@ func buildUniqueKeyMinMaxValuesPreparedQuery(databaseName, tableName string, uni tableName = EscapeName(tableName) uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names()) - uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames)) + uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames)) for i, column := range uniqueKeyColumns.Columns() { uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i]) if column.Type == EnumColumnType { diff --git a/localtests/datetime-submillis-zeroleading/ignore_versions b/localtests/datetime-submillis-zeroleading/ignore_versions deleted file mode 100644 index 7acd3f0..0000000 --- a/localtests/datetime-submillis-zeroleading/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5) diff --git a/localtests/datetime-submillis/ignore_versions b/localtests/datetime-submillis/ignore_versions deleted file mode 100644 index 7acd3f0..0000000 --- a/localtests/datetime-submillis/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5) diff --git a/localtests/datetime-to-timestamp-pk-fail/ignore_versions b/localtests/datetime-to-timestamp-pk-fail/ignore_versions deleted file mode 100644 index 7acd3f0..0000000 --- 
a/localtests/datetime-to-timestamp-pk-fail/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5) diff --git a/localtests/datetime/ignore_versions b/localtests/datetime/ignore_versions deleted file mode 100644 index 7acd3f0..0000000 --- a/localtests/datetime/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5) diff --git a/localtests/generated-columns-add57/create.sql b/localtests/generated-columns-add/create.sql similarity index 100% rename from localtests/generated-columns-add57/create.sql rename to localtests/generated-columns-add/create.sql diff --git a/localtests/generated-columns-add57/extra_args b/localtests/generated-columns-add/extra_args similarity index 100% rename from localtests/generated-columns-add57/extra_args rename to localtests/generated-columns-add/extra_args diff --git a/localtests/generated-columns-add57/ghost_columns b/localtests/generated-columns-add/ghost_columns similarity index 100% rename from localtests/generated-columns-add57/ghost_columns rename to localtests/generated-columns-add/ghost_columns diff --git a/localtests/generated-columns-add57/order_by b/localtests/generated-columns-add/order_by similarity index 100% rename from localtests/generated-columns-add57/order_by rename to localtests/generated-columns-add/order_by diff --git a/localtests/generated-columns-add57/orig_columns b/localtests/generated-columns-add/orig_columns similarity index 100% rename from localtests/generated-columns-add57/orig_columns rename to localtests/generated-columns-add/orig_columns diff --git a/localtests/generated-columns-add57/ignore_versions b/localtests/generated-columns-add57/ignore_versions deleted file mode 100644 index b6de5f8..0000000 --- a/localtests/generated-columns-add57/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5|5.6) diff --git a/localtests/generated-columns-rename57/create.sql b/localtests/generated-columns-rename/create.sql similarity index 100% rename from localtests/generated-columns-rename57/create.sql rename to localtests/generated-columns-rename/create.sql diff --git a/localtests/generated-columns-rename57/extra_args b/localtests/generated-columns-rename/extra_args similarity index 100% rename from localtests/generated-columns-rename57/extra_args rename to localtests/generated-columns-rename/extra_args diff --git a/localtests/generated-columns-rename57/ignore_versions b/localtests/generated-columns-rename57/ignore_versions deleted file mode 100644 index b6de5f8..0000000 --- a/localtests/generated-columns-rename57/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5|5.6) diff --git a/localtests/generated-columns57-unique/create.sql b/localtests/generated-columns-unique/create.sql similarity index 100% rename from localtests/generated-columns57-unique/create.sql rename to localtests/generated-columns-unique/create.sql diff --git a/localtests/generated-columns57/create.sql b/localtests/generated-columns/create.sql similarity index 100% rename from localtests/generated-columns57/create.sql rename to localtests/generated-columns/create.sql diff --git a/localtests/generated-columns57-unique/ignore_versions b/localtests/generated-columns57-unique/ignore_versions deleted file mode 100644 index b6de5f8..0000000 --- a/localtests/generated-columns57-unique/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5|5.6) diff --git a/localtests/generated-columns57/ignore_versions b/localtests/generated-columns57/ignore_versions deleted file mode 100644 index b6de5f8..0000000 --- a/localtests/generated-columns57/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5|5.6) diff --git 
a/localtests/geometry57/create.sql b/localtests/geometry/create.sql similarity index 100% rename from localtests/geometry57/create.sql rename to localtests/geometry/create.sql diff --git a/localtests/geometry57/ignore_versions b/localtests/geometry57/ignore_versions deleted file mode 100644 index b6de5f8..0000000 --- a/localtests/geometry57/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5|5.6) diff --git a/localtests/json57dml/create.sql b/localtests/json-dml/create.sql similarity index 100% rename from localtests/json57dml/create.sql rename to localtests/json-dml/create.sql diff --git a/localtests/json57/create.sql b/localtests/json/create.sql similarity index 100% rename from localtests/json57/create.sql rename to localtests/json/create.sql diff --git a/localtests/json57/ignore_versions b/localtests/json57/ignore_versions deleted file mode 100644 index b6de5f8..0000000 --- a/localtests/json57/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5|5.6) diff --git a/localtests/json57dml/ignore_versions b/localtests/json57dml/ignore_versions deleted file mode 100644 index b6de5f8..0000000 --- a/localtests/json57dml/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5|5.6) diff --git a/localtests/spatial57/create.sql b/localtests/spatial/create.sql similarity index 100% rename from localtests/spatial57/create.sql rename to localtests/spatial/create.sql diff --git a/localtests/spatial57/ignore_versions b/localtests/spatial57/ignore_versions deleted file mode 100644 index b6de5f8..0000000 --- a/localtests/spatial57/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5|5.6) diff --git a/localtests/swap-pk-uk/ignore_versions b/localtests/swap-pk-uk/ignore_versions deleted file mode 100644 index 7acd3f0..0000000 --- a/localtests/swap-pk-uk/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5) diff --git a/localtests/swap-uk-uk/ignore_versions b/localtests/swap-uk-uk/ignore_versions deleted file mode 100644 index 7acd3f0..0000000 --- a/localtests/swap-uk-uk/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5) diff --git a/localtests/timestamp-to-datetime/ignore_versions b/localtests/timestamp-to-datetime/ignore_versions deleted file mode 100644 index 7acd3f0..0000000 --- a/localtests/timestamp-to-datetime/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5) diff --git a/localtests/timestamp/ignore_versions b/localtests/timestamp/ignore_versions deleted file mode 100644 index 7acd3f0..0000000 --- a/localtests/timestamp/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5) diff --git a/localtests/tz-datetime-ts/ignore_versions b/localtests/tz-datetime-ts/ignore_versions deleted file mode 100644 index 7acd3f0..0000000 --- a/localtests/tz-datetime-ts/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5) diff --git a/localtests/tz/ignore_versions b/localtests/tz/ignore_versions deleted file mode 100644 index 7acd3f0..0000000 --- a/localtests/tz/ignore_versions +++ /dev/null @@ -1 +0,0 @@ -(5.5)