diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8d5a07b..0b83bd7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -5,7 +5,7 @@ on: [pull_request] jobs: build: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v2 diff --git a/.github/workflows/replica-tests.yml b/.github/workflows/replica-tests.yml index e28c2bc..f2a52ec 100644 --- a/.github/workflows/replica-tests.yml +++ b/.github/workflows/replica-tests.yml @@ -5,10 +5,10 @@ on: [pull_request] jobs: build: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 strategy: matrix: - version: [mysql-5.7.25,mysql-8.0.16] + version: [mysql-5.7.25,mysql-8.0.16,PerconaServer-8.0.21] steps: - uses: actions/checkout@v2 diff --git a/.golangci.yml b/.golangci.yml index 4621e5c..e4ee4ab 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -4,14 +4,19 @@ linters: disable: - errcheck enable: + - bodyclose + - containedctx - contextcheck + - dogsled - durationcheck - errname + - errorlint - execinquery - gofmt - ifshort - misspell - nilerr + - nilnil - noctx - nolintlint - nosprintfhostport @@ -19,6 +24,7 @@ linters: - rowserrcheck - sqlclosecheck - unconvert + - unparam - unused - wastedassign - whitespace diff --git a/doc/command-line-flags.md b/doc/command-line-flags.md index dc481d0..021462f 100644 --- a/doc/command-line-flags.md +++ b/doc/command-line-flags.md @@ -45,6 +45,22 @@ If you happen to _know_ your servers use RBR (Row Based Replication, i.e. `binlo Skipping this step means `gh-ost` would not need the `SUPER` privilege in order to operate. You may want to use this on Amazon RDS. +### attempt-instant-ddl + +MySQL 8.0 supports "instant DDL" for some operations. If an alter statement can be completed with instant DDL, only a metadata change is required internally. 
Instant operations include: + +- Adding a column +- Dropping a column +- Dropping an index +- Extending a varchar column +- Adding a virtual generated column + +It is not reliable to parse the `ALTER` statement to determine if it is instant or not. This is because the table might be in an older row format, or have some other incompatibility that is difficult to identify. + +`--attempt-instant-ddl` is disabled by default, but the risks of enabling it are relatively minor: `gh-ost` may need to acquire a metadata lock at the start of the operation. This is not a problem for most scenarios, but it could be a problem for users who start the DDL during a period with long-running transactions. + +`gh-ost` will automatically fall back to the normal DDL process if the attempt to use instant DDL is unsuccessful. + ### conf `--conf=/path/to/my.cnf`: file where credentials are specified. Should be in (or contain) the following format: @@ -230,6 +246,18 @@ Allows `gh-ost` to connect to the MySQL servers using encrypted connections, but `--ssl-key=/path/to/ssl-key.key`: SSL private key file (in PEM format). +### storage-engine +Default is `innodb`, and `rocksdb` support is currently experimental. InnoDB and RocksDB are both transactional engines, supporting both shared and exclusive row locks. +But RocksDB currently lacks support for a few features compared to InnoDB: +- Gap Locks +- Foreign Key +- Generated Columns +- Spatial +- Geometry + +When `--storage-engine=rocksdb`, `gh-ost` will make some necessary changes (e.g. setting the isolation level to `READ_COMMITTED`) to support RocksDB. + ### test-on-replica Issue the migration on a replica; do not modify data on master. Useful for validating, testing and benchmarking. 
See [`testing-on-replica`](testing-on-replica.md) diff --git a/doc/shared-key.md b/doc/shared-key.md index c7f24cc..3dfa39b 100644 --- a/doc/shared-key.md +++ b/doc/shared-key.md @@ -29,7 +29,7 @@ CREATE TABLE tbl ( (This is also the definition of the _ghost_ table, except that that table would be called `_tbl_gho`). -In this migration, the _before_ and _after_ versions contain the same unique not-null key (the PRIMARY KEY). To run this migration, `gh-ost` would iterate through the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` in primary key order, while also applying the binlog event writes from `tble` onto `_tbl_gho`. +In this migration, the _before_ and _after_ versions contain the same unique not-null key (the PRIMARY KEY). To run this migration, `gh-ost` would iterate through the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` in primary key order, while also applying the binlog event writes from `tbl` onto `_tbl_gho`. The applying of the binlog events is what requires the shared unique key. For example, an `UPDATE` statement to `tbl` translates to a `REPLACE` statement which `gh-ost` applies to `_tbl_gho`. A `REPLACE` statement expects to insert or replace an existing row based on its row's values and the table's unique key constraints. In particular, if inserting that row would result in a unique key violation (e.g., a row with that primary key already exists), it would _replace_ that existing row with the new values. 
diff --git a/go/base/context.go b/go/base/context.go index f3fe712..e3472f5 100644 --- a/go/base/context.go +++ b/go/base/context.go @@ -101,6 +101,7 @@ type MigrationContext struct { AliyunRDS bool GoogleCloudPlatform bool AzureMySQL bool + AttemptInstantDDL bool config ContextConfig configMutex *sync.Mutex @@ -289,6 +290,19 @@ func NewMigrationContext() *MigrationContext { } } +func (this *MigrationContext) SetConnectionConfig(storageEngine string) error { + var transactionIsolation string + switch storageEngine { + case "rocksdb": + transactionIsolation = "READ-COMMITTED" + default: + transactionIsolation = "REPEATABLE-READ" + } + this.InspectorConnectionConfig.TransactionIsolation = transactionIsolation + this.ApplierConnectionConfig.TransactionIsolation = transactionIsolation + return nil +} + func getSafeTableName(baseName string, suffix string) string { name := fmt.Sprintf("_%s_%s", baseName, suffix) if len(name) <= mysql.MaxTableNameLength { @@ -427,6 +441,10 @@ func (this *MigrationContext) IsTransactionalTable() bool { { return true } + case "rocksdb": + { + return true + } } return false } @@ -858,7 +876,7 @@ func (this *MigrationContext) ReadConfigFile() error { if cfg.Section("osc").HasKey("chunk_size") { this.config.Osc.Chunk_Size, err = cfg.Section("osc").Key("chunk_size").Int64() if err != nil { - return fmt.Errorf("Unable to read osc chunk size: %s", err.Error()) + return fmt.Errorf("Unable to read osc chunk size: %w", err) } } @@ -873,7 +891,7 @@ func (this *MigrationContext) ReadConfigFile() error { if cfg.Section("osc").HasKey("max_lag_millis") { this.config.Osc.Max_Lag_Millis, err = cfg.Section("osc").Key("max_lag_millis").Int64() if err != nil { - return fmt.Errorf("Unable to read max lag millis: %s", err.Error()) + return fmt.Errorf("Unable to read max lag millis: %w", err) } } diff --git a/go/cmd/gh-ost/main.go b/go/cmd/gh-ost/main.go index b99e70b..3daf244 100644 --- a/go/cmd/gh-ost/main.go +++ b/go/cmd/gh-ost/main.go @@ -67,6 +67,9 @@ func 
main() { flag.StringVar(&migrationContext.DatabaseName, "database", "", "database name (mandatory)") flag.StringVar(&migrationContext.OriginalTableName, "table", "", "table name (mandatory)") flag.StringVar(&migrationContext.AlterStatement, "alter", "", "alter statement (mandatory)") + flag.BoolVar(&migrationContext.AttemptInstantDDL, "attempt-instant-ddl", false, "Attempt to use instant DDL for this migration first") + storageEngine := flag.String("storage-engine", "innodb", "Specify table storage engine (default: 'innodb'). When 'rocksdb': the session transaction isolation level is changed from REPEATABLE_READ to READ_COMMITTED.") + flag.BoolVar(&migrationContext.CountTableRows, "exact-rowcount", false, "actually count table rows as opposed to estimate them (results in more accurate progress estimation)") flag.BoolVar(&migrationContext.ConcurrentCountTableRows, "concurrent-rowcount", true, "(with --exact-rowcount), when true (default): count rows after row-copy begins, concurrently, and adjust row estimate later on; when false: first count rows, then start row copy") flag.BoolVar(&migrationContext.AllowedRunningOnMaster, "allow-on-master", false, "allow this migration to run directly on master. 
Preferably it would run on a replica") @@ -180,6 +183,10 @@ func main() { migrationContext.Log.SetLevel(log.ERROR) } + if err := migrationContext.SetConnectionConfig(*storageEngine); err != nil { + migrationContext.Log.Fatale(err) + } + if migrationContext.AlterStatement == "" { log.Fatal("--alter must be provided and statement must not be empty") } @@ -245,6 +252,9 @@ func main() { if *replicationLagQuery != "" { migrationContext.Log.Warning("--replication-lag-query is deprecated") } + if *storageEngine == "rocksdb" { + migrationContext.Log.Warning("RocksDB storage engine support is experimental") + } switch *cutOver { case "atomic", "default", "": diff --git a/go/logic/applier.go b/go/logic/applier.go index 89e1995..ad6368e 100644 --- a/go/logic/applier.go +++ b/go/logic/applier.go @@ -135,6 +135,16 @@ func (this *Applier) generateSqlModeQuery() string { return fmt.Sprintf("sql_mode = %s", sqlModeQuery) } +// generateInstantDDLQuery returns the SQL for this ALTER operation +// with an INSTANT assertion (requires MySQL 8.0+) +func (this *Applier) generateInstantDDLQuery() string { + return fmt.Sprintf(`ALTER /* gh-ost */ TABLE %s.%s %s, ALGORITHM=INSTANT`, + sql.EscapeName(this.migrationContext.DatabaseName), + sql.EscapeName(this.migrationContext.OriginalTableName), + this.migrationContext.AlterStatementOptions, + ) +} + // readTableColumns reads table columns on applier func (this *Applier) readTableColumns() (err error) { this.migrationContext.Log.Infof("Examining table structure on applier") @@ -188,6 +198,27 @@ func (this *Applier) ValidateOrDropExistingTables() error { return nil } +// AttemptInstantDDL attempts to use instant DDL (from MySQL 8.0, and earlier in Aurora and some others). +// If successful, the operation is only a meta-data change so a lot of time is saved! +// The risk of attempting to instant DDL when not supported is that a metadata lock may be acquired. 
+// This is minor, since gh-ost will eventually require a metadata lock anyway, but at the cut-over stage. +// Instant operations include: +// - Adding a column +// - Dropping a column +// - Dropping an index +// - Extending a VARCHAR column +// - Adding a virtual generated column +// It is not reliable to parse the `alter` statement to determine if it is instant or not. +// This is because the table might be in an older row format, or have some other incompatibility +// that is difficult to identify. +func (this *Applier) AttemptInstantDDL() error { + query := this.generateInstantDDLQuery() + this.migrationContext.Log.Infof("INSTANT DDL query is: %s", query) + // We don't need a trx, because for instant DDL the SQL mode doesn't matter. + _, err := this.db.Exec(query) + return err +} + // CreateGhostTable creates the ghost table on the applier host func (this *Applier) CreateGhostTable() error { query := fmt.Sprintf(`create /* gh-ost */ table %s.%s like %s.%s`, @@ -1134,7 +1165,7 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) } result, err := tx.Exec(buildResult.query, buildResult.args...) 
if err != nil { - err = fmt.Errorf("%s; query=%s; args=%+v", err.Error(), buildResult.query, buildResult.args) + err = fmt.Errorf("%w; query=%s; args=%+v", err, buildResult.query, buildResult.args) return rollback(err) } diff --git a/go/logic/applier_test.go b/go/logic/applier_test.go index a2c1414..a356351 100644 --- a/go/logic/applier_test.go +++ b/go/logic/applier_test.go @@ -170,3 +170,16 @@ func TestApplierBuildDMLEventQuery(t *testing.T) { test.S(t).ExpectEquals(res[0].args[3], 42) }) } + +func TestApplierInstantDDL(t *testing.T) { + migrationContext := base.NewMigrationContext() + migrationContext.DatabaseName = "test" + migrationContext.OriginalTableName = "mytable" + migrationContext.AlterStatementOptions = "ADD INDEX (foo)" + applier := NewApplier(migrationContext) + + t.Run("instantDDLstmt", func(t *testing.T) { + stmt := applier.generateInstantDDLQuery() + test.S(t).ExpectEquals(stmt, "ALTER /* gh-ost */ TABLE `test`.`mytable` ADD INDEX (foo), ALGORITHM=INSTANT") + }) +} diff --git a/go/logic/hooks.go b/go/logic/hooks.go index 0ff296d..2543f8e 100644 --- a/go/logic/hooks.go +++ b/go/logic/hooks.go @@ -7,6 +7,7 @@ package logic import ( "fmt" + "io" "os" "os/exec" "path/filepath" @@ -34,18 +35,16 @@ const ( type HooksExecutor struct { migrationContext *base.MigrationContext + writer io.Writer } func NewHooksExecutor(migrationContext *base.MigrationContext) *HooksExecutor { return &HooksExecutor{ migrationContext: migrationContext, + writer: os.Stderr, } } -func (this *HooksExecutor) initHooks() error { - return nil -} - func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) []string { env := os.Environ() env = append(env, fmt.Sprintf("GH_OST_DATABASE_NAME=%s", this.migrationContext.DatabaseName)) @@ -76,13 +75,13 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [ } // executeHook executes a command, and sets relevant environment variables -// combined output & error are printed to gh-ost's standard 
error. +// combined output & error are printed to the configured writer. func (this *HooksExecutor) executeHook(hook string, extraVariables ...string) error { cmd := exec.Command(hook) cmd.Env = this.applyEnvironmentVariables(extraVariables...) combinedOutput, err := cmd.CombinedOutput() - fmt.Fprintln(os.Stderr, string(combinedOutput)) + fmt.Fprintln(this.writer, string(combinedOutput)) return log.Errore(err) } diff --git a/go/logic/hooks_test.go b/go/logic/hooks_test.go new file mode 100644 index 0000000..3b28afe --- /dev/null +++ b/go/logic/hooks_test.go @@ -0,0 +1,113 @@ +/* + Copyright 2022 GitHub Inc. + See https://github.com/github/gh-ost/blob/master/LICENSE +*/ + +package logic + +import ( + "bufio" + "bytes" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "testing" + "time" + + "github.com/openark/golib/tests" + + "github.com/github/gh-ost/go/base" +) + +func TestHooksExecutorExecuteHooks(t *testing.T) { + migrationContext := base.NewMigrationContext() + migrationContext.AlterStatement = "ENGINE=InnoDB" + migrationContext.DatabaseName = "test" + migrationContext.Hostname = "test.example.com" + migrationContext.OriginalTableName = "tablename" + migrationContext.RowsDeltaEstimate = 1 + migrationContext.RowsEstimate = 122 + migrationContext.TotalRowsCopied = 123456 + migrationContext.SetETADuration(time.Minute) + migrationContext.SetProgressPct(50) + hooksExecutor := NewHooksExecutor(migrationContext) + + writeTmpHookFunc := func(testName, hookName, script string) (path string, err error) { + if path, err = os.MkdirTemp("", testName); err != nil { + return path, err + } + err = os.WriteFile(filepath.Join(path, hookName), []byte(script), 0777) + return path, err + } + + t.Run("does-not-exist", func(t *testing.T) { + migrationContext.HooksPath = "/does/not/exist" + tests.S(t).ExpectNil(hooksExecutor.executeHooks("test-hook")) + }) + + t.Run("failed", func(t *testing.T) { + var err error + if migrationContext.HooksPath, err = writeTmpHookFunc( + 
"TestHooksExecutorExecuteHooks-failed", + "failed-hook", + "#!/bin/sh\nexit 1", + ); err != nil { + panic(err) + } + defer os.RemoveAll(migrationContext.HooksPath) + tests.S(t).ExpectNotNil(hooksExecutor.executeHooks("failed-hook")) + }) + + t.Run("success", func(t *testing.T) { + var err error + if migrationContext.HooksPath, err = writeTmpHookFunc( + "TestHooksExecutorExecuteHooks-success", + "success-hook", + "#!/bin/sh\nenv", + ); err != nil { + panic(err) + } + defer os.RemoveAll(migrationContext.HooksPath) + + var buf bytes.Buffer + hooksExecutor.writer = &buf + tests.S(t).ExpectNil(hooksExecutor.executeHooks("success-hook", "TEST="+t.Name())) + + scanner := bufio.NewScanner(&buf) + for scanner.Scan() { + split := strings.SplitN(scanner.Text(), "=", 2) + switch split[0] { + case "GH_OST_COPIED_ROWS": + copiedRows, _ := strconv.ParseInt(split[1], 10, 64) + tests.S(t).ExpectEquals(copiedRows, migrationContext.TotalRowsCopied) + case "GH_OST_DATABASE_NAME": + tests.S(t).ExpectEquals(split[1], migrationContext.DatabaseName) + case "GH_OST_DDL": + tests.S(t).ExpectEquals(split[1], migrationContext.AlterStatement) + case "GH_OST_DRY_RUN": + tests.S(t).ExpectEquals(split[1], "false") + case "GH_OST_ESTIMATED_ROWS": + estimatedRows, _ := strconv.ParseInt(split[1], 10, 64) + tests.S(t).ExpectEquals(estimatedRows, int64(123)) + case "GH_OST_ETA_SECONDS": + etaSeconds, _ := strconv.ParseInt(split[1], 10, 64) + tests.S(t).ExpectEquals(etaSeconds, int64(60)) + case "GH_OST_EXECUTING_HOST": + tests.S(t).ExpectEquals(split[1], migrationContext.Hostname) + case "GH_OST_GHOST_TABLE_NAME": + tests.S(t).ExpectEquals(split[1], fmt.Sprintf("_%s_gho", migrationContext.OriginalTableName)) + case "GH_OST_OLD_TABLE_NAME": + tests.S(t).ExpectEquals(split[1], fmt.Sprintf("_%s_del", migrationContext.OriginalTableName)) + case "GH_OST_PROGRESS": + progress, _ := strconv.ParseFloat(split[1], 64) + tests.S(t).ExpectEquals(progress, 50.0) + case "GH_OST_TABLE_NAME": + 
tests.S(t).ExpectEquals(split[1], migrationContext.OriginalTableName) + case "TEST": + tests.S(t).ExpectEquals(split[1], t.Name()) + } + } + }) +} diff --git a/go/logic/inspect.go b/go/logic/inspect.go index f548624..c618a6b 100644 --- a/go/logic/inspect.go +++ b/go/logic/inspect.go @@ -8,6 +8,7 @@ package logic import ( "context" gosql "database/sql" + "errors" "fmt" "reflect" "strings" @@ -555,13 +556,11 @@ func (this *Inspector) CountTableRows(ctx context.Context) error { query := fmt.Sprintf(`select /* gh-ost */ count(*) as count_rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) var rowsEstimate int64 if err := conn.QueryRowContext(ctx, query).Scan(&rowsEstimate); err != nil { - switch err { - case context.Canceled, context.DeadlineExceeded: + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { this.migrationContext.Log.Infof("exact row count cancelled (%s), likely because I'm about to cut over. I'm going to kill that query.", ctx.Err()) return mysql.Kill(this.db, connectionID) - default: - return err } + return err } // row count query finished. 
nil out the cancel func, so the main migration thread diff --git a/go/logic/migrator.go b/go/logic/migrator.go index 865814d..a102188 100644 --- a/go/logic/migrator.go +++ b/go/logic/migrator.go @@ -98,6 +98,7 @@ type Migrator struct { func NewMigrator(context *base.MigrationContext, appVersion string) *Migrator { migrator := &Migrator{ appVersion: appVersion, + hooksExecutor: NewHooksExecutor(context), migrationContext: context, parser: sql.NewAlterTableParser(), ghostTableMigrated: make(chan bool), @@ -113,15 +114,6 @@ func NewMigrator(context *base.MigrationContext, appVersion string) *Migrator { return migrator } -// initiateHooksExecutor -func (this *Migrator) initiateHooksExecutor() (err error) { - this.hooksExecutor = NewHooksExecutor(this.migrationContext) - if err := this.hooksExecutor.initHooks(); err != nil { - return err - } - return nil -} - // sleepWhileTrue sleeps indefinitely until the given function returns 'false' // (or fails with error) func (this *Migrator) sleepWhileTrue(operation func() (bool, error)) error { @@ -342,9 +334,6 @@ func (this *Migrator) Migrate() (err error) { go this.listenOnPanicAbort() - if err := this.initiateHooksExecutor(); err != nil { - return err - } if err := this.hooksExecutor.onStartup(); err != nil { return err } @@ -371,6 +360,17 @@ func (this *Migrator) Migrate() (err error) { if err := this.createFlagFiles(); err != nil { return err } + // In MySQL 8.0 (and possibly earlier) some DDL statements can be applied instantly. + // Attempt to do this if AttemptInstantDDL is set. + if this.migrationContext.AttemptInstantDDL { + this.migrationContext.Log.Infof("Attempting to execute alter with ALGORITHM=INSTANT") + if err := this.applier.AttemptInstantDDL(); err == nil { + this.migrationContext.Log.Infof("Success! 
table %s.%s migrated instantly", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) + return nil + } else { + this.migrationContext.Log.Infof("ALGORITHM=INSTANT not supported for this operation, proceeding with original algorithm: %s", err) + } + } initialLag, _ := this.inspector.getReplicationLag() this.migrationContext.Log.Infof("Waiting for ghost table to be migrated. Current lag is %+v", initialLag) @@ -402,9 +402,9 @@ func (this *Migrator) Migrate() (err error) { if err := this.applier.ReadMigrationRangeValues(); err != nil { return err } - if err := this.initiateThrottler(); err != nil { - return err - } + + this.initiateThrottler() + if err := this.hooksExecutor.onBeforeRowCopy(); err != nil { return err } @@ -1047,6 +1047,7 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) { ) w := io.MultiWriter(writers...) fmt.Fprintln(w, status) + this.migrationContext.Log.Infof(status) hooksStatusIntervalSec := this.migrationContext.HooksStatusIntervalSec if hooksStatusIntervalSec > 0 && elapsedSeconds%hooksStatusIntervalSec == 0 { @@ -1107,7 +1108,7 @@ func (this *Migrator) addDMLEventsListener() error { } // initiateThrottler kicks in the throttling collection and the throttling checks. 
-func (this *Migrator) initiateThrottler() error { +func (this *Migrator) initiateThrottler() { this.throttler = NewThrottler(this.migrationContext, this.applier, this.inspector, this.appVersion) go this.throttler.initiateThrottlerCollection(this.firstThrottlingCollected) @@ -1117,8 +1118,6 @@ func (this *Migrator) initiateThrottler() error { <-this.firstThrottlingCollected // other, general metrics this.migrationContext.Log.Infof("First throttle metrics collected") go this.throttler.initiateThrottlerChecks() - - return nil } func (this *Migrator) initiateApplier() error { diff --git a/go/logic/throttler.go b/go/logic/throttler.go index 1e7bc97..9c8dcfc 100644 --- a/go/logic/throttler.go +++ b/go/logic/throttler.go @@ -308,6 +308,8 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<- if err != nil { return false, err } + defer resp.Body.Close() + atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(resp.StatusCode)) return false, nil } diff --git a/go/mysql/connection.go b/go/mysql/connection.go index 6a5c890..6250925 100644 --- a/go/mysql/connection.go +++ b/go/mysql/connection.go @@ -18,18 +18,18 @@ import ( ) const ( - transactionIsolation = "REPEATABLE-READ" - TLS_CONFIG_KEY = "ghost" + TLS_CONFIG_KEY = "ghost" ) // ConnectionConfig is the minimal configuration required to connect to a MySQL server type ConnectionConfig struct { - Key InstanceKey - User string - Password string - ImpliedKey *InstanceKey - tlsConfig *tls.Config - Timeout float64 + Key InstanceKey + User string + Password string + ImpliedKey *InstanceKey + tlsConfig *tls.Config + Timeout float64 + TransactionIsolation string } func NewConnectionConfig() *ConnectionConfig { @@ -43,11 +43,12 @@ func NewConnectionConfig() *ConnectionConfig { // DuplicateCredentials creates a new connection config with given key and with same credentials as this config func (this *ConnectionConfig) DuplicateCredentials(key InstanceKey) *ConnectionConfig { config := 
&ConnectionConfig{ - Key: key, - User: this.User, - Password: this.Password, - tlsConfig: this.tlsConfig, - Timeout: this.Timeout, + Key: key, + User: this.User, + Password: this.Password, + tlsConfig: this.tlsConfig, + Timeout: this.Timeout, + TransactionIsolation: this.TransactionIsolation, } config.ImpliedKey = &config.Key return config @@ -126,7 +127,7 @@ func (this *ConnectionConfig) GetDBUri(databaseName string) string { "charset=utf8mb4,utf8,latin1", "interpolateParams=true", fmt.Sprintf("tls=%s", tlsOption), - fmt.Sprintf("transaction_isolation=%q", transactionIsolation), + fmt.Sprintf("transaction_isolation=%q", this.TransactionIsolation), fmt.Sprintf("timeout=%fs", this.Timeout), fmt.Sprintf("readTimeout=%fs", this.Timeout), fmt.Sprintf("writeTimeout=%fs", this.Timeout), diff --git a/go/mysql/connection_test.go b/go/mysql/connection_test.go index 390774c..5667235 100644 --- a/go/mysql/connection_test.go +++ b/go/mysql/connection_test.go @@ -13,6 +13,10 @@ import ( test "github.com/openark/golib/tests" ) +const ( + transactionIsolation = "REPEATABLE-READ" +) + func init() { log.SetLevel(log.ERROR) } @@ -25,6 +29,7 @@ func TestNewConnectionConfig(t *testing.T) { test.S(t).ExpectEquals(c.ImpliedKey.Port, 0) test.S(t).ExpectEquals(c.User, "") test.S(t).ExpectEquals(c.Password, "") + test.S(t).ExpectEquals(c.TransactionIsolation, "") } func TestDuplicateCredentials(t *testing.T) { @@ -36,6 +41,7 @@ func TestDuplicateCredentials(t *testing.T) { InsecureSkipVerify: true, ServerName: "feathers", } + c.TransactionIsolation = transactionIsolation dup := c.DuplicateCredentials(InstanceKey{Hostname: "otherhost", Port: 3310}) test.S(t).ExpectEquals(dup.Key.Hostname, "otherhost") @@ -45,6 +51,7 @@ func TestDuplicateCredentials(t *testing.T) { test.S(t).ExpectEquals(dup.User, "gromit") test.S(t).ExpectEquals(dup.Password, "penguin") test.S(t).ExpectEquals(dup.tlsConfig, c.tlsConfig) + test.S(t).ExpectEquals(dup.TransactionIsolation, c.TransactionIsolation) } func 
TestDuplicate(t *testing.T) { @@ -52,6 +59,7 @@ func TestDuplicate(t *testing.T) { c.Key = InstanceKey{Hostname: "myhost", Port: 3306} c.User = "gromit" c.Password = "penguin" + c.TransactionIsolation = transactionIsolation dup := c.Duplicate() test.S(t).ExpectEquals(dup.Key.Hostname, "myhost") @@ -60,6 +68,7 @@ func TestDuplicate(t *testing.T) { test.S(t).ExpectEquals(dup.ImpliedKey.Port, 3306) test.S(t).ExpectEquals(dup.User, "gromit") test.S(t).ExpectEquals(dup.Password, "penguin") + test.S(t).ExpectEquals(dup.TransactionIsolation, transactionIsolation) } func TestGetDBUri(t *testing.T) { @@ -68,6 +77,7 @@ func TestGetDBUri(t *testing.T) { c.User = "gromit" c.Password = "penguin" c.Timeout = 1.2345 + c.TransactionIsolation = transactionIsolation uri := c.GetDBUri("test") test.S(t).ExpectEquals(uri, `gromit:penguin@tcp(myhost:3306)/test?autocommit=true&charset=utf8mb4,utf8,latin1&interpolateParams=true&tls=false&transaction_isolation="REPEATABLE-READ"&timeout=1.234500s&readTimeout=1.234500s&writeTimeout=1.234500s`) @@ -80,6 +90,7 @@ func TestGetDBUriWithTLSSetup(t *testing.T) { c.Password = "penguin" c.Timeout = 1.2345 c.tlsConfig = &tls.Config{} + c.TransactionIsolation = transactionIsolation uri := c.GetDBUri("test") test.S(t).ExpectEquals(uri, `gromit:penguin@tcp(myhost:3306)/test?autocommit=true&charset=utf8mb4,utf8,latin1&interpolateParams=true&tls=ghost&transaction_isolation="REPEATABLE-READ"&timeout=1.234500s&readTimeout=1.234500s&writeTimeout=1.234500s`) diff --git a/go/sql/parser.go b/go/sql/parser.go index a72af33..2ddc60f 100644 --- a/go/sql/parser.go +++ b/go/sql/parser.go @@ -62,7 +62,7 @@ func NewParserFromAlterStatement(alterStatement string) *AlterTableParser { return parser } -func (this *AlterTableParser) tokenizeAlterStatement(alterStatement string) (tokens []string, err error) { +func (this *AlterTableParser) tokenizeAlterStatement(alterStatement string) (tokens []string) { terminatingQuote := rune(0) f := func(c rune) bool { switch { @@ -86,7 
+86,7 @@ func (this *AlterTableParser) tokenizeAlterStatement(alterStatement string) (tok for i := range tokens { tokens[i] = strings.TrimSpace(tokens[i]) } - return tokens, nil + return tokens } func (this *AlterTableParser) sanitizeQuotesFromAlterStatement(alterStatement string) (strippedStatement string) { @@ -95,7 +95,7 @@ func (this *AlterTableParser) sanitizeQuotesFromAlterStatement(alterStatement st return strippedStatement } -func (this *AlterTableParser) parseAlterToken(alterToken string) (err error) { +func (this *AlterTableParser) parseAlterToken(alterToken string) { { // rename allStringSubmatch := renameColumnRegexp.FindAllStringSubmatch(alterToken, -1) @@ -131,7 +131,6 @@ func (this *AlterTableParser) parseAlterToken(alterToken string) (err error) { this.isAutoIncrementDefined = true } } - return nil } func (this *AlterTableParser) ParseAlterStatement(alterStatement string) (err error) { @@ -151,8 +150,7 @@ func (this *AlterTableParser) ParseAlterStatement(alterStatement string) (err er break } } - alterTokens, _ := this.tokenizeAlterStatement(this.alterStatementOptions) - for _, alterToken := range alterTokens { + for _, alterToken := range this.tokenizeAlterStatement(this.alterStatementOptions) { alterToken = this.sanitizeQuotesFromAlterStatement(alterToken) this.parseAlterToken(alterToken) this.alterTokens = append(this.alterTokens, alterToken) diff --git a/go/sql/parser_test.go b/go/sql/parser_test.go index 79a9b7b..df92842 100644 --- a/go/sql/parser_test.go +++ b/go/sql/parser_test.go @@ -99,37 +99,37 @@ func TestTokenizeAlterStatement(t *testing.T) { parser := NewAlterTableParser() { alterStatement := "add column t int" - tokens, _ := parser.tokenizeAlterStatement(alterStatement) + tokens := parser.tokenizeAlterStatement(alterStatement) test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int"})) } { alterStatement := "add column t int, change column i int" - tokens, _ := parser.tokenizeAlterStatement(alterStatement) + tokens := 
parser.tokenizeAlterStatement(alterStatement) test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "change column i int"})) } { alterStatement := "add column t int, change column i int 'some comment'" - tokens, _ := parser.tokenizeAlterStatement(alterStatement) + tokens := parser.tokenizeAlterStatement(alterStatement) test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "change column i int 'some comment'"})) } { alterStatement := "add column t int, change column i int 'some comment, with comma'" - tokens, _ := parser.tokenizeAlterStatement(alterStatement) + tokens := parser.tokenizeAlterStatement(alterStatement) test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "change column i int 'some comment, with comma'"})) } { alterStatement := "add column t int, add column d decimal(10,2)" - tokens, _ := parser.tokenizeAlterStatement(alterStatement) + tokens := parser.tokenizeAlterStatement(alterStatement) test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "add column d decimal(10,2)"})) } { alterStatement := "add column t int, add column e enum('a','b','c')" - tokens, _ := parser.tokenizeAlterStatement(alterStatement) + tokens := parser.tokenizeAlterStatement(alterStatement) test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "add column e enum('a','b','c')"})) } { alterStatement := "add column t int(11), add column e enum('a','b','c')" - tokens, _ := parser.tokenizeAlterStatement(alterStatement) + tokens := parser.tokenizeAlterStatement(alterStatement) test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int(11)", "add column e enum('a','b','c')"})) } } diff --git a/localtests/attempt-instant-ddl/create.sql b/localtests/attempt-instant-ddl/create.sql new file mode 100644 index 0000000..9371238 --- /dev/null +++ b/localtests/attempt-instant-ddl/create.sql @@ -0,0 +1,13 @@ +drop table if exists gh_ost_test; +create table gh_ost_test ( + 
id int auto_increment, + i int not null, + color varchar(32), + primary key(id) +) auto_increment=1; + +drop event if exists gh_ost_test; + +insert into gh_ost_test values (null, 11, 'red'); +insert into gh_ost_test values (null, 13, 'green'); +insert into gh_ost_test values (null, 17, 'blue'); diff --git a/localtests/attempt-instant-ddl/extra_args b/localtests/attempt-instant-ddl/extra_args new file mode 100644 index 0000000..70c8a52 --- /dev/null +++ b/localtests/attempt-instant-ddl/extra_args @@ -0,0 +1 @@ +--attempt-instant-ddl diff --git a/localtests/convert-utf8mb4/create.sql b/localtests/convert-utf8mb4/create.sql index 6d83b31..e35e688 100644 --- a/localtests/convert-utf8mb4/create.sql +++ b/localtests/convert-utf8mb4/create.sql @@ -7,9 +7,6 @@ create table gh_ost_test ( primary key(id) ) auto_increment=1; -insert into gh_ost_test values (null, 'átesting', '', ''); - - insert into gh_ost_test values (null, 'Hello world, Καλημέρα κόσμε, コンニチハ', 'átesting0', 'initial'); drop event if exists gh_ost_test; diff --git a/localtests/discard-fk/ignore_versions b/localtests/discard-fk/ignore_versions new file mode 100644 index 0000000..cf02abe --- /dev/null +++ b/localtests/discard-fk/ignore_versions @@ -0,0 +1 @@ +Percona \ No newline at end of file diff --git a/localtests/fail-fk-parent/ignore_versions b/localtests/fail-fk-parent/ignore_versions new file mode 100644 index 0000000..cf02abe --- /dev/null +++ b/localtests/fail-fk-parent/ignore_versions @@ -0,0 +1 @@ +Percona \ No newline at end of file diff --git a/localtests/fail-fk/ignore_versions b/localtests/fail-fk/ignore_versions new file mode 100644 index 0000000..cf02abe --- /dev/null +++ b/localtests/fail-fk/ignore_versions @@ -0,0 +1 @@ +Percona \ No newline at end of file diff --git a/localtests/generated-columns-add/ignore_versions b/localtests/generated-columns-add/ignore_versions new file mode 100644 index 0000000..cf02abe --- /dev/null +++ b/localtests/generated-columns-add/ignore_versions @@ -0,0 +1 @@ 
+Percona \ No newline at end of file diff --git a/localtests/generated-columns-rename/ignore_versions b/localtests/generated-columns-rename/ignore_versions new file mode 100644 index 0000000..cf02abe --- /dev/null +++ b/localtests/generated-columns-rename/ignore_versions @@ -0,0 +1 @@ +Percona \ No newline at end of file diff --git a/localtests/generated-columns-unique/ignore_versions b/localtests/generated-columns-unique/ignore_versions new file mode 100644 index 0000000..cf02abe --- /dev/null +++ b/localtests/generated-columns-unique/ignore_versions @@ -0,0 +1 @@ +Percona \ No newline at end of file diff --git a/localtests/generated-columns/ignore_versions b/localtests/generated-columns/ignore_versions new file mode 100644 index 0000000..cf02abe --- /dev/null +++ b/localtests/generated-columns/ignore_versions @@ -0,0 +1 @@ +Percona \ No newline at end of file diff --git a/localtests/geometry/ignore_versions b/localtests/geometry/ignore_versions new file mode 100644 index 0000000..cf02abe --- /dev/null +++ b/localtests/geometry/ignore_versions @@ -0,0 +1 @@ +Percona \ No newline at end of file diff --git a/localtests/spatial/ignore_versions b/localtests/spatial/ignore_versions new file mode 100644 index 0000000..cf02abe --- /dev/null +++ b/localtests/spatial/ignore_versions @@ -0,0 +1 @@ +Percona \ No newline at end of file diff --git a/localtests/test.sh b/localtests/test.sh index f66c813..14ecd83 100755 --- a/localtests/test.sh +++ b/localtests/test.sh @@ -11,6 +11,7 @@ tests_path=$(dirname $0) test_logfile=/tmp/gh-ost-test.log default_ghost_binary=/tmp/gh-ost-test ghost_binary="" +storage_engine=innodb exec_command_file=/tmp/gh-ost-test.bash ghost_structure_output_file=/tmp/gh-ost-test.ghost.structure.sql orig_content_output_file=/tmp/gh-ost-test.orig.content.csv @@ -24,12 +25,13 @@ replica_port= original_sql_mode= OPTIND=1 -while getopts "b:" OPTION +while getopts "b:s:" OPTION do case $OPTION in b) - ghost_binary="$OPTARG" - ;; + ghost_binary="$OPTARG";; + s) 
+ storage_engine="$OPTARG";; esac done shift $((OPTIND-1)) @@ -99,9 +101,13 @@ test_single() { if [ -f $tests_path/$test_name/ignore_versions ] ; then ignore_versions=$(cat $tests_path/$test_name/ignore_versions) mysql_version=$(gh-ost-test-mysql-master -s -s -e "select @@version") + mysql_version_comment=$(gh-ost-test-mysql-master -s -s -e "select @@version_comment") if echo "$mysql_version" | egrep -q "^${ignore_versions}" ; then echo -n "Skipping: $test_name" return 0 + elif echo "$mysql_version_comment" | egrep -i -q "^${ignore_versions}" ; then + echo -n "Skipping: $test_name" + return 0 fi fi @@ -117,6 +123,14 @@ test_single() { fi gh-ost-test-mysql-master --default-character-set=utf8mb4 test < $tests_path/$test_name/create.sql + test_create_result=$? + + if [ $test_create_result -ne 0 ] ; then + echo + echo "ERROR $test_name create failure. cat $tests_path/$test_name/create.sql:" + cat $tests_path/$test_name/create.sql + return 1 + fi extra_args="" if [ -f $tests_path/$test_name/extra_args ] ; then @@ -146,7 +160,8 @@ test_single() { --assume-master-host=${master_host}:${master_port} --database=test \ --table=gh_ost_test \ - --alter='engine=innodb' \ + --storage-engine=${storage_engine} \ + --alter='engine=${storage_engine}' \ --exact-rowcount \ --assume-rbr \ --initially-drop-old-table \ @@ -255,7 +270,7 @@ build_binary() { test_all() { build_binary - find $tests_path ! -path . -type d -mindepth 1 -maxdepth 1 | cut -d "/" -f 3 | egrep "$test_pattern" | while read test_name ; do + find $tests_path ! -path . -type d -mindepth 1 -maxdepth 1 | cut -d "/" -f 3 | egrep "$test_pattern" | sort | while read test_name ; do test_single "$test_name" if [ $? 
-ne 0 ] ; then create_statement=$(gh-ost-test-mysql-replica test -t -e "show create table _gh_ost_test_gho \G") diff --git a/script/cibuild-gh-ost-replica-tests b/script/cibuild-gh-ost-replica-tests index c4dbfd2..90eb856 100755 --- a/script/cibuild-gh-ost-replica-tests +++ b/script/cibuild-gh-ost-replica-tests @@ -36,8 +36,16 @@ test_mysql_version() { mkdir -p sandbox/binary rm -rf sandbox/binary/* - gh-ost-ci-env/bin/linux/dbdeployer unpack gh-ost-ci-env/mysql-tarballs/"$mysql_version".tar.xz --sandbox-binary ${PWD}/sandbox/binary - + local mysql_server=${mysql_version%-*} + if echo "$mysql_server" | egrep -i "percona" ; then + tarball_name=Percona-Server-${mysql_version#*-}-12-Linux.x86_64.glibc2.12-minimal.tar.gz + rm -f gh-ost-ci-env/mysql-tarballs/${tarball_name} + ln -s "$mysql_version".tar.xz gh-ost-ci-env/mysql-tarballs/${tarball_name} + gh-ost-ci-env/bin/linux/dbdeployer unpack gh-ost-ci-env/mysql-tarballs/${tarball_name} --sandbox-binary ${PWD}/sandbox/binary + rm -f gh-ost-ci-env/mysql-tarballs/${tarball_name} + else + gh-ost-ci-env/bin/linux/dbdeployer unpack gh-ost-ci-env/mysql-tarballs/"$mysql_version".tar.xz --sandbox-binary ${PWD}/sandbox/binary + fi mkdir -p sandboxes rm -rf sandboxes/* @@ -60,9 +68,21 @@ test_mysql_version() { gh-ost-test-mysql-master -uroot -e "create user 'gh-ost'@'%' identified by 'gh-ost'" gh-ost-test-mysql-master -uroot -e "grant all on *.* to 'gh-ost'@'%'" - echo "### Running gh-ost tests for $mysql_version" - ./localtests/test.sh -b bin/gh-ost + if echo "$mysql_server" | egrep -i "percona" ; then + echo "### Preparing for rocksdb in PerconaServer" + gh-ost-test-mysql-master -uroot -e 'INSTALL PLUGIN ROCKSDB SONAME "ha_rocksdb.so"' + gh-ost-test-mysql-master -uroot -e 'set global default_storage_engine="ROCKSDB"' + gh-ost-test-mysql-master -uroot -e 'set global transaction_isolation="READ-COMMITTED"' + gh-ost-test-mysql-replica -uroot -e 'INSTALL PLUGIN ROCKSDB SONAME "ha_rocksdb.so"' + gh-ost-test-mysql-replica -uroot -e 
'set global default_storage_engine="ROCKSDB"' + gh-ost-test-mysql-replica -uroot -e 'set global transaction_isolation="READ-COMMITTED"' + echo "### Running gh-ost tests for $mysql_version" + ./localtests/test.sh -b bin/gh-ost -s rocksdb + else + echo "### Running gh-ost tests for $mysql_version" + ./localtests/test.sh -b bin/gh-ost -s innodb + fi find sandboxes -name "stop_all" | bash } diff --git a/script/test b/script/test index 7e757b5..5c32b37 100755 --- a/script/test +++ b/script/test @@ -14,4 +14,4 @@ script/build cd .gopath/src/github.com/github/gh-ost echo "Running unit tests" -go test ./go/... +go test -v -covermode=atomic ./go/...