Merge branch 'master' into changelog-migrated-state
commit d6d1d3b436
.github/workflows/ci.yml | 4 (vendored)
@@ -10,10 +10,10 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Set up Go 1.16
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.16
go-version: 1.17
- name: Build
run: script/cibuild
.github/workflows/codeql.yml | 1 (vendored)
@@ -2,6 +2,7 @@ name: "CodeQL analysis"
on:
push:
pull_request:
schedule:
- cron: '0 0 * * 0'
.github/workflows/golangci-lint.yml | 2 (vendored)
@@ -15,7 +15,7 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: 1.16
go-version: 1.17
- uses: actions/checkout@v3
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
.github/workflows/replica-tests.yml | 4 (vendored)
@@ -13,10 +13,10 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Set up Go 1.16
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.16
go-version: 1.17
- name: migration tests
env:
@@ -5,8 +5,9 @@ run:
linters:
disable:
- errcheck
- staticcheck
enable:
- gosimple
- govet
- noctx
- rowserrcheck
- sqlclosecheck
- unused
@@ -1,4 +1,4 @@
FROM golang:1.16.4
FROM golang:1.17
RUN apt-get update
RUN apt-get install -y ruby ruby-dev rubygems build-essential

@@ -1,4 +1,4 @@
FROM golang:1.16.4
FROM golang:1.17
LABEL maintainer="github@github.com"
RUN apt-get update
go.mod | 14
@@ -1,6 +1,6 @@
module github.com/github/gh-ost
go 1.16
go 1.17
require (
github.com/go-ini/ini v1.62.0

@@ -8,10 +8,20 @@ require (
github.com/go-sql-driver/mysql v1.6.0
github.com/openark/golib v0.0.0-20210531070646-355f37940af8
github.com/satori/go.uuid v1.2.0
github.com/smartystreets/goconvey v1.6.4 // indirect
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7
golang.org/x/text v0.3.6
)
require (
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 // indirect
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect
github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 // indirect
github.com/smartystreets/goconvey v1.6.4 // indirect
go.uber.org/atomic v1.7.0 // indirect
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 // indirect
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/ini.v1 v1.62.0 // indirect
)
@@ -82,6 +82,8 @@ type MigrationContext struct {
AlterStatement string
AlterStatementOptions string // anything following the 'ALTER TABLE [schema.]table' from AlterStatement
countMutex sync.Mutex
countTableRowsCancelFunc func()
CountTableRows bool
ConcurrentCountTableRows bool
AllowedRunningOnMaster bool

@@ -184,7 +186,9 @@ type MigrationContext struct {
CurrentLag int64
currentProgress uint64
etaNanoseonds int64
ThrottleHTTPIntervalMillis int64
ThrottleHTTPStatusCode int64
ThrottleHTTPTimeoutMillis int64
controlReplicasLagResult mysql.ReplicationLagResult
TotalRowsCopied int64
TotalDMLEventsApplied int64

@@ -426,6 +430,36 @@ func (this *MigrationContext) IsTransactionalTable() bool {
return false
}

// SetCountTableRowsCancelFunc sets the cancel function for the CountTableRows query context
func (this *MigrationContext) SetCountTableRowsCancelFunc(f func()) {
this.countMutex.Lock()
defer this.countMutex.Unlock()
this.countTableRowsCancelFunc = f
}

// IsCountingTableRows returns true if the migration has a table count query running
func (this *MigrationContext) IsCountingTableRows() bool {
this.countMutex.Lock()
defer this.countMutex.Unlock()
return this.countTableRowsCancelFunc != nil
}

// CancelTableRowsCount cancels the CountTableRows query context. It is safe to
// call function even when IsCountingTableRows is false.
func (this *MigrationContext) CancelTableRowsCount() {
this.countMutex.Lock()
defer this.countMutex.Unlock()
if this.countTableRowsCancelFunc == nil {
return
}
this.countTableRowsCancelFunc()
this.countTableRowsCancelFunc = nil
}

// ElapsedTime returns time since very beginning of the process
func (this *MigrationContext) ElapsedTime() time.Duration {
return time.Since(this.StartTime)

@@ -812,30 +846,30 @@ func (this *MigrationContext) ReadConfigFile() error {
return err
}
if cfg.Section("client").Haskey("user") {
if cfg.Section("client").HasKey("user") {
this.config.Client.User = cfg.Section("client").Key("user").String()
}
if cfg.Section("client").Haskey("password") {
if cfg.Section("client").HasKey("password") {
this.config.Client.Password = cfg.Section("client").Key("password").String()
}
if cfg.Section("osc").Haskey("chunk_size") {
if cfg.Section("osc").HasKey("chunk_size") {
this.config.Osc.Chunk_Size, err = cfg.Section("osc").Key("chunk_size").Int64()
if err != nil {
return fmt.Errorf("Unable to read osc chunk size: %s", err.Error())
}
}
if cfg.Section("osc").Haskey("max_load") {
if cfg.Section("osc").HasKey("max_load") {
this.config.Osc.Max_Load = cfg.Section("osc").Key("max_load").String()
}
if cfg.Section("osc").Haskey("replication_lag_query") {
if cfg.Section("osc").HasKey("replication_lag_query") {
this.config.Osc.Replication_Lag_Query = cfg.Section("osc").Key("replication_lag_query").String()
}
if cfg.Section("osc").Haskey("max_lag_millis") {
if cfg.Section("osc").HasKey("max_lag_millis") {
this.config.Osc.Max_Lag_Millis, err = cfg.Section("osc").Key("max_lag_millis").Int64()
if err != nil {
return fmt.Errorf("Unable to read max lag millis: %s", err.Error())
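Note: the three new MigrationContext methods above amount to a mutex-guarded holder for a context.CancelFunc, set by one goroutine and cancelled (idempotently) by another. A minimal standalone sketch of the same pattern, with illustrative names rather than gh-ost code:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// cancelHolder mirrors the countMutex/countTableRowsCancelFunc pair added to MigrationContext.
type cancelHolder struct {
	mu     sync.Mutex
	cancel context.CancelFunc
}

func (h *cancelHolder) Set(f context.CancelFunc) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.cancel = f
}

func (h *cancelHolder) Active() bool {
	h.mu.Lock()
	defer h.mu.Unlock()
	return h.cancel != nil
}

// Cancel is safe to call even when nothing is running, like CancelTableRowsCount.
func (h *cancelHolder) Cancel() {
	h.mu.Lock()
	defer h.mu.Unlock()
	if h.cancel == nil {
		return
	}
	h.cancel()
	h.cancel = nil
}

func main() {
	var holder cancelHolder
	ctx, cancel := context.WithCancel(context.Background())
	holder.Set(cancel)
	go func() {
		<-ctx.Done() // a long-running job would watch this
		fmt.Println("background job stopped:", ctx.Err())
	}()
	fmt.Println("counting:", holder.Active())
	holder.Cancel()
	holder.Cancel() // second call is a no-op
	time.Sleep(50 * time.Millisecond)
}
```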
@@ -69,7 +69,7 @@ func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig,
return "", err
}
extraPortQuery := `select @@global.extra_port`
if err := db.QueryRow(extraPortQuery).Scan(&extraPort); err != nil {
if err := db.QueryRow(extraPortQuery).Scan(&extraPort); err != nil { // nolint:staticcheck
// swallow this error. not all servers support extra_port
}
// AliyunRDS set users port to "NULL", replace it by gh-ost param
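Note: the nolint:staticcheck marker exists because the error from this query is ignored on purpose. A hedged sketch of the underlying probe (probeExtraPort is an illustrative helper, not a gh-ost function): only some servers, such as Percona Server or AliyunRDS, expose @@global.extra_port, so a failed Scan simply means the variable is absent.

```go
package mysqlprobe

import gosql "database/sql"

// probeExtraPort returns @@global.extra_port when the server exposes it, else 0.
// A failed Scan is expected on servers without the variable and is deliberately
// ignored, as in ValidateConnection above.
func probeExtraPort(db *gosql.DB) int64 {
	var extraPort int64
	if err := db.QueryRow(`select @@global.extra_port`).Scan(&extraPort); err != nil {
		return 0
	}
	return extraPort
}
```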
@@ -28,31 +28,24 @@ type GoMySQLReader struct {
LastAppliedRowsEventHint mysql.BinlogCoordinates
}

func NewGoMySQLReader(migrationContext *base.MigrationContext) (binlogReader *GoMySQLReader, err error) {
binlogReader = &GoMySQLReader{
func NewGoMySQLReader(migrationContext *base.MigrationContext) *GoMySQLReader {
connectionConfig := migrationContext.InspectorConnectionConfig
return &GoMySQLReader{
migrationContext: migrationContext,
connectionConfig: migrationContext.InspectorConnectionConfig,
connectionConfig: connectionConfig,
currentCoordinates: mysql.BinlogCoordinates{},
currentCoordinatesMutex: &sync.Mutex{},
binlogSyncer: nil,
binlogStreamer: nil,
binlogSyncer: replication.NewBinlogSyncer(replication.BinlogSyncerConfig{
ServerID: uint32(migrationContext.ReplicaServerId),
Flavor: gomysql.MySQLFlavor,
Host: connectionConfig.Key.Hostname,
Port: uint16(connectionConfig.Key.Port),
User: connectionConfig.User,
Password: connectionConfig.Password,
TLSConfig: connectionConfig.TLSConfig(),
UseDecimal: true,
}),
}

serverId := uint32(migrationContext.ReplicaServerId)

binlogSyncerConfig := replication.BinlogSyncerConfig{
ServerID: serverId,
Flavor: "mysql",
Host: binlogReader.connectionConfig.Key.Hostname,
Port: uint16(binlogReader.connectionConfig.Key.Port),
User: binlogReader.connectionConfig.User,
Password: binlogReader.connectionConfig.Password,
TLSConfig: binlogReader.connectionConfig.TLSConfig(),
UseDecimal: true,
}
binlogReader.binlogSyncer = replication.NewBinlogSyncer(binlogSyncerConfig)

return binlogReader, err
}

// ConnectBinlogStreamer

@@ -145,15 +138,17 @@ func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesCha
defer this.currentCoordinatesMutex.Unlock()
this.currentCoordinates.LogPos = int64(ev.Header.LogPos)
}()
if rotateEvent, ok := ev.Event.(*replication.RotateEvent); ok {

switch binlogEvent := ev.Event.(type) {
case *replication.RotateEvent:
func() {
this.currentCoordinatesMutex.Lock()
defer this.currentCoordinatesMutex.Unlock()
this.currentCoordinates.LogFile = string(rotateEvent.NextLogName)
this.currentCoordinates.LogFile = string(binlogEvent.NextLogName)
}()
this.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), rotateEvent.NextLogName)
} else if rowsEvent, ok := ev.Event.(*replication.RowsEvent); ok {
if err := this.handleRowsEvent(ev, rowsEvent, entriesChannel); err != nil {
this.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), binlogEvent.NextLogName)
case *replication.RowsEvent:
if err := this.handleRowsEvent(ev, binlogEvent, entriesChannel); err != nil {
return err
}
}
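Note: NewGoMySQLReader now builds the replication.BinlogSyncer inline and no longer returns an error, and StreamEvents dispatches on the event type with a type switch. A condensed, standalone sketch of the same go-mysql usage, with placeholder host, credentials and server id, and error handling trimmed; this is not the gh-ost implementation:

```go
package main

import (
	"context"
	"fmt"
	"log"

	gomysql "github.com/go-mysql-org/go-mysql/mysql"
	"github.com/go-mysql-org/go-mysql/replication"
)

func main() {
	syncer := replication.NewBinlogSyncer(replication.BinlogSyncerConfig{
		ServerID:   99999, // placeholder replica server id
		Flavor:     gomysql.MySQLFlavor,
		Host:       "127.0.0.1",
		Port:       3306,
		User:       "repl",
		Password:   "secret",
		UseDecimal: true,
	})
	streamer, err := syncer.StartSync(gomysql.Position{Name: "mysql-bin.000001", Pos: 4})
	if err != nil {
		log.Fatal(err)
	}
	for {
		ev, err := streamer.GetEvent(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		// Same shape as the new switch in StreamEvents.
		switch e := ev.Event.(type) {
		case *replication.RotateEvent:
			fmt.Printf("rotate to %s:%d\n", e.NextLogName, e.Position)
		case *replication.RowsEvent:
			fmt.Printf("rows event on %s.%s: %d rows\n", e.Table.Schema, e.Table.Table, len(e.Rows))
		}
	}
}
```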
@@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/

@@ -19,7 +19,8 @@ import (
_ "github.com/go-sql-driver/mysql"
"github.com/openark/golib/log"
"golang.org/x/crypto/ssh/terminal"
// TODO: move to golang.org/x/term
"golang.org/x/crypto/ssh/terminal" // nolint:staticcheck
)

var AppVersion string

@@ -110,6 +111,8 @@ func main() {
throttleControlReplicas := flag.String("throttle-control-replicas", "", "List of replicas on which to check for lag; comma delimited. Example: myhost1.com:3306,myhost2.com,myhost3.com:3307")
throttleQuery := flag.String("throttle-query", "", "when given, issued (every second) to check if operation should throttle. Expecting to return zero for no-throttle, >0 for throttle. Query is issued on the migrated server. Make sure this query is lightweight")
throttleHTTP := flag.String("throttle-http", "", "when given, gh-ost checks given URL via HEAD request; any response code other than 200 (OK) causes throttling; make sure it has low latency response")
flag.Int64Var(&migrationContext.ThrottleHTTPIntervalMillis, "throttle-http-interval-millis", 100, "Number of milliseconds to wait before triggering another HTTP throttle check")
flag.Int64Var(&migrationContext.ThrottleHTTPTimeoutMillis, "throttle-http-timeout-millis", 1000, "Number of milliseconds to use as an HTTP throttle check timeout")
ignoreHTTPErrors := flag.Bool("ignore-http-errors", false, "ignore HTTP connection errors during throttle check")
heartbeatIntervalMillis := flag.Int64("heartbeat-interval-millis", 100, "how frequently would gh-ost inject a heartbeat value")
flag.StringVar(&migrationContext.ThrottleFlagFile, "throttle-flag-file", "", "operation pauses when this file exists; hint: use a file that is specific to the table being altered")

@@ -297,7 +300,7 @@ func main() {
log.Infof("starting gh-ost %+v", AppVersion)
acceptSignals(migrationContext)
migrator := logic.NewMigrator(migrationContext)
migrator := logic.NewMigrator(migrationContext, AppVersion)
err := migrator.Migrate()
if err != nil {
migrator.ExecOnFailureHook()
@@ -1,5 +1,5 @@
/*
Copyright 2021 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/

@@ -22,7 +22,8 @@ import (
)

const (
atomicCutOverMagicHint = "ghost-cut-over-sentry"
GhostChangelogTableComment = "gh-ost changelog"
atomicCutOverMagicHint = "ghost-cut-over-sentry"
)

type dmlBuildResult struct {

@@ -71,7 +72,6 @@ func NewApplier(migrationContext *base.MigrationContext) *Applier {
}

func (this *Applier) InitDBConnections() (err error) {
applierUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName)
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, applierUri); err != nil {
return err

@@ -233,16 +233,16 @@ func (this *Applier) CreateChangelogTable() error {
return err
}
query := fmt.Sprintf(`create /* gh-ost */ table %s.%s (
id bigint auto_increment,
id bigint unsigned auto_increment,
last_update timestamp not null DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
hint varchar(64) charset ascii not null,
value varchar(4096) charset ascii not null,
primary key(id),
unique key hint_uidx(hint)
) auto_increment=256
`,
) auto_increment=256 comment='%s'`,
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetChangelogTableName()),
GhostChangelogTableComment,
)
this.migrationContext.Log.Infof("Creating changelog table %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),

@@ -344,8 +344,9 @@ func (this *Applier) InitiateHeartbeat() {
}
injectHeartbeat()
heartbeatTick := time.Tick(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
for range heartbeatTick {
ticker := time.NewTicker(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}

@@ -382,10 +383,13 @@ func (this *Applier) ReadMigrationMinValues(uniqueKey *sql.UniqueKey) error {
if err != nil {
return err
}
rows, err := this.db.Query(query)
if err != nil {
return err
}
defer rows.Close()
for rows.Next() {
this.migrationContext.MigrationRangeMinValues = sql.NewColumnValues(uniqueKey.Len())
if err = rows.Scan(this.migrationContext.MigrationRangeMinValues.ValuesPointers...); err != nil {

@@ -394,8 +398,7 @@ func (this *Applier) ReadMigrationMinValues(uniqueKey *sql.UniqueKey) error {
}
this.migrationContext.Log.Infof("Migration min values: [%s]", this.migrationContext.MigrationRangeMinValues)
err = rows.Err()
return err
return rows.Err()
}

// ReadMigrationMaxValues returns the maximum values to be iterated on rowcopy

@@ -405,10 +408,13 @@ func (this *Applier) ReadMigrationMaxValues(uniqueKey *sql.UniqueKey) error {
if err != nil {
return err
}
rows, err := this.db.Query(query)
if err != nil {
return err
}
defer rows.Close()
for rows.Next() {
this.migrationContext.MigrationRangeMaxValues = sql.NewColumnValues(uniqueKey.Len())
if err = rows.Scan(this.migrationContext.MigrationRangeMaxValues.ValuesPointers...); err != nil {

@@ -417,12 +423,31 @@ func (this *Applier) ReadMigrationMaxValues(uniqueKey *sql.UniqueKey) error {
}
this.migrationContext.Log.Infof("Migration max values: [%s]", this.migrationContext.MigrationRangeMaxValues)
err = rows.Err()
return err
return rows.Err()
}

// ReadMigrationRangeValues reads min/max values that will be used for rowcopy
// ReadMigrationRangeValues reads min/max values that will be used for rowcopy.
// Before read min/max, write a changelog state into the ghc table to avoid lost data in mysql two-phase commit.
/*
Detail description of the lost data in mysql two-phase commit issue by @Fanduzi:
When using semi-sync and setting rpl_semi_sync_master_wait_point=AFTER_SYNC,
if an INSERT statement is being committed but blocks due to an unmet ack count,
the data inserted by the transaction is not visible to ReadMigrationRangeValues,
so the copy of the existing data in the table does not include the new row inserted by the transaction.
However, the binlog event for the transaction is already written to the binlog,
so the addDMLEventsListener only captures the binlog event after the transaction,
and thus the transaction's binlog event is not captured, resulting in data loss.

If write a changelog into ghc table before ReadMigrationRangeValues, and the transaction commit blocks
because the ack is not met, then the changelog will not be able to write, so the ReadMigrationRangeValues
will not be run. When the changelog writes successfully, the ReadMigrationRangeValues will read the
newly inserted data, thus Avoiding data loss due to the above problem.
*/
func (this *Applier) ReadMigrationRangeValues() error {
if _, err := this.WriteChangelogState(string(ReadMigrationRangeValues)); err != nil {
return err
}
if err := this.ReadMigrationMinValues(this.migrationContext.UniqueKey); err != nil {
return err
}

@@ -459,10 +484,13 @@ func (this *Applier) CalculateNextIterationRangeEndValues() (hasFurtherRange boo
if err != nil {
return hasFurtherRange, err
}
rows, err := this.db.Query(query, explodedArgs...)
if err != nil {
return hasFurtherRange, err
}
defer rows.Close()
iterationRangeMaxValues := sql.NewColumnValues(this.migrationContext.UniqueKey.Len())
for rows.Next() {
if err = rows.Scan(iterationRangeMaxValues.ValuesPointers...); err != nil {
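Note: InitiateHeartbeat above (and several loops in migrator.go and throttler.go further down) swaps time.Tick for time.NewTicker. time.Tick has no Stop method, so its underlying ticker can never be released once the goroutine exits; NewTicker with a deferred Stop fixes that. A minimal illustration, not gh-ost code (the real loops check the finishedMigrating flag instead of a done channel):

```go
package main

import (
	"fmt"
	"time"
)

func heartbeatLoop(interval time.Duration, done <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop() // released when the loop returns; time.Tick offers no equivalent
	for {
		select {
		case <-ticker.C:
			fmt.Println("inject heartbeat")
		case <-done:
			return
		}
	}
}

func main() {
	done := make(chan struct{})
	go heartbeatLoop(100*time.Millisecond, done)
	time.Sleep(350 * time.Millisecond)
	close(done)
	time.Sleep(50 * time.Millisecond)
}
```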
@@ -6,6 +6,7 @@
package logic

import (
"context"
gosql "database/sql"
"fmt"
"reflect"

@@ -532,18 +533,48 @@ func (this *Inspector) estimateTableRowsViaExplain() error {
return nil
}

// Kill kills a query for connectionID.
// - @amason: this should go somewhere _other_ than `logic`, but I couldn't decide
// between `base`, `sql`, or `mysql`.
func Kill(db *gosql.DB, connectionID string) error {
_, err := db.Exec(`KILL QUERY %s`, connectionID)
return err
}

// CountTableRows counts exact number of rows on the original table
func (this *Inspector) CountTableRows() error {
func (this *Inspector) CountTableRows(ctx context.Context) error {
atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 1)
defer atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 0)
this.migrationContext.Log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while")
query := fmt.Sprintf(`select /* gh-ost */ count(*) as count_rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
var rowsEstimate int64
if err := this.db.QueryRow(query).Scan(&rowsEstimate); err != nil {
conn, err := this.db.Conn(ctx)
if err != nil {
return err
}
defer conn.Close()

var connectionID string
if err := conn.QueryRowContext(ctx, `SELECT /* gh-ost */ CONNECTION_ID()`).Scan(&connectionID); err != nil {
return err
}

query := fmt.Sprintf(`select /* gh-ost */ count(*) as count_rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
var rowsEstimate int64
if err := conn.QueryRowContext(ctx, query).Scan(&rowsEstimate); err != nil {
switch err {
case context.Canceled, context.DeadlineExceeded:
this.migrationContext.Log.Infof("exact row count cancelled (%s), likely because I'm about to cut over. I'm going to kill that query.", ctx.Err())
return Kill(this.db, connectionID)
default:
return err
}
}

// row count query finished. nil out the cancel func, so the main migration thread
// doesn't bother calling it after row copy is done.
this.migrationContext.SetCountTableRowsCancelFunc(nil)

atomic.StoreInt64(&this.migrationContext.RowsEstimate, rowsEstimate)
this.migrationContext.UsedRowsEstimateMethod = base.CountRowsEstimate
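Note: CountTableRows(ctx) now pins a dedicated connection, records its CONNECTION_ID(), and, when the context is cancelled, issues KILL QUERY from another connection, since cancelling the Go context alone does not necessarily stop the statement on the server. A standalone sketch of that pattern (DSN and table name are placeholders, and this is not the gh-ost implementation):

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

// countRows runs a cancellable COUNT(*) and kills the server-side query on cancellation.
func countRows(ctx context.Context, db *sql.DB, table string) (int64, error) {
	conn, err := db.Conn(ctx) // pin one connection so CONNECTION_ID() matches the COUNT query
	if err != nil {
		return 0, err
	}
	defer conn.Close()

	var connectionID string
	if err := conn.QueryRowContext(ctx, `SELECT CONNECTION_ID()`).Scan(&connectionID); err != nil {
		return 0, err
	}

	var count int64
	err = conn.QueryRowContext(ctx, fmt.Sprintf("select count(*) from %s", table)).Scan(&count)
	switch err {
	case nil:
		return count, nil
	case context.Canceled, context.DeadlineExceeded:
		// The driver call returns, but MySQL may keep scanning; kill the query explicitly.
		_, killErr := db.Exec(fmt.Sprintf("KILL QUERY %s", connectionID))
		return 0, killErr
	default:
		return 0, err
	}
}

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	fmt.Println(countRows(ctx, db, "gh_ost_test"))
}
```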
@@ -6,6 +6,7 @@
package logic

import (
"context"
"fmt"
"io"
"math"

@@ -26,7 +27,8 @@ type ChangelogState string
const (
AllEventsUpToLockProcessed ChangelogState = "AllEventsUpToLockProcessed"
GhostTableMigrated ChangelogState = "GhostTableMigrated"
Migrated ChangelogState = "Migrated"
Migrated ChangelogState = "Migrated"
ReadMigrationRangeValues ChangelogState = "ReadMigrationRangeValues"
)

func ReadChangelogState(s string) ChangelogState {

@@ -62,6 +64,7 @@ const (

// Migrator is the main schema migration flow manager.
type Migrator struct {
appVersion string
parser *sql.AlterTableParser
inspector *Inspector
applier *Applier

@@ -87,8 +90,9 @@ type Migrator struct {
finishedMigrating int64
}

func NewMigrator(context *base.MigrationContext) *Migrator {
func NewMigrator(context *base.MigrationContext, appVersion string) *Migrator {
migrator := &Migrator{
appVersion: appVersion,
migrationContext: context,
parser: sql.NewAlterTableParser(),
ghostTableMigrated: make(chan bool),

@@ -237,6 +241,8 @@ func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (er
this.applyEventsQueue <- newApplyEventStructByFunc(&applyEventFunc)
}()
}
case ReadMigrationRangeValues:
// no-op event
default:
{
return fmt.Errorf("Unknown changelog state: %+v", changelogState)

@@ -293,8 +299,8 @@ func (this *Migrator) countTableRows() (err error) {
return nil
}
countRowsFunc := func() error {
if err := this.inspector.CountTableRows(); err != nil {
countRowsFunc := func(ctx context.Context) error {
if err := this.inspector.CountTableRows(ctx); err != nil {
return err
}
if err := this.hooksExecutor.onRowCountComplete(); err != nil {

@@ -304,12 +310,17 @@ func (this *Migrator) countTableRows() (err error) {
}

if this.migrationContext.ConcurrentCountTableRows {
// store a cancel func so we can stop this query before a cut over
rowCountContext, rowCountCancel := context.WithCancel(context.Background())
this.migrationContext.SetCountTableRowsCancelFunc(rowCountCancel)

this.migrationContext.Log.Infof("As instructed, counting rows in the background; meanwhile I will use an estimated count, and will update it later on")
go countRowsFunc()
go countRowsFunc(rowCountContext)

// and we ignore errors, because this turns to be a background job
return nil
}
return countRowsFunc()
return countRowsFunc(context.Background())
}

func (this *Migrator) createFlagFiles() (err error) {

@@ -413,6 +424,10 @@ func (this *Migrator) Migrate() (err error) {
}
this.printStatus(ForcePrintStatusRule)

if this.migrationContext.IsCountingTableRows() {
this.migrationContext.Log.Info("stopping query for exact row count, because that can accidentally lock out the cut over")
this.migrationContext.CancelTableRowsCount()
}
if err := this.hooksExecutor.onBeforeCutOver(); err != nil {
return err
}

@@ -537,19 +552,19 @@ func (this *Migrator) cutOver() (err error) {
}
}
}
if this.migrationContext.CutOverType == base.CutOverAtomic {

switch this.migrationContext.CutOverType {
case base.CutOverAtomic:
// Atomic solution: we use low timeout and multiple attempts. But for
// each failed attempt, we throttle until replication lag is back to normal
err := this.atomicCutOver()
this.handleCutOverResult(err)
return err
err = this.atomicCutOver()
case base.CutOverTwoStep:
err = this.cutOverTwoStep()
default:
return this.migrationContext.Log.Fatalf("Unknown cut-over type: %d; should never get here!", this.migrationContext.CutOverType)
}
if this.migrationContext.CutOverType == base.CutOverTwoStep {
err := this.cutOverTwoStep()
this.handleCutOverResult(err)
return err
}
return this.migrationContext.Log.Fatalf("Unknown cut-over type: %d; should never get here!", this.migrationContext.CutOverType)
this.handleCutOverResult(err)
return err
}

// Inject the "AllEventsUpToLockProcessed" state hint, wait for it to appear in the binary logs,

@@ -797,17 +812,16 @@ func (this *Migrator) initiateInspector() (err error) {
}

// initiateStatus sets and activates the printStatus() ticker
func (this *Migrator) initiateStatus() error {
func (this *Migrator) initiateStatus() {
this.printStatus(ForcePrintStatusAndHintRule)
statusTick := time.Tick(1 * time.Second)
for range statusTick {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return nil
return
}
go this.printStatus(HeuristicPrintStatusRule)
}

return nil
}

// printMigrationStatusHint prints a detailed configuration dump, that is useful

@@ -1009,7 +1023,8 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) {
w := io.MultiWriter(writers...)
fmt.Fprintln(w, status)

if elapsedSeconds%this.migrationContext.HooksStatusIntervalSec == 0 {
hooksStatusIntervalSec := this.migrationContext.HooksStatusIntervalSec
if hooksStatusIntervalSec > 0 && elapsedSeconds%hooksStatusIntervalSec == 0 {
this.hooksExecutor.onStatus(status)
}
}

@@ -1039,8 +1054,9 @@ func (this *Migrator) initiateStreaming() error {
}()

go func() {
ticker := time.Tick(1 * time.Second)
for range ticker {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}

@@ -1067,7 +1083,7 @@ func (this *Migrator) addDMLEventsListener() error {

// initiateThrottler kicks in the throttling collection and the throttling checks.
func (this *Migrator) initiateThrottler() error {
this.throttler = NewThrottler(this.migrationContext, this.applier, this.inspector)
this.throttler = NewThrottler(this.migrationContext, this.applier, this.inspector, this.appVersion)

go this.throttler.initiateThrottlerCollection(this.firstThrottlingCollected)
this.migrationContext.Log.Infof("Waiting for first throttle metrics to be collected")
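Note: the printStatus change guards the modulo because an integer division or modulo by zero panics at runtime in Go, so a zero HooksStatusIntervalSec would have crashed the status loop. A minimal illustration (the helper name here is ours, not gh-ost's):

```go
package main

import "fmt"

// shouldEmitHookStatus mirrors the guarded check now used in printStatus.
func shouldEmitHookStatus(elapsedSeconds, hooksStatusIntervalSec int64) bool {
	return hooksStatusIntervalSec > 0 && elapsedSeconds%hooksStatusIntervalSec == 0
}

func main() {
	fmt.Println(shouldEmitHookStatus(60, 0))  // false; without the > 0 guard this would panic
	fmt.Println(shouldEmitHookStatus(60, 30)) // true
}
```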
@@ -134,7 +134,7 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
}
}
argIsQuestion := (arg == "?")
throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged\n"
throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged"

if err := this.hooksExecutor.onInteractiveCommand(command); err != nil {
return NoPrintStatusRule, err

@@ -282,7 +282,7 @@ help # This message
return NoPrintStatusRule, nil
}
this.migrationContext.SetThrottleQuery(arg)
fmt.Fprintf(writer, throttleHint)
fmt.Fprintln(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "throttle-http":

@@ -292,7 +292,7 @@ help # This message
return NoPrintStatusRule, nil
}
this.migrationContext.SetThrottleHTTP(arg)
fmt.Fprintf(writer, throttleHint)
fmt.Fprintln(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "throttle-control-replicas":

@@ -315,7 +315,7 @@ help # This message
return NoPrintStatusRule, err
}
atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 1)
fmt.Fprintf(writer, throttleHint)
fmt.Fprintln(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "no-throttle", "unthrottle", "resume", "continue":
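Note: the throttle-hint edits drop the trailing \n from the literal and switch fmt.Fprintf to fmt.Fprintln. Passing a runtime string as Fprintf's format argument trips go vet's printf check (govet is enabled in .golangci.yml in this same commit) and would garble output if the hint ever contained a % verb; Fprintln writes the string verbatim and supplies the newline. A tiny illustration with a made-up hint:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	hint := "# progress: 50% done"
	fmt.Fprintf(os.Stdout, hint) // vet warning; prints "# progress: 50%!d(MISSING)one"
	fmt.Println()
	fmt.Fprintln(os.Stdout, hint) // prints the string verbatim and appends '\n'
}
```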
@@ -87,10 +87,10 @@ func (this *EventsStreamer) notifyListeners(binlogEvent *binlog.BinlogDMLEvent)

for _, listener := range this.listeners {
listener := listener
if strings.ToLower(listener.databaseName) != strings.ToLower(binlogEvent.DatabaseName) {
if !strings.EqualFold(listener.databaseName, binlogEvent.DatabaseName) {
continue
}
if strings.ToLower(listener.tableName) != strings.ToLower(binlogEvent.TableName) {
if !strings.EqualFold(listener.tableName, binlogEvent.TableName) {
continue
}
if listener.async {

@@ -123,10 +123,7 @@ func (this *EventsStreamer) InitDBConnections() (err error) {

// initBinlogReader creates and connects the reader: we hook up to a MySQL server as a replica
func (this *EventsStreamer) initBinlogReader(binlogCoordinates *mysql.BinlogCoordinates) error {
goMySQLReader, err := binlog.NewGoMySQLReader(this.migrationContext)
if err != nil {
return err
}
goMySQLReader := binlog.NewGoMySQLReader(this.migrationContext)
if err := goMySQLReader.ConnectBinlogStreamer(*binlogCoordinates); err != nil {
return err
}
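Note: notifyListeners now uses strings.EqualFold, which compares case-insensitively under Unicode case folding without building two lowercased copies; it is the form gosimple (also enabled in this commit's linter config) suggests in place of ToLower(a) == ToLower(b). A one-liner illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	fmt.Println(strings.EqualFold("GH_OST_TEST", "gh_ost_test")) // true
	fmt.Println(!strings.EqualFold("test", "other"))             // true: the listener would be skipped
}
```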
@@ -6,6 +6,7 @@
package logic

import (
"context"
"fmt"
"net/http"
"strings"

@@ -42,16 +43,22 @@ const frenoMagicHint = "freno"
// Throttler collects metrics related to throttling and makes informed decision
// whether throttling should take place.
type Throttler struct {
appVersion string
migrationContext *base.MigrationContext
applier *Applier
httpClient *http.Client
httpClientTimeout time.Duration
inspector *Inspector
finishedMigrating int64
}

func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector) *Throttler {
func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector, appVersion string) *Throttler {
return &Throttler{
appVersion: appVersion,
migrationContext: migrationContext,
applier: applier,
httpClient: &http.Client{},
httpClientTimeout: time.Duration(migrationContext.ThrottleHTTPTimeoutMillis) * time.Millisecond,
inspector: inspector,
finishedMigrating: 0,
}

@@ -161,8 +168,9 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
collectFunc()
firstThrottlingCollected <- true
ticker := time.Tick(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
for range ticker {
ticker := time.NewTicker(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}

@@ -237,12 +245,14 @@ func (this *Throttler) collectControlReplicasLag() {
}
this.migrationContext.SetControlReplicasLagResult(readControlReplicasLag())
}
aggressiveTicker := time.Tick(100 * time.Millisecond)

relaxedFactor := 10
counter := 0
shouldReadLagAggressively := false

for range aggressiveTicker {
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}

@@ -285,7 +295,17 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<-
if url == "" {
return true, nil
}
resp, err := http.Head(url)

ctx, cancel := context.WithTimeout(context.Background(), this.httpClientTimeout)
defer cancel()

req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
if err != nil {
return false, err
}
req.Header.Set("User-Agent", fmt.Sprintf("gh-ost/%s", this.appVersion))

resp, err := this.httpClient.Do(req)
if err != nil {
return false, err
}

@@ -303,8 +323,10 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<-

firstThrottlingCollected <- true
ticker := time.Tick(100 * time.Millisecond)
for range ticker {
collectInterval := time.Duration(this.migrationContext.ThrottleHTTPIntervalMillis) * time.Millisecond
ticker := time.NewTicker(collectInterval)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}

@@ -423,8 +445,9 @@ func (this *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan
this.collectGeneralThrottleMetrics()
firstThrottlingCollected <- true
throttlerMetricsTick := time.Tick(1 * time.Second)
for range throttlerMetricsTick {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}

@@ -435,9 +458,7 @@ func (this *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan
}

// initiateThrottlerChecks initiates the throttle ticker and sets the basic behavior of throttling.
func (this *Throttler) initiateThrottlerChecks() error {
throttlerTick := time.Tick(100 * time.Millisecond)

func (this *Throttler) initiateThrottlerChecks() {
throttlerFunction := func() {
alreadyThrottling, currentReason, _ := this.migrationContext.IsThrottled()
shouldThrottle, throttleReason, throttleReasonHint := this.shouldThrottle()

@@ -454,14 +475,15 @@ func (this *Throttler) initiateThrottlerChecks() error {
this.migrationContext.SetThrottled(shouldThrottle, throttleReason, throttleReasonHint)
}
throttlerFunction()
for range throttlerTick {

ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return nil
return
}
throttlerFunction()
}

return nil
}

// throttle sees if throttling needs take place, and if so, continuously sleeps (blocks)
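Note: collectThrottleHTTPStatus replaces http.Head with an explicit request so it can carry a per-attempt context deadline (--throttle-http-timeout-millis) and a gh-ost/<version> User-Agent, and its polling interval now comes from --throttle-http-interval-millis. A standalone sketch of the request pattern only (URL and version are placeholders; the real code records the status code on the migration context rather than returning a bool):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// checkThrottleURL returns true when the endpoint answers 200 OK within the timeout;
// any other status, or an error, would be treated as "please throttle".
func checkThrottleURL(client *http.Client, url, appVersion string, timeout time.Duration) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
	if err != nil {
		return false, err
	}
	req.Header.Set("User-Agent", fmt.Sprintf("gh-ost/%s", appVersion))

	resp, err := client.Do(req)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK, nil
}

func main() {
	ok, err := checkThrottleURL(&http.Client{}, "http://localhost:8080/throttle", "1.1.5", time.Second)
	fmt.Println(ok, err)
}
```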
@ -1,36 +1,21 @@
|
||||
/*
|
||||
Copyright 2015 Shlomi Noach, courtesy Booking.com
|
||||
Copyright 2022 GitHub Inc.
|
||||
See https://github.com/github/gh-ost/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var detachPattern *regexp.Regexp
|
||||
|
||||
func init() {
|
||||
detachPattern, _ = regexp.Compile(`//([^/:]+):([\d]+)`) // e.g. `//binlog.01234:567890`
|
||||
}
|
||||
|
||||
type BinlogType int
|
||||
|
||||
const (
|
||||
BinaryLog BinlogType = iota
|
||||
RelayLog
|
||||
)
|
||||
|
||||
// BinlogCoordinates described binary log coordinates in the form of log file & log position.
|
||||
type BinlogCoordinates struct {
|
||||
LogFile string
|
||||
LogPos int64
|
||||
Type BinlogType
|
||||
}
|
||||
|
||||
// ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306
|
||||
@ -62,7 +47,7 @@ func (this *BinlogCoordinates) Equals(other *BinlogCoordinates) bool {
|
||||
if other == nil {
|
||||
return false
|
||||
}
|
||||
return this.LogFile == other.LogFile && this.LogPos == other.LogPos && this.Type == other.Type
|
||||
return this.LogFile == other.LogFile && this.LogPos == other.LogPos
|
||||
}
|
||||
|
||||
// IsEmpty returns true if the log file is empty, unnamed
|
||||
@ -87,76 +72,5 @@ func (this *BinlogCoordinates) SmallerThanOrEquals(other *BinlogCoordinates) boo
|
||||
if this.SmallerThan(other) {
|
||||
return true
|
||||
}
|
||||
return this.LogFile == other.LogFile && this.LogPos == other.LogPos // No Type comparison
|
||||
}
|
||||
|
||||
// FileSmallerThan returns true if this coordinate's file is strictly smaller than the other's.
|
||||
func (this *BinlogCoordinates) FileSmallerThan(other *BinlogCoordinates) bool {
|
||||
return this.LogFile < other.LogFile
|
||||
}
|
||||
|
||||
// FileNumberDistance returns the numeric distance between this coordinate's file number and the other's.
|
||||
// Effectively it means "how many rotates/FLUSHes would make these coordinates's file reach the other's"
|
||||
func (this *BinlogCoordinates) FileNumberDistance(other *BinlogCoordinates) int {
|
||||
thisNumber, _ := this.FileNumber()
|
||||
otherNumber, _ := other.FileNumber()
|
||||
return otherNumber - thisNumber
|
||||
}
|
||||
|
||||
// FileNumber returns the numeric value of the file, and the length in characters representing the number in the filename.
|
||||
// Example: FileNumber() of mysqld.log.000789 is (789, 6)
|
||||
func (this *BinlogCoordinates) FileNumber() (int, int) {
|
||||
tokens := strings.Split(this.LogFile, ".")
|
||||
numPart := tokens[len(tokens)-1]
|
||||
numLen := len(numPart)
|
||||
fileNum, err := strconv.Atoi(numPart)
|
||||
if err != nil {
|
||||
return 0, 0
|
||||
}
|
||||
return fileNum, numLen
|
||||
}
|
||||
|
||||
// PreviousFileCoordinatesBy guesses the filename of the previous binlog/relaylog, by given offset (number of files back)
|
||||
func (this *BinlogCoordinates) PreviousFileCoordinatesBy(offset int) (BinlogCoordinates, error) {
|
||||
result := BinlogCoordinates{LogPos: 0, Type: this.Type}
|
||||
|
||||
fileNum, numLen := this.FileNumber()
|
||||
if fileNum == 0 {
|
||||
return result, errors.New("Log file number is zero, cannot detect previous file")
|
||||
}
|
||||
newNumStr := fmt.Sprintf("%d", (fileNum - offset))
|
||||
newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr
|
||||
|
||||
tokens := strings.Split(this.LogFile, ".")
|
||||
tokens[len(tokens)-1] = newNumStr
|
||||
result.LogFile = strings.Join(tokens, ".")
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// PreviousFileCoordinates guesses the filename of the previous binlog/relaylog
|
||||
func (this *BinlogCoordinates) PreviousFileCoordinates() (BinlogCoordinates, error) {
|
||||
return this.PreviousFileCoordinatesBy(1)
|
||||
}
|
||||
|
||||
// PreviousFileCoordinates guesses the filename of the previous binlog/relaylog
|
||||
func (this *BinlogCoordinates) NextFileCoordinates() (BinlogCoordinates, error) {
|
||||
result := BinlogCoordinates{LogPos: 0, Type: this.Type}
|
||||
|
||||
fileNum, numLen := this.FileNumber()
|
||||
newNumStr := fmt.Sprintf("%d", (fileNum + 1))
|
||||
newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr
|
||||
|
||||
tokens := strings.Split(this.LogFile, ".")
|
||||
tokens[len(tokens)-1] = newNumStr
|
||||
result.LogFile = strings.Join(tokens, ".")
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// FileSmallerThan returns true if this coordinate's file is strictly smaller than the other's.
|
||||
func (this *BinlogCoordinates) DetachedCoordinates() (isDetached bool, detachedLogFile string, detachedLogPos string) {
|
||||
detachedCoordinatesSubmatch := detachPattern.FindStringSubmatch(this.LogFile)
|
||||
if len(detachedCoordinatesSubmatch) == 0 {
|
||||
return false, "", ""
|
||||
}
|
||||
return true, detachedCoordinatesSubmatch[1], detachedCoordinatesSubmatch[2]
|
||||
return this.LogFile == other.LogFile && this.LogPos == other.LogPos
|
||||
}
|
||||
|
@ -37,57 +37,6 @@ func TestBinlogCoordinates(t *testing.T) {
|
||||
test.S(t).ExpectTrue(c1.SmallerThanOrEquals(&c3))
|
||||
}
|
||||
|
||||
func TestBinlogNext(t *testing.T) {
|
||||
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
|
||||
cres, err := c1.NextFileCoordinates()
|
||||
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(c1.Type, cres.Type)
|
||||
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00018")
|
||||
|
||||
c2 := BinlogCoordinates{LogFile: "mysql-bin.00099", LogPos: 104}
|
||||
cres, err = c2.NextFileCoordinates()
|
||||
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(c1.Type, cres.Type)
|
||||
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00100")
|
||||
|
||||
c3 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00099", LogPos: 104}
|
||||
cres, err = c3.NextFileCoordinates()
|
||||
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(c1.Type, cres.Type)
|
||||
test.S(t).ExpectEquals(cres.LogFile, "mysql.00.prod.com.00100")
|
||||
}
|
||||
|
||||
func TestBinlogPrevious(t *testing.T) {
|
||||
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
|
||||
cres, err := c1.PreviousFileCoordinates()
|
||||
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(c1.Type, cres.Type)
|
||||
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00016")
|
||||
|
||||
c2 := BinlogCoordinates{LogFile: "mysql-bin.00100", LogPos: 104}
|
||||
cres, err = c2.PreviousFileCoordinates()
|
||||
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(c1.Type, cres.Type)
|
||||
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00099")
|
||||
|
||||
c3 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00100", LogPos: 104}
|
||||
cres, err = c3.PreviousFileCoordinates()
|
||||
|
||||
test.S(t).ExpectNil(err)
|
||||
test.S(t).ExpectEquals(c1.Type, cres.Type)
|
||||
test.S(t).ExpectEquals(cres.LogFile, "mysql.00.prod.com.00099")
|
||||
|
||||
c4 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00000", LogPos: 104}
|
||||
_, err = c4.PreviousFileCoordinates()
|
||||
|
||||
test.S(t).ExpectNotNil(err)
|
||||
}
|
||||
|
||||
func TestBinlogCoordinatesAsKey(t *testing.T) {
|
||||
m := make(map[BinlogCoordinates]bool)
|
||||
|
||||
@ -103,20 +52,3 @@ func TestBinlogCoordinatesAsKey(t *testing.T) {
|
||||
|
||||
test.S(t).ExpectEquals(len(m), 3)
|
||||
}
|
||||
|
||||
func TestBinlogFileNumber(t *testing.T) {
|
||||
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
|
||||
c2 := BinlogCoordinates{LogFile: "mysql-bin.00022", LogPos: 104}
|
||||
|
||||
test.S(t).ExpectEquals(c1.FileNumberDistance(&c1), 0)
|
||||
test.S(t).ExpectEquals(c1.FileNumberDistance(&c2), 5)
|
||||
test.S(t).ExpectEquals(c2.FileNumberDistance(&c1), -5)
|
||||
}
|
||||
|
||||
func TestBinlogFileNumberDistance(t *testing.T) {
|
||||
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
|
||||
fileNum, numLen := c1.FileNumber()
|
||||
|
||||
test.S(t).ExpectEquals(fileNum, 17)
|
||||
test.S(t).ExpectEquals(numLen, 5)
|
||||
}
|
||||
|
@@ -501,6 +501,9 @@ func BuildDMLUpdateQuery(databaseName, tableName string, tableColumns, sharedCol
}

equalsComparison, err := BuildEqualsPreparedComparison(uniqueKeyColumns.Names())
if err != nil {
return "", sharedArgs, uniqueKeyArgs, err
}
result = fmt.Sprintf(`
update /* gh-ost %s.%s */
%s.%s
40
localtests/compound-pk-ts/create.sql
Normal file
40
localtests/compound-pk-ts/create.sql
Normal file
@ -0,0 +1,40 @@
|
||||
drop table if exists gh_ost_test;
|
||||
create table gh_ost_test (
|
||||
id int auto_increment,
|
||||
i int not null,
|
||||
ts0 timestamp(6) default current_timestamp(6),
|
||||
updated tinyint unsigned default 0,
|
||||
primary key(id, ts0)
|
||||
) auto_increment=1;
|
||||
|
||||
drop event if exists gh_ost_test;
|
||||
delimiter ;;
|
||||
create event gh_ost_test
|
||||
on schedule every 1 second
|
||||
starts current_timestamp
|
||||
ends current_timestamp + interval 60 second
|
||||
on completion not preserve
|
||||
enable
|
||||
do
|
||||
begin
|
||||
insert into gh_ost_test values (null, 11, sysdate(6), 0);
|
||||
update gh_ost_test set updated = 1 where i = 11 order by id desc limit 1;
|
||||
|
||||
insert into gh_ost_test values (null, 13, sysdate(6), 0);
|
||||
update gh_ost_test set updated = 1 where i = 13 order by id desc limit 1;
|
||||
|
||||
insert into gh_ost_test values (null, 17, sysdate(6), 0);
|
||||
update gh_ost_test set updated = 1 where i = 17 order by id desc limit 1;
|
||||
|
||||
insert into gh_ost_test values (null, 19, sysdate(6), 0);
|
||||
update gh_ost_test set updated = 1 where i = 19 order by id desc limit 1;
|
||||
|
||||
insert into gh_ost_test values (null, 23, sysdate(6), 0);
|
||||
update gh_ost_test set updated = 1 where i = 23 order by id desc limit 1;
|
||||
|
||||
insert into gh_ost_test values (null, 29, sysdate(6), 0);
|
||||
insert into gh_ost_test values (null, 31, sysdate(6), 0);
|
||||
insert into gh_ost_test values (null, 37, sysdate(6), 0);
|
||||
insert into gh_ost_test values (null, 41, sysdate(6), 0);
|
||||
delete from gh_ost_test where i = 31 order by id desc limit 1;
|
||||
end ;;
|
40
localtests/compound-pk/create.sql
Normal file
40
localtests/compound-pk/create.sql
Normal file
@ -0,0 +1,40 @@
|
||||
drop table if exists gh_ost_test;
|
||||
create table gh_ost_test (
|
||||
id int auto_increment,
|
||||
i int not null,
|
||||
v varchar(128),
|
||||
updated tinyint unsigned default 0,
|
||||
primary key(id, v)
|
||||
) auto_increment=1;
|
||||
|
||||
drop event if exists gh_ost_test;
|
||||
delimiter ;;
|
||||
create event gh_ost_test
|
||||
on schedule every 1 second
|
||||
starts current_timestamp
|
||||
ends current_timestamp + interval 60 second
|
||||
on completion not preserve
|
||||
enable
|
||||
do
|
||||
begin
|
||||
insert into gh_ost_test values (null, 11, 'eleven', 0);
|
||||
update gh_ost_test set updated = 1 where i = 11 order by id desc limit 1;
|
||||
|
||||
insert into gh_ost_test values (null, 13, 'thirteen', 0);
|
||||
update gh_ost_test set updated = 1 where i = 13 order by id desc limit 1;
|
||||
|
||||
insert into gh_ost_test values (null, 17, 'seventeen', 0);
|
||||
update gh_ost_test set updated = 1 where i = 17 order by id desc limit 1;
|
||||
|
||||
insert into gh_ost_test values (null, 19, 'nineteen', 0);
|
||||
update gh_ost_test set updated = 1 where i = 19 order by id desc limit 1;
|
||||
|
||||
insert into gh_ost_test values (null, 23, 'twenty three', 0);
|
||||
update gh_ost_test set updated = 1 where i = 23 order by id desc limit 1;
|
||||
|
||||
insert into gh_ost_test values (null, 29, 'twenty nine', 0);
|
||||
insert into gh_ost_test values (null, 31, 'thirty one', 0);
|
||||
insert into gh_ost_test values (null, 37, 'thirty seven', 0);
|
||||
insert into gh_ost_test values (null, 41, 'forty one', 0);
|
||||
delete from gh_ost_test where i = 31 order by id desc limit 1;
|
||||
end ;;
|
@@ -224,6 +224,8 @@ test_single() {
ghost_checksum=$(cat $ghost_content_output_file | md5sum)

if [ "$orig_checksum" != "$ghost_checksum" ] ; then
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${orig_columns} from gh_ost_test" -ss > $orig_content_output_file
gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${ghost_columns} from _gh_ost_test_gho" -ss > $ghost_content_output_file
echo "ERROR $test_name: checksum mismatch"
echo "---"
diff $orig_content_output_file $ghost_content_output_file
@@ -1,13 +1,13 @@
#!/bin/bash

PREFERRED_GO_VERSION=go1.16.4
SUPPORTED_GO_VERSIONS='go1.1[56]'
PREFERRED_GO_VERSION=go1.17.11
SUPPORTED_GO_VERSIONS='go1.1[567]'

GO_PKG_DARWIN=${PREFERRED_GO_VERSION}.darwin-amd64.pkg
GO_PKG_DARWIN_SHA=0f215de06019a054a3da46a0722989986c956d719c7a0a8fc38a5f3c216d6f6b
GO_PKG_DARWIN_SHA=4f924c534230de8f0e1c7369f611c0310efd21fc2d9438b13bc2703af9dda25a

GO_PKG_LINUX=${PREFERRED_GO_VERSION}.linux-amd64.tar.gz
GO_PKG_LINUX_SHA=4a7fa60f323ee1416a4b1425aefc37ea359e9d64df19c326a58953a97ad41ea5
GO_PKG_LINUX_SHA=d69a4fe2694f795d8e525c72b497ededc209cb7185f4c3b62d7a98dd6227b3fe

export ROOTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
cd $ROOTDIR
3
vendor/github.com/go-sql-driver/mysql/go.mod
generated
vendored
3
vendor/github.com/go-sql-driver/mysql/go.mod
generated
vendored
@ -1,3 +0,0 @@
|
||||
module github.com/go-sql-driver/mysql
|
||||
|
||||
go 1.10
|
11
vendor/github.com/pingcap/errors/go.mod
generated
vendored
11
vendor/github.com/pingcap/errors/go.mod
generated
vendored
@ -1,11 +0,0 @@
|
||||
module github.com/pingcap/errors
|
||||
|
||||
go 1.14
|
||||
|
||||
require (
|
||||
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8
|
||||
github.com/pingcap/log v0.0.0-20200511115504-543df19646ad
|
||||
github.com/pkg/errors v0.9.1
|
||||
go.uber.org/atomic v1.6.0
|
||||
go.uber.org/zap v1.15.0
|
||||
)
|
66
vendor/github.com/pingcap/errors/go.sum
generated
vendored
66
vendor/github.com/pingcap/errors/go.sum
generated
vendored
@ -1,66 +0,0 @@
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=
|
||||
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
|
||||
github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pingcap/log v0.0.0-20200511115504-543df19646ad h1:SveG82rmu/GFxYanffxsSF503SiQV+2JLnWEiGiF+Tc=
|
||||
github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
|
||||
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
8
vendor/go.uber.org/atomic/go.mod
generated
vendored
8
vendor/go.uber.org/atomic/go.mod
generated
vendored
@ -1,8 +0,0 @@
|
||||
module go.uber.org/atomic
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/stretchr/testify v1.3.0
|
||||
)
|
||||
|
||||
go 1.13
|
9
vendor/go.uber.org/atomic/go.sum
generated
vendored
9
vendor/go.uber.org/atomic/go.sum
generated
vendored
@ -1,9 +0,0 @@
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
5
vendor/golang.org/x/term/go.mod
generated
vendored
5
vendor/golang.org/x/term/go.mod
generated
vendored
@ -1,5 +0,0 @@
|
||||
module golang.org/x/term
|
||||
|
||||
go 1.11
|
||||
|
||||
require golang.org/x/sys v0.0.0-20201119102817-f84b799fce68
|
2
vendor/golang.org/x/term/go.sum
generated
vendored
2
vendor/golang.org/x/term/go.sum
generated
vendored
@ -1,2 +0,0 @@
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
21
vendor/modules.txt
vendored
21
vendor/modules.txt
vendored
@ -2,51 +2,58 @@
|
||||
## explicit
|
||||
github.com/go-ini/ini
|
||||
# github.com/go-mysql-org/go-mysql v1.3.0
|
||||
## explicit
|
||||
## explicit; go 1.16
|
||||
github.com/go-mysql-org/go-mysql/client
|
||||
github.com/go-mysql-org/go-mysql/mysql
|
||||
github.com/go-mysql-org/go-mysql/packet
|
||||
github.com/go-mysql-org/go-mysql/replication
|
||||
github.com/go-mysql-org/go-mysql/utils
|
||||
# github.com/go-sql-driver/mysql v1.6.0
|
||||
## explicit
|
||||
## explicit; go 1.10
|
||||
github.com/go-sql-driver/mysql
|
||||
# github.com/openark/golib v0.0.0-20210531070646-355f37940af8
|
||||
## explicit
|
||||
## explicit; go 1.16
|
||||
github.com/openark/golib/log
|
||||
github.com/openark/golib/sqlutils
|
||||
github.com/openark/golib/tests
|
||||
# github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3
|
||||
## explicit; go 1.14
|
||||
github.com/pingcap/errors
|
||||
# github.com/satori/go.uuid v1.2.0
|
||||
## explicit
|
||||
github.com/satori/go.uuid
|
||||
# github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24
|
||||
## explicit
|
||||
github.com/shopspring/decimal
|
||||
# github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726
|
||||
## explicit
|
||||
github.com/siddontang/go/hack
|
||||
# github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07
|
||||
## explicit
|
||||
github.com/siddontang/go-log/log
|
||||
github.com/siddontang/go-log/loggers
|
||||
# github.com/smartystreets/goconvey v1.6.4
|
||||
## explicit
|
||||
# go.uber.org/atomic v1.7.0
|
||||
## explicit; go 1.13
|
||||
go.uber.org/atomic
|
||||
# golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
|
||||
## explicit
|
||||
## explicit; go 1.11
|
||||
golang.org/x/crypto/ssh/terminal
|
||||
# golang.org/x/net v0.0.0-20210224082022-3d97a244fca7
|
||||
## explicit
|
||||
## explicit; go 1.11
|
||||
golang.org/x/net/context
|
||||
# golang.org/x/sys v0.0.0-20201119102817-f84b799fce68
|
||||
## explicit; go 1.12
|
||||
golang.org/x/sys/internal/unsafeheader
|
||||
golang.org/x/sys/plan9
|
||||
golang.org/x/sys/unix
|
||||
golang.org/x/sys/windows
|
||||
# golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1
|
||||
## explicit; go 1.11
|
||||
golang.org/x/term
|
||||
# golang.org/x/text v0.3.6
|
||||
## explicit
|
||||
## explicit; go 1.11
|
||||
golang.org/x/text/encoding
|
||||
golang.org/x/text/encoding/charmap
|
||||
golang.org/x/text/encoding/internal
|
||||
@ -54,6 +61,6 @@ golang.org/x/text/encoding/internal/identifier
|
||||
golang.org/x/text/encoding/simplifiedchinese
|
||||
golang.org/x/text/transform
|
||||
# gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
|
||||
## explicit
|
||||
## explicit; go 1.11
|
||||
# gopkg.in/ini.v1 v1.62.0
|
||||
## explicit
|
||||
|