Merge branch 'master' into enum-to-varchar

commit 91dec1a0f2
Shlomi Noach, 2021-05-04 18:45:20 +03:00 (committed by GitHub)
7 changed files with 54 additions and 8 deletions

View File

@@ -1 +1 @@
-1.1.0
+1.1.1

View File

@@ -181,6 +181,9 @@ Optionally involve the process ID, for example: `--replica-server-id=$((10000000
 It's on you to choose a number that does not collide with another `gh-ost` or another running replica.
 See also: [`concurrent-migrations`](cheatsheet.md#concurrent-migrations) on the cheatsheet.
+### serve-socket-file
+Defaults to an auto-determined and advertised upon startup file. Defines Unix socket file to serve on.
 ### skip-foreign-key-checks
 By default `gh-ost` verifies no foreign keys exist on the migrated table. On servers with large number of tables this check can take a long time. If you're absolutely certain no foreign keys exist (table does not reference other table nor is referenced by other tables) and wish to save the check time, provide with `--skip-foreign-key-checks`.

View File

@@ -18,6 +18,8 @@ Both interfaces may serve at the same time. Both respond to simple text command,
 - `status`: returns a detailed status summary of migration progress and configuration
 - `sup`: returns a brief status summary of migration progress
 - `coordinates`: returns recent (though not exactly up to date) binary log coordinates of the inspected server
+- `applier`: returns the hostname of the applier
+- `inspector`: returns the hostname of the inspector
 - `chunk-size=<newsize>`: modify the `chunk-size`; applies on next running copy-iteration
 - `dml-batch-size=<newsize>`: modify the `dml-batch-size`; applies on next applying of binary log events
 - `max-lag-millis=<max-lag>`: modify the maximum replication lag threshold (milliseconds, minimum value is `100`, i.e. `0.1` second)
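As an illustration (not part of this commit), here is a minimal Go sketch of driving the new `applier` command over the Unix socket interface described above. The socket path is an assumed placeholder; gh-ost advertises the actual `--serve-socket-file` path on startup, and the reply mirrors the `Host: ..., Version: ...` line produced by the server change further down.

```go
package main

import (
    "fmt"
    "io"
    "net"
)

func main() {
    // Placeholder path: use the socket file your gh-ost run advertises.
    conn, err := net.Dial("unix", "/tmp/gh-ost.mydb.mytable.sock")
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    // Interactive commands are plain text, one per connection, newline-terminated.
    fmt.Fprintln(conn, "applier")

    // Read the reply until the server closes the connection,
    // e.g. "Host: <applier host>, Version: <mysql version>".
    reply, err := io.ReadAll(conn)
    if err != nil {
        panic(err)
    }
    fmt.Print(string(reply))
}
```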

View File

@@ -567,6 +567,7 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
     err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
         columnName := m.GetString("COLUMN_NAME")
         columnType := m.GetString("COLUMN_TYPE")
+        columnOctetLength := m.GetUint("CHARACTER_OCTET_LENGTH")
         for _, columnsList := range columnsLists {
             column := columnsList.GetColumn(columnName)
             if column == nil {
@@ -595,6 +596,10 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
                 column.Type = sql.EnumColumnType
                 column.EnumValues = sql.ParseEnumValues(m.GetString("COLUMN_TYPE"))
             }
+            if strings.HasPrefix(columnType, "binary") {
+                column.Type = sql.BinaryColumnType
+                column.BinaryOctetLength = columnOctetLength
+            }
             if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
                 column.Charset = charset
             }
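The hunk above pulls `CHARACTER_OCTET_LENGTH` out of `information_schema` so that columns whose `COLUMN_TYPE` starts with `binary` carry their declared byte width. As a standalone illustration (not part of this commit, and assuming a reachable MySQL server, the `github.com/go-sql-driver/mysql` driver, and placeholder schema/table/column names), the same value can be fetched like this:

```go
package main

import (
    "database/sql"
    "fmt"

    _ "github.com/go-sql-driver/mysql" // driver choice is an assumption for this sketch
)

func main() {
    // DSN is a placeholder.
    db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/information_schema")
    if err != nil {
        panic(err)
    }
    defer db.Close()

    var octetLength uint
    err = db.QueryRow(`
        SELECT CHARACTER_OCTET_LENGTH
        FROM information_schema.columns
        WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ? AND COLUMN_NAME = ?`,
        "mydb", "mytable", "token", // placeholder identifiers
    ).Scan(&octetLength)
    if err != nil {
        panic(err)
    }

    // For a BINARY(16) column this prints 16: the width that convertArg
    // pads unique-key values back up to (see the final hunk below).
    fmt.Println("CHARACTER_OCTET_LENGTH:", octetLength)
}
```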

View File

@@ -1,5 +1,5 @@
 /*
-Copyright 2016 GitHub Inc.
+Copyright 2021 GitHub Inc.
 See https://github.com/github/gh-ost/blob/master/LICENSE
 */
@@ -146,7 +146,9 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
             fmt.Fprint(writer, `available commands:
 status # Print a detailed status message
 sup # Print a short status message
 coordinates # Print the currently inspected coordinates
+applier # Print the hostname of the applier
+inspector # Print the hostname of the inspector
 chunk-size=<newsize> # Set a new chunk-size
 dml-batch-size=<newsize> # Set a new dml-batch-size
 nice-ratio=<ratio> # Set a new nice-ratio, immediate sleep after each row-copy operation, float (examples: 0 is aggressive, 0.7 adds 70% runtime, 1.0 doubles runtime, 2.0 triples runtime, ...)
@@ -177,6 +179,22 @@ help # This message
             }
             return NoPrintStatusRule, fmt.Errorf("coordinates are read-only")
         }
+    case "applier":
+        if this.migrationContext.ApplierConnectionConfig != nil && this.migrationContext.ApplierConnectionConfig.ImpliedKey != nil {
+            fmt.Fprintf(writer, "Host: %s, Version: %s\n",
+                this.migrationContext.ApplierConnectionConfig.ImpliedKey.String(),
+                this.migrationContext.ApplierMySQLVersion,
+            )
+        }
+        return NoPrintStatusRule, nil
+    case "inspector":
+        if this.migrationContext.InspectorConnectionConfig != nil && this.migrationContext.InspectorConnectionConfig.ImpliedKey != nil {
+            fmt.Fprintf(writer, "Host: %s, Version: %s\n",
+                this.migrationContext.InspectorConnectionConfig.ImpliedKey.String(),
+                this.migrationContext.InspectorMySQLVersion,
+            )
+        }
+        return NoPrintStatusRule, nil
     case "chunk-size":
         {
             if argIsQuestion {

View File

@@ -400,7 +400,7 @@ func BuildDMLDeleteQuery(databaseName, tableName string, tableColumns, uniqueKey
     }
     for _, column := range uniqueKeyColumns.Columns() {
         tableOrdinal := tableColumns.Ordinals[column.Name]
-        arg := column.convertArg(args[tableOrdinal])
+        arg := column.convertArg(args[tableOrdinal], true)
         uniqueKeyArgs = append(uniqueKeyArgs, arg)
     }
     databaseName = EscapeName(databaseName)
@@ -437,7 +437,7 @@ func BuildDMLInsertQuery(databaseName, tableName string, tableColumns, sharedCol
     for _, column := range sharedColumns.Columns() {
         tableOrdinal := tableColumns.Ordinals[column.Name]
-        arg := column.convertArg(args[tableOrdinal])
+        arg := column.convertArg(args[tableOrdinal], false)
         sharedArgs = append(sharedArgs, arg)
     }
@@ -485,13 +485,13 @@ func BuildDMLUpdateQuery(databaseName, tableName string, tableColumns, sharedCol
     for _, column := range sharedColumns.Columns() {
         tableOrdinal := tableColumns.Ordinals[column.Name]
-        arg := column.convertArg(valueArgs[tableOrdinal])
+        arg := column.convertArg(valueArgs[tableOrdinal], false)
         sharedArgs = append(sharedArgs, arg)
     }
     for _, column := range uniqueKeyColumns.Columns() {
         tableOrdinal := tableColumns.Ordinals[column.Name]
-        arg := column.convertArg(whereArgs[tableOrdinal])
+        arg := column.convertArg(whereArgs[tableOrdinal], true)
         uniqueKeyArgs = append(uniqueKeyArgs, arg)
     }

View File

@@ -6,6 +6,7 @@
 package sql

 import (
+    "bytes"
     "fmt"
     "reflect"
     "strconv"
@@ -22,6 +23,7 @@ const (
     MediumIntColumnType
     JSONColumnType
     FloatColumnType
+    BinaryColumnType
 )

 const maxMediumintUnsigned int32 = 16777215
@@ -38,14 +40,30 @@ type Column struct {
     EnumValues string
     timezoneConversion *TimezoneConversion
     enumToTextConversion bool
+    // add Octet length for binary type, fix bytes with suffix "00" get clipped in mysql binlog.
+    // https://github.com/github/gh-ost/issues/909
+    BinaryOctetLength uint
 }

-func (this *Column) convertArg(arg interface{}) interface{} {
+func (this *Column) convertArg(arg interface{}, isUniqueKeyColumn bool) interface{} {
     if s, ok := arg.(string); ok {
         // string, charset conversion
         if encoding, ok := charsetEncodingMap[this.Charset]; ok {
             arg, _ = encoding.NewDecoder().String(s)
         }
+        if this.Type == BinaryColumnType && isUniqueKeyColumn {
+            arg2Bytes := []byte(arg.(string))
+            size := len(arg2Bytes)
+            if uint(size) < this.BinaryOctetLength {
+                buf := bytes.NewBuffer(arg2Bytes)
+                for i := uint(0); i < (this.BinaryOctetLength - uint(size)); i++ {
+                    buf.Write([]byte{0})
+                }
+                arg = buf.String()
+            }
+        }
         return arg
     }
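Putting the pieces together: the struct comment added above notes that trailing `0x00` bytes of fixed-width `BINARY` values can get clipped on their way through the binlog (issue #909), so `convertArg` now right-pads such values back to `BinaryOctetLength`, but only when `isUniqueKeyColumn` is true, i.e. when the value feeds the WHERE clause of a generated DELETE/UPDATE (the builder hunks above pass `true` only for unique-key arguments). Below is a self-contained sketch of the same padding idea; the helper name is illustrative and not part of gh-ost.

```go
package main

import (
    "bytes"
    "fmt"
)

// padBinaryValue right-pads a value read from the binlog with 0x00 bytes up to
// the column's declared octet length (e.g. 16 for BINARY(16)), mirroring the
// convertArg change above.
func padBinaryValue(value string, octetLength uint) string {
    if uint(len(value)) >= octetLength {
        return value
    }
    buf := bytes.NewBufferString(value)
    buf.Write(bytes.Repeat([]byte{0}, int(octetLength)-len(value)))
    return buf.String()
}

func main() {
    // A BINARY(4) value "a\x00\x00\x00" may arrive from the binlog as just "a";
    // without padding, a WHERE lookup on that unique key would miss the row.
    truncated := "a"
    padded := padBinaryValue(truncated, 4)
    fmt.Printf("%d -> %d bytes: %q\n", len(truncated), len(padded), padded)
    // Output: 1 -> 4 bytes: "a\x00\x00\x00"
}
```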