Add golangci-lint CI action, fix gosimple, govet + unused lint errors (#1127)

* Add `golangci-lint`, fix `gosimple`, `govet` and `unused` linter complaints

* Go 1.16

* Update copyright dates
Tim Vaillancourt 2022-05-31 21:23:39 +02:00 committed by GitHub
parent 8f361f6445
commit ed46138c06
14 changed files with 94 additions and 77 deletions

.github/workflows/golangci-lint.yml (new file)

@@ -0,0 +1,21 @@
name: golangci-lint
on:
  push:
    branches:
      - master
  pull_request:
permissions:
  contents: read
  # Optional: allow read access to pull request. Use with `only-new-issues` option.
  # pull-requests: read
jobs:
  golangci:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: 1.16
      - uses: actions/checkout@v3
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v3

.golangci.yml (new file)

@@ -0,0 +1,12 @@
run:
  timeout: 5m
  modules-download-mode: readonly

linters:
  disable:
    - errcheck
    - staticcheck
  enable:
    - gosimple
    - govet
    - unused
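
The workflow above runs the linter on pushes to master and on pull requests; this config keeps `errcheck` and `staticcheck` disabled and enables `gosimple`, `govet` and `unused`, which is what drives the code changes below. A rough, hypothetical sketch (not code from this repository) of the patterns `gosimple` and `unused` report; the `govet` fix appears later in the binlog reader change:

```go
// Hypothetical examples of what the enabled linters flag; not taken from gh-ost.
package demo

import (
	"fmt"
	"time"
)

// unused: an unexported function with no callers is reported as dead code.
func neverCalled() {}

// gosimple: a capacity equal to the length is redundant in make().
func placeholders(n int) []string {
	values := make([]string, n) // was: make([]string, n, n)
	for i := range values {
		values[i] = "?"
	}
	return values
}

// gosimple: fmt.Sprintf("%s", d) is unnecessary when d already implements fmt.Stringer.
func pretty(d time.Duration) string {
	return d.String()
}

// gosimple: a bare "return" at the end of a function without results is redundant.
func setLevel(level int) {
	fmt.Println("log level:", level)
	// return   // <- reported as redundant
}
```

Running `golangci-lint run` from the repository root picks up this `.golangci.yml`, so local runs should report the same issues as CI.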

(next changed file)

@@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -15,7 +15,7 @@ import (
"sync/atomic"
"time"
"github.com/satori/go.uuid"
uuid "github.com/satori/go.uuid"
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"

(next changed file)

@@ -1,12 +1,11 @@
/*
Copyright 2021 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package base
import (
"fmt"
"io/ioutil"
"os"
"testing"
@@ -89,7 +88,7 @@ func TestReadConfigFile(t *testing.T) {
}
defer os.Remove(f.Name())
f.Write([]byte(fmt.Sprintf("[client]\nuser=test\npassword=123456")))
f.Write([]byte("[client]\nuser=test\npassword=123456"))
context := NewMigrationContext()
context.ConfigFile = f.Name()
if err := context.ReadConfigFile(); err != nil {
@@ -109,7 +108,7 @@ func TestReadConfigFile(t *testing.T) {
}
defer os.Remove(f.Name())
f.Write([]byte(fmt.Sprintf("[osc]\nmax_load=10")))
f.Write([]byte("[osc]\nmax_load=10"))
context := NewMigrationContext()
context.ConfigFile = f.Name()
if err := context.ReadConfigFile(); err != nil {

(next changed file)

@@ -1,3 +1,8 @@
/*
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package base
import (
@@ -12,22 +17,18 @@ func NewDefaultLogger() *simpleLogger {
func (*simpleLogger) Debug(args ...interface{}) {
log.Debug(args[0].(string), args[1:])
return
}
func (*simpleLogger) Debugf(format string, args ...interface{}) {
log.Debugf(format, args...)
return
}
func (*simpleLogger) Info(args ...interface{}) {
log.Info(args[0].(string), args[1:])
return
}
func (*simpleLogger) Infof(format string, args ...interface{}) {
log.Infof(format, args...)
return
}
func (*simpleLogger) Warning(args ...interface{}) error {
@@ -64,10 +65,8 @@ func (*simpleLogger) Fatale(err error) error {
func (*simpleLogger) SetLevel(level log.LogLevel) {
log.SetLevel(level)
return
}
func (*simpleLogger) SetPrintStackTrace(printStackTraceFlag bool) {
log.SetPrintStackTrace(printStackTraceFlag)
return
}

(next changed file)

@@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -25,9 +25,7 @@ func PrettifyDurationOutput(d time.Duration) string {
if d < time.Second {
return "0s"
}
result := fmt.Sprintf("%s", d)
result = prettifyDurationRegexp.ReplaceAllString(result, "")
return result
return prettifyDurationRegexp.ReplaceAllString(d.String(), "")
}
func FileExists(fileName string) bool {

(next changed file)

@@ -1,5 +1,5 @@
/*
Copyright 2021 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -64,7 +64,10 @@ func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordin
this.currentCoordinates = coordinates
this.migrationContext.Log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates)
// Start sync with specified binlog file and position
this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{this.currentCoordinates.LogFile, uint32(this.currentCoordinates.LogPos)})
this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{
Name: this.currentCoordinates.LogFile,
Pos: uint32(this.currentCoordinates.LogPos),
})
return err
}
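
The change above also addresses go vet's warning about unkeyed fields in composite literals of structs imported from other packages (`gomysql.Position` here). A minimal, self-contained sketch of the same idea, using a local stand-in type since vet's check only fires for imported struct types:

```go
package main

import "fmt"

// position stands in for a struct imported from another package,
// like go-mysql's Position used above.
type position struct {
	Name string
	Pos  uint32
}

func main() {
	// Unkeyed literal: relies on field order and breaks silently if it changes.
	unkeyed := position{"mysql-bin.000001", 4}

	// Keyed literal: what the diff above switches to.
	keyed := position{Name: "mysql-bin.000001", Pos: 4}

	fmt.Println(unkeyed, keyed)
}
```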

(next changed file)

@@ -1,6 +1,5 @@
/*
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -72,9 +71,7 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_TOKEN=%s", this.migrationContext.HooksHintToken))
env = append(env, fmt.Sprintf("GH_OST_DRY_RUN=%t", this.migrationContext.Noop))
for _, variable := range extraVariables {
env = append(env, variable)
}
env = append(env, extraVariables...)
return env
}
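
The loop-to-variadic change above is another `gosimple` suggestion: append a whole slice with a single `append(dst, src...)` call instead of one element per iteration. A small standalone sketch (the extra variable names are hypothetical):

```go
package main

import "fmt"

func main() {
	env := []string{"GH_OST_DRY_RUN=true"}
	extra := []string{"HOOK_VAR_1=a", "HOOK_VAR_2=b"} // hypothetical extra variables

	// Before (flagged by gosimple): appending one element per loop iteration.
	// for _, variable := range extra {
	// 	env = append(env, variable)
	// }

	// After: a single variadic append, as in applyEnvironmentVariables above.
	env = append(env, extra...)
	fmt.Println(env)
}
```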

(next changed file)

@@ -804,5 +804,4 @@ func (this *Inspector) getReplicationLag() (replicationLag time.Duration, err er
func (this *Inspector) Teardown() {
this.db.Close()
this.informationSchemaDb.Close()
return
}

(next changed file)

@@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -176,16 +176,6 @@ func (this *Migrator) retryOperationWithExponentialBackoff(operation func() erro
return err
}
// executeAndThrottleOnError executes a given function. If it errors, it
// throttles.
func (this *Migrator) executeAndThrottleOnError(operation func() error) (err error) {
if err := operation(); err != nil {
this.throttler.throttle(nil)
return err
}
return nil
}
// consumeRowCopyComplete blocks on the rowCopyComplete channel once, and then
// consumes and drops any further incoming events that may be left hanging.
func (this *Migrator) consumeRowCopyComplete() {
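
The `executeAndThrottleOnError` helper removed above had no remaining callers, which is exactly what the `unused` linter reports; the commit deletes the dead code outright. If a currently-unreferenced helper had to stay, the alternative would be golangci-lint's `//nolint` directive, the same mechanism used for the ipv6 regexp later in this commit. A hypothetical sketch:

```go
// Hypothetical sketch, not part of this commit.
package demo

// keptForLaterUse has no callers yet; the nolint directive tells golangci-lint
// to skip the unused check for it, instead of deleting it as this commit does.
//
//nolint:unused
func keptForLaterUse(operation func() error) error {
	return operation()
}
```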
@@ -823,57 +813,57 @@ func (this *Migrator) initiateStatus() error {
// migration, and as response to the "status" interactive command.
func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) {
w := io.MultiWriter(writers...)
fmt.Fprintln(w, fmt.Sprintf("# Migrating %s.%s; Ghost table is %s.%s",
fmt.Fprintf(w, "# Migrating %s.%s; Ghost table is %s.%s\n",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
))
fmt.Fprintln(w, fmt.Sprintf("# Migrating %+v; inspecting %+v; executing on %+v",
)
fmt.Fprintf(w, "# Migrating %+v; inspecting %+v; executing on %+v\n",
*this.applier.connectionConfig.ImpliedKey,
*this.inspector.connectionConfig.ImpliedKey,
this.migrationContext.Hostname,
))
fmt.Fprintln(w, fmt.Sprintf("# Migration started at %+v",
)
fmt.Fprintf(w, "# Migration started at %+v\n",
this.migrationContext.StartTime.Format(time.RubyDate),
))
)
maxLoad := this.migrationContext.GetMaxLoad()
criticalLoad := this.migrationContext.GetCriticalLoad()
fmt.Fprintln(w, fmt.Sprintf("# chunk-size: %+v; max-lag-millis: %+vms; dml-batch-size: %+v; max-load: %s; critical-load: %s; nice-ratio: %f",
fmt.Fprintf(w, "# chunk-size: %+v; max-lag-millis: %+vms; dml-batch-size: %+v; max-load: %s; critical-load: %s; nice-ratio: %f\n",
atomic.LoadInt64(&this.migrationContext.ChunkSize),
atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold),
atomic.LoadInt64(&this.migrationContext.DMLBatchSize),
maxLoad.String(),
criticalLoad.String(),
this.migrationContext.GetNiceRatio(),
))
)
if this.migrationContext.ThrottleFlagFile != "" {
setIndicator := ""
if base.FileExists(this.migrationContext.ThrottleFlagFile) {
setIndicator = "[set]"
}
fmt.Fprintln(w, fmt.Sprintf("# throttle-flag-file: %+v %+v",
fmt.Fprintf(w, "# throttle-flag-file: %+v %+v\n",
this.migrationContext.ThrottleFlagFile, setIndicator,
))
)
}
if this.migrationContext.ThrottleAdditionalFlagFile != "" {
setIndicator := ""
if base.FileExists(this.migrationContext.ThrottleAdditionalFlagFile) {
setIndicator = "[set]"
}
fmt.Fprintln(w, fmt.Sprintf("# throttle-additional-flag-file: %+v %+v",
fmt.Fprintf(w, "# throttle-additional-flag-file: %+v %+v\n",
this.migrationContext.ThrottleAdditionalFlagFile, setIndicator,
))
)
}
if throttleQuery := this.migrationContext.GetThrottleQuery(); throttleQuery != "" {
fmt.Fprintln(w, fmt.Sprintf("# throttle-query: %+v",
fmt.Fprintf(w, "# throttle-query: %+v\n",
throttleQuery,
))
)
}
if throttleControlReplicaKeys := this.migrationContext.GetThrottleControlReplicaKeys(); throttleControlReplicaKeys.Len() > 0 {
fmt.Fprintln(w, fmt.Sprintf("# throttle-control-replicas count: %+v",
fmt.Fprintf(w, "# throttle-control-replicas count: %+v\n",
throttleControlReplicaKeys.Len(),
))
)
}
if this.migrationContext.PostponeCutOverFlagFile != "" {
@@ -881,20 +871,20 @@ func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) {
if base.FileExists(this.migrationContext.PostponeCutOverFlagFile) {
setIndicator = "[set]"
}
fmt.Fprintln(w, fmt.Sprintf("# postpone-cut-over-flag-file: %+v %+v",
fmt.Fprintf(w, "# postpone-cut-over-flag-file: %+v %+v\n",
this.migrationContext.PostponeCutOverFlagFile, setIndicator,
))
)
}
if this.migrationContext.PanicFlagFile != "" {
fmt.Fprintln(w, fmt.Sprintf("# panic-flag-file: %+v",
fmt.Fprintf(w, "# panic-flag-file: %+v\n",
this.migrationContext.PanicFlagFile,
))
)
}
fmt.Fprintln(w, fmt.Sprintf("# Serving on unix socket: %+v",
fmt.Fprintf(w, "# Serving on unix socket: %+v\n",
this.migrationContext.ServeSocketFile,
))
)
if this.migrationContext.ServeTCPPort != 0 {
fmt.Fprintln(w, fmt.Sprintf("# Serving on TCP port: %+v", this.migrationContext.ServeTCPPort))
fmt.Fprintf(w, "# Serving on TCP port: %+v\n", this.migrationContext.ServeTCPPort)
}
}
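
Every status-hint change in this hunk is the same mechanical `gosimple` fix: replace `fmt.Fprintln(w, fmt.Sprintf(...))` with a single `fmt.Fprintf(w, ...)` carrying an explicit trailing newline. A minimal standalone version of the pattern (the table name is hypothetical):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	table := "mydb.mytable" // hypothetical table name

	// Before (flagged by gosimple): building the string with Sprintf only to hand it to Fprintln.
	// fmt.Fprintln(os.Stdout, fmt.Sprintf("# Migrating %s", table))

	// After: one Fprintf call with an explicit newline, as in printMigrationStatusHint above.
	fmt.Fprintf(os.Stdout, "# Migrating %s\n", table)
}
```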
@@ -1195,7 +1185,6 @@ func (this *Migrator) iterateChunks() error {
// Enqueue copy operation; to be executed by executeWriteFuncs()
this.copyRowsQueue <- copyRowsFunc
}
return nil
}
func (this *Migrator) onApplyEventStruct(eventStruct *applyEventStruct) error {
@@ -1301,7 +1290,6 @@ func (this *Migrator) executeWriteFuncs() error {
}
}
}
return nil
}
// finalCleanup takes actions at very end of migration, dropping tables etc.

(next changed file)

@@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -220,5 +220,4 @@ func (this *EventsStreamer) Close() (err error) {
func (this *EventsStreamer) Teardown() {
this.db.Close()
return
}

(next changed file)

@@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/

(next changed file)

@@ -1,5 +1,6 @@
/*
Copyright 2015 Shlomi Noach, courtesy Booking.com
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -12,15 +13,16 @@ import (
"strings"
)
const (
DefaultInstancePort = 3306
)
const DefaultInstancePort = 3306
var (
ipv4HostPortRegexp = regexp.MustCompile("^([^:]+):([0-9]+)$")
ipv4HostRegexp = regexp.MustCompile("^([^:]+)$")
ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") // e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308
ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$") // e.g. 2001:db8:1f70::999:de8:7648:6e8
// e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308
ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") //nolint:gosimple
// e.g. 2001:db8:1f70::999:de8:7648:6e8
ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$")
)
// InstanceKey is an instance indicator, identified by hostname and port

(next changed file)

@@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -33,7 +33,7 @@ func EscapeName(name string) string {
}
func buildColumnsPreparedValues(columns *ColumnList) []string {
values := make([]string, columns.Len(), columns.Len())
values := make([]string, columns.Len())
for i, column := range columns.Columns() {
var token string
if column.timezoneConversion != nil {
@@ -51,7 +51,7 @@ func buildColumnsPreparedValues(columns *ColumnList) []string {
}
func buildPreparedValues(length int) []string {
values := make([]string, length, length)
values := make([]string, length)
for i := 0; i < length; i++ {
values[i] = "?"
}
@@ -59,7 +59,7 @@ func buildPreparedValues(length int) []string {
}
func duplicateNames(names []string) []string {
duplicate := make([]string, len(names), len(names))
duplicate := make([]string, len(names))
copy(duplicate, names)
return duplicate
}
@@ -261,8 +261,8 @@ func BuildUniqueKeyRangeEndPreparedQueryViaOffset(databaseName, tableName string
explodedArgs = append(explodedArgs, rangeExplodedArgs...)
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames))
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames))
for i, column := range uniqueKeyColumns.Columns() {
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
if column.Type == EnumColumnType {
@@ -316,8 +316,8 @@ func BuildUniqueKeyRangeEndPreparedQueryViaTemptable(databaseName, tableName str
explodedArgs = append(explodedArgs, rangeExplodedArgs...)
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames))
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames))
for i, column := range uniqueKeyColumns.Columns() {
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
if column.Type == EnumColumnType {
@@ -368,7 +368,7 @@ func buildUniqueKeyMinMaxValuesPreparedQuery(databaseName, tableName string, uni
tableName = EscapeName(tableName)
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames))
for i, column := range uniqueKeyColumns.Columns() {
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
if column.Type == EnumColumnType {