nice-ratio is now float64

Shlomi Noach 2016-07-28 14:37:17 +02:00
parent 8a8401be49
commit be8a023350
3 changed files with 30 additions and 8 deletions

go/base/context.go

@@ -63,7 +63,7 @@ type MigrationContext struct {
 	defaultNumRetries                   int64
 	ChunkSize                           int64
-	NiceRatio                           int64
+	niceRatio                           float64
 	MaxLagMillisecondsThrottleThreshold int64
 	replicationLagQuery                 string
 	throttleControlReplicaKeys          *mysql.InstanceKeyMap
@@ -382,6 +382,26 @@ func (this *MigrationContext) GetCriticalLoad() LoadMap {
 	return this.criticalLoad.Duplicate()
 }
 
+func (this *MigrationContext) GetNiceRatio() float64 {
+	this.throttleMutex.Lock()
+	defer this.throttleMutex.Unlock()
+
+	return this.niceRatio
+}
+
+func (this *MigrationContext) SetNiceRatio(newRatio float64) {
+	if newRatio < 0.0 {
+		newRatio = 0.0
+	}
+	if newRatio > 100.0 {
+		newRatio = 100.0
+	}
+	this.throttleMutex.Lock()
+	defer this.throttleMutex.Unlock()
+
+	this.niceRatio = newRatio
+}
+
 // ReadMaxLoad parses the `--max-load` flag, which is in multiple key-value format,
 // such as: 'Threads_running=100,Threads_connected=500'
 // It only applies changes in case there's no parsing error.
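
Note: Go's sync/atomic has no Load/Store for float64, which is why the new field trades the old int64's atomic access for the existing throttleMutex, with the setter clamping input to [0.0..100.0]. A minimal standalone sketch of the same pattern (type and field names here are illustrative, not gh-ost's):

	package main

	import (
		"fmt"
		"sync"
	)

	// settings mirrors the pattern above: a float64 tunable guarded by
	// a mutex, with a setter that clamps input to a sane range.
	type settings struct {
		mu    sync.Mutex
		ratio float64
	}

	func (s *settings) GetRatio() float64 {
		s.mu.Lock()
		defer s.mu.Unlock()
		return s.ratio
	}

	func (s *settings) SetRatio(r float64) {
		// Clamp to [0.0, 100.0], mirroring SetNiceRatio above.
		if r < 0.0 {
			r = 0.0
		}
		if r > 100.0 {
			r = 100.0
		}
		s.mu.Lock()
		defer s.mu.Unlock()
		s.ratio = r
	}

	func main() {
		s := &settings{}
		s.SetRatio(150.0) // clamped to 100.0
		fmt.Println(s.GetRatio())
	}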

go/cmd/gh-ost/main.go

@@ -72,7 +72,7 @@ func main() {
 	chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 100-100,000)")
 	defaultRetries := flag.Int64("default-retries", 60, "Default number of retries for various operations before panicking")
 	cutOverLockTimeoutSeconds := flag.Int64("cut-over-lock-timeout-seconds", 3, "Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout)")
-	flag.Int64Var(&migrationContext.NiceRatio, "nice-ratio", 0, "force being 'nice', imply sleep time per chunk time. Example values: 0 is aggressive. 3: for every ms spend in a rowcopy chunk, spend 3ms sleeping immediately after")
+	niceRatio := flag.Float64("nice-ratio", 0, "force being 'nice', imply sleep time per chunk time; range: [0.0..100.0]. Example values: 0 is aggressive. 1.5: for every ms spend in a rowcopy chunk, spend 1.5ms sleeping immediately after")
 	maxLagMillis := flag.Int64("max-lag-millis", 1500, "replication lag at which to throttle operation")
 	replicationLagQuery := flag.String("replication-lag-query", "", "Query that detects replication lag in seconds. Result can be a floating point (by default gh-ost issues SHOW SLAVE STATUS and reads Seconds_behind_master). If you're using pt-heartbeat, query would be something like: SELECT ROUND(UNIX_TIMESTAMP() - MAX(UNIX_TIMESTAMP(ts))) AS delay FROM my_schema.heartbeat")
@@ -169,6 +169,7 @@ func main() {
 	if migrationContext.ServeSocketFile == "" {
 		migrationContext.ServeSocketFile = fmt.Sprintf("/tmp/gh-ost.%s.%s.sock", migrationContext.DatabaseName, migrationContext.OriginalTableName)
 	}
+	migrationContext.SetNiceRatio(*niceRatio)
 	migrationContext.SetChunkSize(*chunkSize)
 	migrationContext.SetMaxLagMillisecondsThrottleThreshold(*maxLagMillis)
 	migrationContext.SetReplicationLagQuery(*replicationLagQuery)
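
The flag wiring changes to match: flag.Int64Var used to bind the value straight into the exported field, whereas flag.Float64 returns a *float64 that is routed through the clamping setter after parsing. A sketch of that indirection (a standalone program, not gh-ost's actual main):

	package main

	import (
		"flag"
		"fmt"
	)

	func main() {
		// flag.Float64 returns a pointer rather than binding a field,
		// so the parsed value can be validated before it is stored.
		niceRatio := flag.Float64("nice-ratio", 0, "sleep-per-chunk ratio; range: [0.0..100.0]")
		flag.Parse()
		fmt.Printf("nice-ratio: %.2f\n", *niceRatio) // e.g. ./prog -nice-ratio=1.5
	}

The indirection is what lets SetNiceRatio enforce the documented range no matter what was typed on the command line.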

go/logic/migrator.go

@@ -705,11 +705,11 @@ help # This message
 		}
 	case "nice-ratio":
 		{
-			if niceRatio, err := strconv.Atoi(arg); err != nil {
+			if niceRatio, err := strconv.ParseFloat(arg, 64); err != nil {
 				fmt.Fprintf(writer, "%s\n", err.Error())
 				return log.Errore(err)
 			} else {
-				atomic.StoreInt64(&this.migrationContext.NiceRatio, int64(niceRatio))
+				this.migrationContext.SetNiceRatio(niceRatio)
 				this.printStatus(ForcePrintStatusAndHint, writer)
 			}
 		}
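
On the interactive socket command, strconv.Atoi gives way to strconv.ParseFloat(arg, 64); out-of-range values can no longer be stored since the setter clamps them. A quick sketch of what ParseFloat accepts (sample inputs are illustrative):

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		// ParseFloat handles integers, decimals and scientific notation;
		// anything else returns a *strconv.NumError.
		for _, arg := range []string{"0", "1.5", "2e-1", "aggressive"} {
			if v, err := strconv.ParseFloat(arg, 64); err != nil {
				fmt.Printf("%q: %v\n", arg, err)
			} else {
				fmt.Printf("%q -> %g\n", arg, v)
			}
		}
	}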
@@ -866,12 +866,12 @@ func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) {
 	))
 	maxLoad := this.migrationContext.GetMaxLoad()
 	criticalLoad := this.migrationContext.GetCriticalLoad()
-	fmt.Fprintln(w, fmt.Sprintf("# chunk-size: %+v; max-lag-millis: %+vms; max-load: %s; critical-load: %s; nice-ratio: %d",
+	fmt.Fprintln(w, fmt.Sprintf("# chunk-size: %+v; max-lag-millis: %+vms; max-load: %s; critical-load: %s; nice-ratio: %f",
 		atomic.LoadInt64(&this.migrationContext.ChunkSize),
 		atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold),
 		maxLoad.String(),
 		criticalLoad.String(),
-		atomic.LoadInt64(&this.migrationContext.NiceRatio),
+		this.migrationContext.GetNiceRatio(),
 	))
 	if replicationLagQuery := this.migrationContext.GetReplicationLagQuery(); replicationLagQuery != "" {
 		fmt.Fprintln(w, fmt.Sprintf("# Replication lag query: %+v",
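
The format verb has to follow the type: %d applied to a float64 does not truncate, it emits a formatting error into the status line. For instance:

	package main

	import "fmt"

	func main() {
		r := 1.5
		fmt.Printf("nice-ratio: %f\n", r) // nice-ratio: 1.500000
		fmt.Printf("nice-ratio: %d\n", r) // nice-ratio: %!d(float64=1.5)
	}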
@@ -1190,9 +1190,10 @@ func (this *Migrator) executeWriteFuncs() error {
 			if err := copyRowsFunc(); err != nil {
 				return log.Errore(err)
 			}
-			if niceRatio := atomic.LoadInt64(&this.migrationContext.NiceRatio); niceRatio > 0 {
+			if niceRatio := this.migrationContext.GetNiceRatio(); niceRatio > 0 {
 				copyRowsDuration := time.Now().Sub(copyRowsStartTime)
-				sleepTime := copyRowsDuration * time.Duration(niceRatio)
+				sleepTimeNanosecondFloat64 := niceRatio * float64(copyRowsDuration.Nanoseconds())
+				sleepTime := time.Duration(time.Duration(int64(sleepTimeNanosecondFloat64)) * time.Nanosecond)
 				time.Sleep(sleepTime)
 			}
 		}
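
The payoff is the sleep computation itself: time.Duration is an int64 count of nanoseconds, so the old integer multiply becomes a scale in float64 followed by truncation, which is what makes fractional ratios like 1.5 meaningful. (Strictly speaking, the committed expression's outer time.Duration conversion and the * time.Nanosecond factor are no-ops, since time.Nanosecond == 1.) A condensed sketch of the same arithmetic:

	package main

	import (
		"fmt"
		"time"
	)

	// scaleDuration multiplies a duration by a float ratio in float64
	// space, then truncates back to time.Duration (int64 nanoseconds).
	func scaleDuration(d time.Duration, ratio float64) time.Duration {
		return time.Duration(ratio * float64(d.Nanoseconds()))
	}

	func main() {
		copyRowsDuration := 200 * time.Millisecond
		fmt.Println(scaleDuration(copyRowsDuration, 1.5)) // 300ms
	}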