From 5dbd2e1c85d73654df09344732e97124def1d7fc Mon Sep 17 00:00:00 2001
From: Shlomi Noach
Date: Thu, 18 Aug 2016 13:13:51 +0200
Subject: [PATCH] clarifying meaning of sleep-ratio

---
 doc/interactive-commands.md | 2 +-
 go/cmd/gh-ost/main.go       | 2 +-
 go/logic/migrator.go        | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/doc/interactive-commands.md b/doc/interactive-commands.md
index 1d96250..0c1ac95 100644
--- a/doc/interactive-commands.md
+++ b/doc/interactive-commands.md
@@ -22,7 +22,7 @@ replication lag on to determine throttling
 - `max-load=`: modify the `max-load` config; applies on next running copy-iteration
   The `max-load` format must be: `some_status=<numeric-threshold>[,some_status=<numeric-threshold>...]`. For example: `Threads_running=50,threads_connected=1000`, and you would then write/echo `max-load=Threads_running=50,threads_connected=1000` to the socket.
 - `critical-load=`: change critical load setting (exceeding given thresholds causes panic and abort)
-- `nice-ratio=`: change _nice_ ratio: 0 for aggressive, positive integer `n`: for any unit of time spent copying rows, spend `n` units of time sleeping.
+- `nice-ratio=`: change _nice_ ratio: 0 for aggressive (not nice, not sleeping); positive float `n`: for every `1ms` spent copying rows, sleep `n*1ms` immediately after. Examples: assume a single rows-chunk copy takes `100ms` to complete. `nice-ratio=0.5` will cause `gh-ost` to sleep for `50ms` immediately following; `nice-ratio=1` will cause `gh-ost` to sleep for `100ms`, effectively doubling runtime; a value of `2` will effectively triple the runtime; etc.
 - `throttle-query`: change throttle query
 - `throttle-control-replicas`: change list of throttle-control replicas, these are replicas `gh-ost` will check
 - `throttle`: force migration suspend
diff --git a/go/cmd/gh-ost/main.go b/go/cmd/gh-ost/main.go
index def84a3..b614831 100644
--- a/go/cmd/gh-ost/main.go
+++ b/go/cmd/gh-ost/main.go
@@ -73,7 +73,7 @@ func main() {
 	chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 100-100,000)")
 	defaultRetries := flag.Int64("default-retries", 60, "Default number of retries for various operations before panicking")
 	cutOverLockTimeoutSeconds := flag.Int64("cut-over-lock-timeout-seconds", 3, "Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout)")
-	niceRatio := flag.Float64("nice-ratio", 0, "force being 'nice', imply sleep time per chunk time; range: [0.0..100.0]. Example values: 0 is aggressive. 1.5: for every ms spend in a rowcopy chunk, spend 1.5ms sleeping immediately after")
+	niceRatio := flag.Float64("nice-ratio", 0, "force being 'nice', imply sleep time per chunk time; range: [0.0..100.0]. Example values: 0 is aggressive. 1: for every 1ms spent copying rows, sleep an additional 1ms (effectively doubling runtime); 0.7: for every 10ms spent in a rowcopy chunk, spend 7ms sleeping immediately after")
 	maxLagMillis := flag.Int64("max-lag-millis", 1500, "replication lag at which to throttle operation")
 	replicationLagQuery := flag.String("replication-lag-query", "", "Query that detects replication lag in seconds. Result can be a floating point (by default gh-ost issues SHOW SLAVE STATUS and reads Seconds_behind_master). If you're using pt-heartbeat, query would be something like: SELECT ROUND(UNIX_TIMESTAMP() - MAX(UNIX_TIMESTAMP(ts))) AS delay FROM my_schema.heartbeat")
diff --git a/go/logic/migrator.go b/go/logic/migrator.go
index cc418a9..55adef9 100644
--- a/go/logic/migrator.go
+++ b/go/logic/migrator.go
@@ -664,7 +664,7 @@ func (this *Migrator) onServerCommand(command string, writer *bufio.Writer) (err
 	fmt.Fprintln(writer, `available commands:
 status                               # Print a status message
 chunk-size=<newsize>                 # Set a new chunk-size
-nice-ratio=<ratio>                   # Set a new nice-ratio, integer (0 is agrressive)
+nice-ratio=<ratio>                   # Set a new nice-ratio: immediate sleep after each row-copy operation, float (examples: 0 is aggressive, 0.7 adds 70% runtime, 1.0 doubles runtime, 2.0 triples runtime, ...)
 critical-load=<load>                 # Set a new set of max-load thresholds
 max-lag-millis=<max-lag>             # Set a new replication lag threshold
 replication-lag-query=<query>        # Set a new query that determines replication lag (no quotes)
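
For reference, below is a minimal, self-contained Go sketch of the behavior this
patch documents: after each rows-chunk copy, sleep for (time spent copying) *
(nice-ratio). This is an illustration only, not gh-ost's actual row-copy code;
copyRowsChunk and the example values are hypothetical.

package main

import (
	"fmt"
	"time"
)

// sleepForNiceRatio sleeps for chunkDuration * niceRatio.
// A ratio of 0 means no sleep (aggressive); 1.0 effectively doubles
// runtime, 2.0 triples it, and so on.
func sleepForNiceRatio(chunkDuration time.Duration, niceRatio float64) {
	if niceRatio <= 0 {
		return
	}
	time.Sleep(time.Duration(float64(chunkDuration) * niceRatio))
}

// copyRowsChunk simulates a single rows-chunk copy taking ~100ms.
func copyRowsChunk() {
	time.Sleep(100 * time.Millisecond)
}

func main() {
	niceRatio := 0.5 // per the doc example: a 100ms chunk copy is followed by a ~50ms sleep
	started := time.Now()
	copyRowsChunk()
	sleepForNiceRatio(time.Since(started), niceRatio)
	fmt.Println("chunk copied; nice-ratio sleep applied")
}

As documented in doc/interactive-commands.md, the ratio can also be changed at
runtime by writing, e.g., nice-ratio=0.5 to gh-ost's interactive socket.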