Compare commits


352 Commits

Author SHA1 Message Date
Shao Hou Kun
0a033c76c1
feat(feat-set-binlogsyncer-maxreconnectattempts): feat-set-binlogsyncer-maxreconnectattempts (#1279)
Co-authored-by: shaohoukun <shaohoukun@meituan.com>
2023-05-25 22:45:53 +02:00
Rashiq
b7db8c6ca7
Merge pull request #1180 from lmtwga/master 2023-02-06 14:42:04 +01:00
Tim Vaillancourt
bea5323816
Merge branch 'master' into master 2023-01-28 00:19:37 +01:00
dm-2
7320fda848
Merge pull request #1190 from wangzihuacool/add-rocksdb-as-transactional-engine
add rocksdb as transactional engine
2022-11-29 11:38:54 +00:00
wangzihuacool
20af3af283
Apply suggestions from code review
Co-authored-by: Tim Vaillancourt <tim@timvaillancourt.com>
2022-11-29 10:30:01 +08:00
wangzihuacool
9f3cf74444
Apply suggestions from code review
Co-authored-by: dm-2 <45519614+dm-2@users.noreply.github.com>
2022-11-29 10:16:58 +08:00
lukelewang
6e2be1d44f add description and optimize tests 2022-11-27 13:54:01 +08:00
lukelewang
ed71099ce6 add percona to versions in workflows 2022-11-26 13:08:19 +08:00
lukelewang
af1e0d647f add support for rocksdb 2022-11-26 01:43:11 +08:00
lukelewang
da3514253f add support for rocksdb 2022-11-26 01:38:12 +08:00
lukelewang
094909f267 Merge branch 'add-rocksdb-as-transactional-engine' of https://github.com/wangzihuacool/gh-ost into add-rocksdb-as-transactional-engine 2022-11-25 18:52:54 +08:00
lukelewang
9c7857bd46 SetConnectionConfig 2022-11-25 18:48:48 +08:00
wangzihuacool
ccf5b2e01d gofmt 2022-11-25 09:39:57 +00:00
lukelewang
3ee667a40e Modify tests to support rocksdb tests 2022-11-25 17:18:32 +08:00
lukelewang
f053ccd9a6 support rocksdb as transactional engine 2022-11-23 17:32:53 +08:00
wangzihuacool
9c0babdc07
Merge branch 'github:master' into add-rocksdb-as-transactional-engine 2022-11-23 11:01:33 +08:00
dm-2
1a131d4398
Merge pull request #1202 from github/ensure-test-errors-detected
Fix migration test passing when `convert-utf8mb4` test setup was partially failing
2022-11-18 16:08:41 +00:00
dm-2
45cf63c839
Merge branch 'master' into ensure-test-errors-detected 2022-11-18 15:58:20 +00:00
dm-2
8abd584826 fix broken test by removing invalid insert statement 2022-11-18 15:49:04 +00:00
dm-2
756f3d30e8 add error detection for test setup, sort tests to make it easier to track progress 2022-11-18 15:44:14 +00:00
dm-2
7cebc16c6f debugging 2022-11-18 15:32:34 +00:00
dm-2
cd65c7e9ad add extra debugging output 2022-11-18 15:11:44 +00:00
dm-2
3613f22ea7
Merge pull request #1192 from hasanMshawrab/patch-1
Fix: Change table name
2022-11-16 16:31:31 +00:00
dm-2
a673363c16
Merge branch 'master' into patch-1 2022-11-16 16:22:33 +00:00
dm-2
a523042703
Merge pull request #1201 from morgo/attempt-instant-ddl
Attempt INSTANT DDL first
2022-11-16 16:22:28 +00:00
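The change above is summarized in one line, so here is a hedged sketch of what "attempt INSTANT DDL first" means in practice (function name hypothetical; MySQL 8.0's `ALGORITHM=INSTANT` clause is real): try the instant in-place path, and fall back to the normal copy-based migration if the server rejects it.

```go
package sketch

import "database/sql"

// attemptInstantDDL is a hypothetical illustration: append ALGORITHM=INSTANT
// to the user's ALTER statement and try it. If MySQL cannot apply the change
// instantly it returns an error, and the caller falls back to the usual
// copy-based migration.
func attemptInstantDDL(db *sql.DB, alterTableStatement string) error {
	_, err := db.Exec(alterTableStatement + ", ALGORITHM=INSTANT")
	return err
}
```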
Morgan Tocker
59c1a24774 remove useless func per review 2022-11-15 15:40:21 -07:00
Morgan Tocker
0310f9f27b Merge remote-tracking branch 'origin/attempt-instant-ddl' into attempt-instant-ddl
* origin/attempt-instant-ddl:
  Update go/logic/migrator.go
2022-11-15 12:06:38 -07:00
Morgan Tocker
5283b46ec2 Make it clear in docs it is disabled by default but safe. 2022-11-15 12:06:11 -07:00
Morgan Tocker
74fb8e80b2
Update go/logic/migrator.go
Co-authored-by: dm-2 <45519614+dm-2@users.noreply.github.com>
2022-11-15 12:05:27 -07:00
dm-2
512751968f more testing 2022-11-15 16:01:54 +00:00
dm-2
1b22f784e3 temp commit to investigate datetime-with-zero test failure 2022-11-15 15:06:19 +00:00
Morgan Tocker
3f3a10a213 Address PR feedback 2022-11-14 14:48:49 -07:00
Tim Vaillancourt
cfd83728ed
Merge branch 'master' into attempt-instant-ddl 2022-11-14 22:09:55 +01:00
Morgan Tocker
b06c1cd498 Improve docs 2022-11-14 12:35:49 -07:00
Morgan Tocker
75a346be93 Add tests, incorporate feedback 2022-11-14 11:47:30 -07:00
dm-2
fb82caf9e7
Merge pull request #1194 from SocalNick/nc--send-status-to-migration-context-logger-simple-alternative
Print status to migrationContext logger
2022-11-14 15:26:27 +00:00
dm-2
ca12039646
Merge branch 'master' into nc--send-status-to-migration-context-logger-simple-alternative 2022-11-14 15:12:56 +00:00
dm-2
96d708c956
Merge pull request #1200 from github/fix-ci
Change CI tests to use `ubuntu-20.04` because `ubuntu-latest` doesn't support MySQL 5.7
2022-11-14 15:12:24 +00:00
dm-2
9cbb42b924 fix CI tests to ubuntu-20.04 because ubuntu-22.04 (current -latest) doesn't support MySQL 5.7 2022-11-14 15:00:50 +00:00
Morgan Tocker
05f32ebf29 minor cleanup 2022-11-10 09:30:13 -07:00
Morgan Tocker
1be6a4c082 Attempt instant DDL if supported 2022-11-09 20:11:49 -07:00
Nicholas Calugar
515aa72d3d Print status to migration context logger 2022-10-27 11:42:12 -07:00
Hasan Mshawrab
2793e2b6b3
Fix: Change table name
table name is 'tbl' not 'tble'
2022-10-26 10:28:27 +03:00
dm-2
b5387331f8
Merge branch 'master' into master 2022-10-21 17:02:17 +01:00
Tim Vaillancourt
9b3fa793ac
Enable more golangci-lint linters (#1181) 2022-10-21 18:02:06 +02:00
dm-2
659e4401ba
Merge branch 'master' into master 2022-10-21 17:00:05 +01:00
Tim Vaillancourt
df4cf7b38e
Add basic test for hooks (#1179) 2022-10-21 17:47:39 +02:00
wangzihuacool
3f43400e3a add-rocksdb-as-transactional-engine 2022-10-20 13:34:31 +00:00
lmtwga
321e5847ba fix: because lock is not release, drop cutover sentry table is hanged 2022-09-16 17:49:09 +08:00
Tim Vaillancourt
1df37c207f
Add basic tests to migrator (#1168) 2022-09-06 15:48:48 +02:00
Tim Vaillancourt
05c7ed5f8f
Add basic test for inspector (#1166)
* Add basic test for inspector

* Add header

* Fix return
2022-09-06 15:23:18 +02:00
Tim Vaillancourt
1fa3d4f75a
Add basic tests for applier (#1165)
* Add basic tests for applier

* Add header
2022-09-06 15:06:14 +02:00
Tim Vaillancourt
3c946e97d7
Improve applier .ReadMigrationRangeValues() func accuracy (#1164)
* Use a transaction in applier `ReadMigrationRangeValues` func

* Private func names
2022-09-06 14:07:47 +02:00
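A minimal sketch of the accuracy fix in #1164, assuming hypothetical table and column names (the real applier code differs): reading both ends of the migration range inside a single transaction makes them come from one consistent snapshot.

```go
package sketch

import "database/sql"

// readMigrationRange is a hypothetical illustration of the change: both range
// values are read inside one transaction, so min and max reflect a single
// consistent snapshot rather than two independent reads.
func readMigrationRange(db *sql.DB) (minID, maxID int64, err error) {
	tx, err := db.Begin()
	if err != nil {
		return 0, 0, err
	}
	defer tx.Rollback() // harmless no-op after a successful Commit

	if err := tx.QueryRow("select min(id) from tbl").Scan(&minID); err != nil {
		return 0, 0, err
	}
	if err := tx.QueryRow("select max(id) from tbl").Scan(&maxID); err != nil {
		return 0, 0, err
	}
	return minID, maxID, tx.Commit()
}
```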
dm-2
1a473a4f66
Merge pull request #1158 from wangzihuacool/fix-charset
Fix: Convert column value in binlog events to bytes instead of utf8 encoded unicode
2022-09-06 11:56:56 +01:00
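A hedged sketch of the conversion described in #1158, with hypothetical function and flag names: character data from a binlog event is passed through as raw bytes unless an explicit charset conversion was requested, so it is never silently re-encoded as UTF-8.

```go
package sketch

// normalizeColumnValue is a hypothetical illustration of the fix: character
// column values are handed to the applier as raw bytes unless a charset
// conversion was explicitly requested, so non-UTF-8 data is not mangled.
func normalizeColumnValue(value interface{}, isCharacterColumn, hasCharsetConversion bool) interface{} {
	if s, ok := value.(string); ok && isCharacterColumn && !hasCharsetConversion {
		return []byte(s) // preserve the original bytes untouched
	}
	return value
}
```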
dm-2
9c2c7bad1b
Merge branch 'master' into fix-charset 2022-09-06 11:44:07 +01:00
Abirdcfly
ca8aef5d6d
chore: remove duplicate word in comments (#1175)
Signed-off-by: Abirdcfly <fp544037857@gmail.com>

Signed-off-by: Abirdcfly <fp544037857@gmail.com>
2022-08-30 23:49:13 +02:00
wangzihuacool
e9cb4a233f convert to bytes if character string without charsetConversion. 2022-08-27 01:55:36 +00:00
wangzihuacool
a17389e97a Some fix to unit tests. 2022-08-20 01:06:00 +08:00
Tim Vaillancourt
2ad614ab1e
Merge branch 'master' into fix-charset 2022-08-18 22:36:24 +02:00
Tim Vaillancourt
3f44e04365
Set a transaction isolation level for MySQL connections (#1156)
* Set transaction isolation in connections

* Revert load_map.go change

* Var rename

* Restore comment
2022-08-12 18:52:22 +02:00
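A rough sketch of the idea in #1156 (the isolation level shown is an assumption, not taken from the PR): pin a known transaction isolation level on connections so behavior does not depend on the server default.

```go
package sketch

import "database/sql"

// setIsolationLevel is a hypothetical illustration: issue a SET SESSION
// statement when a connection is configured, so every pooled connection
// runs at the same, known isolation level. The level shown is an assumption.
func setIsolationLevel(db *sql.DB) error {
	_, err := db.Exec("set session transaction isolation level repeatable read")
	return err
}
```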
Tim Vaillancourt
5de91c9f90
Add missing doc from PR #1131 (#1162) 2022-08-11 10:32:00 +02:00
wangzihuacool
c93060e743 restore connection charset to utf8mb4 2022-08-11 13:14:38 +08:00
Tim Vaillancourt
bee009b9e3
Allow zero in dates (#1161)
* Merge pull request #31 from openark/zero-date

Support zero date and zero in date, via dedicated command line flag

* Merge pull request #32 from openark/existing-date-with-zero

Support tables with existing zero dates

* Remove un-needed ignore_versions file

* Fix new lint errors from golang-ci update

Co-authored-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>
2022-08-10 21:50:59 +02:00
wangzihuacool
8a193e0024
delete junk files 2022-08-03 14:18:50 +08:00
wangzihuacool
113094bbd2 Convert character to bytes and insert into table using latin1 2022-08-03 11:43:18 +08:00
dm-2
7d8e4e8d3d
Merge pull request #1154 from github/fix-build-script
Only build RPM and deb packages for amd64
2022-07-20 15:31:34 +01:00
dm-2
2d3e8d998e Only build RPM and deb packages for amd64 2022-07-20 14:47:51 +01:00
Tim Vaillancourt
ffe54f48ed
Enable more golang-ci linters (#1149) 2022-07-18 18:37:18 +02:00
Tim Vaillancourt
84dca03311
Add script and docs for linter (#1151) 2022-07-18 18:21:32 +02:00
Tim Vaillancourt
f527d63c86
Move .Kill() func from inspector to go/mysql (#1148) 2022-07-11 11:02:04 +02:00
dm-2
ae02941f67
Merge pull request #1147 from timvaillancourt/rm-go-os-process-go
Remove unused `go/os/process.go`
2022-07-08 11:22:09 +01:00
dm-2
ed8c05c210
Merge branch 'master' into rm-go-os-process-go 2022-07-08 10:50:05 +01:00
dm-2
4648da15ec
Merge pull request #1134 from github/changelog-migrated-state
Add `Migrated` changelog event state
2022-07-08 10:48:40 +01:00
Tim Vaillancourt
582d4cb115
Merge branch 'master' into rm-go-os-process-go 2022-07-08 00:21:25 +02:00
Tim Vaillancourt
30c868816c Remove unused go/os/process.go 2022-07-08 00:19:54 +02:00
dm-2
607ea8d3a9
Merge branch 'master' into changelog-migrated-state 2022-07-07 17:01:16 +01:00
dm-2
3da9a9c015
Merge pull request #1146 from github/switch-to-term-package
Switch to golang.org/x/term; go mod vendor
2022-07-07 17:00:22 +01:00
Tim Vaillancourt
54db4174b3 Consolidate no-op states 2022-07-07 17:40:01 +02:00
Tim Vaillancourt
246800e053 Fix lint 2022-07-07 17:38:41 +02:00
dm-2
9c611bda50 switch to golang.org/x/term from deprecated golang.org/x/crypto/ssh/terminal module; go mod vendor 2022-07-07 16:38:22 +01:00
Tim Vaillancourt
d6d1d3b436
Merge branch 'master' into changelog-migrated-state 2022-07-07 17:35:59 +02:00
dm-2
eee8ffe7dc
Merge pull request #1145 from timvaillancourt/golang-ci-staticcheck-linter
`golang-ci`: enable/fix `staticcheck` lint warnings
2022-07-07 16:22:49 +01:00
dm-2
f9fa4231d7
Merge branch 'master' into golang-ci-staticcheck-linter 2022-07-07 15:47:59 +01:00
dm-2
78da0d09c4
Merge pull request #1138 from github/changlog-table-comment
Changlog table comment
2022-07-07 15:47:21 +01:00
dm-2
6d5d49616f
Merge branch 'master' into golang-ci-staticcheck-linter 2022-07-07 15:46:18 +01:00
dm-2
8eb9708755
Merge branch 'master' into changlog-table-comment 2022-07-07 15:34:30 +01:00
dm-2
02258ac15d
Merge pull request #1137 from github/go-mysql-binlog-dead-code
Remove unused code in `go/mysql/binlog.go`
2022-07-07 15:07:24 +01:00
Tim Vaillancourt
e45357097f cleanup v5 2022-07-07 05:10:22 +02:00
Tim Vaillancourt
eead61cb6d cleanup v4 2022-07-07 05:05:37 +02:00
Tim Vaillancourt
c3593c94ff cleanup v3 2022-07-07 03:27:36 +02:00
Tim Vaillancourt
0303031fc2 cleanup v2 2022-07-07 03:22:53 +02:00
Tim Vaillancourt
0ec555badf Cleanup 2022-07-07 03:19:53 +02:00
Tim Vaillancourt
500fbefdb9 golang-ci: fix staticcheck linter warnings 2022-07-07 03:12:44 +02:00
Tim Vaillancourt
09bb5caf88
Merge branch 'master' into go-mysql-binlog-dead-code 2022-07-07 00:39:24 +02:00
Andrew Mason
b751499091
Cancel any row count queries before attempting to cut over (#846)
* Cancel any row count queries before attempting to cut over

Closes #830. Switches from using `QueryRow` to `QueryRowContext`, and
stores a context.CancelFunc in the migration context, which is called to
halt any running row count query before beginning the cut over.

* Make it threadsafe

* Kill the count query on the database side as well

* Explicitly grab a connection to run the count, store its connection id
* When the query context is canceled, run a `KILL QUERY ?` on that connection id

* Rewrite these to use the threadsafe functions, stop exporting the cancel func

* Update logger

* Update logger

Co-authored-by: Tim Vaillancourt <timvaillancourt@github.com>
Co-authored-by: Tim Vaillancourt <tim@timvaillancourt.com>
Co-authored-by: dm-2 <45519614+dm-2@users.noreply.github.com>
2022-07-07 00:23:23 +02:00
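A rough sketch of the cancellation flow described in #846, with hypothetical names (the real code threads this through the migration context): the count runs on a pinned connection under a cancellable context, its connection id is recorded, and cancellation also issues the `KILL QUERY ?` mentioned above from another connection.

```go
package sketch

import (
	"context"
	"database/sql"
)

// countRows is a hypothetical illustration: it pins one connection, records
// that connection's id, and runs the row count under a cancellable context.
func countRows(ctx context.Context, db *sql.DB) (count, connectionID int64, err error) {
	conn, err := db.Conn(ctx)
	if err != nil {
		return 0, 0, err
	}
	defer conn.Close()

	if err := conn.QueryRowContext(ctx, "select connection_id()").Scan(&connectionID); err != nil {
		return 0, 0, err
	}
	err = conn.QueryRowContext(ctx, "select count(*) from tbl").Scan(&count)
	return count, connectionID, err
}

// killQuery runs from a different connection when the context is canceled,
// so the server stops work on the long-running count immediately.
func killQuery(db *sql.DB, connectionID int64) error {
	_, err := db.Exec("kill query ?", connectionID)
	return err
}
```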
Tim Vaillancourt
308ba7f915
Default to go1.17.11 (#1136)
* Default to go1.17.11

* `go mod vendor`

Co-authored-by: dm-2 <45519614+dm-2@users.noreply.github.com>
2022-07-07 00:05:23 +02:00
Tim Vaillancourt
0918bab29b
Add context/timeout to HTTP throttle check (#1131)
* Add context/timeout to HTTP throttle check

* Dont run `.GetThrottleHTTPInterval()` on every loop

* Update help message

* Var rename

* 2022

* Add timeout flag

* Add unix/tcp server commands, use ParseInt() for string->int64

* Var rename

* Re-check http timeout on every loop iteration

* Remove stale comment

* Make throttle interval idempotent

* var rename

* Usage grammar

* Make http timeout idempotent too

* Parse time.Duration once

* Move timeout to NewThrottler

* Help update

* Set User-Agent header

* Re-add newline

Co-authored-by: dm-2 <45519614+dm-2@users.noreply.github.com>
2022-07-06 23:56:07 +02:00
Tim Vaillancourt
0b066c16a5
Use switch statements for readability, simplify .NewGoMySQLReader() (#1135)
* Use `switch` statements for readability

* Simplify initBinlogReader()
2022-07-06 23:45:26 +02:00
Tim Vaillancourt
6bf32f2015
Ensure mysql rows responses are closed (#1132)
Co-authored-by: dm-2 <45519614+dm-2@users.noreply.github.com>
2022-07-06 23:32:15 +02:00
Shlomi Noach
cc38a17e50
compound pk tests (#387)
* compound pk tests

* more details in failure diff

* more elaborate test; the pk-ts one consistently fails

* Fix merge conflict

Co-authored-by: Shlomi Noach <shlomi-noach@github.com>
Co-authored-by: Tim Vaillancourt <timvaillancourt@github.com>
Co-authored-by: Tim Vaillancourt <tim@timvaillancourt.com>
2022-07-06 23:22:21 +02:00
dm-2
3901a86422
Merge pull request #1140 from timvaillancourt/fix-issue-1117
Fix `integer divide by zero` panic in migrator
2022-07-06 17:11:16 +01:00
dm-2
b7f7e2c0b0
Merge branch 'master' into fix-issue-1117 2022-07-06 16:58:51 +01:00
dm-2
8d9761d616
Merge pull request #1141 from shaohk/fix-two-phase-commit-lost-data_v2
fix(lost data in mysql two-phase commit): lost data in mysql two-phas…
2022-07-06 16:58:25 +01:00
dm-2
f35bfd3997
Merge branch 'master' into fix-issue-1117 2022-07-06 16:32:02 +01:00
dm-2
b80b6e78fd
Merge branch 'master' into fix-two-phase-commit-lost-data_v2 2022-07-06 16:25:54 +01:00
dm-2
f0209e8c71
Merge pull request #1143 from github/codeql-run-on-prs
Run CodeQL analysis on PRs
2022-07-06 16:25:31 +01:00
dm-2
d07c4f097d
Run CodeQL analysis on PRs 2022-07-06 16:23:15 +01:00
Tim Vaillancourt
38e86e5791
Merge branch 'master' into fix-two-phase-commit-lost-data_v2 2022-06-25 00:37:12 +02:00
shaohk
87970f6312 fix(lost data in mysql two-phase commit): lost data in mysql two-phase commit 2022-06-24 11:38:05 +08:00
Tim Vaillancourt
de339dd042 Fix integer divide by zero panic in migrator 2022-06-23 03:37:18 +02:00
Tim Vaillancourt
387b32594e Var rename 2022-06-07 01:49:18 +02:00
Tim Vaillancourt
bf0c13b46d Add table comment to changelog 2022-06-07 01:47:51 +02:00
Tim Vaillancourt
eb801441c2 Remove unused Type field 2022-06-06 02:31:47 +02:00
Tim Vaillancourt
261e1f7820 Remove unused code in go/mysql/binlog.go 2022-06-06 02:29:01 +02:00
Tim Vaillancourt
ec199f1185 revert out of scope change 2022-06-05 00:06:41 +02:00
Tim Vaillancourt
af99b247f9 Add Migrated changelog event 2022-06-04 23:59:00 +02:00
Tim Vaillancourt
ed46138c06
Add golangci-lint CI action, fix gosimple, govet + unused lint errors (#1127)
* Add `golangci-lint`, fix `gosimple`, `govet` and `unused` linter complaints

* Go 1.16

* Update copyright dates
2022-05-31 21:23:39 +02:00
dm-2
8f361f6445
Merge pull request #1108 from jecepeda/add-binaries-for-arm64-architectures
Add binaries for arm64 architectures
2022-03-11 16:37:18 +00:00
Jaime Cepeda
520fd7beb2
Merge branch 'master' into add-binaries-for-arm64-architectures 2022-03-11 14:26:06 +01:00
jecepeda
df6443bb61 Add binaries for arm64 architectures 2022-03-11 14:10:58 +01:00
dm-2
68c614e117
Merge pull request #1101 from github/remove-5.6-support
Remove support for MySQL 5.6 (end-of-life Feb 2021)
2022-03-11 11:14:29 +00:00
dm-2
f62e9dc4f4 Remove support for MySQL 5.6 (end-of-life Feb 2021) 2022-03-01 11:39:55 +00:00
dm-2
e15166ecb9
Merge pull request #1097 from timvaillancourt/remove-eol-5.5
Remove end-of-life MySQL 5.5.x version
2022-02-25 15:01:35 +00:00
dm-2
31f3ac0c8b
Merge branch 'master' into remove-eol-5.5 2022-02-25 14:45:58 +00:00
dm-2
d3ea39c669
Merge pull request #1100 from github/fix-rpm-build-os
fix: update build script to explicitly build RPMs for linux
2022-02-25 14:26:26 +00:00
dm-2
605104c9a2
Merge branch 'master' into fix-rpm-build-os 2022-02-25 14:15:39 +00:00
dm-2
522fbb849c fix: update build script to explicitly build RPMs for linux 2022-02-25 14:10:42 +00:00
Tim Vaillancourt
5ea949f31e Remove end-of-life MySQL 5.5.x 2022-02-25 00:02:27 +01:00
dm-2
4142f5d07b
Merge pull request #1095 from timvaillancourt/inspector-connectionConfig-instanceKey-string
Use `.String()` for logging connection-config `InstanceKey`
2022-02-24 10:45:11 +00:00
dm-2
260e662197
Merge branch 'master' into inspector-connectionConfig-instanceKey-string 2022-02-24 10:34:19 +00:00
dm-2
3824eec0eb
Merge pull request #1096 from timvaillancourt/inspector-fix-needless-fmt-sprintf
Fix needless `fmt.Sprintf` call in `go/logic/inspector.go`
2022-02-24 10:34:12 +00:00
Tim Vaillancourt
1aa74bfe60 Fix needless fmt.Sprintf call in go/logic/inspector.go 2022-02-24 01:30:29 +01:00
Tim Vaillancourt
f6db40e4cd Use .String() for logging connection-config InstanceKey 2022-02-23 23:28:03 +01:00
Tim Vaillancourt
ac23094497
Merge branch 'github:master' into master 2022-02-22 00:57:58 +01:00
dm-2
0c7c9a52f3
Merge pull request #1090 from github/add-hooks-status-interval-docs
Add docs for `hooks-status-interval`
2022-02-07 16:29:50 +00:00
dm-2
d84c687d7c
Add docs for hooks-status-interval 2022-02-07 15:35:14 +00:00
dm-2
ebd5645989
Merge pull request #1083 from tknodell-recurly/customize_status_hook_interval
Add flag to customize the interval which the onStatus hook is called
2022-02-07 12:46:43 +00:00
dm-2
94ffadf101
Merge branch 'master' into customize_status_hook_interval 2022-02-07 11:51:42 +00:00
dm-2
dc9a2e3f24
Merge pull request #1087 from github/arthur/reduce-minimal-chunk-size
Reduce the minimal chunk size from `100` to `10`.
2022-02-07 11:51:08 +00:00
Arthur Schreiber
997c5b8bff Reduce the minimal chunk size from 100 to 10. 2022-02-07 11:23:39 +00:00
Tyler Knodell
fbb53f4b04 Add flag to customize the interval which the onStatus hook is called 2022-02-01 08:54:55 -07:00
dm-2
d6c83638d7
Merge pull request #1066 from EagleEyeJohn/documentation-update
Documentation update
2022-01-18 17:00:16 +00:00
dm-2
619d9d3f5e
Merge branch 'master' into documentation-update 2022-01-18 16:41:18 +00:00
dm-2
39bba9c92a
Merge pull request #1078 from github/fix-build-script
Update build script to generate releases
2022-01-18 16:32:01 +00:00
dm-2
09c35bd393 Update build script to generate releases 2022-01-18 16:15:36 +00:00
dm-2
1f780ae973
Merge pull request from GHSA-rrp4-2xx3-mv29
Security: fix vulnerability where `-database` parameter accepts arbitrary DSN strings
2022-01-18 15:59:32 +00:00
dm-2
8cd027c7f5
Merge branch 'master' into advisory-fix-1 2022-01-18 12:09:26 +00:00
dm-2
83413c339e Security: fix vulnerability where -database parameter accepts arbitrary DSN strings 2022-01-18 11:23:49 +00:00
John Nicholls
66fadc71d7
Merge branch 'master' into documentation-update 2022-01-18 09:27:40 +00:00
Rashiq
be644ebd43
Merge pull request #1076 from github/rashiq-update_gh_ost_hook_docs 2022-01-14 14:52:53 +01:00
Rashiq
0dc9092d5c
Update hooks.md 2022-01-14 02:49:16 +01:00
John Nicholls
2c13d814ac Improve command line flags documentation grammar 2021-12-15 16:37:39 +00:00
John Nicholls
47fd51f804 Document critical-load-hibernate-seconds 2021-12-15 16:34:05 +00:00
Ed Toro
e484824bbd
typo (#1011)
"or and" => "or an"
2021-07-27 17:38:50 +02:00
Tim Vaillancourt
a1862908c9
Use github.com/go-sql-driver 1.6.0 (#1006)
* Replace deprecated go-mysql library

* Use github.com/go-sql-driver 1.6.0

* go mod vendor

* go mod vendor again
2021-07-19 17:00:35 +02:00
Tim Vaillancourt
732a064231
Replace deprecated go-mysql library (#994) 2021-07-15 21:49:50 +02:00
Tim Vaillancourt
43d3afea5a Merge branch 'master' of https://github.com/timvaillancourt/gh-ost 2021-07-15 21:08:01 +02:00
Andrew Mason
6e1daf90ee
Check RowsAffected when applying DML events to get more accurate statistics (#844)
* Check RowsAffected when applying DML events to get more accurate statistics

Addresses #600.

When applying a DML event, check the RowsAffected on the `Result`
struct. Since all DML event queries are point queries, this will only
ever be 0 or 1. The applier then takes this value and multiplies by
the `rowsDelta` of the event, resulting in a properly-signed, accurate
row delta to use in the statistics.

If an error occurs here, log it, but do not surface this as an
actual error .. simply assume the DML affected a row and move on. It
will be inaccurate, but this is already the case.

* Fix import

* update wording to warning log message

Co-authored-by: Tim Vaillancourt <timvaillancourt@github.com>

Co-authored-by: Tim Vaillancourt <timvaillancourt@github.com>
2021-07-14 16:48:03 +02:00
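A small sketch of the accounting change in #844 (helper and variable names hypothetical): each DML event is a point query, so `RowsAffected()` is 0 or 1, and multiplying by the event's signed row delta yields a correctly signed statistic; errors are logged and treated as one affected row, as before.

```go
package sketch

import "database/sql"

// applyDMLEventStat is a hypothetical illustration of the statistics fix:
// RowsAffected (0 or 1 for a point query) scales the event's signed rowsDelta.
func applyDMLEventStat(res sql.Result, rowsDelta int64, logWarning func(error)) int64 {
	affected, err := res.RowsAffected()
	if err != nil {
		// Not fatal for the migration: log it and assume one affected row,
		// matching the previous (less accurate) behavior.
		logWarning(err)
		affected = 1
	}
	return affected * rowsDelta
}
```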
Tim Vaillancourt
d726b20dda
Add CodeSQL actions workflow (#1005) 2021-07-13 20:16:33 +02:00
Tim Vaillancourt
b5a1fecb11 Add CodeSQL actions workflow 2021-07-13 16:30:58 +02:00
Tim Vaillancourt
47d49c6b92
Add go mod (#935)
* Add a go.mod file

* run go mod vendor again

* Move to a well-supported ini file reader

* Remove GO111MODULE=off

* Use go 1.16

* Rename github.com/outbrain/golib -> github.com/openark/golib

* Remove *.go-e files

* Fix for `strconv.ParseInt: parsing "": invalid syntax` error

* Add test for '[osc]' section

Co-authored-by: Nate Wernimont <nate.wernimont@workiva.com>
2021-06-24 20:19:37 +02:00
Tim Vaillancourt
aef2a69903
v1.1.2 (#990) 2021-06-17 15:33:20 +02:00
Dirkjan Bussink
40acde0222
Set the ServerName for TLS configuration (#988)
When TLS hostname validation used for the MySQL connection, the
ServerName property needs to be set so that it knows which name to
validate on the certificate.

Without this option and with InsecureSkipVerify set to false, validation
will error here with a fatal error otherwise:

```
FATAL tls: either ServerName or InsecureSkipVerify must be specified in the tls.Config
```
2021-06-17 14:51:43 +02:00
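A minimal sketch of the fix using the real `crypto/tls` fields (the host name is a placeholder): with verification enabled, `ServerName` must carry the name to validate on the server certificate.

```go
package sketch

import "crypto/tls"

// newTLSConfig is a hypothetical helper showing the fix: with verification
// enabled (InsecureSkipVerify=false), ServerName tells the TLS handshake
// which host name to validate on the server certificate.
func newTLSConfig(hostname string) *tls.Config {
	return &tls.Config{
		ServerName:         hostname, // e.g. the MySQL host from the connection config
		InsecureSkipVerify: false,
	}
}
```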
Shlomi Noach
9bc508f068
Enum to varchar (#963)
* v1.1.0

* WIP: copying AUTO_INCREMENT value to ghost table
Initial commit: towards setting up a test suite

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* greping for 'expect_table_structure' content

* Adding simple test for 'expect_table_structure' scenario

* adding tests for AUTO_INCREMENT value after row deletes. Should initially fail

* clear event beforehand

* parsing AUTO_INCREMENT from alter query, reading AUTO_INCREMENT from original table, applying AUTO_INCREMENT value onto ghost table if applicable and user has not specified AUTO_INCREMENT in alter statement

* support GetUint64

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* minor update to test

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* adding test for user defined AUTO_INCREMENT statement

* Generated column as part of UNIQUE (or PRIMARY) KEY

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* skip analysis of generated column data type in unique key

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* All MySQL DBs limited to max 3 concurrent/idle connections

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* hooks: reporting GH_OST_ETA_SECONDS. ETA stored as part of migration context

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* GH_OST_ETA_NANOSECONDS

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* N/A denoted by negative value

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* ETAUnknown constant

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* Convering enum to varchar

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* test: not null

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* first attempt at setting enum-to-string right

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* fix insert query

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* store enum values, use when populating

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* apply EnumValues to mapped column

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* fix compilation error

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* gofmt

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>
2021-06-10 17:17:49 +02:00
Shlomi Noach
f19f101610
hooks: reporting GH_OST_ETA_SECONDS. ETA as part of migration context (#936)
* v1.1.0

* WIP: copying AUTO_INCREMENT value to ghost table
Initial commit: towards setting up a test suite

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* greping for 'expect_table_structure' content

* Adding simple test for 'expect_table_structure' scenario

* adding tests for AUTO_INCREMENT value after row deletes. Should initially fail

* clear event beforehand

* parsing AUTO_INCREMENT from alter query, reading AUTO_INCREMENT from original table, applying AUTO_INCREMENT value onto ghost table if applicable and user has not specified AUTO_INCREMENT in alter statement

* support GetUint64

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* minor update to test

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* adding test for user defined AUTO_INCREMENT statement

* Generated column as part of UNIQUE (or PRIMARY) KEY

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* skip analysis of generated column data type in unique key

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* All MySQL DBs limited to max 3 concurrent/idle connections

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* hooks: reporting GH_OST_ETA_SECONDS. ETA stored as part of migration context

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* GH_OST_ETA_NANOSECONDS

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* N/A denoted by negative value

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* ETAUnknown constant

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>
2021-05-31 14:15:51 +02:00
Shlomi Noach
c41823ecc9
All MySQL DBs limited to max 3 concurrent/idle connections #15 (#931)
* v1.1.0

* WIP: copying AUTO_INCREMENT value to ghost table
Initial commit: towards setting up a test suite

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* greping for 'expect_table_structure' content

* Adding simple test for 'expect_table_structure' scenario

* adding tests for AUTO_INCREMENT value after row deletes. Should initially fail

* clear event beforehand

* parsing AUTO_INCREMENT from alter query, reading AUTO_INCREMENT from original table, applying AUTO_INCREMENT value onto ghost table if applicable and user has not specified AUTO_INCREMENT in alter statement

* support GetUint64

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* minor update to test

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* adding test for user defined AUTO_INCREMENT statement

* Generated column as part of UNIQUE (or PRIMARY) KEY

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* skip analysis of generated column data type in unique key

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* All MySQL DBs limited to max 3 concurrent/idle connections

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>
2021-05-27 20:00:58 +02:00
Tim Vaillancourt
8f42dedef8
Add GO111MODULE=off to build.sh, use Golang 1.16 (#966)
* Add GO111MODULE=off to build.sh

* Use golang 1.16

* Update go version in README.md

* Add missing GO111MODULE=off

* Add missing GO111MODULE=off again

* Use go1.16.3 explicitly

* Use 1.16 for CI test

* Update min go version

* Use go 1.16.4
2021-05-25 13:22:28 +02:00
Shlomi Noach
36c669dd75
Generated column as part of UNIQUE (or PRIMARY) KEY (#919)
* v1.1.0

* WIP: copying AUTO_INCREMENT value to ghost table
Initial commit: towards setting up a test suite

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* greping for 'expect_table_structure' content

* Adding simple test for 'expect_table_structure' scenario

* adding tests for AUTO_INCREMENT value after row deletes. Should initially fail

* clear event beforehand

* parsing AUTO_INCREMENT from alter query, reading AUTO_INCREMENT from original table, applying AUTO_INCREMENT value onto ghost table if applicable and user has not specified AUTO_INCREMENT in alter statement

* support GetUint64

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* minor update to test

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* adding test for user defined AUTO_INCREMENT statement

* Generated column as part of UNIQUE (or PRIMARY) KEY

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* skip analysis of generated column data type in unique key

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>
2021-05-24 20:16:49 +02:00
Tim Vaillancourt
57a81ceff5
Use GitHub Actions CI badges (#970) 2021-05-19 14:08:32 +02:00
Tim Vaillancourt
c71dbf9ef3
Copy auto increment (#967)
* v1.1.0

* WIP: copying AUTO_INCREMENT value to ghost table
Initial commit: towards setting up a test suite

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* greping for 'expect_table_structure' content

* Adding simple test for 'expect_table_structure' scenario

* adding tests for AUTO_INCREMENT value after row deletes. Should initially fail

* clear event beforehand

* parsing AUTO_INCREMENT from alter query, reading AUTO_INCREMENT from original table, applying AUTO_INCREMENT value onto ghost table if applicable and user has not specified AUTO_INCREMENT in alter statement

* support GetUint64

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* minor update to test

Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>

* adding test for user defined AUTO_INCREMENT statement

Co-authored-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com>
2021-05-14 15:32:56 +02:00
Tim Vaillancourt
fef83af378
Merge pull request #968 from timvaillancourt/parallel-ci-tests
Use matrix build for replica test CI
2021-05-08 01:11:26 +02:00
Tim Vaillancourt
38bec2e207
Merge branch 'master' into parallel-ci-tests 2021-05-08 01:03:02 +02:00
Tim Vaillancourt
d8bd21acdf
Merge pull request #819 from github/tar-xz-dbdeployer-upgrade
replication tests CI: switch to .tar.xz binaries, upgrade dbdeployer
2021-05-08 01:01:49 +02:00
Tim Vaillancourt
5e37110cfd merge tar-xz-dbdeployer-upgrade 2021-05-08 00:45:59 +02:00
Tim Vaillancourt
a25f63aa2a merge tar-xz-dbdeployer-upgrade 2021-05-08 00:44:28 +02:00
Tim Vaillancourt
cd5beeb495
Remove git branch steps 2021-05-08 00:35:19 +02:00
Tim Vaillancourt
535687023e
Fetch master branch of ci env 2021-05-08 00:34:38 +02:00
Tim Vaillancourt
2634534cf5 Fix var name 2021-05-08 00:26:46 +02:00
Tim Vaillancourt
5ebbfaea89 Use matrix build for replica test CI 2021-05-08 00:23:31 +02:00
Tim Vaillancourt
7ae7cc67f7 Use right var name 2021-05-07 23:52:01 +02:00
Tim Vaillancourt
3976b7acf2 Remove 'mysql-' prefix from mysql version so dbdeployer can make a port num 2021-05-07 23:50:24 +02:00
Tim Vaillancourt
8df36d4bfd Fix git checkout 2021-05-07 23:32:24 +02:00
Tim Vaillancourt
6aaab78657
Use fix-8016-tarball branch 2021-05-07 23:22:18 +02:00
Tim Vaillancourt
afa221bb45
Use HEAD 2021-05-07 22:38:25 +02:00
Tim Vaillancourt
90a14d58bd
Merge branch 'master' into tar-xz-dbdeployer-upgrade 2021-05-07 22:24:58 +02:00
Tim Vaillancourt
29b8cfad48
Merge pull request #921 from ccoffey/cathal/safer_cut_over
Cut-over should wait for heartbeat lag to be low enough to succeed
2021-05-07 15:30:13 +02:00
Tim Vaillancourt
0dc64757eb
Merge branch 'master' into cathal/safer_cut_over 2021-05-07 15:13:29 +02:00
Tim Vaillancourt
8ab0a89cdc
Merge pull request #954 from yakirgb/remove-rpm-build-id
Remove build_id files from rpm
2021-05-06 15:31:29 +02:00
Tim Vaillancourt
5af869ac3b
Merge branch 'master' into remove-rpm-build-id 2021-05-06 15:01:36 +02:00
Tim Vaillancourt
0a1fb97b2a
Merge pull request #962 from Fanduzi/master
fix #961
2021-05-06 14:12:54 +02:00
Tim Vaillancourt
0d742e2699
Merge branch 'master' into master 2021-05-06 13:45:35 +02:00
Tim Vaillancourt
6cdf4b572b
Merge pull request #965 from timvaillancourt/v1.1.1
Update RELEASE_VERSION to v1.1.1
2021-05-03 16:55:25 +02:00
Tim Vaillancourt
38eeaa1036 Update RELEASE_VERSION to v1.1.1 2021-05-03 16:30:42 +02:00
Fan()
74f807103e
Update inspect.go 2021-04-29 14:33:10 +08:00
Fan()
94dfef3ae7
Merge pull request #1 from Fanduzi/Fanduzi-patch-1
fix #961
2021-04-29 14:16:49 +08:00
Fan()
f40f14b9ee
Update inspect.go
fix https://github.com/github/gh-ost/issues/961
2021-04-29 13:43:24 +08:00
Yakir Gibraltar
c480ff1337 Remove build_id files from rpm 2021-04-06 18:15:05 +03:00
Tim Vaillancourt
d95dda3a66
Merge pull request #951 from timvaillancourt/add-server-cmd-hosts
Add 'applier' and 'inspector' commands to server
2021-04-04 18:55:57 +02:00
Tim Vaillancourt
157dba920c Add mysql port and version 2021-04-03 23:24:29 +02:00
Tim Vaillancourt
23a421cef7 Add 'Hostname:' prefix to output 2021-04-03 22:34:20 +02:00
Tim Vaillancourt
5e9b913035
Merge branch 'master' into add-server-cmd-hosts 2021-04-02 16:59:56 +02:00
Tim Vaillancourt
123b46f9bb Split into 'applier' and 'inspector' commands 2021-04-02 16:57:13 +02:00
Tim Vaillancourt
cffb523bad Fix help typo 2021-04-02 02:02:08 +02:00
Tim Vaillancourt
7ea47cbfb5 Fix order 2021-04-02 01:58:59 +02:00
Tim Vaillancourt
094d11d722 Use a single line 2021-04-02 01:58:06 +02:00
Tim Vaillancourt
2fb524f43a Adds 'hosts' command to server 2021-04-02 01:50:11 +02:00
Guinevere Saenger
08085b7d08
Merge pull request #941 from guineveresaenger/socket-file-doc
Adds doc entry for --serve-socket-file flag
2021-03-15 11:24:05 -07:00
guineveresaenger
577528a3bd Adds doc entry for --serve-socket-file flag 2021-03-12 12:07:13 -08:00
Rashiq
d3bf3cde4d
Merge branch 'master' into cathal/safer_cut_over 2021-03-08 20:59:14 +01:00
Rashiq
d4c91e6a2c
Merge pull request #915 from cenkore/iss909
fix: issue 909
2021-03-08 20:25:09 +01:00
Rashiq
db513872c1
Merge branch 'master' into iss909 2021-03-08 18:20:03 +01:00
thsun
40bc5aedf2 enhance processing efficiency 2021-02-26 14:23:24 +08:00
thsun
960d801276 canonical code 2021-02-24 17:18:21 +08:00
Tim Vaillancourt
62ce678eee
Merge pull request #515 from akshaychhajed/improve-conn-log
Improved connection type logging
2021-02-08 14:24:58 +01:00
Tim Vaillancourt
048d5838db
Fix whitespace after merge-conflict fix 2021-02-08 13:37:39 +01:00
Tim Vaillancourt
72ccd0b0d0
Fix whitespace after merge-conflict fix 2021-02-08 13:36:59 +01:00
Tim Vaillancourt
edbc8d6733
Merge branch 'master' into improve-conn-log 2021-02-08 13:35:30 +01:00
Cathal Coffey
3135a25c1f HeartbeatLag must be < than --max-lag-millis and --cut-over-lock-timeout-seconds to continue 2021-02-07 14:31:40 +00:00
Cathal Coffey
503b7b0d6c Consolidate the two sleepWhileTrue loops 2021-02-07 13:52:59 +00:00
Cathal Coffey
d5c2414893 Move 'heartbeat is too high' back to Debug logs again 2021-02-05 10:12:54 +00:00
Cathal Coffey
48ce0873de Store lastHeartbeatOnChangelogTime instead of CurrentHeartbeatLag 2021-02-03 10:03:45 +00:00
Cathal Coffey
4efd156759 Move 'heartbeat is too high' to Debug logs 2021-02-02 09:17:31 +00:00
Cathal Coffey
8a26c9ebf4 Don't cut-over until it is safe to do so 2021-02-01 10:14:05 +00:00
Cathal Coffey
a4218cd6f4 Progress should print HeartbeatLag 2021-01-31 18:27:34 +00:00
Cathal Coffey
8aee288fd7 Handle onChangelogHeartbeatEvent and update CurrentHeartbeatLag 2021-01-31 18:24:26 +00:00
Cathal Coffey
7207bc146a Make it easier to handle different onChangelogEvents 2021-01-31 18:23:09 +00:00
thsun
2b5d5e0a11 gofmt 2021-01-14 14:51:14 +08:00
thsun
d0f0b95cf8 rm main 2021-01-13 17:21:21 +08:00
thsun
3665d666b9 fix iss909 2021-01-13 16:29:39 +08:00
Tim Vaillancourt
c681c54e8f
Merge pull request #906 from github/golang1.15
Use golang:1.15.x, remove jessie tarball build
2020-12-22 16:30:50 +01:00
Tim Vaillancourt
60294109ca
Update comment 2020-12-17 23:17:34 +01:00
Tim Vaillancourt
dd2568631a Use golang:1.15.x, remove jessie tarball build 2020-12-17 17:26:51 +01:00
Tim Vaillancourt
8ae02ef69c
Merge pull request #905 from github/azure-support
Support Azure Database for MySQL (merge PR)
2020-12-17 13:45:22 +01:00
Tim Vaillancourt
bf408b0d0e
Merge branch 'master' into master 2020-12-16 12:53:59 +01:00
Tim Vaillancourt
e99b915cf5
Merge pull request #640 from github/ipv6
Parsing ipv6 addresses
2020-10-23 12:11:21 +02:00
Tim Vaillancourt
88e59dd2a3
Merge branch 'master' into master 2020-10-22 23:22:32 +02:00
Tim Vaillancourt
8c4cd10d79
Merge branch 'master' into ipv6 2020-10-22 11:39:46 +02:00
Tim Vaillancourt
447a98a6b5
Merge pull request #888 from github/fix_dup_delete_old_table3
use sync.Once to avoid to send drop cutover sentry table to mysql twice (merge PR)
2020-10-21 18:07:56 +02:00
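A hedged sketch of the `sync.Once` guard named in #888 (wrapper and table name hypothetical): the DROP of the cut-over sentry table reaches MySQL at most once even if concurrent cleanup paths race.

```go
package sketch

import (
	"database/sql"
	"sync"
)

var dropSentryOnce sync.Once

// dropCutOverSentryTable is a hypothetical illustration: sync.Once guarantees
// the DROP statement is sent to MySQL at most once, even when several
// cleanup paths call this concurrently.
func dropCutOverSentryTable(db *sql.DB) (err error) {
	dropSentryOnce.Do(func() {
		_, err = db.Exec("drop table if exists `_tbl_del`") // placeholder sentry name
	})
	return err
}
```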
Tim Vaillancourt
0e2d33ad86 Merge in https://github.com/github/gh-ost/pull/755 2020-10-20 16:08:49 +02:00
Tim Vaillancourt
ce03757deb
Merge pull request #879 from github/fix-buster-tarball-build
Fix buster tarball build
2020-08-20 22:08:00 +02:00
Tim Vaillancourt
ee9174550a Fix typo 2020-08-20 20:57:33 +02:00
Tim Vaillancourt
bcaffc11a2 cp jessie package too 2020-08-20 20:49:51 +02:00
Tim Vaillancourt
69ce8e869a Fix/hack build-deploy-tarball to allow buster builds 2020-08-20 20:45:46 +02:00
Tim Vaillancourt
eef81c0f73 Update RELEASE_VERSION to 1.1.0 2020-08-20 16:00:31 +02:00
Tim Vaillancourt
c940a85a28
Merge pull request #835 from ajm188/handle_driver_timeout_error
Add a check to rows.Err after processing all rows
2020-08-20 14:10:47 +02:00
Tim Vaillancourt
8b3a8bef0a
Merge branch 'master' into handle_driver_timeout_error 2020-08-19 23:20:10 +02:00
Tim Vaillancourt
ebb1de5cdf
Merge pull request #562 from github/limitation-encrypted-binlogs
Documenting limitation: encrypted binlogs
2020-08-19 22:47:21 +02:00
Tim Vaillancourt
81a9538d6f
Merge branch 'master' into limitation-encrypted-binlogs 2020-08-19 22:46:40 +02:00
Tim Vaillancourt
f268259f5e
Merge branch 'master' into handle_driver_timeout_error 2020-08-19 22:04:55 +02:00
Tim Vaillancourt
5e953b7e3e
Merge pull request #878 from github/parse-alter-statement
Support a complete ALTER TABLE statement in --alter (merge PR)
2020-08-19 22:04:15 +02:00
Tim Vaillancourt
10724f08c5 Merge branch 'parse-alter-statement' of https://github.com/openark/gh-ost into parse-alter-statement 2020-08-19 21:39:36 +02:00
Tim Vaillancourt
720182f58b
Merge pull request #824 from github/connection-timeout
Support --mysql-timeout flag
2020-08-19 21:38:31 +02:00
Tim Vaillancourt
28bbe67c6d
Merge branch 'master' into connection-timeout 2020-08-19 21:08:22 +02:00
Tim Vaillancourt
835e86e682
Merge pull request #823 from github/update-go-sql-driver-2020-02
Update go-sql-driver to latest
2020-08-19 21:07:58 +02:00
Tim Vaillancourt
2bb40605d6
Merge branch 'master' into update-go-sql-driver-2020-02 2020-08-19 21:06:50 +02:00
Tim Vaillancourt
06cd4a0a46
Merge pull request #877 from github/workflow-upload-artifact
Workflow upload artifact
2020-08-19 21:01:48 +02:00
Tim Vaillancourt
d5eac588ac Merge branch 'workflow-upload-artifact' of https://github.com/openark/gh-ost into workflow-upload-artifact 2020-08-19 20:59:16 +02:00
Tim Vaillancourt
4bfbe90fca
Merge pull request #876 from github/golang1.14
Update to golang:1.14.7, add @timvaillancourt to README.md
2020-08-19 20:54:44 +02:00
Tim Vaillancourt
0ac3a3833d
Merge branch 'master' into golang1.14 2020-08-19 20:27:47 +02:00
Tim Vaillancourt
42bd9187c1 Use 1.14.7 2020-08-19 20:27:11 +02:00
Tim Vaillancourt
25d28855b2 Copy of PR https://github.com/github/gh-ost/pull/861 2020-08-19 20:21:40 +02:00
Tim Vaillancourt
7f0254f30b
Merge branch 'master' into update-go-sql-driver-2020-02 2020-08-19 19:17:50 +02:00
Tim Vaillancourt
f334dbde5e
Merge pull request #810 from yaserazfar/check_unchecked_errors
Adds error checking for an err variable that was left unchecked
2020-08-14 17:03:10 +02:00
Tim Vaillancourt
4bbc8deb77
Fix tabs from merge conflict 2020-08-13 15:50:38 +02:00
Tim Vaillancourt
5b6da5b7f3
Merge branch 'master' into handle_driver_timeout_error 2020-08-13 15:49:34 +02:00
Shlomi Noach
ae4dd1867a extra unit test checks 2020-07-29 15:06:13 +03:00
Shlomi Noach
b54d256725
Merge branch 'master' into workflow-upload-artifact 2020-07-28 11:40:21 +03:00
Shlomi Noach
34d1624dde
Merge pull request #1 from openark/golang1.14
Using golang 1.14
2020-07-28 11:39:37 +03:00
Shlomi Noach
b9d400abd8
Merge branch 'master' into workflow-upload-artifact 2020-07-27 10:28:16 +03:00
Shlomi Noach
d1fcef4c3d
Merge branch 'master' into golang1.14 2020-07-27 10:24:09 +03:00
Shlomi Noach
731df3cd15 comments 2020-07-23 14:04:14 +03:00
Shlomi Noach
88c73c0338 Merge branch 'master' into parse-alter-statement 2020-07-23 14:02:38 +03:00
Shlomi Noach
87595b1780
Merge pull request #6 from github/master
Updates from upstream
2020-07-23 14:01:49 +03:00
Shlomi Noach
c9249f2b71 Updating and using AlterTableOptions 2020-07-23 11:38:05 +03:00
Shlomi Noach
f482356a94
Merge branch 'master' into parse-alter-statement 2020-07-22 12:36:59 +03:00
Shlomi Noach
6c7b4736e1 Support a complete ALTER TABLE statement in --alter 2020-07-22 12:33:02 +03:00
Justin Fudally
c07d08f8b5
Merge pull request #864 from github/logging-interface-contrib
Logging interface contrib
2020-07-21 16:40:50 -05:00
Justin Fudally
b02900ae06
Merge branch 'master' into logging-interface-contrib 2020-07-21 16:01:29 -05:00
Justin Fudally
6b6f9eccf4
Merge pull request #789 from abeyum/master
implement a logging interface
2020-07-21 15:57:38 -05:00
Shlomi Noach
8eb300bdc8 expect 1.14 and above in build scripts; update to readme.md 2020-06-29 09:52:47 +03:00
Shlomi Noach
2b71b73285 Actions/workflows: upload binary artifact 2020-06-28 08:57:19 +03:00
Shlomi Noach
fb4aca1567 checksums 2020-06-28 08:49:30 +03:00
Shlomi Noach
1a8c372947 Using golang 1.14 2020-06-28 08:39:16 +03:00
Shuode Li
57955b968b
Merge branch 'master' into master 2020-05-06 00:15:57 -07:00
Andrew Mason
90ad7a061f Handle the rest of rows.Err cases 2020-04-21 12:50:23 -04:00
Andrew Mason
61de098072 Add a check to rows.Err after processing all rows
Closes #822.

In https://github.com/go-sql-driver/mysql/issues/1075, @acharis notes
that the way the go-sql driver is written, query timeout errors don't
get set in `rows.Err()` until _after_ a call to `rows.Next()` is made.

Because this kind of error means there will be no rows in the result
set, the `for rows.Next()` will never enter the for loop, so we must
check the value of `rows.Err()` after the loop, and surface the error up
appropriately.
2020-03-31 16:25:16 -04:00
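A compact sketch of the pattern this commit describes (query and names hypothetical): a driver-side timeout yields an empty result set, so the loop body never runs and the error is only visible via `rows.Err()` after the loop.

```go
package sketch

import "database/sql"

// sumIDs is a hypothetical illustration: rows.Err() must be checked after the
// loop, because a query timeout yields zero rows and the loop body never runs.
func sumIDs(db *sql.DB) (total int64, err error) {
	rows, err := db.Query("select id from tbl")
	if err != nil {
		return 0, err
	}
	defer rows.Close()
	for rows.Next() {
		var id int64
		if err := rows.Scan(&id); err != nil {
			return 0, err
		}
		total += id
	}
	// Surface errors (e.g. driver timeouts) that are only set after iteration.
	return total, rows.Err()
}
```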
Justin Fudally
4dab06e92b
Merge pull request #833 from github/jfudally-throttle-fix
Throttle on HTTP throttle error
2020-03-30 14:38:56 -05:00
Justin Fudally
2178b5947b remove spammy error 2020-03-30 10:05:23 -05:00
Justin Fudally
5816ede7b3 add error message 2020-03-30 09:55:49 -05:00
Justin Fudally
7893f8e1c3 catch error in collectFunc 2020-03-30 09:53:10 -05:00
Justin Fudally
df60fa4204 add error logging 2020-03-25 16:06:17 -05:00
Justin Fudally
57cf5f3c90 add override to ignore http errors 2020-03-25 15:58:32 -05:00
Justin Fudally
46dabd338b go fmt 2020-03-25 15:48:19 -05:00
Justin Fudally
d31f3aac0c Add CLI flag for ignoring http errors 2020-03-25 15:47:56 -05:00
Justin Fudally
ca0b822a3d Add comment 2020-03-25 15:41:23 -05:00
Justin Fudally
64083e4705 Throttle on no metrics/error 2020-03-25 15:40:09 -05:00
Justin Fudally
0b2702bf53 Throttle on no metrics/error 2020-03-25 14:12:00 -05:00
Justin Fudally
d5a7e43266 update gitignore 2020-03-24 14:54:54 -05:00
Shuode Li
1d4f998124
Merge branch 'master' into master 2020-03-23 19:40:42 -07:00
Shlomi Noach
97f2d71616 fix unit tests 2020-02-16 12:52:26 +02:00
Shlomi Noach
9f5edc923e Support --mysql-timeout flag 2020-02-16 12:48:23 +02:00
Shlomi Noach
c9db40739e
Merge branch 'master' into update-go-sql-driver-2020-02 2020-02-16 06:48:06 +02:00
Shlomi Noach
78dda8ae24
Merge pull request #455 from github/test-latin1text
latin1 tests with TEXT columns
2020-02-16 06:47:37 +02:00
Shlomi Noach
eaa1839a9b
Merge branch 'master' into test-latin1text 2020-02-15 09:27:10 +02:00
Shlomi Noach
5ce00ef161
Merge pull request #655 from github/tests-varbinary
Adding binary/varbinary tests
2020-02-15 09:26:26 +02:00
Shlomi Noach
30e2512b39
Merge branch 'master' into test-latin1text 2020-02-15 08:33:25 +02:00
Shlomi Noach
ef5cbed0ec
Merge branch 'master' into tests-varbinary 2020-02-15 08:33:12 +02:00
Shlomi Noach
86d8cf4948
Merge pull request #692 from github/bigint-change-nullable
Testing nullable int
2020-02-15 08:32:55 +02:00
Shlomi Noach
4fd4be1308
Merge branch 'master' into tests-varbinary 2020-02-14 09:03:38 +02:00
Shlomi Noach
3cc41e24a8
Merge branch 'master' into test-latin1text 2020-02-14 09:02:53 +02:00
Shlomi Noach
6894916516
Merge branch 'master' into bigint-change-nullable 2020-02-14 09:00:05 +02:00
Shlomi Noach
a155700b78
Merge branch 'master' into update-go-sql-driver-2020-02 2020-02-13 13:47:00 +02:00
Shlomi Noach
08f171790b Update go-sql-driver to latest 2020-02-13 13:45:39 +02:00
Shlomi Noach
c6eb8db163 Merge pull request #820 from github/dock-build
Adding script/dock: simplifying testing/packaging
2020-02-10 09:04:54 +02:00
Shlomi Noach
85a285a9e4 cleanup 2020-02-10 08:59:44 +02:00
Shlomi Noach
f6ea377a4b Dockerfile.packaging 2020-02-09 16:11:50 +02:00
Shlomi Noach
40953fe82f Adding script/dock: simplifying testing/packaging 2020-02-09 16:11:34 +02:00
Shlomi Noach
8bdf2d2dbd Adding script/dock: simplifying testing/packaging 2020-02-09 16:11:19 +02:00
Shlomi Noach
05b4ac08b2 Release v1.0.49 2020-02-09 11:43:19 +02:00
Shlomi Noach
1f99b64c31
Merge branch 'master' into tar-xz-dbdeployer-upgrade 2020-02-05 11:10:09 +02:00
Shlomi Noach
5bffec3412
Merge pull request #818 from github/replica-tests-8-0
Replication tests: MySQL 8.0
2020-02-05 11:09:06 +02:00
Shlomi Noach
5a2bcfc09b try not specify version 2020-02-05 10:59:39 +02:00
Shlomi Noach
abc30b96c8 typo 2020-02-05 10:52:25 +02:00
Shlomi Noach
4e5a708f30 testing dbdeployer version 2020-02-05 10:49:52 +02:00
Shlomi Noach
c30e5862f7 8.0 is on master now 2020-02-05 10:43:08 +02:00
Shlomi Noach
4f827a271e 8.0 is on master now 2020-02-05 10:42:36 +02:00
Shlomi Noach
b72ebfbbd2 replication tests CI: switch to .tar.xz binaries, upgrade dbdeployer 2020-02-05 10:28:54 +02:00
Shlomi Noach
a6d0d5ca26 not null for unique key test 2020-02-05 10:12:29 +02:00
Shlomi Noach
b0f487ea61 create user 2020-02-05 10:07:14 +02:00
Shlomi Noach
9201b9bc04 fetch and checkout from git path 2020-02-05 09:54:07 +02:00
Shlomi Noach
3528d67550 fetch the correct branch. Oh and removing travis 2020-02-05 09:51:40 +02:00
Shlomi Noach
919647cbbc removing travis 2020-02-05 09:50:34 +02:00
Shlomi Noach
14674274dc Experiment with https://github.com/github/gh-ost-ci-env/pull/8 2020-02-05 09:48:55 +02:00
Shlomi Noach
9af130d931 Replication tests on 8.0 2020-02-05 09:46:35 +02:00
Yaser Azfar
2eb141010b adds error checking for an err variable that was left unchecked 2019-12-27 16:54:43 +13:00
Shuode Li
13b5f48ca5 Remove useless code 2019-12-17 04:03:51 +00:00
Shuode Li
ae762694fb Support Azure Database for MySQL. 2019-12-17 03:47:14 +00:00
Shlomi Noach
a36d6bee94
Merge branch 'logging-interface-contrib' into master 2019-12-11 07:03:37 +02:00
Abeyu M
3ca5f89fa5 add migrationcontext to gomysql_reader 2019-10-08 13:49:15 -04:00
Abeyu M
f4676bf463 implement a logging interface 2019-10-07 11:10:36 -04:00
Shlomi Noach
809908cb1d
Merge branch 'master' into ipv6 2019-01-20 09:02:34 +02:00
Shlomi Noach
dd41d9a4b1 Testing nullable int 2019-01-03 11:18:07 +02:00
Shlomi Noach
48e01ad39b Adding binary/varbinary tests 2018-10-29 10:09:15 +02:00
Shlomi Noach
6284a34823
Merge branch 'master' into ipv6 2018-10-02 08:12:15 +03:00
Shlomi Noach
04c0be6137
Merge branch 'master' into ipv6 2018-09-17 09:35:18 +03:00
Shlomi Noach
959d1af211 support ipv6 without port 2018-09-16 11:52:59 +03:00
Shlomi Noach
a7cfaa4d33 added testing 2018-09-16 11:48:15 +03:00
Shlomi Noach
49b80df27b Parsing ipv6 addresses 2018-09-16 11:44:52 +03:00
Shlomi Noach
48e12814ca Documenting limitation: encrypted binlogs 2018-03-13 07:48:24 +02:00
Akshay Chhajed
ccb7654235 Improved connection type logging 2017-10-29 19:53:32 +05:30
Shlomi Noach
42fa64ec92 Merge branch 'master' into test-latin1text 2017-10-17 10:02:48 +03:00
Shlomi Noach
a954887fd5 latin1 tests with TEXT columns 2017-07-20 17:05:45 +03:00
1065 changed files with 199037 additions and 186834 deletions


@@ -5,16 +5,21 @@ on: [pull_request]
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
-    - uses: actions/checkout@master
+    - uses: actions/checkout@v2
-    - name: Set up Go 1.12
+    - name: Set up Go
      uses: actions/setup-go@v1
      with:
-        version: 1.12
-      id: go
+        go-version: 1.17
     - name: Build
       run: script/cibuild
+    - name: Upload gh-ost binary artifact
+      uses: actions/upload-artifact@v1
+      with:
+        name: gh-ost
+        path: bin/gh-ost

.github/workflows/codeql.yml

@@ -0,0 +1,25 @@
+name: "CodeQL analysis"
+
+on:
+  push:
+  pull_request:
+  schedule:
+    - cron: '0 0 * * 0'
+
+jobs:
+  codeql:
+    strategy:
+      fail-fast: false
+    runs-on: ubuntu-latest # windows-latest and ubuntu-latest are supported. macos-latest is not supported at this time.
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v2
+      - name: Initialize CodeQL
+        uses: github/codeql-action/init@v1
+      - name: Perform CodeQL Analysis
+        uses: github/codeql-action/analyze@v1

.github/workflows/golangci-lint.yml

@@ -0,0 +1,23 @@
+name: golangci-lint
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+permissions:
+  contents: read
+  # Optional: allow read access to pull request. Use with `only-new-issues` option.
+  # pull-requests: read
+jobs:
+  golangci:
+    name: lint
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: 1.17
+      - uses: actions/checkout@v3
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v3
+        with:
+          version: v1.46.2


@@ -5,16 +5,20 @@ on: [pull_request]
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
+    strategy:
+      matrix:
+        version: [mysql-5.7.25,mysql-8.0.16,PerconaServer-8.0.21]
     steps:
-    - uses: actions/checkout@master
+    - uses: actions/checkout@v2
-    - name: Set up Go 1.12
+    - name: Set up Go
      uses: actions/setup-go@v1
      with:
-        version: 1.12
-      id: go
+        go-version: 1.17
     - name: migration tests
+      env:
+        TEST_MYSQL_VERSION: ${{ matrix.version }}
       run: script/cibuild-gh-ost-replica-tests

.gitignore

@@ -2,3 +2,4 @@
 /bin/
 /libexec/
 /.vendor/
+.idea/

.golangci.yml

@@ -0,0 +1,30 @@
+run:
+  timeout: 5m
+linters:
+  disable:
+    - errcheck
+  enable:
+    - bodyclose
+    - containedctx
+    - contextcheck
+    - dogsled
+    - durationcheck
+    - errname
+    - errorlint
+    - execinquery
+    - gofmt
+    - ifshort
+    - misspell
+    - nilerr
+    - nilnil
+    - noctx
+    - nolintlint
+    - nosprintfhostport
+    - prealloc
+    - rowserrcheck
+    - sqlclosecheck
+    - unconvert
+    - unparam
+    - unused
+    - wastedassign
+    - whitespace


@@ -1,33 +0,0 @@
-# http://docs.travis-ci.com/user/languages/go/
-
-language: go
-
-go:
-- "1.12.x"
-
-os:
-- linux
-
-services:
-- mysql
-
-env:
-- MYSQL_USER=root
-- CURRENT_CI_ENV=travis
-
-addons:
-  apt:
-    packages:
-    - git
-    - numactl
-    - libaio1
-
-before_install:
-- mysql -e 'CREATE DATABASE IF NOT EXISTS test;'
-
-install: true
-script:
-- script/cibuild
-
-notifications:
-  email: false

Dockerfile.packaging

@@ -0,0 +1,20 @@
+FROM golang:1.17
+
+RUN apt-get update
+RUN apt-get install -y ruby ruby-dev rubygems build-essential
+RUN gem install --no-ri --no-rdoc fpm
+ENV GOPATH=/tmp/go
+
+RUN apt-get install -y curl
+RUN apt-get install -y rsync
+RUN apt-get install -y gcc
+RUN apt-get install -y g++
+RUN apt-get install -y bash
+RUN apt-get install -y git
+RUN apt-get install -y tar
+RUN apt-get install -y rpm
+
+RUN mkdir -p $GOPATH/src/github.com/github/gh-ost
+WORKDIR $GOPATH/src/github.com/github/gh-ost
+COPY . .
+RUN bash build.sh


@@ -1,4 +1,4 @@
-FROM golang:1.12.1
+FROM golang:1.17
 
 LABEL maintainer="github@github.com"
 RUN apt-get update


@@ -1,6 +1,6 @@
 # gh-ost
 
-[![build status](https://travis-ci.org/github/gh-ost.svg)](https://travis-ci.org/github/gh-ost) [![downloads](https://img.shields.io/github/downloads/github/gh-ost/total.svg)](https://github.com/github/gh-ost/releases) [![release](https://img.shields.io/github/release/github/gh-ost.svg)](https://github.com/github/gh-ost/releases)
+[![ci](https://github.com/github/gh-ost/actions/workflows/ci.yml/badge.svg)](https://github.com/github/gh-ost/actions/workflows/ci.yml) [![replica-tests](https://github.com/github/gh-ost/actions/workflows/replica-tests.yml/badge.svg)](https://github.com/github/gh-ost/actions/workflows/replica-tests.yml) [![downloads](https://img.shields.io/github/downloads/github/gh-ost/total.svg)](https://github.com/github/gh-ost/releases) [![release](https://img.shields.io/github/release/github/gh-ost.svg)](https://github.com/github/gh-ost/releases)
 
 #### GitHub's online schema migration for MySQL <img src="doc/images/gh-ost-logo-light-160.png" align="right">
@@ -65,6 +65,7 @@ Also see:
 - [the fine print](doc/the-fine-print.md)
 - [Community questions](https://github.com/github/gh-ost/issues?q=label%3Aquestion)
 - [Using `gh-ost` on AWS RDS](doc/rds.md)
+- [Using `gh-ost` on Azure Database for MySQL](doc/azure.md)
 
 ## What's in a name?
@@ -94,7 +95,7 @@ Please see [Coding gh-ost](doc/coding-ghost.md) for a guide to getting started d
 [Download latest release here](https://github.com/github/gh-ost/releases/latest)
 
-`gh-ost` is a Go project; it is built with Go `1.12` and above. To build on your own, use either:
+`gh-ost` is a Go project; it is built with Go `1.15` and above. To build on your own, use either:
 - [script/build](https://github.com/github/gh-ost/blob/master/script/build) - this is the same build script used by CI hence the authoritative; artifact is `./bin/gh-ost` binary.
 - [build.sh](https://github.com/github/gh-ost/blob/master/build.sh) for building `tar.gz` artifacts in `/tmp/gh-ost`
@@ -109,3 +110,4 @@ Generally speaking, `master` branch is stable, but only [releases](https://githu
 - [@shlomi-noach](https://github.com/shlomi-noach)
 - [@jessbreckenridge](https://github.com/jessbreckenridge)
 - [@gtowey](https://github.com/gtowey)
+- [@timvaillancourt](https://github.com/timvaillancourt)


@@ -1 +1 @@
-1.0.48
+1.1.2


@@ -18,30 +18,32 @@ function build {
   GOOS=$3
   GOARCH=$4
 
-  if ! go version | egrep -q 'go(1\.1[234])' ; then
-    echo "go version must be 1.12 or above"
+  if ! go version | egrep -q 'go1\.(1[5-9]|[2-9][0-9]{1})' ; then
+    echo "go version must be 1.15 or above"
     exit 1
   fi
 
-  echo "Building ${osname} binary"
+  echo "Building ${osname}-${GOARCH} binary"
   export GOOS
   export GOARCH
   go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go
 
   if [ $? -ne 0 ]; then
-      echo "Build failed for ${osname}"
+      echo "Build failed for ${osname} ${GOARCH}."
       exit 1
   fi
 
-  (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${timestamp}.tar.gz $target)
+  (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${GOARCH}-${timestamp}.tar.gz $target)
 
-  if [ "$GOOS" == "linux" ] ; then
+  # build RPM and deb for Linux, x86-64 only
+  if [ "$GOOS" == "linux" ] && [ "$GOARCH" == "amd64" ] ; then
     echo "Creating Distro full packages"
     builddir=$(setuptree)
     cp $buildpath/$target $builddir/gh-ost/usr/bin
     cd $buildpath
-    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm .
-    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach <shlomi-noach+gh-ost-deb@github.com>' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
+    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm --rpm-rpmbuild-define "_build_id_links none" --rpm-os linux .
+    fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files .
     cd -
   fi
 }
@@ -61,11 +63,16 @@ main() {
   mkdir -p ${buildpath}
   rm -rf ${buildpath:?}/*
-  build macOS osx darwin amd64
   build GNU/Linux linux linux amd64
+  build GNU/Linux linux linux arm64
+  build macOS osx darwin amd64
+  build macOS osx darwin arm64
 
   echo "Binaries found in:"
-  ls -1 $buildpath/gh-ost-binary*${timestamp}.tar.gz
+  find $buildpath/gh-ost* -type f -maxdepth 1
+
+  echo "Checksums:"
+  (cd $buildpath && shasum -a256 gh-ost* 2>/dev/null)
 }
 
 main "$@"

26
doc/azure.md Normal file
View File

@ -0,0 +1,26 @@
`gh-ost` has been updated to work with Azure Database for MySQL. However, since GitHub does not use it, this documentation is community-driven, so if you find a bug please [open an issue][new_issue]!
# Azure Database for MySQL
## Limitations
- `gh-ost` runs should be set up to use [`--assume-rbr`][assume_rbr_docs] and use `binlog_row_image=FULL`.
- Azure Database for MySQL does not use the same user name suffix for master and replica, so the master host, user and password need to be specified explicitly.
## Steps
1. Change the replica server's `binlog_row_image` from `MINIMAL` to `FULL`. See the [guide](https://docs.microsoft.com/en-us/azure/mysql/howto-server-parameters) in the Azure documentation.
2. Always run `gh-ost` with these 5 additional parameters:
```{bash}
gh-ost \
--azure \
--assume-master-host=master-server-dns-name \
--master-user="master-user-name" \
--master-password="master-password" \
--assume-rbr \
[-- other parameters you need]
```
[new_issue]: https://github.com/github/gh-ost/issues/new
[assume_rbr_docs]: https://github.com/github/gh-ost/blob/master/doc/command-line-flags.md#assume-rbr
[migrate_test_on_replica_docs]: https://github.com/github/gh-ost/blob/master/doc/cheatsheet.md#c-migratetest-on-replica

View File

@ -5,7 +5,7 @@
Getting started with gh-ost development is simple!
- First obtain the repository with `git clone` or `go get`.
- From inside of the repository run `script/cibuild`
- From inside of the repository run `script/cibuild`.
- This will bootstrap the environment if needed, format the code, build the code, and then run the unit tests.
## CI build workflow
@ -14,6 +14,12 @@ Getting started with gh-ost development is simple!
If additional steps are needed, please add them into this workflow so that the workflow remains simple.
## `golang-ci` linter
To enforce best-practices, Pull Requests are automatically linted by [`golang-ci`](https://golangci-lint.run/). The linter config is located at [`.golangci.yml`](https://github.com/github/gh-ost/blob/master/.golangci.yml) and the `golangci-lint` GitHub Action is located at [`.github/workflows/golangci-lint.yml`](https://github.com/github/gh-ost/blob/master/.github/workflows/golangci-lint.yml).
To run the `golang-ci` linters locally _(recommended before push)_, use `script/lint`.
## Notes:
Currently, `script/ensure-go-installed` will install `go` for Mac OS X and Linux. We welcome PRs to add other platforms.

View File

@ -6,6 +6,14 @@ A more in-depth discussion of various `gh-ost` command line flags: implementatio
Add this flag when executing on Aliyun RDS.
### allow-zero-in-date
Allows the user to make schema changes that include a zero date or zero in date (e.g. adding a `datetime default '0000-00-00 00:00:00'` column), even if global `sql_mode` on MySQL has `NO_ZERO_IN_DATE,NO_ZERO_DATE`.
### azure
Add this flag when executing on Azure Database for MySQL.
### allow-master-master
See [`--assume-master-host`](#assume-master-host).
@ -18,7 +26,7 @@ If, for some reason, you do not wish `gh-ost` to connect to a replica, you may c
### approve-renamed-columns
When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise the new structure may also look like some column was dropped and another was added.
When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise, the new structure may also look like some column was dropped and another was added.
`gh-ost` will print out what it thinks the _rename_ implied, but will not issue the migration unless you provide `--approve-renamed-columns`.
@ -28,7 +36,7 @@ If you think `gh-ost` is mistaken and that there's actually no _rename_ involved
`gh-ost` infers the identity of the master server by crawling up the replication topology. You may explicitly tell `gh-ost` the identity of the master host via `--assume-master-host=the.master.com`. This is useful in:
- _master-master_ topologies (together with [`--allow-master-master`](#allow-master-master)), where `gh-ost` can arbitrarily pick one of the co-masters and you prefer that it picks a specific one
- _master-master_ topologies (together with [`--allow-master-master`](#allow-master-master)), where `gh-ost` can arbitrarily pick one of the co-masters, and you prefer that it picks a specific one
- _tungsten replicator_ topologies (together with [`--tungsten`](#tungsten)), where `gh-ost` is unable to crawl and detect the master
### assume-rbr
@ -37,6 +45,25 @@ If you happen to _know_ your servers use RBR (Row Based Replication, i.e. `binlo
Skipping this step means `gh-ost` would not need the `SUPER` privilege in order to operate.
You may want to use this on Amazon RDS.
### attempt-instant-ddl
MySQL 8.0 supports "instant DDL" for some operations. If an alter statement can be completed with instant DDL, only a metadata change is required internally. Instant operations include:
- Adding a column
- Dropping a column
- Dropping an index
- Extending a varchar column
- Adding a virtual generated column
It is not reliable to parse the `ALTER` statement to determine if it is instant or not. This is because the table might be in an older row format, or have some other incompatibility that is difficult to identify.
`--attempt-instant-ddl` is disabled by default, but the risks of enabling it are relatively minor: `gh-ost` may need to acquire a metadata lock at the start of the operation. This is not a problem for most scenarios, but it could be a problem for users that start the DDL during a period with long-running transactions.
`gh-ost` will automatically fall back to the normal DDL process if the attempt to use instant DDL is unsuccessful.
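For intuition, here is a minimal, hypothetical sketch of that try-then-fall-back flow using `database/sql` (illustrative only; the function and variable names are not gh-ost's internals):
```go
package main

import (
	gosql "database/sql"

	_ "github.com/go-sql-driver/mysql"
)

// tryInstantDDL appends ALGORITHM=INSTANT so that MySQL fails fast when the
// operation is not instant-capable, instead of silently running a copying
// ALTER. On failure, the caller proceeds with the regular gh-ost migration.
func tryInstantDDL(db *gosql.DB, alterStatement string) bool {
	_, err := db.Exec(alterStatement + ", ALGORITHM=INSTANT")
	return err == nil
}
```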
### binlogsyncer-max-reconnect-attempts
`--binlogsyncer-max-reconnect-attempts=0`: the maximum number of attempts to re-establish a broken inspector connection when syncing binlogs. `0` or a negative number means retry indefinitely (default: `0`).
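This value is handed through to the go-mysql binlog syncer configuration (see the `NewGoMySQLReader` change further down). A self-contained sketch, with placeholder connection values:
```go
package main

import (
	gomysql "github.com/go-mysql-org/go-mysql/mysql"
	"github.com/go-mysql-org/go-mysql/replication"
)

func main() {
	// All values below are placeholders; gh-ost fills them in from its
	// inspector connection config.
	syncer := replication.NewBinlogSyncer(replication.BinlogSyncerConfig{
		ServerID:             99999,
		Flavor:               gomysql.MySQLFlavor,
		Host:                 "replica.example.com",
		Port:                 3306,
		User:                 "gh-ost",
		Password:             "secret",
		MaxReconnectAttempts: 0, // 0 or negative: retry indefinitely
	})
	defer syncer.Close()
}
```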
### conf
`--conf=/path/to/my.cnf`: file where credentials are specified. Should be in (or contain) the following format:
@ -57,7 +84,13 @@ Comma delimited status-name=threshold, same format as [`--max-load`](#max-load).
`--critical-load` defines a threshold that, when met, `gh-ost` panics and bails out. The default behavior is to bail out immediately when meeting this threshold.
This may sometimes lead to migrations bailing out on a very short spike, that, while in itself is impacting production and is worth investigating, isn't reason enough to kill a 10 hour migration.
This may sometimes lead to migrations bailing out on a very short spike, that, while in itself is impacting production and is worth investigating, isn't reason enough to kill a 10-hour migration.
### critical-load-hibernate-seconds
When `--critical-load-hibernate-seconds` is non-zero (e.g. `--critical-load-hibernate-seconds=300`), `critical-load` does not panic and bail out; instead, `gh-ost` goes into hibernation for the specified duration. It will not read/write anything from/to any server during this time. Execution then continues upon waking from hibernation.
If `critical-load` is met again, `gh-ost` will repeat this cycle, and never panic and bail out.
### critical-load-interval-millis
@ -94,7 +127,7 @@ Noteworthy is that setting `--dml-batch-size` to higher value _does not_ mean `g
### exact-rowcount
A `gh-ost` execution need to copy whatever rows you have in your existing table onto the ghost table. This can, and often be, a large number. Exactly what that number is?
A `gh-ost` execution needs to copy whatever rows you have in your existing table onto the ghost table. This can be, and often is, a large number. But what exactly is that number?
`gh-ost` initially estimates the number of rows in your table by issuing an `explain select * from your_table`. This will use statistics on your table and return with a rough estimate. How rough? It might go as low as half or as high as double the actual number of rows in your table. This is the same method as used in [`pt-online-schema-change`](https://www.percona.com/doc/percona-toolkit/2.2/pt-online-schema-change.html).
`gh-ost` also supports the `--exact-rowcount` flag. When this flag is given, two things happen:
@ -131,6 +164,10 @@ Add this flag when executing on a 1st generation Google Cloud Platform (GCP).
Default 100. See [`subsecond-lag`](subsecond-lag.md) for details.
### hooks-status-interval
Defaults to 60 seconds. Configures how often the `gh-ost-on-status` hook is called, see [`hooks`](hooks.md) for full details on how to use hooks.
### initially-drop-ghost-table
`gh-ost` maintains two tables while migrating: the _ghost_ table (which is synced from your original table and finally replaces it) and a changelog table, which is used internally for bookkeeping. By default, it panics and aborts if it sees those tables upon startup. Provide `--initially-drop-ghost-table` and `--initially-drop-old-table` to let `gh-ost` know it's OK to drop them beforehand.
@ -177,6 +214,9 @@ Optionally involve the process ID, for example: `--replica-server-id=$((10000000
It's on you to choose a number that does not collide with another `gh-ost` or another running replica.
See also: [`concurrent-migrations`](cheatsheet.md#concurrent-migrations) on the cheatsheet.
### serve-socket-file
Defaults to an auto-determined file, advertised upon startup. Defines the Unix socket file to serve on.
### skip-foreign-key-checks
By default `gh-ost` verifies no foreign keys exist on the migrated table. On servers with a large number of tables this check can take a long time. If you're absolutely certain no foreign keys exist (the table neither references nor is referenced by other tables) and wish to save the check time, provide `--skip-foreign-key-checks`.
@ -209,6 +249,18 @@ Allows `gh-ost` to connect to the MySQL servers using encrypted connections, but
`--ssl-key=/path/to/ssl-key.key`: SSL private key file (in PEM format).
### storage-engine
Default is `innodb`, and `rocksdb` support is currently experimental. InnoDB and RocksDB are both transactional engines, supporting both shared and exclusive row locks.
However, compared to InnoDB, RocksDB currently lacks support for a few features:
- Gap Locks
- Foreign Key
- Generated Columns
- Spatial
- Geometry
When `--storage-engine=rocksdb`, `gh-ost` will make the changes necessary to support RocksDB (e.g. setting the session isolation level to `READ_COMMITTED`).
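The switch itself lands in `MigrationContext.SetConnectionConfig` (visible in the `go/base/context.go` diff further down). A sketch of the mapping it applies to both the inspector and applier connections:
```go
package main

import "fmt"

// transactionIsolation sketches the engine-to-isolation mapping; RocksDB
// lacks the gap locks that REPEATABLE-READ relies on (see the list above).
func transactionIsolation(storageEngine string) string {
	if storageEngine == "rocksdb" {
		return "READ-COMMITTED"
	}
	return "REPEATABLE-READ"
}

func main() {
	fmt.Println(transactionIsolation("rocksdb")) // READ-COMMITTED
}
```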
### test-on-replica
Issue the migration on a replica; do not modify data on the master. Useful for validating, testing and benchmarking. See [`testing-on-replica`](testing-on-replica.md).
@ -223,7 +275,15 @@ Provide a comma-delimited list of replicas; `gh-ost` will throttle when any of
### throttle-http
Provide a HTTP endpoint; `gh-ost` will issue `HEAD` requests on given URL and throttle whenever response status code is not `200`. The URL can be queried and updated dynamically via [interactive commands](interactive-commands.md). Empty URL disables the HTTP check.
Provide an HTTP endpoint; `gh-ost` will issue `HEAD` requests on the given URL and throttle whenever the response status code is not `200`. The URL can be queried and updated dynamically via [interactive commands](interactive-commands.md). An empty URL disables the HTTP check.
### throttle-http-interval-millis
Defaults to 100. Configures the HTTP throttle check interval in milliseconds.
### throttle-http-timeout-millis
Defaults to 1000 (1 second). Configures the HTTP throttler check timeout in milliseconds.
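Conceptually the check is tiny. A hypothetical sketch in Go (not gh-ost's actual throttler code; note the real tool also has `--ignore-http-errors` to relax error handling):
```go
package main

import (
	"net/http"
	"time"
)

// shouldThrottle mirrors the documented behavior: issue a HEAD request and
// throttle on any response other than HTTP 200. The timeout corresponds to
// --throttle-http-timeout-millis; callers poll on the configured interval.
func shouldThrottle(throttleURL string, timeout time.Duration) bool {
	if throttleURL == "" {
		return false // an empty URL disables the HTTP check
	}
	client := &http.Client{Timeout: timeout}
	resp, err := client.Head(throttleURL)
	if err != nil {
		return true // treat an unreachable endpoint as a throttle signal
	}
	defer resp.Body.Close()
	return resp.StatusCode != http.StatusOK
}

func main() {
	_ = shouldThrottle("http://throttler.example.com/check", time.Second)
}
```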
### timestamp-old-table

View File

@ -66,7 +66,9 @@ The following variables are available on all hooks:
- `GH_OST_ESTIMATED_ROWS` - estimated total rows in table
- `GH_OST_COPIED_ROWS` - number of rows copied by `gh-ost`
- `GH_OST_INSPECTED_LAG` - lag in seconds (floating point) of inspected server
- `GH_OST_HEARTBEAT_LAG` - lag in seconds (floating point) of heartbeat
- `GH_OST_PROGRESS` - progress pct ([0..100], floating point) of migration
- `GH_OST_ETA_SECONDS` - estimated duration until migration finishes in seconds
- `GH_OST_MIGRATED_HOST`
- `GH_OST_INSPECTED_HOST`
- `GH_OST_EXECUTING_HOST`

View File

@ -18,6 +18,8 @@ Both interfaces may serve at the same time. Both respond to simple text command,
- `status`: returns a detailed status summary of migration progress and configuration
- `sup`: returns a brief status summary of migration progress
- `coordinates`: returns recent (though not exactly up to date) binary log coordinates of the inspected server
- `applier`: returns the hostname of the applier
- `inspector`: returns the hostname of the inspector
- `chunk-size=<newsize>`: modify the `chunk-size`; applies on next running copy-iteration
- `dml-batch-size=<newsize>`: modify the `dml-batch-size`; applies on next applying of binary log events
- `max-lag-millis=<max-lag>`: modify the maximum replication lag threshold (milliseconds, minimum value is `100`, i.e. `0.1` second)

View File

@ -2,6 +2,8 @@
### Requirements
- `gh-ost` currently requires MySQL versions 5.7 and greater.
- You will need to have one server serving Row Based Replication (RBR) format binary logs. Right now `FULL` row image is supported; `MINIMAL` is to be supported in the near future. `gh-ost` prefers to work with replicas. You may [still have your master configured with Statement Based Replication](migrating-with-sbr.md) (SBR).
- If you are using a replica, the table must have an identical schema between the master and replica.
@ -18,6 +20,8 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
- Switching your `binlog_format` to `ROW`, in the case where it is _not_ `ROW` and you explicitly specified `--switch-to-rbr`
- If your replication is already in RBR (`binlog_format=ROW`) you can specify `--assume-rbr` to avoid the `STOP SLAVE/START SLAVE` operations, hence no need for `SUPER`.
- `gh-ost` uses the `REPEATABLE_READ` transaction isolation level for all MySQL connections, regardless of the server default.
- Running `--test-on-replica`: before the cut-over phase, `gh-ost` stops replication so that you can compare the two tables and satisfy that the migration is sound.
### Limitations
@ -41,6 +45,7 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
- Amazon RDS works, but has its own [limitations](rds.md).
- Google Cloud SQL works, `--gcp` flag required.
- Aliyun RDS works, `--aliyun-rds` flag required.
- Azure Database for MySQL works, `--azure` flag required; see [azure.md](azure.md) for detailed documentation.
- Multisource is not supported when migrating via replica. It _should_ work (but was never tested) when connecting directly to master (`--allow-on-master`)
@ -50,4 +55,5 @@ The `SUPER` privilege is required for `STOP SLAVE`, `START SLAVE` operations. Th
- Migrating a `FEDERATED` table is unsupported and is irrelevant to the problem `gh-ost` tackles.
- [Encrypted binary logs](https://www.percona.com/blog/2018/03/08/binlog-encryption-percona-server-mysql/) are not supported.
- `ALTER TABLE ... RENAME TO some_other_name` is not supported (and you shouldn't use `gh-ost` for such a trivial operation).

View File

@ -29,7 +29,7 @@ CREATE TABLE tbl (
(This is also the definition of the _ghost_ table, except that that table would be called `_tbl_gho`).
In this migration, the _before_ and _after_ versions contain the same unique not-null key (the PRIMARY KEY). To run this migration, `gh-ost` would iterate through the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` in primary key order, while also applying the binlog event writes from `tble` onto `_tbl_gho`.
In this migration, the _before_ and _after_ versions contain the same unique not-null key (the PRIMARY KEY). To run this migration, `gh-ost` would iterate through the `tbl` table using the primary key, copy rows from `tbl` to the _ghost_ table `_tbl_gho` in primary key order, while also applying the binlog event writes from `tbl` onto `_tbl_gho`.
The applying of the binlog events is what requires the shared unique key. For example, an `UPDATE` statement to `tbl` translates to a `REPLACE` statement which `gh-ost` applies to `_tbl_gho`. A `REPLACE` statement expects to insert or replace an existing row based on its row's values and the table's unique key constraints. In particular, if inserting that row would result in a unique key violation (e.g., a row with that primary key already exists), it would _replace_ that existing row with the new values.
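To make that translation concrete, here is a hypothetical sketch (not gh-ost's actual applier; assume the shared PRIMARY KEY column is `id` and a single payload column `data`):
```go
package main

import (
	gosql "database/sql"

	_ "github.com/go-sql-driver/mysql"
)

// applyBinlogUpdate applies an UPDATE captured from tbl's binlog onto the
// ghost table as a REPLACE: the shared unique key (id) guarantees the REPLACE
// overwrites exactly the row the original UPDATE touched.
func applyBinlogUpdate(db *gosql.DB, id int64, data string) error {
	_, err := db.Exec("REPLACE INTO _tbl_gho (id, data) VALUES (?, ?)", id, data)
	return err
}
```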

View File

@ -38,7 +38,7 @@ Note that you may dynamically change both `--max-lag-millis` and the `throttle-c
`--max-load='Threads_running=100,Threads_connected=500'`
Metrics must be valid, numeric [status variables](http://dev.mysql.com/doc/refman/5.6/en/server-status-variables.html)
Metrics must be valid, numeric [status variables](https://dev.mysql.com/doc/refman/5.7/en/server-status-variables.html)
#### Throttle query
@ -97,7 +97,7 @@ Copy: 0/2915 0.0%; Applied: 0; Backlog: 0/100; Elapsed: 42s(copy), 42s(total); s
Throttling time is limited by the availability of the binary logs. When throttling begins, `gh-ost` suspends reading the binary logs, and expects to resume reading from same binary log where it paused.
Your availability of binary logs is typically determined by the [expire_logs_days](https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_expire_logs_days) variable. If you have `expire_logs_days = 10` (or check `select @@global.expire_logs_days`), then you should be able to throttle for up to `10` days.
Your availability of binary logs is typically determined by the [expire_logs_days](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_expire_logs_days) variable. If you have `expire_logs_days = 10` (or check `select @@global.expire_logs_days`), then you should be able to throttle for up to `10` days.
Having said that, throttling for so long is far-fetched: the `gh-ost` process itself must be kept alive during that time, and the amount of binary logs to process once it resumes could potentially take days to replay.

View File

@ -112,7 +112,7 @@ It is also interesting to observe that `gh-ost` is the only application writing
When `gh-ost` pauses (throttles), it issues no writes on the ghost table. Because there are no triggers, write workload is decoupled from the `gh-ost` write workload. And because we're using an asynchronous approach, the algorithm already handles a time difference between a master write time and the ghost apply time. A difference of a few microseconds is no different from a difference of minutes or hours.
When `gh-ost` [throttles](throttle.md), either by replication lag, `max-load` setting or and explicit [interactive user command](interactive-commands.md), the master is back to normal. It sees no more writes on the ghost table.
When `gh-ost` [throttles](throttle.md), either by replication lag, `max-load` setting or an explicit [interactive user command](interactive-commands.md), the master is back to normal. It sees no more writes on the ghost table.
An exception is the ongoing heartbeat writes onto the changelog table, which we consider to be negligible.
#### Testability

View File

@ -7,7 +7,7 @@ Existing MySQL schema migration tools:
- [LHM](https://github.com/soundcloud/lhm)
- [oak-online-alter-table](https://github.com/shlomi-noach/openarkkit)
are all using [triggers](http://dev.mysql.com/doc/refman/5.6/en/triggers.html) to propagate live changes on your table onto a ghost/shadow table that is slowly being synchronized. The tools not all work the same: while most use a synchronous approach (all changes applied on the ghost table), the Facebook tool uses an asynchronous approach (changes are appended to a changelog table, later reviewed and applied on ghost table).
are all using [triggers](https://dev.mysql.com/doc/refman/5.7/en/triggers.html) to propagate live changes on your table onto a ghost/shadow table that is slowly being synchronized. The tools do not all work the same: while most use a synchronous approach (all changes applied on the ghost table), the Facebook tool uses an asynchronous approach (changes are appended to a changelog table, later reviewed and applied on ghost table).
Use of triggers simplifies a lot of the flow in doing a live table migration, but also poses some limitations or difficulties. Here are reasons why we choose to [design a triggerless solution](triggerless-design.md) to schema migrations.

27
go.mod Normal file
View File

@ -0,0 +1,27 @@
module github.com/github/gh-ost
go 1.17
require (
github.com/go-ini/ini v1.62.0
github.com/go-mysql-org/go-mysql v1.3.0
github.com/go-sql-driver/mysql v1.6.0
github.com/openark/golib v0.0.0-20210531070646-355f37940af8
github.com/satori/go.uuid v1.2.0
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7
golang.org/x/text v0.3.6
)
require (
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 // indirect
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect
github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 // indirect
github.com/smartystreets/goconvey v1.6.4 // indirect
go.uber.org/atomic v1.7.0 // indirect
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/ini.v1 v1.62.0 // indirect
)

136
go.sum Normal file
View File

@ -0,0 +1,136 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
github.com/cznic/parser v0.0.0-20160622100904-31edd927e5b1/go.mod h1:2B43mz36vGZNZEwkWi8ayRSSUXLfjL8OkbzwW4NcPMM=
github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
github.com/cznic/y v0.0.0-20170802143616-045f81c6662a/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-ini/ini v1.62.0 h1:7VJT/ZXjzqSrvtraFp4ONq80hTcRQth1c9ZnQ3uNQvU=
github.com/go-ini/ini v1.62.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-mysql-org/go-mysql v1.3.0 h1:lpNqkwdPzIrYSZGdqt8HIgAXZaK6VxBNfr8f7Z4FgGg=
github.com/go-mysql-org/go-mysql v1.3.0/go.mod h1:3lFZKf7l95Qo70+3XB2WpiSf9wu2s3na3geLMaIIrqQ=
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/openark/golib v0.0.0-20210531070646-355f37940af8 h1:9ciIHNuyFqRWi9NpMNw9sVLB6z1ItpP5ZhTY9Q1xVu4=
github.com/openark/golib v0.0.0-20210531070646-355f37940af8/go.mod h1:1jj8x1eDVZxgc/Z4VyamX4qTbAdHPUQA6NeVtCd8Sl8=
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pingcap/errors v0.11.5-0.20201029093017-5a7df2af2ac7/go.mod h1:G7x87le1poQzLB/TqvTJI2ILrSgobnq4Ut7luOwvfvI=
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 h1:LllgC9eGfqzkfubMgjKIDyZYaa609nNWAyNZtpy2B3M=
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3/go.mod h1:G7x87le1poQzLB/TqvTJI2ILrSgobnq4Ut7luOwvfvI=
github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
github.com/pingcap/parser v0.0.0-20210415081931-48e7f467fd74/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 h1:oI+RNwuC9jF2g2lP0u0cVEEZrc/AYBCuFdvwrLWM/6Q=
github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07/go.mod h1:yFdBgwXP24JziuRl2NMUahT7nGLNOKi1SIiFxMttVD4=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201125231158-b5590deeca9b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -15,13 +15,13 @@ import (
"sync/atomic"
"time"
"github.com/satori/go.uuid"
uuid "github.com/satori/go.uuid"
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/openark/golib/log"
"gopkg.in/gcfg.v1"
gcfgscanner "gopkg.in/gcfg.v1/scanner"
"github.com/go-ini/ini"
)
// RowsEstimateMethod is the type of row number estimation
@ -51,6 +51,7 @@ const (
const (
HTTPStatusOK = 200
MaxEventsBatchSize = 1000
ETAUnknown = math.MinInt64
)
var (
@ -76,10 +77,13 @@ func NewThrottleCheckResult(throttle bool, reason string, reasonHint ThrottleRea
type MigrationContext struct {
Uuid string
DatabaseName string
OriginalTableName string
AlterStatement string
DatabaseName string
OriginalTableName string
AlterStatement string
AlterStatementOptions string // anything following the 'ALTER TABLE [schema.]table' from AlterStatement
countMutex sync.Mutex
countTableRowsCancelFunc func()
CountTableRows bool
ConcurrentCountTableRows bool
AllowedRunningOnMaster bool
@ -88,6 +92,7 @@ type MigrationContext struct {
AssumeRBR bool
SkipForeignKeyChecks bool
SkipStrictMode bool
AllowZeroInDate bool
NullableUniqueKeyAllowed bool
ApproveRenamedColumns bool
SkipRenamedColumns bool
@ -95,6 +100,8 @@ type MigrationContext struct {
DiscardForeignKeys bool
AliyunRDS bool
GoogleCloudPlatform bool
AzureMySQL bool
AttemptInstantDDL bool
config ContextConfig
configMutex *sync.Mutex
@ -119,6 +126,7 @@ type MigrationContext struct {
ThrottleAdditionalFlagFile string
throttleQuery string
throttleHTTP string
IgnoreHTTPErrors bool
ThrottleCommandedByUser int64
HibernateUntil int64
maxLoad LoadMap
@ -136,6 +144,7 @@ type MigrationContext struct {
HooksHintMessage string
HooksHintOwner string
HooksHintToken string
HooksStatusIntervalSec int64
DropServeSocket bool
ServeSocketFile string
@ -174,9 +183,14 @@ type MigrationContext struct {
RenameTablesEndTime time.Time
pointOfInterestTime time.Time
pointOfInterestTimeMutex *sync.Mutex
lastHeartbeatOnChangelogTime time.Time
lastHeartbeatOnChangelogMutex *sync.Mutex
CurrentLag int64
currentProgress uint64
etaNanoseonds int64
ThrottleHTTPIntervalMillis int64
ThrottleHTTPStatusCode int64
ThrottleHTTPTimeoutMillis int64
controlReplicasLagResult mysql.ReplicationLagResult
TotalRowsCopied int64
TotalDMLEventsApplied int64
@ -200,6 +214,7 @@ type MigrationContext struct {
OriginalTableColumns *sql.ColumnList
OriginalTableVirtualColumns *sql.ColumnList
OriginalTableUniqueKeys [](*sql.UniqueKey)
OriginalTableAutoIncrement uint64
GhostTableColumns *sql.ColumnList
GhostTableVirtualColumns *sql.ColumnList
GhostTableUniqueKeys [](*sql.UniqueKey)
@ -216,6 +231,27 @@ type MigrationContext struct {
ForceTmpTableName string
recentBinlogCoordinates mysql.BinlogCoordinates
BinlogSyncerMaxReconnectAttempts int
Log Logger
}
type Logger interface {
Debug(args ...interface{})
Debugf(format string, args ...interface{})
Info(args ...interface{})
Infof(format string, args ...interface{})
Warning(args ...interface{}) error
Warningf(format string, args ...interface{}) error
Error(args ...interface{}) error
Errorf(format string, args ...interface{}) error
Errore(err error) error
Fatal(args ...interface{}) error
Fatalf(format string, args ...interface{}) error
Fatale(err error) error
SetLevel(level log.LogLevel)
SetPrintStackTrace(printStackTraceFlag bool)
}
type ContextConfig struct {
@ -241,6 +277,7 @@ func NewMigrationContext() *MigrationContext {
MaxLagMillisecondsThrottleThreshold: 1500,
CutOverLockTimeoutSeconds: 3,
DMLBatchSize: 10,
etaNanoseonds: ETAUnknown,
maxLoad: NewLoadMap(),
criticalLoad: NewLoadMap(),
throttleMutex: &sync.Mutex{},
@ -248,11 +285,26 @@ func NewMigrationContext() *MigrationContext {
throttleControlReplicaKeys: mysql.NewInstanceKeyMap(),
configMutex: &sync.Mutex{},
pointOfInterestTimeMutex: &sync.Mutex{},
lastHeartbeatOnChangelogMutex: &sync.Mutex{},
ColumnRenameMap: make(map[string]string),
PanicAbort: make(chan error),
Log: NewDefaultLogger(),
}
}
func (this *MigrationContext) SetConnectionConfig(storageEngine string) error {
var transactionIsolation string
switch storageEngine {
case "rocksdb":
transactionIsolation = "READ-COMMITTED"
default:
transactionIsolation = "REPEATABLE-READ"
}
this.InspectorConnectionConfig.TransactionIsolation = transactionIsolation
this.ApplierConnectionConfig.TransactionIsolation = transactionIsolation
return nil
}
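// Example (sketch): selecting rocksdb flips both the inspector and applier
// connections to READ-COMMITTED:
//
//	ctx := NewMigrationContext()
//	_ = ctx.SetConnectionConfig("rocksdb")
//	// ctx.ApplierConnectionConfig.TransactionIsolation == "READ-COMMITTED"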
func getSafeTableName(baseName string, suffix string) string {
name := fmt.Sprintf("_%s_%s", baseName, suffix)
if len(name) <= mysql.MaxTableNameLength {
@ -391,10 +443,44 @@ func (this *MigrationContext) IsTransactionalTable() bool {
{
return true
}
case "rocksdb":
{
return true
}
}
return false
}
// SetCountTableRowsCancelFunc sets the cancel function for the CountTableRows query context
func (this *MigrationContext) SetCountTableRowsCancelFunc(f func()) {
this.countMutex.Lock()
defer this.countMutex.Unlock()
this.countTableRowsCancelFunc = f
}
// IsCountingTableRows returns true if the migration has a table count query running
func (this *MigrationContext) IsCountingTableRows() bool {
this.countMutex.Lock()
defer this.countMutex.Unlock()
return this.countTableRowsCancelFunc != nil
}
// CancelTableRowsCount cancels the CountTableRows query context. It is safe to
// call this function even when IsCountingTableRows is false.
func (this *MigrationContext) CancelTableRowsCount() {
this.countMutex.Lock()
defer this.countMutex.Unlock()
if this.countTableRowsCancelFunc == nil {
return
}
this.countTableRowsCancelFunc()
this.countTableRowsCancelFunc = nil
}
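// Example (sketch): a row-count goroutine registers its context cancel func,
// so that cut-over can abort a still-running COUNT(*) query:
//
//	countCtx, cancel := context.WithCancel(context.Background())
//	migrationContext.SetCountTableRowsCancelFunc(cancel)
//	go countRows(countCtx) // hypothetical long-running count query
//	// ... later, when the exact count no longer matters:
//	migrationContext.CancelTableRowsCount() // safe even if the count already finished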
// ElapsedTime returns time since very beginning of the process
func (this *MigrationContext) ElapsedTime() time.Duration {
return time.Since(this.StartTime)
@ -430,6 +516,10 @@ func (this *MigrationContext) MarkRowCopyEndTime() {
this.RowCopyEndTime = time.Now()
}
func (this *MigrationContext) TimeSinceLastHeartbeatOnChangelog() time.Duration {
return time.Since(this.GetLastHeartbeatOnChangelogTime())
}
func (this *MigrationContext) GetCurrentLagDuration() time.Duration {
return time.Duration(atomic.LoadInt64(&this.CurrentLag))
}
@ -442,6 +532,22 @@ func (this *MigrationContext) SetProgressPct(progressPct float64) {
atomic.StoreUint64(&this.currentProgress, math.Float64bits(progressPct))
}
func (this *MigrationContext) GetETADuration() time.Duration {
return time.Duration(atomic.LoadInt64(&this.etaNanoseonds))
}
func (this *MigrationContext) SetETADuration(etaDuration time.Duration) {
atomic.StoreInt64(&this.etaNanoseonds, etaDuration.Nanoseconds())
}
func (this *MigrationContext) GetETASeconds() int64 {
nano := atomic.LoadInt64(&this.etaNanoseonds)
if nano < 0 {
return ETAUnknown
}
return nano / int64(time.Second)
}
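// Example (sketch): rendering the ETA in a status line, treating ETAUnknown
// as "not yet computable":
//
//	if eta := ctx.GetETASeconds(); eta == ETAUnknown {
//		fmt.Println("ETA: N/A")
//	} else {
//		fmt.Printf("ETA: %ds\n", eta)
//	}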
// math.Float64bits([f=0..100])
// GetTotalRowsCopied returns the accurate number of rows being copied (affected)
@ -469,6 +575,20 @@ func (this *MigrationContext) TimeSincePointOfInterest() time.Duration {
return time.Since(this.pointOfInterestTime)
}
func (this *MigrationContext) SetLastHeartbeatOnChangelogTime(t time.Time) {
this.lastHeartbeatOnChangelogMutex.Lock()
defer this.lastHeartbeatOnChangelogMutex.Unlock()
this.lastHeartbeatOnChangelogTime = t
}
func (this *MigrationContext) GetLastHeartbeatOnChangelogTime() time.Time {
this.lastHeartbeatOnChangelogMutex.Lock()
defer this.lastHeartbeatOnChangelogMutex.Unlock()
return this.lastHeartbeatOnChangelogTime
}
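// Example (sketch): the changelog heartbeat reader records each heartbeat it
// observes via the setter, and status reporting (e.g. the GH_OST_HEARTBEAT_LAG
// hook variable) derives the lag from the getter:
//
//	ctx.SetLastHeartbeatOnChangelogTime(time.Now())
//	// ... later ...
//	lagSeconds := ctx.TimeSinceLastHeartbeatOnChangelog().Seconds()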
func (this *MigrationContext) SetHeartbeatIntervalMilliseconds(heartbeatIntervalMilliseconds int64) {
if heartbeatIntervalMilliseconds < 100 {
heartbeatIntervalMilliseconds = 100
@ -487,8 +607,8 @@ func (this *MigrationContext) SetMaxLagMillisecondsThrottleThreshold(maxLagMilli
}
func (this *MigrationContext) SetChunkSize(chunkSize int64) {
if chunkSize < 100 {
chunkSize = 100
if chunkSize < 10 {
chunkSize = 10
}
if chunkSize > 100000 {
chunkSize = 100000
@ -574,6 +694,13 @@ func (this *MigrationContext) SetThrottleHTTP(throttleHTTP string) {
this.throttleHTTP = throttleHTTP
}
func (this *MigrationContext) SetIgnoreHTTPErrors(ignoreHTTPErrors bool) {
this.throttleHTTPMutex.Lock()
defer this.throttleHTTPMutex.Unlock()
this.IgnoreHTTPErrors = ignoreHTTPErrors
}
func (this *MigrationContext) GetMaxLoad() LoadMap {
this.throttleMutex.Lock()
defer this.throttleMutex.Unlock()
@ -735,10 +862,39 @@ func (this *MigrationContext) ReadConfigFile() error {
if this.ConfigFile == "" {
return nil
}
gcfg.RelaxedParserMode = true
gcfgscanner.RelaxedScannerMode = true
if err := gcfg.ReadFileInto(&this.config, this.ConfigFile); err != nil {
return fmt.Errorf("Error reading config file %s. Details: %s", this.ConfigFile, err.Error())
cfg, err := ini.Load(this.ConfigFile)
if err != nil {
return err
}
if cfg.Section("client").HasKey("user") {
this.config.Client.User = cfg.Section("client").Key("user").String()
}
if cfg.Section("client").HasKey("password") {
this.config.Client.Password = cfg.Section("client").Key("password").String()
}
if cfg.Section("osc").HasKey("chunk_size") {
this.config.Osc.Chunk_Size, err = cfg.Section("osc").Key("chunk_size").Int64()
if err != nil {
return fmt.Errorf("Unable to read osc chunk size: %w", err)
}
}
if cfg.Section("osc").HasKey("max_load") {
this.config.Osc.Max_Load = cfg.Section("osc").Key("max_load").String()
}
if cfg.Section("osc").HasKey("replication_lag_query") {
this.config.Osc.Replication_Lag_Query = cfg.Section("osc").Key("replication_lag_query").String()
}
if cfg.Section("osc").HasKey("max_lag_millis") {
this.config.Osc.Max_Lag_Millis, err = cfg.Section("osc").Key("max_lag_millis").Int64()
if err != nil {
return fmt.Errorf("Unable to read max lag millis: %w", err)
}
}
// We accept user & password in the form "${SOME_ENV_VARIABLE}" in which case we pull
@ -752,20 +908,3 @@ func (this *MigrationContext) ReadConfigFile() error {
return nil
}
func (this *MigrationContext) PanicAbortIfTableError(err error) {
if err == nil {
return
}
if strings.Contains(err.Error(), mysql.Error1146TableDoesntExist) || strings.Contains(err.Error(), mysql.Error1017CantFindFile) {
this.PanicAbortOnError(err)
}
// otherwise irrelevant error and we do not panic
}
func (this *MigrationContext) PanicAbortOnError(err error) {
if err == nil {
return
}
this.PanicAbort <- err
}

View File

@ -1,16 +1,18 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package base
import (
"io/ioutil"
"os"
"testing"
"time"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {
@ -56,3 +58,65 @@ func TestGetTableNames(t *testing.T) {
test.S(t).ExpectEquals(context.GetChangelogTableName(), "_tmp_ghc")
}
}
func TestReadConfigFile(t *testing.T) {
{
context := NewMigrationContext()
context.ConfigFile = "/does/not/exist"
if err := context.ReadConfigFile(); err == nil {
t.Fatal("Expected .ReadConfigFile() to return an error, got nil")
}
}
{
f, err := ioutil.TempFile("", t.Name())
if err != nil {
t.Fatalf("Failed to create tmp file: %v", err)
}
defer os.Remove(f.Name())
f.Write([]byte("[client]"))
context := NewMigrationContext()
context.ConfigFile = f.Name()
if err := context.ReadConfigFile(); err != nil {
t.Fatalf(".ReadConfigFile() failed: %v", err)
}
}
{
f, err := ioutil.TempFile("", t.Name())
if err != nil {
t.Fatalf("Failed to create tmp file: %v", err)
}
defer os.Remove(f.Name())
f.Write([]byte("[client]\nuser=test\npassword=123456"))
context := NewMigrationContext()
context.ConfigFile = f.Name()
if err := context.ReadConfigFile(); err != nil {
t.Fatalf(".ReadConfigFile() failed: %v", err)
}
if context.config.Client.User != "test" {
t.Fatalf("Expected client user %q, got %q", "test", context.config.Client.User)
} else if context.config.Client.Password != "123456" {
t.Fatalf("Expected client password %q, got %q", "123456", context.config.Client.Password)
}
}
{
f, err := ioutil.TempFile("", t.Name())
if err != nil {
t.Fatalf("Failed to create tmp file: %v", err)
}
defer os.Remove(f.Name())
f.Write([]byte("[osc]\nmax_load=10"))
context := NewMigrationContext()
context.ConfigFile = f.Name()
if err := context.ReadConfigFile(); err != nil {
t.Fatalf(".ReadConfigFile() failed: %v", err)
}
if context.config.Osc.Max_Load != "10" {
t.Fatalf("Expected osc 'max_load' %q, got %q", "10", context.config.Osc.Max_Load)
}
}
}

72
go/base/default_logger.go Normal file
View File

@ -0,0 +1,72 @@
/*
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package base
import (
"github.com/openark/golib/log"
)
type simpleLogger struct{}
func NewDefaultLogger() *simpleLogger {
return &simpleLogger{}
}
func (*simpleLogger) Debug(args ...interface{}) {
log.Debug(args[0].(string), args[1:]...)
}
func (*simpleLogger) Debugf(format string, args ...interface{}) {
log.Debugf(format, args...)
}
func (*simpleLogger) Info(args ...interface{}) {
log.Info(args[0].(string), args[1:]...)
}
func (*simpleLogger) Infof(format string, args ...interface{}) {
log.Infof(format, args...)
}
func (*simpleLogger) Warning(args ...interface{}) error {
return log.Warning(args[0].(string), args[1:]...)
}
func (*simpleLogger) Warningf(format string, args ...interface{}) error {
return log.Warningf(format, args...)
}
func (*simpleLogger) Error(args ...interface{}) error {
return log.Error(args[0].(string), args[1:]...)
}
func (*simpleLogger) Errorf(format string, args ...interface{}) error {
return log.Errorf(format, args...)
}
func (*simpleLogger) Errore(err error) error {
return log.Errore(err)
}
func (*simpleLogger) Fatal(args ...interface{}) error {
return log.Fatal(args[0].(string), args[1:]...)
}
func (*simpleLogger) Fatalf(format string, args ...interface{}) error {
return log.Fatalf(format, args...)
}
func (*simpleLogger) Fatale(err error) error {
return log.Fatale(err)
}
func (*simpleLogger) SetLevel(level log.LogLevel) {
log.SetLevel(level)
}
func (*simpleLogger) SetPrintStackTrace(printStackTraceFlag bool) {
log.SetPrintStackTrace(printStackTraceFlag)
}
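// Example (sketch): MigrationContext.Log is typed as the Logger interface, so
// this default can be swapped for any conforming implementation:
//
//	ctx := NewMigrationContext()
//	ctx.Log = NewDefaultLogger() // already the default
//	ctx.Log.SetLevel(log.DEBUG)
//	ctx.Log.Infof("migration context %s ready", ctx.Uuid)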

View File

@ -8,8 +8,8 @@ package base
import (
"testing"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -13,8 +13,8 @@ import (
"time"
gosql "database/sql"
"github.com/github/gh-ost/go/mysql"
"github.com/outbrain/golib/log"
)
var (
@ -25,9 +25,7 @@ func PrettifyDurationOutput(d time.Duration) string {
if d < time.Second {
return "0s"
}
result := fmt.Sprintf("%s", d)
result = prettifyDurationRegexp.ReplaceAllString(result, "")
return result
return prettifyDurationRegexp.ReplaceAllString(d.String(), "")
}
func FileExists(fileName string) bool {
@ -63,7 +61,7 @@ func StringContainsAll(s string, substrings ...string) bool {
return nonEmptyStringsFound
}
func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext *MigrationContext) (string, error) {
func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext *MigrationContext, name string) (string, error) {
versionQuery := `select @@global.version`
var port, extraPort int
var version string
@ -71,12 +69,13 @@ func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig,
return "", err
}
extraPortQuery := `select @@global.extra_port`
if err := db.QueryRow(extraPortQuery).Scan(&extraPort); err != nil {
if err := db.QueryRow(extraPortQuery).Scan(&extraPort); err != nil { // nolint:staticcheck
// swallow this error. not all servers support extra_port
}
// AliyunRDS set users port to "NULL", replace it by gh-ost param
// GCP set users port to "NULL", replace it by gh-ost param
if migrationContext.AliyunRDS || migrationContext.GoogleCloudPlatform {
// Azure MySQL set users port to a different value by design, replace it by gh-ost param
if migrationContext.AliyunRDS || migrationContext.GoogleCloudPlatform || migrationContext.AzureMySQL {
port = connectionConfig.Key.Port
} else {
portQuery := `select @@global.port`
@ -86,7 +85,7 @@ func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig,
}
if connectionConfig.Key.Port == port || (extraPort > 0 && connectionConfig.Key.Port == extraPort) {
log.Infof("connection validated on %+v", connectionConfig.Key)
migrationContext.Log.Infof("%s connection validated on %+v", name, connectionConfig.Key)
return version, nil
} else if extraPort == 0 {
return "", fmt.Errorf("Unexpected database port reported: %+v", port)

View File

@ -8,8 +8,8 @@ package base
import (
"testing"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -13,13 +13,13 @@ import (
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
gomysql "github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go-mysql/replication"
gomysql "github.com/go-mysql-org/go-mysql/mysql"
"github.com/go-mysql-org/go-mysql/replication"
"golang.org/x/net/context"
)
type GoMySQLReader struct {
migrationContext *base.MigrationContext
connectionConfig *mysql.ConnectionConfig
binlogSyncer *replication.BinlogSyncer
binlogStreamer *replication.BinlogStreamer
@ -28,42 +28,40 @@ type GoMySQLReader struct {
LastAppliedRowsEventHint mysql.BinlogCoordinates
}
func NewGoMySQLReader(migrationContext *base.MigrationContext) (binlogReader *GoMySQLReader, err error) {
binlogReader = &GoMySQLReader{
connectionConfig: migrationContext.InspectorConnectionConfig,
func NewGoMySQLReader(migrationContext *base.MigrationContext) *GoMySQLReader {
connectionConfig := migrationContext.InspectorConnectionConfig
return &GoMySQLReader{
migrationContext: migrationContext,
connectionConfig: connectionConfig,
currentCoordinates: mysql.BinlogCoordinates{},
currentCoordinatesMutex: &sync.Mutex{},
binlogSyncer: nil,
binlogStreamer: nil,
binlogSyncer: replication.NewBinlogSyncer(replication.BinlogSyncerConfig{
ServerID: uint32(migrationContext.ReplicaServerId),
Flavor: gomysql.MySQLFlavor,
Host: connectionConfig.Key.Hostname,
Port: uint16(connectionConfig.Key.Port),
User: connectionConfig.User,
Password: connectionConfig.Password,
TLSConfig: connectionConfig.TLSConfig(),
UseDecimal: true,
MaxReconnectAttempts: migrationContext.BinlogSyncerMaxReconnectAttempts,
}),
}
serverId := uint32(migrationContext.ReplicaServerId)
binlogSyncerConfig := replication.BinlogSyncerConfig{
ServerID: serverId,
Flavor: "mysql",
Host: binlogReader.connectionConfig.Key.Hostname,
Port: uint16(binlogReader.connectionConfig.Key.Port),
User: binlogReader.connectionConfig.User,
Password: binlogReader.connectionConfig.Password,
TLSConfig: binlogReader.connectionConfig.TLSConfig(),
UseDecimal: true,
}
binlogReader.binlogSyncer = replication.NewBinlogSyncer(binlogSyncerConfig)
return binlogReader, err
}
// ConnectBinlogStreamer
func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordinates) (err error) {
if coordinates.IsEmpty() {
return log.Errorf("Empty coordinates at ConnectBinlogStreamer()")
return this.migrationContext.Log.Errorf("Empty coordinates at ConnectBinlogStreamer()")
}
this.currentCoordinates = coordinates
log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates)
this.migrationContext.Log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates)
// Start sync with specified binlog file and position
this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{this.currentCoordinates.LogFile, uint32(this.currentCoordinates.LogPos)})
this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{
Name: this.currentCoordinates.LogFile,
Pos: uint32(this.currentCoordinates.LogPos),
})
return err
}
@ -78,7 +76,7 @@ func (this *GoMySQLReader) GetCurrentBinlogCoordinates() *mysql.BinlogCoordinate
// StreamEvents
func (this *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEvent *replication.RowsEvent, entriesChannel chan<- *BinlogEntry) error {
if this.currentCoordinates.SmallerThanOrEquals(&this.LastAppliedRowsEventHint) {
log.Debugf("Skipping handled query at %+v", this.currentCoordinates)
this.migrationContext.Log.Debugf("Skipping handled query at %+v", this.currentCoordinates)
return nil
}
@ -141,20 +139,22 @@ func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesCha
defer this.currentCoordinatesMutex.Unlock()
this.currentCoordinates.LogPos = int64(ev.Header.LogPos)
}()
switch binlogEvent := ev.Event.(type) {
case *replication.RotateEvent:
func() {
this.currentCoordinatesMutex.Lock()
defer this.currentCoordinatesMutex.Unlock()
this.currentCoordinates.LogFile = string(binlogEvent.NextLogName)
}()
this.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), binlogEvent.NextLogName)
case *replication.RowsEvent:
if err := this.handleRowsEvent(ev, binlogEvent, entriesChannel); err != nil {
return err
}
}
}
this.migrationContext.Log.Debugf("done streaming events")
return nil
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -8,16 +8,18 @@ package main
import (
"flag"
"fmt"
"net/url"
"os"
"os/signal"
"syscall"
"github.com/github/gh-ost/go/base"
"github.com/github/gh-ost/go/logic"
"github.com/github/gh-ost/go/sql"
_ "github.com/go-sql-driver/mysql"
"github.com/outbrain/golib/log"
"github.com/openark/golib/log"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/term"
)
var AppVersion string
@ -31,7 +33,7 @@ func acceptSignals(migrationContext *base.MigrationContext) {
for sig := range c {
switch sig {
case syscall.SIGHUP:
log.Infof("Received SIGHUP. Reloading configuration")
migrationContext.Log.Infof("Received SIGHUP. Reloading configuration")
if err := migrationContext.ReadConfigFile(); err != nil {
log.Errore(err)
} else {
@ -48,6 +50,7 @@ func main() {
flag.StringVar(&migrationContext.InspectorConnectionConfig.Key.Hostname, "host", "127.0.0.1", "MySQL hostname (preferably a replica, not the master)")
flag.StringVar(&migrationContext.AssumeMasterHostname, "assume-master-host", "", "(optional) explicitly tell gh-ost the identity of the master. Format: some.host.com[:port] This is useful in master-master setups where you wish to pick an explicit master, or in a tungsten-replicator where gh-ost is unable to determine the master")
flag.IntVar(&migrationContext.InspectorConnectionConfig.Key.Port, "port", 3306, "MySQL port (preferably a replica, not the master)")
flag.Float64Var(&migrationContext.InspectorConnectionConfig.Timeout, "mysql-timeout", 0.0, "Connect, read and write timeout for MySQL")
flag.StringVar(&migrationContext.CliUser, "user", "", "MySQL user")
flag.StringVar(&migrationContext.CliPassword, "password", "", "MySQL password")
flag.StringVar(&migrationContext.CliMasterUser, "master-user", "", "MySQL user on master, if different from that on replica. Requires --assume-master-host")
@ -64,6 +67,9 @@ func main() {
flag.StringVar(&migrationContext.DatabaseName, "database", "", "database name (mandatory)")
flag.StringVar(&migrationContext.OriginalTableName, "table", "", "table name (mandatory)")
flag.StringVar(&migrationContext.AlterStatement, "alter", "", "alter statement (mandatory)")
flag.BoolVar(&migrationContext.AttemptInstantDDL, "attempt-instant-ddl", false, "Attempt to use instant DDL for this migration first")
storageEngine := flag.String("storage-engine", "innodb", "Specify table storage engine (default: 'innodb'). When 'rocksdb': the session transaction isolation level is changed from REPEATABLE_READ to READ_COMMITTED.")
flag.BoolVar(&migrationContext.CountTableRows, "exact-rowcount", false, "actually count table rows as opposed to estimate them (results in more accurate progress estimation)")
flag.BoolVar(&migrationContext.ConcurrentCountTableRows, "concurrent-rowcount", true, "(with --exact-rowcount), when true (default): count rows after row-copy begins, concurrently, and adjust row estimate later on; when false: first count rows, then start row copy")
flag.BoolVar(&migrationContext.AllowedRunningOnMaster, "allow-on-master", false, "allow this migration to run directly on master. Preferably it would run on a replica")
@ -75,8 +81,10 @@ func main() {
flag.BoolVar(&migrationContext.DiscardForeignKeys, "discard-foreign-keys", false, "DANGER! This flag will migrate a table that has foreign keys and will NOT create foreign keys on the ghost table, thus your altered table will have NO foreign keys. This is useful for intentional dropping of foreign keys")
flag.BoolVar(&migrationContext.SkipForeignKeyChecks, "skip-foreign-key-checks", false, "set to 'true' when you know for certain there are no foreign keys on your table, and wish to skip the time it takes for gh-ost to verify that")
flag.BoolVar(&migrationContext.SkipStrictMode, "skip-strict-mode", false, "explicitly tell gh-ost binlog applier not to enforce strict sql mode")
flag.BoolVar(&migrationContext.AllowZeroInDate, "allow-zero-in-date", false, "explicitly tell gh-ost binlog applier to ignore NO_ZERO_IN_DATE,NO_ZERO_DATE in sql_mode")
flag.BoolVar(&migrationContext.AliyunRDS, "aliyun-rds", false, "set to 'true' when you execute on Aliyun RDS.")
flag.BoolVar(&migrationContext.GoogleCloudPlatform, "gcp", false, "set to 'true' when you execute on a 1st generation Google Cloud Platform (GCP).")
flag.BoolVar(&migrationContext.AzureMySQL, "azure", false, "set to 'true' when you execute on Azure Database on MySQL.")
executeFlag := flag.Bool("execute", false, "actually execute the alter & migrate the table. Default is noop: do some tests and exit")
flag.BoolVar(&migrationContext.TestOnReplica, "test-on-replica", false, "Have the migration run on a replica, not on the master. At the end of migration replication is stopped, and tables are swapped and immediately swap-revert. Replication remains stopped and you can compare the two tables for building trust")
@ -95,7 +103,7 @@ func main() {
flag.BoolVar(&migrationContext.AssumeRBR, "assume-rbr", false, "set to 'true' when you know for certain your server uses 'ROW' binlog_format. gh-ost is unable to tell, event after reading binlog_format, whether the replication process does indeed use 'ROW', and restarts replication to be certain RBR setting is applied. Such operation requires SUPER privileges which you might not have. Setting this flag avoids restarting replication and you can proceed to use gh-ost without SUPER privileges")
flag.BoolVar(&migrationContext.CutOverExponentialBackoff, "cut-over-exponential-backoff", false, "Wait exponentially longer intervals between failed cut-over attempts. Wait intervals obey a maximum configurable with 'exponential-backoff-max-interval').")
exponentialBackoffMaxInterval := flag.Int64("exponential-backoff-max-interval", 64, "Maximum number of seconds to wait between attempts when performing various operations with exponential backoff.")
chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 10-100,000)")
dmlBatchSize := flag.Int64("dml-batch-size", 10, "batch size for DML events to apply in a single transaction (range 1-100)")
defaultRetries := flag.Int64("default-retries", 60, "Default number of retries for various operations before panicking")
cutOverLockTimeoutSeconds := flag.Int64("cut-over-lock-timeout-seconds", 3, "Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout)")
@ -106,6 +114,9 @@ func main() {
throttleControlReplicas := flag.String("throttle-control-replicas", "", "List of replicas on which to check for lag; comma delimited. Example: myhost1.com:3306,myhost2.com,myhost3.com:3307")
throttleQuery := flag.String("throttle-query", "", "when given, issued (every second) to check if operation should throttle. Expecting to return zero for no-throttle, >0 for throttle. Query is issued on the migrated server. Make sure this query is lightweight")
throttleHTTP := flag.String("throttle-http", "", "when given, gh-ost checks given URL via HEAD request; any response code other than 200 (OK) causes throttling; make sure it has low latency response")
flag.Int64Var(&migrationContext.ThrottleHTTPIntervalMillis, "throttle-http-interval-millis", 100, "Number of milliseconds to wait before triggering another HTTP throttle check")
flag.Int64Var(&migrationContext.ThrottleHTTPTimeoutMillis, "throttle-http-timeout-millis", 1000, "Number of milliseconds to use as an HTTP throttle check timeout")
ignoreHTTPErrors := flag.Bool("ignore-http-errors", false, "ignore HTTP connection errors during throttle check")
heartbeatIntervalMillis := flag.Int64("heartbeat-interval-millis", 100, "how frequently would gh-ost inject a heartbeat value")
flag.StringVar(&migrationContext.ThrottleFlagFile, "throttle-flag-file", "", "operation pauses when this file exists; hint: use a file that is specific to the table being altered")
flag.StringVar(&migrationContext.ThrottleAdditionalFlagFile, "throttle-additional-flag-file", "/tmp/gh-ost.throttle", "operation pauses when this file exists; hint: keep default, use for throttling multiple gh-ost operations")
@ -120,13 +131,15 @@ func main() {
flag.StringVar(&migrationContext.HooksHintMessage, "hooks-hint", "", "arbitrary message to be injected to hooks via GH_OST_HOOKS_HINT, for your convenience")
flag.StringVar(&migrationContext.HooksHintOwner, "hooks-hint-owner", "", "arbitrary name of owner to be injected to hooks via GH_OST_HOOKS_HINT_OWNER, for your convenience")
flag.StringVar(&migrationContext.HooksHintToken, "hooks-hint-token", "", "arbitrary token to be injected to hooks via GH_OST_HOOKS_HINT_TOKEN, for your convenience")
flag.Int64Var(&migrationContext.HooksStatusIntervalSec, "hooks-status-interval", 60, "how many seconds to wait between calling onStatus hook")
flag.UintVar(&migrationContext.ReplicaServerId, "replica-server-id", 99999, "server id used by gh-ost process. Default: 99999")
flag.IntVar(&migrationContext.BinlogSyncerMaxReconnectAttempts, "binlogsyncer-max-reconnect-attempts", 0, "when master node fails, the maximum number of binlog synchronization attempts to reconnect. 0 is unlimited")
maxLoad := flag.String("max-load", "", "Comma delimited status-name=threshold. e.g: 'Threads_running=100,Threads_connected=500'. When status exceeds threshold, app throttles writes")
criticalLoad := flag.String("critical-load", "", "Comma delimited status-name=threshold, same format as --max-load. When status exceeds threshold, app panics and quits")
flag.Int64Var(&migrationContext.CriticalLoadIntervalMilliseconds, "critical-load-interval-millis", 0, "When 0, migration immediately bails out upon meeting critical-load. When non-zero, a second check is done after given interval, and migration only bails out if 2nd check still meets critical load")
flag.Int64Var(&migrationContext.CriticalLoadHibernateSeconds, "critical-load-hibernate-seconds", 0, "When nonzero, critical-load does not panic and bail out; instead, gh-ost goes into hibernate for the specified duration. It will not read/write anything to from/to any server")
flag.Int64Var(&migrationContext.CriticalLoadHibernateSeconds, "critical-load-hibernate-seconds", 0, "When non-zero, critical-load does not panic and bail out; instead, gh-ost goes into hibernation for the specified duration. It will not read/write anything from/to any server")
quiet := flag.Bool("quiet", false, "quiet")
verbose := flag.Bool("verbose", false, "verbose")
debug := flag.Bool("debug", false, "debug mode (very verbose)")
@ -156,69 +169,92 @@ func main() {
return
}
migrationContext.Log.SetLevel(log.ERROR)
if *verbose {
migrationContext.Log.SetLevel(log.INFO)
}
if *debug {
migrationContext.Log.SetLevel(log.DEBUG)
}
if *stack {
migrationContext.Log.SetPrintStackTrace(*stack)
}
if *quiet {
// Override!!
migrationContext.Log.SetLevel(log.ERROR)
}
if migrationContext.DatabaseName == "" {
log.Fatalf("--database must be provided and database name must not be empty")
}
if migrationContext.OriginalTableName == "" {
log.Fatalf("--table must be provided and table name must not be empty")
if err := migrationContext.SetConnectionConfig(*storageEngine); err != nil {
migrationContext.Log.Fatale(err)
}
if migrationContext.AlterStatement == "" {
log.Fatalf("--alter must be provided and statement must not be empty")
log.Fatal("--alter must be provided and statement must not be empty")
}
parser := sql.NewParserFromAlterStatement(migrationContext.AlterStatement)
migrationContext.AlterStatementOptions = parser.GetAlterStatementOptions()
if migrationContext.DatabaseName == "" {
if parser.HasExplicitSchema() {
migrationContext.DatabaseName = parser.GetExplicitSchema()
} else {
log.Fatal("--database must be provided and database name must not be empty, or --alter must specify database name")
}
}
if err := flag.Set("database", url.QueryEscape(migrationContext.DatabaseName)); err != nil {
migrationContext.Log.Fatale(err)
}
if migrationContext.OriginalTableName == "" {
if parser.HasExplicitTable() {
migrationContext.OriginalTableName = parser.GetExplicitTable()
} else {
log.Fatal("--table must be provided and table name must not be empty, or --alter must specify table name")
}
}
migrationContext.Noop = !(*executeFlag)
if migrationContext.AllowedRunningOnMaster && migrationContext.TestOnReplica {
log.Fatalf("--allow-on-master and --test-on-replica are mutually exclusive")
migrationContext.Log.Fatal("--allow-on-master and --test-on-replica are mutually exclusive")
}
if migrationContext.AllowedRunningOnMaster && migrationContext.MigrateOnReplica {
log.Fatalf("--allow-on-master and --migrate-on-replica are mutually exclusive")
migrationContext.Log.Fatal("--allow-on-master and --migrate-on-replica are mutually exclusive")
}
if migrationContext.MigrateOnReplica && migrationContext.TestOnReplica {
log.Fatalf("--migrate-on-replica and --test-on-replica are mutually exclusive")
migrationContext.Log.Fatal("--migrate-on-replica and --test-on-replica are mutually exclusive")
}
if migrationContext.SwitchToRowBinlogFormat && migrationContext.AssumeRBR {
log.Fatalf("--switch-to-rbr and --assume-rbr are mutually exclusive")
migrationContext.Log.Fatal("--switch-to-rbr and --assume-rbr are mutually exclusive")
}
if migrationContext.TestOnReplicaSkipReplicaStop {
if !migrationContext.TestOnReplica {
log.Fatalf("--test-on-replica-skip-replica-stop requires --test-on-replica to be enabled")
migrationContext.Log.Fatal("--test-on-replica-skip-replica-stop requires --test-on-replica to be enabled")
}
log.Warning("--test-on-replica-skip-replica-stop enabled. We will not stop replication before cut-over. Ensure you have a plugin that does this.")
migrationContext.Log.Warning("--test-on-replica-skip-replica-stop enabled. We will not stop replication before cut-over. Ensure you have a plugin that does this.")
}
if migrationContext.CliMasterUser != "" && migrationContext.AssumeMasterHostname == "" {
log.Fatalf("--master-user requires --assume-master-host")
migrationContext.Log.Fatal("--master-user requires --assume-master-host")
}
if migrationContext.CliMasterPassword != "" && migrationContext.AssumeMasterHostname == "" {
log.Fatalf("--master-password requires --assume-master-host")
migrationContext.Log.Fatal("--master-password requires --assume-master-host")
}
if migrationContext.TLSCACertificate != "" && !migrationContext.UseTLS {
log.Fatalf("--ssl-ca requires --ssl")
migrationContext.Log.Fatal("--ssl-ca requires --ssl")
}
if migrationContext.TLSCertificate != "" && !migrationContext.UseTLS {
log.Fatalf("--ssl-cert requires --ssl")
migrationContext.Log.Fatal("--ssl-cert requires --ssl")
}
if migrationContext.TLSKey != "" && !migrationContext.UseTLS {
log.Fatalf("--ssl-key requires --ssl")
migrationContext.Log.Fatal("--ssl-key requires --ssl")
}
if migrationContext.TLSAllowInsecure && !migrationContext.UseTLS {
log.Fatalf("--ssl-allow-insecure requires --ssl")
migrationContext.Log.Fatal("--ssl-allow-insecure requires --ssl")
}
if *replicationLagQuery != "" {
log.Warningf("--replication-lag-query is deprecated")
migrationContext.Log.Warning("--replication-lag-query is deprecated")
}
if *storageEngine == "rocksdb" {
migrationContext.Log.Warning("RocksDB storage engine support is experimental")
}
switch *cutOver {
@ -227,28 +263,28 @@ func main() {
case "two-step":
migrationContext.CutOverType = base.CutOverTwoStep
default:
log.Fatalf("Unknown cut-over: %s", *cutOver)
migrationContext.Log.Fatalf("Unknown cut-over: %s", *cutOver)
}
if err := migrationContext.ReadConfigFile(); err != nil {
log.Fatale(err)
migrationContext.Log.Fatale(err)
}
if err := migrationContext.ReadThrottleControlReplicaKeys(*throttleControlReplicas); err != nil {
log.Fatale(err)
migrationContext.Log.Fatale(err)
}
if err := migrationContext.ReadMaxLoad(*maxLoad); err != nil {
log.Fatale(err)
migrationContext.Log.Fatale(err)
}
if err := migrationContext.ReadCriticalLoad(*criticalLoad); err != nil {
log.Fatale(err)
migrationContext.Log.Fatale(err)
}
if migrationContext.ServeSocketFile == "" {
migrationContext.ServeSocketFile = fmt.Sprintf("/tmp/gh-ost.%s.%s.sock", migrationContext.DatabaseName, migrationContext.OriginalTableName)
}
if *askPass {
fmt.Println("Password:")
bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
bytePassword, err := term.ReadPassword(syscall.Stdin)
if err != nil {
log.Fatale(err)
migrationContext.Log.Fatale(err)
}
migrationContext.CliPassword = string(bytePassword)
}
@ -259,26 +295,26 @@ func main() {
migrationContext.SetMaxLagMillisecondsThrottleThreshold(*maxLagMillis)
migrationContext.SetThrottleQuery(*throttleQuery)
migrationContext.SetThrottleHTTP(*throttleHTTP)
migrationContext.SetIgnoreHTTPErrors(*ignoreHTTPErrors)
migrationContext.SetDefaultNumRetries(*defaultRetries)
migrationContext.ApplyCredentials()
if err := migrationContext.SetupTLS(); err != nil {
migrationContext.Log.Fatale(err)
}
if err := migrationContext.SetCutOverLockTimeoutSeconds(*cutOverLockTimeoutSeconds); err != nil {
migrationContext.Log.Errore(err)
}
if err := migrationContext.SetExponentialBackoffMaxInterval(*exponentialBackoffMaxInterval); err != nil {
migrationContext.Log.Errore(err)
}
log.Infof("starting gh-ost %+v", AppVersion)
acceptSignals(migrationContext)
migrator := logic.NewMigrator(migrationContext, AppVersion)
if err := migrator.Migrate(); err != nil {
migrator.ExecOnFailureHook()
migrationContext.Log.Fatale(err)
}
fmt.Fprintln(os.Stdout, "# Done")
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -8,6 +8,7 @@ package logic
import (
gosql "database/sql"
"fmt"
"strings"
"sync/atomic"
"time"
@ -16,12 +17,13 @@ import (
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
"github.com/outbrain/golib/sqlutils"
"github.com/openark/golib/log"
"github.com/openark/golib/sqlutils"
)
const (
GhostChangelogTableComment = "gh-ost changelog"
atomicCutOverMagicHint = "ghost-cut-over-sentry"
)
type dmlBuildResult struct {
@ -46,7 +48,7 @@ func newDmlBuildResultError(err error) *dmlBuildResult {
}
}
// Applier connects and writes the applier-server, which is the server where migration
// happens. This is typically the master, but could be a replica when `--test-on-replica` or
// `--execute-on-replica` are given.
// Applier is the one to actually write row data and apply binlog events onto the ghost table.
@ -57,6 +59,7 @@ type Applier struct {
singletonDB *gosql.DB
migrationContext *base.MigrationContext
finishedMigrating int64
name string
}
func NewApplier(migrationContext *base.MigrationContext) *Applier {
@ -64,11 +67,11 @@ func NewApplier(migrationContext *base.MigrationContext) *Applier {
connectionConfig: migrationContext.ApplierConnectionConfig,
migrationContext: migrationContext,
finishedMigrating: 0,
name: "applier",
}
}
func (this *Applier) InitDBConnections() (err error) {
applierUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName)
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, applierUri); err != nil {
return err
@ -78,18 +81,18 @@ func (this *Applier) InitDBConnections() (err error) {
return err
}
this.singletonDB.SetMaxOpenConns(1)
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name)
if err != nil {
return err
}
if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig, this.migrationContext, this.name); err != nil {
return err
}
this.migrationContext.ApplierMySQLVersion = version
if err := this.validateAndReadTimeZone(); err != nil {
return err
}
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform && !this.migrationContext.AzureMySQL {
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
@ -99,7 +102,7 @@ func (this *Applier) InitDBConnections() (err error) {
if err := this.readTableColumns(); err != nil {
return err
}
log.Infof("Applier initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.ApplierMySQLVersion)
this.migrationContext.Log.Infof("Applier initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.ApplierMySQLVersion)
return nil
}
@ -110,13 +113,40 @@ func (this *Applier) validateAndReadTimeZone() error {
return err
}
log.Infof("will use time_zone='%s' on applier", this.migrationContext.ApplierTimeZone)
this.migrationContext.Log.Infof("will use time_zone='%s' on applier", this.migrationContext.ApplierTimeZone)
return nil
}
// generateSqlModeQuery returns a `sql_mode = ...` query, to be wrapped with a `set session` or `set global`,
// based on gh-ost configuration:
// - User may skip strict mode
// - User may allow zero dates or zero in dates
func (this *Applier) generateSqlModeQuery() string {
sqlModeAddendum := []string{`NO_AUTO_VALUE_ON_ZERO`}
if !this.migrationContext.SkipStrictMode {
sqlModeAddendum = append(sqlModeAddendum, `STRICT_ALL_TABLES`)
}
sqlModeQuery := fmt.Sprintf("CONCAT(@@session.sql_mode, ',%s')", strings.Join(sqlModeAddendum, ","))
if this.migrationContext.AllowZeroInDate {
sqlModeQuery = fmt.Sprintf("REPLACE(REPLACE(%s, 'NO_ZERO_IN_DATE', ''), 'NO_ZERO_DATE', '')", sqlModeQuery)
}
return fmt.Sprintf("sql_mode = %s", sqlModeQuery)
}
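For illustration, a minimal sketch (mirroring how this file wraps the clause elsewhere) of what the applier issues; the resulting statement shown in the comment assumes default settings, i.e. strict mode enforced and zero dates disallowed:

	sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
	sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
	// With defaults this yields, roughly:
	//   SET SESSION time_zone = '<tz>',
	//     sql_mode = CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO,STRICT_ALL_TABLES')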
// generateInstantDDLQuery returns the SQL for this ALTER operation
// with an INSTANT assertion (requires MySQL 8.0+)
func (this *Applier) generateInstantDDLQuery() string {
return fmt.Sprintf(`ALTER /* gh-ost */ TABLE %s.%s %s, ALGORITHM=INSTANT`,
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
this.migrationContext.AlterStatementOptions,
)
}
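As an example (matching the unit test later in this diff): for database `test`, table `mytable`, and alter options `ADD INDEX (foo)`, the generated statement is:

	ALTER /* gh-ost */ TABLE `test`.`mytable` ADD INDEX (foo), ALGORITHM=INSTANT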
// readTableColumns reads table columns on applier
func (this *Applier) readTableColumns() (err error) {
log.Infof("Examining table structure on applier")
this.migrationContext.Log.Infof("Examining table structure on applier")
this.migrationContext.OriginalTableColumnsOnApplier, _, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName)
if err != nil {
return err
@ -157,7 +187,7 @@ func (this *Applier) ValidateOrDropExistingTables() error {
}
}
if len(this.migrationContext.GetOldTableName()) > mysql.MaxTableNameLength {
log.Fatalf("--timestamp-old-table defined, but resulting table name (%s) is too long (only %d characters allowed)", this.migrationContext.GetOldTableName(), mysql.MaxTableNameLength)
this.migrationContext.Log.Fatalf("--timestamp-old-table defined, but resulting table name (%s) is too long (only %d characters allowed)", this.migrationContext.GetOldTableName(), mysql.MaxTableNameLength)
}
if this.tableExists(this.migrationContext.GetOldTableName()) {
@ -167,6 +197,27 @@ func (this *Applier) ValidateOrDropExistingTables() error {
return nil
}
// AttemptInstantDDL attempts to use instant DDL (from MySQL 8.0, and earlier in Aurora and some others).
// If successful, the operation is only a metadata change, so a lot of time is saved!
// The risk of attempting instant DDL when it is not supported is that a metadata lock may be acquired.
// This is minor, since gh-ost will eventually require a metadata lock anyway, just at the cut-over stage.
// Instant operations include:
// - Adding a column
// - Dropping a column
// - Dropping an index
// - Extending a VARCHAR column
// - Adding a virtual generated column
// It is not reliable to parse the `alter` statement to determine if it is instant or not.
// This is because the table might be in an older row format, or have some other incompatibility
// that is difficult to identify.
func (this *Applier) AttemptInstantDDL() error {
query := this.generateInstantDDLQuery()
this.migrationContext.Log.Infof("INSTANT DDL query is: %s", query)
// We don't need a trx, because for instant DDL the SQL mode doesn't matter.
_, err := this.db.Exec(query)
return err
}
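A minimal sketch (not the migrator's actual code) of how a caller guarded by --attempt-instant-ddl might use this: try the metadata-only path first, and fall back to the regular copy on any error.

	if migrationContext.AttemptInstantDDL {
		if err := applier.AttemptInstantDDL(); err == nil {
			return nil // metadata-only change succeeded; no row copy or cut-over needed
		}
		// else: fall through to the normal gh-ost migration flow
	}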
// CreateGhostTable creates the ghost table on the applier host
func (this *Applier) CreateGhostTable() error {
query := fmt.Sprintf(`create /* gh-ost */ table %s.%s like %s.%s`,
@ -175,15 +226,37 @@ func (this *Applier) CreateGhostTable() error {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
)
log.Infof("Creating ghost table %s.%s",
this.migrationContext.Log.Infof("Creating ghost table %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("Ghost table created")
return nil
err := func() error {
tx, err := this.db.Begin()
if err != nil {
return err
}
defer tx.Rollback()
sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
if _, err := tx.Exec(sessionQuery); err != nil {
return err
}
if _, err := tx.Exec(query); err != nil {
return err
}
this.migrationContext.Log.Infof("Ghost table created")
if err := tx.Commit(); err != nil {
// Neither SET SESSION nor ALTER are really transactional, so strictly speaking
// there's no need to commit; but let's do this the legit way anyway.
return err
}
return nil
}()
return err
}
// AlterGhost applies `alter` statement on ghost table
@ -191,17 +264,58 @@ func (this *Applier) AlterGhost() error {
query := fmt.Sprintf(`alter /* gh-ost */ table %s.%s %s`,
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
this.migrationContext.AlterStatementOptions,
)
log.Infof("Altering ghost table %s.%s",
this.migrationContext.Log.Infof("Altering ghost table %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
)
log.Debugf("ALTER statement: %s", query)
this.migrationContext.Log.Debugf("ALTER statement: %s", query)
err := func() error {
tx, err := this.db.Begin()
if err != nil {
return err
}
defer tx.Rollback()
sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
if _, err := tx.Exec(sessionQuery); err != nil {
return err
}
if _, err := tx.Exec(query); err != nil {
return err
}
this.migrationContext.Log.Infof("Ghost table altered")
if err := tx.Commit(); err != nil {
// Neither SET SESSION nor ALTER are really transactional, so strictly speaking
// there's no need to commit; but let's do this the legit way anyway.
return err
}
return nil
}()
return err
}
// AlterGhostAutoIncrement applies an `ALTER ... AUTO_INCREMENT=...` statement on the ghost table
func (this *Applier) AlterGhostAutoIncrement() error {
query := fmt.Sprintf(`alter /* gh-ost */ table %s.%s AUTO_INCREMENT=%d`,
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
this.migrationContext.OriginalTableAutoIncrement,
)
this.migrationContext.Log.Infof("Altering ghost table AUTO_INCREMENT value %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
)
this.migrationContext.Log.Debugf("AUTO_INCREMENT ALTER statement: %s", query)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("Ghost table altered")
this.migrationContext.Log.Infof("Ghost table AUTO_INCREMENT altered")
return nil
}
@ -211,25 +325,25 @@ func (this *Applier) CreateChangelogTable() error {
return err
}
query := fmt.Sprintf(`create /* gh-ost */ table %s.%s (
id bigint unsigned auto_increment,
last_update timestamp not null DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
hint varchar(64) charset ascii not null,
value varchar(4096) charset ascii not null,
primary key(id),
unique key hint_uidx(hint)
) auto_increment=256 comment='%s'`,
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetChangelogTableName()),
GhostChangelogTableComment,
)
log.Infof("Creating changelog table %s.%s",
this.migrationContext.Log.Infof("Creating changelog table %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetChangelogTableName()),
)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("Changelog table created")
this.migrationContext.Log.Infof("Changelog table created")
return nil
}
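One practical use of the new table comment (an assumed operator workflow, not something this diff does): leftover gh-ost changelog tables can be located by their comment, e.g. with a helper like the hypothetical one below.

	// Hypothetical helper: find leftover gh-ost changelog tables by their comment.
	// db is assumed to be an open *gosql.DB with information_schema access.
	rows, err := db.Query(
		`SELECT table_schema, table_name FROM information_schema.tables WHERE table_comment = ?`,
		GhostChangelogTableComment,
	)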
@ -239,14 +353,14 @@ func (this *Applier) dropTable(tableName string) error {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(tableName),
)
log.Infof("Dropping table %s.%s",
this.migrationContext.Log.Infof("Dropping table %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(tableName),
)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("Table dropped")
this.migrationContext.Log.Infof("Table dropped")
return nil
}
@ -290,7 +404,6 @@ func (this *Applier) WriteChangelog(hint, value string) (string, error) {
sql.EscapeName(this.migrationContext.GetChangelogTableName()),
)
_, err := sqlutils.ExecNoPrepare(this.db, query, explicitId, hint, value)
this.migrationContext.PanicAbortIfTableError(err)
return hint, err
}
@ -314,7 +427,7 @@ func (this *Applier) InitiateHeartbeat() {
if _, err := this.WriteChangelog("heartbeat", time.Now().Format(time.RFC3339Nano)); err != nil {
numSuccessiveFailures++
if numSuccessiveFailures > this.migrationContext.MaxRetries() {
return this.migrationContext.Log.Errore(err)
}
} else {
numSuccessiveFailures = 0
@ -323,8 +436,9 @@ func (this *Applier) InitiateHeartbeat() {
}
injectHeartbeat()
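// Note: unlike time.Tick (used previously), a ticker created with time.NewTicker
// can be stopped, so its underlying timer is released when InitiateHeartbeat returns.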
ticker := time.NewTicker(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@ -349,62 +463,97 @@ func (this *Applier) ExecuteThrottleQuery() (int64, error) {
}
var result int64
if err := this.db.QueryRow(throttleQuery).Scan(&result); err != nil {
return 0, this.migrationContext.Log.Errore(err)
}
return result, nil
}
// readMigrationMinValues returns the minimum values to be iterated on rowcopy
func (this *Applier) readMigrationMinValues(tx *gosql.Tx, uniqueKey *sql.UniqueKey) error {
this.migrationContext.Log.Debugf("Reading migration range according to key: %s", uniqueKey.Name)
query, err := sql.BuildUniqueKeyMinValuesPreparedQuery(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &uniqueKey.Columns)
if err != nil {
return err
}
rows, err := tx.Query(query)
if err != nil {
return err
}
defer rows.Close()
for rows.Next() {
this.migrationContext.MigrationRangeMinValues = sql.NewColumnValues(uniqueKey.Len())
if err = rows.Scan(this.migrationContext.MigrationRangeMinValues.ValuesPointers...); err != nil {
return err
}
}
this.migrationContext.Log.Infof("Migration min values: [%s]", this.migrationContext.MigrationRangeMinValues)
return rows.Err()
}
// readMigrationMaxValues returns the maximum values to be iterated on rowcopy
func (this *Applier) readMigrationMaxValues(tx *gosql.Tx, uniqueKey *sql.UniqueKey) error {
this.migrationContext.Log.Debugf("Reading migration range according to key: %s", uniqueKey.Name)
query, err := sql.BuildUniqueKeyMaxValuesPreparedQuery(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &uniqueKey.Columns)
if err != nil {
return err
}
rows, err := tx.Query(query)
if err != nil {
return err
}
defer rows.Close()
for rows.Next() {
this.migrationContext.MigrationRangeMaxValues = sql.NewColumnValues(uniqueKey.Len())
if err = rows.Scan(this.migrationContext.MigrationRangeMaxValues.ValuesPointers...); err != nil {
return err
}
}
this.migrationContext.Log.Infof("Migration max values: [%s]", this.migrationContext.MigrationRangeMaxValues)
return rows.Err()
}
// ReadMigrationRangeValues reads min/max values that will be used for rowcopy.
// Before reading min/max, write a changelog state into the ghc table to avoid losing data during MySQL two-phase commit.
/*
Detailed description of the lost-data issue in MySQL two-phase commit, by @Fanduzi:
When using semi-sync with rpl_semi_sync_master_wait_point=AFTER_SYNC,
if an INSERT statement is being committed but blocks because the ack count is not met,
the data inserted by the transaction is not yet visible to ReadMigrationRangeValues,
so the copy of the existing data in the table does not include the new row inserted by the transaction.
However, the binlog event for the transaction has already been written to the binlog,
so addDMLEventsListener only captures binlog events written after the transaction,
and the transaction's own binlog event is never captured, resulting in data loss.
If we write a changelog into the ghc table before ReadMigrationRangeValues, and a transaction commit
blocks because the ack is not met, then the changelog write will also block, so ReadMigrationRangeValues
will not run yet. By the time the changelog write succeeds, ReadMigrationRangeValues will see the
newly inserted data, thus avoiding data loss due to the above problem.
*/
func (this *Applier) ReadMigrationRangeValues() error {
if _, err := this.WriteChangelogState(string(ReadMigrationRangeValues)); err != nil {
return err
}
tx, err := this.db.Begin()
if err != nil {
return err
}
defer tx.Rollback()
if err := this.readMigrationMinValues(tx, this.migrationContext.UniqueKey); err != nil {
return err
}
if err := this.readMigrationMaxValues(tx, this.migrationContext.UniqueKey); err != nil {
return err
}
return tx.Commit()
}
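A timeline sketch of the race described above (illustrative; row R is hypothetical):

	// T1 (application): INSERT R; binlog event written; commit blocks awaiting semi-sync ACK
	// without the fix:  ReadMigrationRangeValues() runs -- R not yet visible, so the copy
	//                   range excludes R, while binlog streaming begins after R's event: R is lost
	// with the fix:     the changelog write blocks on the same ACK backlog, so the
	//                   range read only runs once R has become visible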
// CalculateNextIterationRangeEndValues reads the next-iteration-range-end unique key values,
@ -434,10 +583,13 @@ func (this *Applier) CalculateNextIterationRangeEndValues() (hasFurtherRange boo
if err != nil {
return hasFurtherRange, err
}
rows, err := this.db.Query(query, explodedArgs...)
if err != nil {
return hasFurtherRange, err
}
defer rows.Close()
iterationRangeMaxValues := sql.NewColumnValues(this.migrationContext.UniqueKey.Len())
for rows.Next() {
if err = rows.Scan(iterationRangeMaxValues.ValuesPointers...); err != nil {
@ -445,12 +597,15 @@ func (this *Applier) CalculateNextIterationRangeEndValues() (hasFurtherRange boo
}
hasFurtherRange = true
}
if err = rows.Err(); err != nil {
return hasFurtherRange, err
}
if hasFurtherRange {
this.migrationContext.MigrationIterationRangeMaxValues = iterationRangeMaxValues
return hasFurtherRange, nil
}
}
log.Debugf("Iteration complete: no further range to iterate")
this.migrationContext.Log.Debugf("Iteration complete: no further range to iterate")
return hasFurtherRange, nil
}
@ -483,12 +638,9 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected
return nil, err
}
defer tx.Rollback()
sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone)
sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
if _, err := tx.Exec(sessionQuery); err != nil {
return nil, err
@ -508,7 +660,7 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected
}
rowsAffected, _ = sqlResult.RowsAffected()
duration = time.Since(startTime)
this.migrationContext.Log.Debugf(
"Issued INSERT on range: [%s]..[%s]; iteration: %d; chunk-size: %d",
this.migrationContext.MigrationIterationRangeMinValues,
this.migrationContext.MigrationIterationRangeMaxValues,
@ -523,7 +675,7 @@ func (this *Applier) LockOriginalTable() error {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
)
log.Infof("Locking %s.%s",
this.migrationContext.Log.Infof("Locking %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
)
@ -531,18 +683,18 @@ func (this *Applier) LockOriginalTable() error {
if _, err := sqlutils.ExecNoPrepare(this.singletonDB, query); err != nil {
return err
}
log.Infof("Table locked")
this.migrationContext.Log.Infof("Table locked")
return nil
}
// UnlockTables makes tea. No wait, it unlocks tables.
func (this *Applier) UnlockTables() error {
query := `unlock /* gh-ost */ tables`
log.Infof("Unlocking tables")
this.migrationContext.Log.Infof("Unlocking tables")
if _, err := sqlutils.ExecNoPrepare(this.singletonDB, query); err != nil {
return err
}
log.Infof("Tables unlocked")
this.migrationContext.Log.Infof("Tables unlocked")
return nil
}
@ -556,7 +708,7 @@ func (this *Applier) SwapTablesQuickAndBumpy() error {
sql.EscapeName(this.migrationContext.OriginalTableName),
sql.EscapeName(this.migrationContext.GetOldTableName()),
)
log.Infof("Renaming original table")
this.migrationContext.Log.Infof("Renaming original table")
this.migrationContext.RenameTablesStartTime = time.Now()
if _, err := sqlutils.ExecNoPrepare(this.singletonDB, query); err != nil {
return err
@ -566,13 +718,13 @@ func (this *Applier) SwapTablesQuickAndBumpy() error {
sql.EscapeName(this.migrationContext.GetGhostTableName()),
sql.EscapeName(this.migrationContext.OriginalTableName),
)
log.Infof("Renaming ghost table")
this.migrationContext.Log.Infof("Renaming ghost table")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
this.migrationContext.RenameTablesEndTime = time.Now()
log.Infof("Tables renamed")
this.migrationContext.Log.Infof("Tables renamed")
return nil
}
@ -591,7 +743,7 @@ func (this *Applier) RenameTablesRollback() (renameError error) {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
)
log.Infof("Renaming back both tables")
this.migrationContext.Log.Infof("Renaming back both tables")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err == nil {
return nil
}
@ -602,7 +754,7 @@ func (this *Applier) RenameTablesRollback() (renameError error) {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetGhostTableName()),
)
log.Infof("Renaming back to ghost table")
this.migrationContext.Log.Infof("Renaming back to ghost table")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
renameError = err
}
@ -612,11 +764,11 @@ func (this *Applier) RenameTablesRollback() (renameError error) {
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
)
log.Infof("Renaming back to original table")
this.migrationContext.Log.Infof("Renaming back to original table")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
renameError = err
}
return this.migrationContext.Log.Errore(renameError)
}
// StopSlaveIOThread is applicable with --test-on-replica; it stops the IO thread, duh.
@ -624,44 +776,44 @@ func (this *Applier) RenameTablesRollback() (renameError error) {
// and have them written to the binary log, so that we can then read them via streamer.
func (this *Applier) StopSlaveIOThread() error {
query := `stop /* gh-ost */ slave io_thread`
log.Infof("Stopping replication IO thread")
this.migrationContext.Log.Infof("Stopping replication IO thread")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("Replication IO thread stopped")
this.migrationContext.Log.Infof("Replication IO thread stopped")
return nil
}
// StartSlaveIOThread is applicable with --test-on-replica
func (this *Applier) StartSlaveIOThread() error {
query := `start /* gh-ost */ slave io_thread`
log.Infof("Starting replication IO thread")
this.migrationContext.Log.Infof("Starting replication IO thread")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("Replication IO thread started")
this.migrationContext.Log.Infof("Replication IO thread started")
return nil
}
// StopSlaveSQLThread is applicable with --test-on-replica
func (this *Applier) StopSlaveSQLThread() error {
query := `stop /* gh-ost */ slave sql_thread`
log.Infof("Verifying SQL thread is stopped")
this.migrationContext.Log.Infof("Verifying SQL thread is stopped")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("SQL thread stopped")
this.migrationContext.Log.Infof("SQL thread stopped")
return nil
}
// StartSlaveSQLThread is applicable with --test-on-replica
func (this *Applier) StartSlaveSQLThread() error {
query := `start /* gh-ost */ slave sql_thread`
log.Infof("Verifying SQL thread is running")
this.migrationContext.Log.Infof("Verifying SQL thread is running")
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("SQL thread started")
this.migrationContext.Log.Infof("SQL thread started")
return nil
}
@ -678,7 +830,7 @@ func (this *Applier) StopReplication() error {
if err != nil {
return err
}
log.Infof("Replication IO thread at %+v. SQL thread is at %+v", *readBinlogCoordinates, *executeBinlogCoordinates)
this.migrationContext.Log.Infof("Replication IO thread at %+v. SQL thread is at %+v", *readBinlogCoordinates, *executeBinlogCoordinates)
return nil
}
@ -690,7 +842,7 @@ func (this *Applier) StartReplication() error {
if err := this.StartSlaveSQLThread(); err != nil {
return err
}
log.Infof("Replication started")
this.migrationContext.Log.Infof("Replication started")
return nil
}
@ -704,7 +856,7 @@ func (this *Applier) ExpectUsedLock(sessionId int64) error {
var result int64
query := `select is_used_lock(?)`
lockName := this.GetSessionLockName(sessionId)
log.Infof("Checking session lock: %s", lockName)
this.migrationContext.Log.Infof("Checking session lock: %s", lockName)
if err := this.db.QueryRow(query, lockName).Scan(&result); err != nil || result != sessionId {
return fmt.Errorf("Session lock %s expected to be found but wasn't", lockName)
}
@ -739,7 +891,7 @@ func (this *Applier) ExpectProcess(sessionId int64, stateHint, infoHint string)
// DropAtomicCutOverSentryTableIfExists checks if the "old" table name
// happens to be a cut-over magic table; if so, it drops it.
func (this *Applier) DropAtomicCutOverSentryTableIfExists() error {
log.Infof("Looking for magic cut-over table")
this.migrationContext.Log.Infof("Looking for magic cut-over table")
tableName := this.migrationContext.GetOldTableName()
rowMap := this.showTableStatus(tableName)
if rowMap == nil {
@ -749,7 +901,7 @@ func (this *Applier) DropAtomicCutOverSentryTableIfExists() error {
if rowMap["Comment"].String != atomicCutOverMagicHint {
return fmt.Errorf("Expected magic comment on %s, did not find it", tableName)
}
log.Infof("Dropping magic cut-over table")
this.migrationContext.Log.Infof("Dropping magic cut-over table")
return this.dropTable(tableName)
}
@ -769,14 +921,14 @@ func (this *Applier) CreateAtomicCutOverSentryTable() error {
this.migrationContext.TableEngine,
atomicCutOverMagicHint,
)
log.Infof("Creating magic cut-over table %s.%s",
this.migrationContext.Log.Infof("Creating magic cut-over table %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(tableName),
)
if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil {
return err
}
log.Infof("Magic cut-over table created")
this.migrationContext.Log.Infof("Magic cut-over table created")
return nil
}
@ -793,6 +945,7 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
tableLocked <- fmt.Errorf("Unexpected error in AtomicCutOverMagicLock(), injected to release blocking channel reads")
tableUnlocked <- fmt.Errorf("Unexpected error in AtomicCutOverMagicLock(), injected to release blocking channel reads")
tx.Rollback()
this.DropAtomicCutOverSentryTableIfExists()
}()
var sessionId int64
@ -805,7 +958,7 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
lockResult := 0
query := `select get_lock(?, 0)`
lockName := this.GetSessionLockName(sessionId)
log.Infof("Grabbing voluntary lock: %s", lockName)
this.migrationContext.Log.Infof("Grabbing voluntary lock: %s", lockName)
if err := tx.QueryRow(query, lockName).Scan(&lockResult); err != nil || lockResult != 1 {
err := fmt.Errorf("Unable to acquire lock %s", lockName)
tableLocked <- err
@ -813,7 +966,7 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
}
tableLockTimeoutSeconds := this.migrationContext.CutOverLockTimeoutSeconds * 2
log.Infof("Setting LOCK timeout as %d seconds", tableLockTimeoutSeconds)
this.migrationContext.Log.Infof("Setting LOCK timeout as %d seconds", tableLockTimeoutSeconds)
query = fmt.Sprintf(`set session lock_wait_timeout:=%d`, tableLockTimeoutSeconds)
if _, err := tx.Exec(query); err != nil {
tableLocked <- err
@ -831,7 +984,7 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetOldTableName()),
)
log.Infof("Locking %s.%s, %s.%s",
this.migrationContext.Log.Infof("Locking %s.%s, %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
sql.EscapeName(this.migrationContext.DatabaseName),
@ -842,7 +995,7 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
tableLocked <- err
return err
}
log.Infof("Tables locked")
this.migrationContext.Log.Infof("Tables locked")
tableLocked <- nil // No error.
// From this point on, we are committed to UNLOCK TABLES. No matter what happens,
@ -851,22 +1004,23 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
// The cut-over phase will proceed to apply remaining backlog onto ghost table,
// and issue RENAME. We wait here until told to proceed.
<-okToUnlockTable
log.Infof("Will now proceed to drop magic table and unlock tables")
this.migrationContext.Log.Infof("Will now proceed to drop magic table and unlock tables")
// The magic table is here because we locked it. And we are the only ones allowed to drop it.
// And in fact, we will:
log.Infof("Dropping magic cut-over table")
this.migrationContext.Log.Infof("Dropping magic cut-over table")
query = fmt.Sprintf(`drop /* gh-ost */ table if exists %s.%s`,
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.GetOldTableName()),
)
if _, err := tx.Exec(query); err != nil {
this.migrationContext.Log.Errore(err)
// We DO NOT return here because we must `UNLOCK TABLES`!
}
// Tables still locked
log.Infof("Releasing lock from %s.%s, %s.%s",
this.migrationContext.Log.Infof("Releasing lock from %s.%s, %s.%s",
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
sql.EscapeName(this.migrationContext.DatabaseName),
@ -875,9 +1029,9 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke
query = `unlock tables`
if _, err := tx.Exec(query); err != nil {
tableUnlocked <- err
return this.migrationContext.Log.Errore(err)
}
log.Infof("Tables unlocked")
this.migrationContext.Log.Infof("Tables unlocked")
tableUnlocked <- nil
return nil
}
@ -899,7 +1053,7 @@ func (this *Applier) AtomicCutoverRename(sessionIdChan chan int64, tablesRenamed
}
sessionIdChan <- sessionId
log.Infof("Setting RENAME timeout as %d seconds", this.migrationContext.CutOverLockTimeoutSeconds)
this.migrationContext.Log.Infof("Setting RENAME timeout as %d seconds", this.migrationContext.CutOverLockTimeoutSeconds)
query := fmt.Sprintf(`set session lock_wait_timeout:=%d`, this.migrationContext.CutOverLockTimeoutSeconds)
if _, err := tx.Exec(query); err != nil {
return err
@ -915,15 +1069,13 @@ func (this *Applier) AtomicCutoverRename(sessionIdChan chan int64, tablesRenamed
sql.EscapeName(this.migrationContext.DatabaseName),
sql.EscapeName(this.migrationContext.OriginalTableName),
)
log.Infof("Issuing and expecting this to block: %s", query)
this.migrationContext.Log.Infof("Issuing and expecting this to block: %s", query)
if _, err := tx.Exec(query); err != nil {
this.migrationContext.PanicAbortIfTableError(err)
tablesRenamed <- err
return this.migrationContext.Log.Errore(err)
}
tablesRenamed <- nil
log.Infof("Tables renamed")
this.migrationContext.Log.Infof("Tables renamed")
return nil
}
@ -985,7 +1137,6 @@ func (this *Applier) buildDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) (result
// ApplyDMLEventQueries applies multiple DML queries onto the _ghost_ table
func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) error {
var totalDelta int64
err := func() error {
@ -1000,12 +1151,7 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
}
sessionQuery := "SET SESSION time_zone = '+00:00'"
sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery())
if _, err := tx.Exec(sessionQuery); err != nil {
return rollback(err)
@ -1015,11 +1161,20 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
if buildResult.err != nil {
return rollback(buildResult.err)
}
result, err := tx.Exec(buildResult.query, buildResult.args...)
if err != nil {
err = fmt.Errorf("%w; query=%s; args=%+v", err, buildResult.query, buildResult.args)
return rollback(err)
}
rowsAffected, err := result.RowsAffected()
if err != nil {
log.Warningf("error getting rows affected from DML event query: %s. i'm going to assume that the DML affected a single row, but this may result in inaccurate statistics", err)
rowsAffected = 1
}
// each DML is either a single insert (delta +1), update (delta +0) or delete (delta -1).
// multiplying by the rows actually affected (either 0 or 1) will give an accurate row delta for this DML event
totalDelta += buildResult.rowsDelta * rowsAffected
}
}
if err := tx.Commit(); err != nil {
@ -1029,19 +1184,19 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent))
}()
if err != nil {
return this.migrationContext.Log.Errore(err)
}
// no error
atomic.AddInt64(&this.migrationContext.TotalDMLEventsApplied, int64(len(dmlEvents)))
if this.migrationContext.CountTableRows {
atomic.AddInt64(&this.migrationContext.RowsDeltaEstimate, totalDelta)
}
log.Debugf("ApplyDMLEventQueries() applied %d events in one transaction", len(dmlEvents))
this.migrationContext.Log.Debugf("ApplyDMLEventQueries() applied %d events in one transaction", len(dmlEvents))
return nil
}
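Illustrative delta accounting for the new rows-affected logic (an assumed batch): one insert that applied (rowsDelta +1, rowsAffected 1), one update (rowsDelta 0, rowsAffected 1), and one delete whose row was already gone (rowsDelta -1, rowsAffected 0):

	// totalDelta = (+1 * 1) + (0 * 1) + (-1 * 0) = +1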
func (this *Applier) Teardown() {
log.Debugf("Tearing down...")
this.migrationContext.Log.Debugf("Tearing down...")
this.db.Close()
this.singletonDB.Close()
atomic.StoreInt64(&this.finishedMigrating, 1)

go/logic/applier_test.go
View File

@ -0,0 +1,185 @@
/*
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package logic
import (
"strings"
"testing"
test "github.com/openark/golib/tests"
"github.com/github/gh-ost/go/base"
"github.com/github/gh-ost/go/binlog"
"github.com/github/gh-ost/go/sql"
)
func TestApplierGenerateSqlModeQuery(t *testing.T) {
migrationContext := base.NewMigrationContext()
applier := NewApplier(migrationContext)
{
test.S(t).ExpectEquals(
applier.generateSqlModeQuery(),
`sql_mode = CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO,STRICT_ALL_TABLES')`,
)
}
{
migrationContext.SkipStrictMode = true
migrationContext.AllowZeroInDate = false
test.S(t).ExpectEquals(
applier.generateSqlModeQuery(),
`sql_mode = CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO')`,
)
}
{
migrationContext.SkipStrictMode = false
migrationContext.AllowZeroInDate = true
test.S(t).ExpectEquals(
applier.generateSqlModeQuery(),
`sql_mode = REPLACE(REPLACE(CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO,STRICT_ALL_TABLES'), 'NO_ZERO_IN_DATE', ''), 'NO_ZERO_DATE', '')`,
)
}
{
migrationContext.SkipStrictMode = true
migrationContext.AllowZeroInDate = true
test.S(t).ExpectEquals(
applier.generateSqlModeQuery(),
`sql_mode = REPLACE(REPLACE(CONCAT(@@session.sql_mode, ',NO_AUTO_VALUE_ON_ZERO'), 'NO_ZERO_IN_DATE', ''), 'NO_ZERO_DATE', '')`,
)
}
}
func TestApplierUpdateModifiesUniqueKeyColumns(t *testing.T) {
columns := sql.NewColumnList([]string{"id", "item_id"})
columnValues := sql.ToColumnValues([]interface{}{123456, 42})
migrationContext := base.NewMigrationContext()
migrationContext.OriginalTableColumns = columns
migrationContext.UniqueKey = &sql.UniqueKey{
Name: t.Name(),
Columns: *columns,
}
applier := NewApplier(migrationContext)
t.Run("unmodified", func(t *testing.T) {
modifiedColumn, isModified := applier.updateModifiesUniqueKeyColumns(&binlog.BinlogDMLEvent{
DatabaseName: "test",
DML: binlog.UpdateDML,
NewColumnValues: columnValues,
WhereColumnValues: columnValues,
})
test.S(t).ExpectEquals(modifiedColumn, "")
test.S(t).ExpectFalse(isModified)
})
t.Run("modified", func(t *testing.T) {
modifiedColumn, isModified := applier.updateModifiesUniqueKeyColumns(&binlog.BinlogDMLEvent{
DatabaseName: "test",
DML: binlog.UpdateDML,
NewColumnValues: sql.ToColumnValues([]interface{}{123456, 24}),
WhereColumnValues: columnValues,
})
test.S(t).ExpectEquals(modifiedColumn, "item_id")
test.S(t).ExpectTrue(isModified)
})
}
func TestApplierBuildDMLEventQuery(t *testing.T) {
columns := sql.NewColumnList([]string{"id", "item_id"})
columnValues := sql.ToColumnValues([]interface{}{123456, 42})
migrationContext := base.NewMigrationContext()
migrationContext.OriginalTableName = "test"
migrationContext.OriginalTableColumns = columns
migrationContext.SharedColumns = columns
migrationContext.MappedSharedColumns = columns
migrationContext.UniqueKey = &sql.UniqueKey{
Name: t.Name(),
Columns: *columns,
}
applier := NewApplier(migrationContext)
t.Run("delete", func(t *testing.T) {
binlogEvent := &binlog.BinlogDMLEvent{
DatabaseName: "test",
DML: binlog.DeleteDML,
WhereColumnValues: columnValues,
}
res := applier.buildDMLEventQuery(binlogEvent)
test.S(t).ExpectEquals(len(res), 1)
test.S(t).ExpectNil(res[0].err)
test.S(t).ExpectEquals(strings.TrimSpace(res[0].query),
`delete /* gh-ost `+"`test`.`_test_gho`"+` */
from
`+"`test`.`_test_gho`"+`
where
((`+"`id`"+` = ?) and (`+"`item_id`"+` = ?))`)
test.S(t).ExpectEquals(len(res[0].args), 2)
test.S(t).ExpectEquals(res[0].args[0], 123456)
test.S(t).ExpectEquals(res[0].args[1], 42)
})
t.Run("insert", func(t *testing.T) {
binlogEvent := &binlog.BinlogDMLEvent{
DatabaseName: "test",
DML: binlog.InsertDML,
NewColumnValues: columnValues,
}
res := applier.buildDMLEventQuery(binlogEvent)
test.S(t).ExpectEquals(len(res), 1)
test.S(t).ExpectNil(res[0].err)
test.S(t).ExpectEquals(strings.TrimSpace(res[0].query),
`replace /* gh-ost `+"`test`.`_test_gho`"+` */ into
`+"`test`.`_test_gho`"+`
`+"(`id`, `item_id`)"+`
values
(?, ?)`)
test.S(t).ExpectEquals(len(res[0].args), 2)
test.S(t).ExpectEquals(res[0].args[0], 123456)
test.S(t).ExpectEquals(res[0].args[1], 42)
})
t.Run("update", func(t *testing.T) {
binlogEvent := &binlog.BinlogDMLEvent{
DatabaseName: "test",
DML: binlog.UpdateDML,
NewColumnValues: columnValues,
WhereColumnValues: columnValues,
}
res := applier.buildDMLEventQuery(binlogEvent)
test.S(t).ExpectEquals(len(res), 1)
test.S(t).ExpectNil(res[0].err)
test.S(t).ExpectEquals(strings.TrimSpace(res[0].query),
`update /* gh-ost `+"`test`.`_test_gho`"+` */
`+"`test`.`_test_gho`"+`
set
`+"`id`"+`=?, `+"`item_id`"+`=?
where
((`+"`id`"+` = ?) and (`+"`item_id`"+` = ?))`)
test.S(t).ExpectEquals(len(res[0].args), 4)
test.S(t).ExpectEquals(res[0].args[0], 123456)
test.S(t).ExpectEquals(res[0].args[1], 42)
test.S(t).ExpectEquals(res[0].args[2], 123456)
test.S(t).ExpectEquals(res[0].args[3], 42)
})
}
func TestApplierInstantDDL(t *testing.T) {
migrationContext := base.NewMigrationContext()
migrationContext.DatabaseName = "test"
migrationContext.OriginalTableName = "mytable"
migrationContext.AlterStatementOptions = "ADD INDEX (foo)"
applier := NewApplier(migrationContext)
t.Run("instantDDLstmt", func(t *testing.T) {
stmt := applier.generateInstantDDLQuery()
test.S(t).ExpectEquals(stmt, "ALTER /* gh-ost */ TABLE `test`.`mytable` ADD INDEX (foo), ALGORITHM=INSTANT")
})
}
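The assertion above implies generateInstantDDLQuery is plain string assembly around the migration context; a hypothetical equivalent (names assumed):

// generateInstantDDLQuerySketch reproduces the statement the test expects.
// ALGORITHM=INSTANT makes MySQL 8.0 fail fast instead of silently falling
// back to a copying ALTER when the change cannot be applied instantly.
func generateInstantDDLQuerySketch(databaseName, tableName, alterOptions string) string {
	return fmt.Sprintf("ALTER /* gh-ost */ TABLE `%s`.`%s` %s, ALGORITHM=INSTANT",
		databaseName, tableName, alterOptions)
}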

go/logic/hooks.go (modified)

@@ -1,6 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -8,13 +7,14 @@ package logic
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"sync/atomic"
"github.com/github/gh-ost/go/base"
"github.com/outbrain/golib/log"
"github.com/openark/golib/log"
)
const (
@@ -35,18 +35,16 @@ const (
type HooksExecutor struct {
migrationContext *base.MigrationContext
writer io.Writer
}
func NewHooksExecutor(migrationContext *base.MigrationContext) *HooksExecutor {
return &HooksExecutor{
migrationContext: migrationContext,
writer: os.Stderr,
}
}
func (this *HooksExecutor) initHooks() error {
return nil
}
func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) []string {
env := os.Environ()
env = append(env, fmt.Sprintf("GH_OST_DATABASE_NAME=%s", this.migrationContext.DatabaseName))
@@ -64,26 +62,26 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [
env = append(env, fmt.Sprintf("GH_OST_INSPECTED_HOST=%s", this.migrationContext.GetInspectorHostname()))
env = append(env, fmt.Sprintf("GH_OST_EXECUTING_HOST=%s", this.migrationContext.Hostname))
env = append(env, fmt.Sprintf("GH_OST_INSPECTED_LAG=%f", this.migrationContext.GetCurrentLagDuration().Seconds()))
env = append(env, fmt.Sprintf("GH_OST_HEARTBEAT_LAG=%f", this.migrationContext.TimeSinceLastHeartbeatOnChangelog().Seconds()))
env = append(env, fmt.Sprintf("GH_OST_PROGRESS=%f", this.migrationContext.GetProgressPct()))
env = append(env, fmt.Sprintf("GH_OST_ETA_SECONDS=%d", this.migrationContext.GetETASeconds()))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT=%s", this.migrationContext.HooksHintMessage))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_OWNER=%s", this.migrationContext.HooksHintOwner))
env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_TOKEN=%s", this.migrationContext.HooksHintToken))
env = append(env, fmt.Sprintf("GH_OST_DRY_RUN=%t", this.migrationContext.Noop))
for _, variable := range extraVariables {
env = append(env, variable)
}
env = append(env, extraVariables...)
return env
}
// executeHook executes a command, and sets relevant environment variables
// combined output & error are printed to gh-ost's standard error.
// combined output & error are printed to the configured writer.
func (this *HooksExecutor) executeHook(hook string, extraVariables ...string) error {
cmd := exec.Command(hook)
cmd.Env = this.applyEnvironmentVariables(extraVariables...)
combinedOutput, err := cmd.CombinedOutput()
fmt.Fprintln(os.Stderr, string(combinedOutput))
fmt.Fprintln(this.writer, string(combinedOutput))
return log.Errore(err)
}

go/logic/hooks_test.go (new file, 113 lines)

@@ -0,0 +1,113 @@
/*
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package logic
import (
"bufio"
"bytes"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"github.com/openark/golib/tests"
"github.com/github/gh-ost/go/base"
)
func TestHooksExecutorExecuteHooks(t *testing.T) {
migrationContext := base.NewMigrationContext()
migrationContext.AlterStatement = "ENGINE=InnoDB"
migrationContext.DatabaseName = "test"
migrationContext.Hostname = "test.example.com"
migrationContext.OriginalTableName = "tablename"
migrationContext.RowsDeltaEstimate = 1
migrationContext.RowsEstimate = 122
migrationContext.TotalRowsCopied = 123456
migrationContext.SetETADuration(time.Minute)
migrationContext.SetProgressPct(50)
hooksExecutor := NewHooksExecutor(migrationContext)
writeTmpHookFunc := func(testName, hookName, script string) (path string, err error) {
if path, err = os.MkdirTemp("", testName); err != nil {
return path, err
}
err = os.WriteFile(filepath.Join(path, hookName), []byte(script), 0777)
return path, err
}
t.Run("does-not-exist", func(t *testing.T) {
migrationContext.HooksPath = "/does/not/exist"
tests.S(t).ExpectNil(hooksExecutor.executeHooks("test-hook"))
})
t.Run("failed", func(t *testing.T) {
var err error
if migrationContext.HooksPath, err = writeTmpHookFunc(
"TestHooksExecutorExecuteHooks-failed",
"failed-hook",
"#!/bin/sh\nexit 1",
); err != nil {
panic(err)
}
defer os.RemoveAll(migrationContext.HooksPath)
tests.S(t).ExpectNotNil(hooksExecutor.executeHooks("failed-hook"))
})
t.Run("success", func(t *testing.T) {
var err error
if migrationContext.HooksPath, err = writeTmpHookFunc(
"TestHooksExecutorExecuteHooks-success",
"success-hook",
"#!/bin/sh\nenv",
); err != nil {
panic(err)
}
defer os.RemoveAll(migrationContext.HooksPath)
var buf bytes.Buffer
hooksExecutor.writer = &buf
tests.S(t).ExpectNil(hooksExecutor.executeHooks("success-hook", "TEST="+t.Name()))
scanner := bufio.NewScanner(&buf)
for scanner.Scan() {
split := strings.SplitN(scanner.Text(), "=", 2)
switch split[0] {
case "GH_OST_COPIED_ROWS":
copiedRows, _ := strconv.ParseInt(split[1], 10, 64)
tests.S(t).ExpectEquals(copiedRows, migrationContext.TotalRowsCopied)
case "GH_OST_DATABASE_NAME":
tests.S(t).ExpectEquals(split[1], migrationContext.DatabaseName)
case "GH_OST_DDL":
tests.S(t).ExpectEquals(split[1], migrationContext.AlterStatement)
case "GH_OST_DRY_RUN":
tests.S(t).ExpectEquals(split[1], "false")
case "GH_OST_ESTIMATED_ROWS":
estimatedRows, _ := strconv.ParseInt(split[1], 10, 64)
tests.S(t).ExpectEquals(estimatedRows, int64(123))
case "GH_OST_ETA_SECONDS":
etaSeconds, _ := strconv.ParseInt(split[1], 10, 64)
tests.S(t).ExpectEquals(etaSeconds, int64(60))
case "GH_OST_EXECUTING_HOST":
tests.S(t).ExpectEquals(split[1], migrationContext.Hostname)
case "GH_OST_GHOST_TABLE_NAME":
tests.S(t).ExpectEquals(split[1], fmt.Sprintf("_%s_gho", migrationContext.OriginalTableName))
case "GH_OST_OLD_TABLE_NAME":
tests.S(t).ExpectEquals(split[1], fmt.Sprintf("_%s_del", migrationContext.OriginalTableName))
case "GH_OST_PROGRESS":
progress, _ := strconv.ParseFloat(split[1], 64)
tests.S(t).ExpectEquals(progress, 50.0)
case "GH_OST_TABLE_NAME":
tests.S(t).ExpectEquals(split[1], migrationContext.OriginalTableName)
case "TEST":
tests.S(t).ExpectEquals(split[1], t.Name())
}
}
})
}
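The success case above doubles as a specification for hook authors: every invocation exports the migration state as GH_OST_* environment variables, and a non-zero exit code fails the hook. A hypothetical hook written in Go (all variable names taken from the test above):

package main

import (
	"fmt"
	"os"
)

func main() {
	fmt.Printf("migrating %s.%s: %s%% done, ETA %ss, copied %s rows\n",
		os.Getenv("GH_OST_DATABASE_NAME"),
		os.Getenv("GH_OST_TABLE_NAME"),
		os.Getenv("GH_OST_PROGRESS"),
		os.Getenv("GH_OST_ETA_SECONDS"),
		os.Getenv("GH_OST_COPIED_ROWS"))
	// exiting non-zero would make executeHooks return an error,
	// as in the "failed" subtest
}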

go/logic/inspect.go (modified)

@@ -1,12 +1,14 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package logic
import (
"context"
gosql "database/sql"
"errors"
"fmt"
"reflect"
"strings"
@@ -17,8 +19,7 @@ import (
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
"github.com/outbrain/golib/sqlutils"
"github.com/openark/golib/sqlutils"
)
const startSlavePostWaitMilliseconds = 500 * time.Millisecond
@@ -30,12 +31,14 @@ type Inspector struct {
db *gosql.DB
informationSchemaDb *gosql.DB
migrationContext *base.MigrationContext
name string
}
func NewInspector(migrationContext *base.MigrationContext) *Inspector {
return &Inspector{
connectionConfig: migrationContext.InspectorConnectionConfig,
migrationContext: migrationContext,
name: "inspector",
}
}
@@ -53,7 +56,7 @@ func (this *Inspector) InitDBConnections() (err error) {
if err := this.validateConnection(); err != nil {
return err
}
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform {
if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform && !this.migrationContext.AzureMySQL {
if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil {
return err
} else {
@@ -69,7 +72,7 @@ func (this *Inspector) InitDBConnections() (err error) {
if err := this.applyBinlogFormat(); err != nil {
return err
}
log.Infof("Inspector initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.InspectorMySQLVersion)
this.migrationContext.Log.Infof("Inspector initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.InspectorMySQLVersion)
return nil
}
@@ -110,6 +113,10 @@ func (this *Inspector) InspectOriginalTable() (err error) {
if err != nil {
return err
}
this.migrationContext.OriginalTableAutoIncrement, err = this.getAutoIncrementValue(this.migrationContext.OriginalTableName)
if err != nil {
return err
}
return nil
}
@@ -126,10 +133,7 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
if err != nil {
return err
}
sharedUniqueKeys, err := this.getSharedUniqueKeys(this.migrationContext.OriginalTableUniqueKeys, this.migrationContext.GhostTableUniqueKeys)
if err != nil {
return err
}
sharedUniqueKeys := this.getSharedUniqueKeys(this.migrationContext.OriginalTableUniqueKeys, this.migrationContext.GhostTableUniqueKeys)
for i, sharedUniqueKey := range sharedUniqueKeys {
this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &sharedUniqueKey.Columns)
uniqueKeyIsValid := true
@@ -137,14 +141,14 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
switch column.Type {
case sql.FloatColumnType:
{
log.Warning("Will not use %+v as shared key due to FLOAT data type", sharedUniqueKey.Name)
this.migrationContext.Log.Warning("Will not use %+v as shared key due to FLOAT data type", sharedUniqueKey.Name)
uniqueKeyIsValid = false
}
case sql.JSONColumnType:
{
// Noteworthy that at this time MySQL does not allow JSON indexing anyhow, but this code
// will remain in place to potentially handle the future case where JSON is supported in indexes.
log.Warning("Will not use %+v as shared key due to JSON data type", sharedUniqueKey.Name)
this.migrationContext.Log.Warning("Will not use %+v as shared key due to JSON data type", sharedUniqueKey.Name)
uniqueKeyIsValid = false
}
}
@@ -157,17 +161,17 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
if this.migrationContext.UniqueKey == nil {
return fmt.Errorf("No shared unique key can be found after ALTER! Bailing out")
}
log.Infof("Chosen shared unique key is %s", this.migrationContext.UniqueKey.Name)
this.migrationContext.Log.Infof("Chosen shared unique key is %s", this.migrationContext.UniqueKey.Name)
if this.migrationContext.UniqueKey.HasNullable {
if this.migrationContext.NullableUniqueKeyAllowed {
log.Warningf("Chosen key (%s) has nullable columns. You have supplied with --allow-nullable-unique-key and so this migration proceeds. As long as there aren't NULL values in this key's column, migration should be fine. NULL values will corrupt migration's data", this.migrationContext.UniqueKey)
this.migrationContext.Log.Warningf("Chosen key (%s) has nullable columns. You have supplied with --allow-nullable-unique-key and so this migration proceeds. As long as there aren't NULL values in this key's column, migration should be fine. NULL values will corrupt migration's data", this.migrationContext.UniqueKey)
} else {
return fmt.Errorf("Chosen key (%s) has nullable columns. Bailing out. To force this operation to continue, supply --allow-nullable-unique-key flag. Only do so if you are certain there are no actual NULL values in this key. As long as there aren't, migration should be fine. NULL values in columns of this key will corrupt migration's data", this.migrationContext.UniqueKey)
}
}
this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns = this.getSharedColumns(this.migrationContext.OriginalTableColumns, this.migrationContext.GhostTableColumns, this.migrationContext.OriginalTableVirtualColumns, this.migrationContext.GhostTableVirtualColumns, this.migrationContext.ColumnRenameMap)
log.Infof("Shared columns are %s", this.migrationContext.SharedColumns)
this.migrationContext.Log.Infof("Shared columns are %s", this.migrationContext.SharedColumns)
// By fact that a non-empty unique key exists we also know the shared columns are non-empty
// This additional step looks at which columns are unsigned. We could have merged this within
@@ -182,9 +186,20 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) {
if column.Name == mappedColumn.Name && column.Type == sql.DateTimeColumnType && mappedColumn.Type == sql.TimestampColumnType {
this.migrationContext.MappedSharedColumns.SetConvertDatetimeToTimestamp(column.Name, this.migrationContext.ApplierTimeZone)
}
if column.Name == mappedColumn.Name && column.Type == sql.EnumColumnType && mappedColumn.Charset != "" {
this.migrationContext.MappedSharedColumns.SetEnumToTextConversion(column.Name)
this.migrationContext.MappedSharedColumns.SetEnumValues(column.Name, column.EnumValues)
}
if column.Name == mappedColumn.Name && column.Charset != mappedColumn.Charset {
this.migrationContext.SharedColumns.SetCharsetConversion(column.Name, column.Charset, mappedColumn.Charset)
}
}
for _, column := range this.migrationContext.UniqueKey.Columns.Columns() {
if this.migrationContext.GhostTableVirtualColumns.GetColumn(column.Name) != nil {
// this is a virtual column
continue
}
if this.migrationContext.MappedSharedColumns.HasTimezoneConversion(column.Name) {
return fmt.Errorf("No support at this time for converting a column from DATETIME to TIMESTAMP that is also part of the chosen unique key. Column: %s, key: %s", column.Name, this.migrationContext.UniqueKey.Name)
}
@@ -199,7 +214,7 @@ func (this *Inspector) validateConnection() error {
return fmt.Errorf("MySQL replication length limited to 32 characters. See https://dev.mysql.com/doc/refman/5.7/en/assigning-passwords.html")
}
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext)
version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name)
this.migrationContext.InspectorMySQLVersion = version
return err
}
@@ -250,19 +265,19 @@ func (this *Inspector) validateGrants() error {
this.migrationContext.HasSuperPrivilege = foundSuper
if foundAll {
log.Infof("User has ALL privileges")
this.migrationContext.Log.Infof("User has ALL privileges")
return nil
}
if foundSuper && foundReplicationSlave && foundDBAll {
log.Infof("User has SUPER, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
this.migrationContext.Log.Infof("User has SUPER, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
return nil
}
if foundReplicationClient && foundReplicationSlave && foundDBAll {
log.Infof("User has REPLICATION CLIENT, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
this.migrationContext.Log.Infof("User has REPLICATION CLIENT, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
return nil
}
log.Debugf("Privileges: Super: %t, REPLICATION CLIENT: %t, REPLICATION SLAVE: %t, ALL on *.*: %t, ALL on %s.*: %t", foundSuper, foundReplicationClient, foundReplicationSlave, foundAll, sql.EscapeName(this.migrationContext.DatabaseName), foundDBAll)
return log.Errorf("User has insufficient privileges for migration. Needed: SUPER|REPLICATION CLIENT, REPLICATION SLAVE and ALL on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
this.migrationContext.Log.Debugf("Privileges: Super: %t, REPLICATION CLIENT: %t, REPLICATION SLAVE: %t, ALL on *.*: %t, ALL on %s.*: %t", foundSuper, foundReplicationClient, foundReplicationSlave, foundAll, sql.EscapeName(this.migrationContext.DatabaseName), foundDBAll)
return this.migrationContext.Log.Errorf("User has insufficient privileges for migration. Needed: SUPER|REPLICATION CLIENT, REPLICATION SLAVE and ALL on %s.*", sql.EscapeName(this.migrationContext.DatabaseName))
}
// restartReplication is required so that we are _certain_ the binlog format and
@@ -270,7 +285,7 @@ func (this *Inspector) validateGrants() error {
// It is entirely possible, for example, that the replication is using 'STATEMENT'
// binlog format even as the variable says 'ROW'
func (this *Inspector) restartReplication() error {
log.Infof("Restarting replication on %s:%d to make sure binlog settings apply to replication thread", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Infof("Restarting replication on %s to make sure binlog settings apply to replication thread", this.connectionConfig.Key.String())
masterKey, _ := mysql.GetMasterKeyFromSlaveStatus(this.connectionConfig)
if masterKey == nil {
@@ -289,7 +304,7 @@ func (this *Inspector) restartReplication() error {
}
time.Sleep(startSlavePostWaitMilliseconds)
log.Debugf("Replication restarted")
this.migrationContext.Log.Debugf("Replication restarted")
return nil
}
@@ -309,7 +324,7 @@ func (this *Inspector) applyBinlogFormat() error {
if err := this.restartReplication(); err != nil {
return err
}
log.Debugf("'ROW' binlog format applied")
this.migrationContext.Log.Debugf("'ROW' binlog format applied")
return nil
}
// We already have RBR, no explicit switch
@@ -329,13 +344,13 @@ func (this *Inspector) validateBinlogs() error {
return err
}
if !hasBinaryLogs {
return fmt.Errorf("%s:%d must have binary logs enabled", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
return fmt.Errorf("%s must have binary logs enabled", this.connectionConfig.Key.String())
}
if this.migrationContext.RequiresBinlogFormatChange() {
if !this.migrationContext.SwitchToRowBinlogFormat {
return fmt.Errorf("You must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s:%d doesn't have replicas", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
return fmt.Errorf("You must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s doesn't have replicas", this.connectionConfig.Key.String())
}
query := fmt.Sprintf(`show /* gh-ost */ slave hosts`)
query := `show /* gh-ost */ slave hosts`
countReplicas := 0
err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error {
countReplicas++
@@ -345,21 +360,20 @@ func (this *Inspector) validateBinlogs() error {
return err
}
if countReplicas > 0 {
return fmt.Errorf("%s:%d has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat)
return fmt.Errorf("%s has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat)
}
log.Infof("%s:%d has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat)
this.migrationContext.Log.Infof("%s has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat)
}
query = `select @@global.binlog_row_image`
if err := this.db.QueryRow(query).Scan(&this.migrationContext.OriginalBinlogRowImage); err != nil {
// Only as of 5.6. We wish to support 5.5 as well
this.migrationContext.OriginalBinlogRowImage = "FULL"
return err
}
this.migrationContext.OriginalBinlogRowImage = strings.ToUpper(this.migrationContext.OriginalBinlogRowImage)
if this.migrationContext.OriginalBinlogRowImage != "FULL" {
return fmt.Errorf("%s:%d has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogRowImage)
return fmt.Errorf("%s has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogRowImage)
}
log.Infof("binary logs validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Infof("binary logs validated on %s", this.connectionConfig.Key.String())
return nil
}
@@ -372,25 +386,25 @@ func (this *Inspector) validateLogSlaveUpdates() error {
}
if logSlaveUpdates {
log.Infof("log_slave_updates validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Infof("log_slave_updates validated on %s", this.connectionConfig.Key.String())
return nil
}
if this.migrationContext.IsTungsten {
log.Warningf("log_slave_updates not found on %s:%d, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.String())
return nil
}
if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica {
return fmt.Errorf("%s:%d must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
return fmt.Errorf("%s must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.String())
}
if this.migrationContext.InspectorIsAlsoApplier() {
log.Warningf("log_slave_updates not found on %s:%d, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.String())
return nil
}
return fmt.Errorf("%s:%d must have log_slave_updates enabled for executing migration", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port)
return fmt.Errorf("%s must have log_slave_updates enabled for executing migration", this.connectionConfig.Key.String())
}
// validateTable makes sure the table we need to operate on actually exists
@@ -413,17 +427,17 @@ func (this *Inspector) validateTable() error {
return err
}
if !tableFound {
return log.Errorf("Cannot find table %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
return this.migrationContext.Log.Errorf("Cannot find table %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
}
log.Infof("Table found. Engine=%s", this.migrationContext.TableEngine)
log.Debugf("Estimated number of rows via STATUS: %d", this.migrationContext.RowsEstimate)
this.migrationContext.Log.Infof("Table found. Engine=%s", this.migrationContext.TableEngine)
this.migrationContext.Log.Debugf("Estimated number of rows via STATUS: %d", this.migrationContext.RowsEstimate)
return nil
}
// validateTableForeignKeys makes sure no foreign keys exist on the migrated table
func (this *Inspector) validateTableForeignKeys(allowChildForeignKeys bool) error {
if this.migrationContext.SkipForeignKeyChecks {
log.Warning("--skip-foreign-key-checks provided: will not check for foreign keys")
this.migrationContext.Log.Warning("--skip-foreign-key-checks provided: will not check for foreign keys")
return nil
}
query := `
@@ -457,16 +471,16 @@ func (this *Inspector) validateTableForeignKeys(allowChildForeignKeys bool) erro
return err
}
if numParentForeignKeys > 0 {
return log.Errorf("Found %d parent-side foreign keys on %s.%s. Parent-side foreign keys are not supported. Bailing out", numParentForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
return this.migrationContext.Log.Errorf("Found %d parent-side foreign keys on %s.%s. Parent-side foreign keys are not supported. Bailing out", numParentForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
}
if numChildForeignKeys > 0 {
if allowChildForeignKeys {
log.Debugf("Foreign keys found and will be dropped, as per given --discard-foreign-keys flag")
this.migrationContext.Log.Debugf("Foreign keys found and will be dropped, as per given --discard-foreign-keys flag")
return nil
}
return log.Errorf("Found %d child-side foreign keys on %s.%s. Child-side foreign keys are not supported. Bailing out", numChildForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
return this.migrationContext.Log.Errorf("Found %d child-side foreign keys on %s.%s. Child-side foreign keys are not supported. Bailing out", numChildForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
}
log.Debugf("Validated no foreign keys exist on table")
this.migrationContext.Log.Debugf("Validated no foreign keys exist on table")
return nil
}
@@ -492,9 +506,9 @@ func (this *Inspector) validateTableTriggers() error {
return err
}
if numTriggers > 0 {
return log.Errorf("Found triggers on %s.%s. Triggers are not supported at this time. Bailing out", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
return this.migrationContext.Log.Errorf("Found triggers on %s.%s. Triggers are not supported at this time. Bailing out", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
}
log.Debugf("Validated no triggers exist on table")
this.migrationContext.Log.Debugf("Validated no triggers exist on table")
return nil
}
@@ -514,28 +528,48 @@ func (this *Inspector) estimateTableRowsViaExplain() error {
return err
}
if !outputFound {
return log.Errorf("Cannot run EXPLAIN on %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
return this.migrationContext.Log.Errorf("Cannot run EXPLAIN on %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
}
log.Infof("Estimated number of rows via EXPLAIN: %d", this.migrationContext.RowsEstimate)
this.migrationContext.Log.Infof("Estimated number of rows via EXPLAIN: %d", this.migrationContext.RowsEstimate)
return nil
}
// CountTableRows counts exact number of rows on the original table
func (this *Inspector) CountTableRows() error {
func (this *Inspector) CountTableRows(ctx context.Context) error {
atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 1)
defer atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 0)
log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while")
this.migrationContext.Log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while")
query := fmt.Sprintf(`select /* gh-ost */ count(*) as rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
var rowsEstimate int64
if err := this.db.QueryRow(query).Scan(&rowsEstimate); err != nil {
conn, err := this.db.Conn(ctx)
if err != nil {
return err
}
defer conn.Close()
var connectionID string
if err := conn.QueryRowContext(ctx, `SELECT /* gh-ost */ CONNECTION_ID()`).Scan(&connectionID); err != nil {
return err
}
query := fmt.Sprintf(`select /* gh-ost */ count(*) as count_rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName))
var rowsEstimate int64
if err := conn.QueryRowContext(ctx, query).Scan(&rowsEstimate); err != nil {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
this.migrationContext.Log.Infof("exact row count cancelled (%s), likely because I'm about to cut over. I'm going to kill that query.", ctx.Err())
return mysql.Kill(this.db, connectionID)
}
return err
}
// row count query finished. nil out the cancel func, so the main migration thread
// doesn't bother calling it after row copy is done.
this.migrationContext.SetCountTableRowsCancelFunc(nil)
atomic.StoreInt64(&this.migrationContext.RowsEstimate, rowsEstimate)
this.migrationContext.UsedRowsEstimateMethod = base.CountRowsEstimate
log.Infof("Exact number of rows via COUNT: %d", rowsEstimate)
this.migrationContext.Log.Infof("Exact number of rows via COUNT: %d", rowsEstimate)
return nil
}
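The new context parameter exists so the migrator can abort a long COUNT(*) at cut-over time. SetCountTableRowsCancelFunc appears in the hunk above; the surrounding wiring here is a hedged sketch, with the exact call sites assumed:

// assumes: import "context"; a *base.MigrationContext and an *Inspector in scope
ctx, cancel := context.WithCancel(context.Background())
migrationContext.SetCountTableRowsCancelFunc(cancel)
go func() {
	if err := inspector.CountTableRows(ctx); err != nil {
		migrationContext.Log.Errore(err)
	}
}()
// ...just before cut-over, the migrator can invoke the stored cancel func;
// QueryRowContext then returns context.Canceled and the code above issues
// a KILL for the counting connection.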
@@ -554,6 +588,7 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
columnName := m.GetString("COLUMN_NAME")
columnType := m.GetString("COLUMN_TYPE")
columnOctetLength := m.GetUint("CHARACTER_OCTET_LENGTH")
for _, columnsList := range columnsLists {
column := columnsList.GetColumn(columnName)
if column == nil {
@@ -580,6 +615,11 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
}
if strings.HasPrefix(columnType, "enum") {
column.Type = sql.EnumColumnType
column.EnumValues = sql.ParseEnumValues(m.GetString("COLUMN_TYPE"))
}
if strings.HasPrefix(columnType, "binary") {
column.Type = sql.BinaryColumnType
column.BinaryOctetLength = columnOctetLength
}
if charset := m.GetString("CHARACTER_SET_NAME"); charset != "" {
column.Charset = charset
@@ -590,6 +630,24 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL
return err
}
// getAutoIncrementValue gets the original table's AUTO_INCREMENT value, if it exists (0 value if not exists)
func (this *Inspector) getAutoIncrementValue(tableName string) (autoIncrement uint64, err error) {
query := `
SELECT
AUTO_INCREMENT
FROM INFORMATION_SCHEMA.TABLES
WHERE
TABLES.TABLE_SCHEMA = ?
AND TABLES.TABLE_NAME = ?
AND AUTO_INCREMENT IS NOT NULL
`
err = sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error {
autoIncrement = m.GetUint64("AUTO_INCREMENT")
return nil
}, this.migrationContext.DatabaseName, tableName)
return autoIncrement, err
}
// getCandidateUniqueKeys investigates a table and returns the list of unique keys
// candidate for chunking
func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*sql.UniqueKey), err error) {
@@ -663,13 +721,13 @@ func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*
if err != nil {
return uniqueKeys, err
}
log.Debugf("Potential unique keys in %+v: %+v", tableName, uniqueKeys)
this.migrationContext.Log.Debugf("Potential unique keys in %+v: %+v", tableName, uniqueKeys)
return uniqueKeys, nil
}
// getSharedUniqueKeys returns the intersection of two given unique keys,
// testing by list of columns
func (this *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys [](*sql.UniqueKey)) (uniqueKeys [](*sql.UniqueKey), err error) {
func (this *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys []*sql.UniqueKey) (uniqueKeys []*sql.UniqueKey) {
// We actually do NOT rely on key name, just on the set of columns. This is because maybe
// the ALTER is on the name itself...
for _, originalUniqueKey := range originalUniqueKeys {
@@ -679,7 +737,7 @@ func (this *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys [
}
}
}
return uniqueKeys, nil
return uniqueKeys
}
// getSharedColumns returns the intersection of two lists of columns in same order as the first list
@@ -753,7 +811,7 @@ func (this *Inspector) readChangelogState(hint string) (string, error) {
}
func (this *Inspector) getMasterConnectionConfig() (applierConfig *mysql.ConnectionConfig, err error) {
log.Infof("Recursively searching for replication master")
this.migrationContext.Log.Infof("Recursively searching for replication master")
visitedKeys := mysql.NewInstanceKeyMap()
return mysql.GetMasterConnectionConfigSafe(this.connectionConfig, visitedKeys, this.migrationContext.AllowedMasterMaster)
}
@@ -768,5 +826,4 @@ func (this *Inspector) getReplicationLag() (replicationLag time.Duration, err er
func (this *Inspector) Teardown() {
this.db.Close()
this.informationSchemaDb.Close()
return
}

go/logic/inspect_test.go (new file, 31 lines)

@@ -0,0 +1,31 @@
/*
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package logic
import (
"testing"
test "github.com/openark/golib/tests"
"github.com/github/gh-ost/go/sql"
)
func TestInspectGetSharedUniqueKeys(t *testing.T) {
origUniqKeys := []*sql.UniqueKey{
{Columns: *sql.NewColumnList([]string{"id", "item_id"})},
{Columns: *sql.NewColumnList([]string{"id", "org_id"})},
}
ghostUniqKeys := []*sql.UniqueKey{
{Columns: *sql.NewColumnList([]string{"id", "item_id"})},
{Columns: *sql.NewColumnList([]string{"id", "org_id"})},
{Columns: *sql.NewColumnList([]string{"item_id", "user_id"})},
}
inspector := &Inspector{}
sharedUniqKeys := inspector.getSharedUniqueKeys(origUniqKeys, ghostUniqKeys)
test.S(t).ExpectEquals(len(sharedUniqKeys), 2)
test.S(t).ExpectEquals(sharedUniqKeys[0].Columns.String(), "id,item_id")
test.S(t).ExpectEquals(sharedUniqKeys[1].Columns.String(), "id,org_id")
}
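The intersection is taken over column lists, never key names, since the ALTER may rename the key itself. A sketch that satisfies this test, comparing serialized column lists (the actual implementation may compare differently):

func getSharedUniqueKeysSketch(originalUniqueKeys, ghostUniqueKeys []*sql.UniqueKey) (uniqueKeys []*sql.UniqueKey) {
	for _, originalUniqueKey := range originalUniqueKeys {
		for _, ghostUniqueKey := range ghostUniqueKeys {
			// identical column lists, in order, mean a shared key
			if originalUniqueKey.Columns.String() == ghostUniqueKey.Columns.String() {
				uniqueKeys = append(uniqueKeys, originalUniqueKey)
			}
		}
	}
	return uniqueKeys
}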

go/logic/migrator.go: file diff suppressed because it is too large

go/logic/migrator_test.go (new file, 256 lines)

@@ -0,0 +1,256 @@
/*
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package logic
import (
"errors"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/openark/golib/tests"
"github.com/github/gh-ost/go/base"
"github.com/github/gh-ost/go/binlog"
"github.com/github/gh-ost/go/sql"
)
func TestMigratorOnChangelogEvent(t *testing.T) {
migrationContext := base.NewMigrationContext()
migrator := NewMigrator(migrationContext, "1.2.3")
t.Run("heartbeat", func(t *testing.T) {
columnValues := sql.ToColumnValues([]interface{}{
123,
time.Now().Unix(),
"heartbeat",
"2022-08-16T00:45:10.52Z",
})
tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
DatabaseName: "test",
DML: binlog.InsertDML,
NewColumnValues: columnValues,
}))
})
t.Run("state-AllEventsUpToLockProcessed", func(t *testing.T) {
var wg sync.WaitGroup
wg.Add(1)
go func(wg *sync.WaitGroup) {
defer wg.Done()
es := <-migrator.applyEventsQueue
tests.S(t).ExpectNotNil(es)
tests.S(t).ExpectNotNil(es.writeFunc)
}(&wg)
columnValues := sql.ToColumnValues([]interface{}{
123,
time.Now().Unix(),
"state",
AllEventsUpToLockProcessed,
})
tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
DatabaseName: "test",
DML: binlog.InsertDML,
NewColumnValues: columnValues,
}))
wg.Wait()
})
t.Run("state-GhostTableMigrated", func(t *testing.T) {
go func() {
tests.S(t).ExpectTrue(<-migrator.ghostTableMigrated)
}()
columnValues := sql.ToColumnValues([]interface{}{
123,
time.Now().Unix(),
"state",
GhostTableMigrated,
})
tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
DatabaseName: "test",
DML: binlog.InsertDML,
NewColumnValues: columnValues,
}))
})
t.Run("state-Migrated", func(t *testing.T) {
columnValues := sql.ToColumnValues([]interface{}{
123,
time.Now().Unix(),
"state",
Migrated,
})
tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
DatabaseName: "test",
DML: binlog.InsertDML,
NewColumnValues: columnValues,
}))
})
t.Run("state-ReadMigrationRangeValues", func(t *testing.T) {
columnValues := sql.ToColumnValues([]interface{}{
123,
time.Now().Unix(),
"state",
ReadMigrationRangeValues,
})
tests.S(t).ExpectNil(migrator.onChangelogEvent(&binlog.BinlogDMLEvent{
DatabaseName: "test",
DML: binlog.InsertDML,
NewColumnValues: columnValues,
}))
})
}
func TestMigratorValidateStatement(t *testing.T) {
t.Run("add-column", func(t *testing.T) {
migrationContext := base.NewMigrationContext()
migrator := NewMigrator(migrationContext, "1.2.3")
tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test ADD test_new VARCHAR(64) NOT NULL`))
tests.S(t).ExpectNil(migrator.validateAlterStatement())
tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 0)
})
t.Run("drop-column", func(t *testing.T) {
migrationContext := base.NewMigrationContext()
migrator := NewMigrator(migrationContext, "1.2.3")
tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test DROP abc`))
tests.S(t).ExpectNil(migrator.validateAlterStatement())
tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 1)
_, exists := migrator.migrationContext.DroppedColumnsMap["abc"]
tests.S(t).ExpectTrue(exists)
})
t.Run("rename-column", func(t *testing.T) {
migrationContext := base.NewMigrationContext()
migrator := NewMigrator(migrationContext, "1.2.3")
tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test CHANGE test123 test1234 bigint unsigned`))
err := migrator.validateAlterStatement()
tests.S(t).ExpectNotNil(err)
tests.S(t).ExpectTrue(strings.HasPrefix(err.Error(), "gh-ost believes the ALTER statement renames columns"))
tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 0)
})
t.Run("rename-column-approved", func(t *testing.T) {
migrationContext := base.NewMigrationContext()
migrator := NewMigrator(migrationContext, "1.2.3")
migrator.migrationContext.ApproveRenamedColumns = true
tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test CHANGE test123 test1234 bigint unsigned`))
tests.S(t).ExpectNil(migrator.validateAlterStatement())
tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 0)
})
t.Run("rename-table", func(t *testing.T) {
migrationContext := base.NewMigrationContext()
migrator := NewMigrator(migrationContext, "1.2.3")
tests.S(t).ExpectNil(migrator.parser.ParseAlterStatement(`ALTER TABLE test RENAME TO test_new`))
err := migrator.validateAlterStatement()
tests.S(t).ExpectNotNil(err)
tests.S(t).ExpectTrue(errors.Is(err, ErrMigratorUnsupportedRenameAlter))
tests.S(t).ExpectEquals(len(migrator.migrationContext.DroppedColumnsMap), 0)
})
}
func TestMigratorCreateFlagFiles(t *testing.T) {
tmpdir, err := os.MkdirTemp("", t.Name())
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
migrationContext := base.NewMigrationContext()
migrationContext.PostponeCutOverFlagFile = filepath.Join(tmpdir, "cut-over.flag")
migrator := NewMigrator(migrationContext, "1.2.3")
tests.S(t).ExpectNil(migrator.createFlagFiles())
tests.S(t).ExpectNil(migrator.createFlagFiles()) // twice to test already-exists
_, err = os.Stat(migrationContext.PostponeCutOverFlagFile)
tests.S(t).ExpectNil(err)
}
func TestMigratorGetProgressPercent(t *testing.T) {
migrationContext := base.NewMigrationContext()
migrator := NewMigrator(migrationContext, "1.2.3")
{
tests.S(t).ExpectEquals(migrator.getProgressPercent(0), float64(100.0))
}
{
migrationContext.TotalRowsCopied = 250
tests.S(t).ExpectEquals(migrator.getProgressPercent(1000), float64(25.0))
}
}
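The two cases encode a small convention: a zero rows estimate reports 100% (nothing to copy), otherwise progress is plain rows-copied over rows-estimated. Equivalent arithmetic:

// progress sketch matching the assertions above
func getProgressPercentSketch(rowsCopied, rowsEstimate int64) float64 {
	if rowsEstimate == 0 {
		return 100.0
	}
	return 100.0 * float64(rowsCopied) / float64(rowsEstimate) // 250/1000 -> 25.0
}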
func TestMigratorGetMigrationStateAndETA(t *testing.T) {
migrationContext := base.NewMigrationContext()
migrator := NewMigrator(migrationContext, "1.2.3")
now := time.Now()
migrationContext.RowCopyStartTime = now.Add(-time.Minute)
migrationContext.RowCopyEndTime = now
{
migrationContext.TotalRowsCopied = 456
state, eta, etaDuration := migrator.getMigrationStateAndETA(123456)
tests.S(t).ExpectEquals(state, "migrating")
tests.S(t).ExpectEquals(eta, "4h29m44s")
tests.S(t).ExpectEquals(etaDuration.String(), "4h29m44s")
}
{
migrationContext.TotalRowsCopied = 456
state, eta, etaDuration := migrator.getMigrationStateAndETA(456)
tests.S(t).ExpectEquals(state, "migrating")
tests.S(t).ExpectEquals(eta, "due")
tests.S(t).ExpectEquals(etaDuration.String(), "0s")
}
{
migrationContext.TotalRowsCopied = 123456
state, eta, etaDuration := migrator.getMigrationStateAndETA(456)
tests.S(t).ExpectEquals(state, "migrating")
tests.S(t).ExpectEquals(eta, "due")
tests.S(t).ExpectEquals(etaDuration.String(), "0s")
}
{
atomic.StoreInt64(&migrationContext.CountingRowsFlag, 1)
state, eta, etaDuration := migrator.getMigrationStateAndETA(123456)
tests.S(t).ExpectEquals(state, "counting rows")
tests.S(t).ExpectEquals(eta, "due")
tests.S(t).ExpectEquals(etaDuration.String(), "0s")
}
{
atomic.StoreInt64(&migrationContext.CountingRowsFlag, 0)
atomic.StoreInt64(&migrationContext.IsPostponingCutOver, 1)
state, eta, etaDuration := migrator.getMigrationStateAndETA(123456)
tests.S(t).ExpectEquals(state, "postponing cut-over")
tests.S(t).ExpectEquals(eta, "due")
tests.S(t).ExpectEquals(etaDuration.String(), "0s")
}
}
func TestMigratorShouldPrintStatus(t *testing.T) {
migrationContext := base.NewMigrationContext()
migrator := NewMigrator(migrationContext, "1.2.3")
tests.S(t).ExpectTrue(migrator.shouldPrintStatus(NoPrintStatusRule, 10, time.Second)) // test 'rule != HeuristicPrintStatusRule' return
tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 10, time.Second)) // test 'etaDuration.Seconds() <= 60'
tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 90, time.Second)) // test 'etaDuration.Seconds() <= 60' again
tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 90, time.Minute)) // test 'etaDuration.Seconds() <= 180'
tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 60, 90*time.Second)) // test 'elapsedSeconds <= 180'
tests.S(t).ExpectFalse(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 61, 90*time.Second)) // test 'elapsedSeconds <= 180'
tests.S(t).ExpectFalse(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 99, 210*time.Second)) // test 'elapsedSeconds <= 180'
tests.S(t).ExpectFalse(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 12345, 86400*time.Second)) // test 'else'
tests.S(t).ExpectTrue(migrator.shouldPrintStatus(HeuristicPrintStatusRule, 30030, 86400*time.Second)) // test 'else' again
}
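Read together, the assertions sketch the printing heuristic: non-heuristic rules always print; so does the first minute of a migration or a near ETA; after that, output backs off to every 5th second, and eventually to every 30th. One reconstruction that satisfies all nine assertions (hedged: the real migrator may order its checks differently):

func shouldPrintStatusSketch(rule PrintStatusRule, elapsedSeconds int64, etaDuration time.Duration) bool {
	if rule != HeuristicPrintStatusRule {
		return true
	}
	etaSeconds := etaDuration.Seconds()
	if elapsedSeconds <= 60 || etaSeconds <= 60 {
		return true // early in the migration, or nearly done: print every tick
	}
	if etaSeconds <= 180 || elapsedSeconds <= 180 {
		return elapsedSeconds%5 == 0 // e.g. 60s -> true, 61s -> false
	}
	return elapsedSeconds%30 == 0 // e.g. 30030s -> true, 12345s -> false
}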

go/logic/server.go (modified)

@@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -16,7 +16,6 @@ import (
"sync/atomic"
"github.com/github/gh-ost/go/base"
"github.com/outbrain/golib/log"
)
type printStatusFunc func(PrintStatusRule, io.Writer)
@@ -49,12 +48,12 @@ func (this *Server) BindSocketFile() (err error) {
if err != nil {
return err
}
log.Infof("Listening on unix socket file: %s", this.migrationContext.ServeSocketFile)
this.migrationContext.Log.Infof("Listening on unix socket file: %s", this.migrationContext.ServeSocketFile)
return nil
}
func (this *Server) RemoveSocketFile() (err error) {
log.Infof("Removing socket file: %s", this.migrationContext.ServeSocketFile)
this.migrationContext.Log.Infof("Removing socket file: %s", this.migrationContext.ServeSocketFile)
return os.Remove(this.migrationContext.ServeSocketFile)
}
@@ -66,7 +65,7 @@ func (this *Server) BindTCPPort() (err error) {
if err != nil {
return err
}
log.Infof("Listening on tcp port: %d", this.migrationContext.ServeTCPPort)
this.migrationContext.Log.Infof("Listening on tcp port: %d", this.migrationContext.ServeTCPPort)
return nil
}
@@ -76,7 +75,7 @@ func (this *Server) Serve() (err error) {
for {
conn, err := this.unixListener.Accept()
if err != nil {
log.Errore(err)
this.migrationContext.Log.Errore(err)
}
go this.handleConnection(conn)
}
@@ -88,7 +87,7 @@ func (this *Server) Serve() (err error) {
for {
conn, err := this.tcpListener.Accept()
if err != nil {
log.Errore(err)
this.migrationContext.Log.Errore(err)
}
go this.handleConnection(conn)
}
@@ -118,13 +117,11 @@ func (this *Server) onServerCommand(command string, writer *bufio.Writer) (err e
} else {
fmt.Fprintf(writer, "%s\n", err.Error())
}
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
}
// applyServerCommand parses and executes commands by user
func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (printStatusRule PrintStatusRule, err error) {
printStatusRule = NoPrintStatusRule
tokens := strings.SplitN(command, "=", 2)
command = strings.TrimSpace(tokens[0])
arg := ""
@@ -135,7 +132,7 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
}
}
argIsQuestion := (arg == "?")
throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged\n"
throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged"
if err := this.hooksExecutor.onInteractiveCommand(command); err != nil {
return NoPrintStatusRule, err
@@ -147,7 +144,9 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr
fmt.Fprint(writer, `available commands:
status # Print a detailed status message
sup # Print a short status message
coordinates # Print the currently inspected coordinates
applier # Print the hostname of the applier
inspector # Print the hostname of the inspector
chunk-size=<newsize> # Set a new chunk-size
dml-batch-size=<newsize> # Set a new dml-batch-size
nice-ratio=<ratio> # Set a new nice-ratio, immediate sleep after each row-copy operation, float (examples: 0 is aggressive, 0.7 adds 70% runtime, 1.0 doubles runtime, 2.0 triples runtime, ...)
@@ -178,6 +177,22 @@ help # This message
}
return NoPrintStatusRule, fmt.Errorf("coordinates are read-only")
}
case "applier":
if this.migrationContext.ApplierConnectionConfig != nil && this.migrationContext.ApplierConnectionConfig.ImpliedKey != nil {
fmt.Fprintf(writer, "Host: %s, Version: %s\n",
this.migrationContext.ApplierConnectionConfig.ImpliedKey.String(),
this.migrationContext.ApplierMySQLVersion,
)
}
return NoPrintStatusRule, nil
case "inspector":
if this.migrationContext.InspectorConnectionConfig != nil && this.migrationContext.InspectorConnectionConfig.ImpliedKey != nil {
fmt.Fprintf(writer, "Host: %s, Version: %s\n",
this.migrationContext.InspectorConnectionConfig.ImpliedKey.String(),
this.migrationContext.InspectorMySQLVersion,
)
}
return NoPrintStatusRule, nil
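// Illustration, not part of this diff: each command is a single line sent
// over the serve socket, answered with a short reply. A hypothetical
// client (socket path assumed):
//
//	conn, err := net.Dial("unix", "/tmp/gh-ost.test.sock")
//	if err != nil {
//		panic(err)
//	}
//	defer conn.Close()
//	fmt.Fprintln(conn, "applier")
//	reply, _ := bufio.NewReader(conn).ReadString('\n')
//	fmt.Print(reply) // e.g. "Host: applier.example.com:3306, Version: 8.0.32"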
case "chunk-size":
{
if argIsQuestion {
@@ -265,7 +280,7 @@ help # This message
return NoPrintStatusRule, nil
}
this.migrationContext.SetThrottleQuery(arg)
fmt.Fprintf(writer, throttleHint)
fmt.Fprintln(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "throttle-http":
@@ -275,7 +290,7 @@ help # This message
return NoPrintStatusRule, nil
}
this.migrationContext.SetThrottleHTTP(arg)
fmt.Fprintf(writer, throttleHint)
fmt.Fprintln(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "throttle-control-replicas":
@@ -298,7 +313,7 @@ help # This message
return NoPrintStatusRule, err
}
atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 1)
fmt.Fprintf(writer, throttleHint)
fmt.Fprintln(writer, throttleHint)
return ForcePrintStatusAndHintRule, nil
}
case "no-throttle", "unthrottle", "resume", "continue":
@@ -342,7 +357,7 @@ help # This message
return NoPrintStatusRule, err
}
err := fmt.Errorf("User commanded 'panic'. The migration will be aborted without cleanup. Please drop the gh-ost tables before trying again.")
this.migrationContext.PanicAbortOnError(err)
this.migrationContext.PanicAbort <- err
return NoPrintStatusRule, err
}
default:

go/logic/streamer.go (modified)

@@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@@ -16,8 +16,7 @@ import (
"github.com/github/gh-ost/go/binlog"
"github.com/github/gh-ost/go/mysql"
"github.com/outbrain/golib/log"
"github.com/outbrain/golib/sqlutils"
"github.com/openark/golib/sqlutils"
)
type BinlogEventListener struct {
@@ -43,6 +42,7 @@ type EventsStreamer struct {
listenersMutex *sync.Mutex
eventsChannel chan *binlog.BinlogEntry
binlogReader *binlog.GoMySQLReader
name string
}
func NewEventsStreamer(migrationContext *base.MigrationContext) *EventsStreamer {
@@ -52,13 +52,13 @@ func NewEventsStreamer(migrationContext *base.MigrationContext) *EventsStreamer
listeners: [](*BinlogEventListener){},
listenersMutex: &sync.Mutex{},
eventsChannel: make(chan *binlog.BinlogEntry, EventsChannelBufferSize),
name: "streamer",
}
}
// AddListener registers a new listener for binlog events, on a per-table basis
func (this *EventsStreamer) AddListener(
async bool, databaseName string, tableName string, onDmlEvent func(event *binlog.BinlogDMLEvent) error) (err error) {
this.listenersMutex.Lock()
defer this.listenersMutex.Unlock()
@@ -86,10 +86,10 @@ func (this *EventsStreamer) notifyListeners(binlogEvent *binlog.BinlogDMLEvent)
for _, listener := range this.listeners {
listener := listener
if strings.ToLower(listener.databaseName) != strings.ToLower(binlogEvent.DatabaseName) {
if !strings.EqualFold(listener.databaseName, binlogEvent.DatabaseName) {
continue
}
if strings.ToLower(listener.tableName) != strings.ToLower(binlogEvent.TableName) {
if !strings.EqualFold(listener.tableName, binlogEvent.TableName) {
continue
}
if listener.async {
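strings.EqualFold reports whether two strings are equal under Unicode case-folding, avoiding the two throwaway lowercased copies the old comparison allocated on every binlog event:

// equivalent to strings.ToLower(a) == strings.ToLower(b), without the allocations
matched := strings.EqualFold("MyDB", "mydb") // true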
@@ -107,7 +107,7 @@ func (this *EventsStreamer) InitDBConnections() (err error) {
if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, EventsStreamerUri); err != nil {
return err
}
if _, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext); err != nil {
if _, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name); err != nil {
return err
}
if err := this.readCurrentBinlogCoordinates(); err != nil {
@@ -122,10 +122,7 @@ func (this *EventsStreamer) InitDBConnections() (err error) {
// initBinlogReader creates and connects the reader: we hook up to a MySQL server as a replica
func (this *EventsStreamer) initBinlogReader(binlogCoordinates *mysql.BinlogCoordinates) error {
goMySQLReader, err := binlog.NewGoMySQLReader(this.migrationContext)
if err != nil {
return err
}
goMySQLReader := binlog.NewGoMySQLReader(this.migrationContext)
if err := goMySQLReader.ConnectBinlogStreamer(*binlogCoordinates); err != nil {
return err
}
@@ -160,7 +157,7 @@ func (this *EventsStreamer) readCurrentBinlogCoordinates() error {
if !foundMasterStatus {
return fmt.Errorf("Got no results from SHOW MASTER STATUS. Bailing out")
}
log.Debugf("Streamer binlog coordinates: %+v", *this.initialBinlogCoordinates)
this.migrationContext.Log.Debugf("Streamer binlog coordinates: %+v", *this.initialBinlogCoordinates)
return nil
}
@@ -186,7 +183,7 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error {
return nil
}
log.Infof("StreamEvents encountered unexpected error: %+v", err)
this.migrationContext.Log.Infof("StreamEvents encountered unexpected error: %+v", err)
this.migrationContext.MarkPointOfInterest()
time.Sleep(ReconnectStreamerSleepSeconds * time.Second)
@@ -202,7 +199,7 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error {
// Reposition at same binlog file.
lastAppliedRowsEventHint = this.binlogReader.LastAppliedRowsEventHint
log.Infof("Reconnecting... Will resume at %+v", lastAppliedRowsEventHint)
this.migrationContext.Log.Infof("Reconnecting... Will resume at %+v", lastAppliedRowsEventHint)
if err := this.initBinlogReader(this.GetReconnectBinlogCoordinates()); err != nil {
return err
}
@@ -213,11 +210,10 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error {
func (this *EventsStreamer) Close() (err error) {
err = this.binlogReader.Close()
log.Infof("Closed streamer connection. err=%+v", err)
this.migrationContext.Log.Infof("Closed streamer connection. err=%+v", err)
return err
}
func (this *EventsStreamer) Teardown() {
this.db.Close()
return
}

go/logic/throttler.go (modified)

@@ -1,11 +1,12 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package logic
import (
"context"
"fmt"
"net/http"
"strings"
@@ -15,24 +16,25 @@ import (
"github.com/github/gh-ost/go/base"
"github.com/github/gh-ost/go/mysql"
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
)
var (
httpStatusMessages map[int]string = map[int]string{
httpStatusMessages = map[int]string{
200: "OK",
404: "Not found",
417: "Expectation failed",
429: "Too many requests",
500: "Internal server error",
-1: "Connection error",
}
// See https://github.com/github/freno/blob/master/doc/http.md
httpStatusFrenoMessages map[int]string = map[int]string{
httpStatusFrenoMessages = map[int]string{
200: "OK",
404: "freno: unknown metric",
417: "freno: access forbidden",
429: "freno: threshold exceeded",
500: "freno: internal error",
-1: "freno: connection error",
}
)
@@ -41,16 +43,22 @@ const frenoMagicHint = "freno"
// Throttler collects metrics related to throttling and makes informed decision
// whether throttling should take place.
type Throttler struct {
appVersion string
migrationContext *base.MigrationContext
applier *Applier
httpClient *http.Client
httpClientTimeout time.Duration
inspector *Inspector
finishedMigrating int64
}
func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector) *Throttler {
func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector, appVersion string) *Throttler {
return &Throttler{
appVersion: appVersion,
migrationContext: migrationContext,
applier: applier,
httpClient: &http.Client{},
httpClientTimeout: time.Duration(migrationContext.ThrottleHTTPTimeoutMillis) * time.Millisecond,
inspector: inspector,
finishedMigrating: 0,
}
@@ -84,6 +92,7 @@ func (this *Throttler) shouldThrottle() (result bool, reason string, reasonHint
if statusCode != 0 && statusCode != http.StatusOK {
return true, this.throttleHttpMessage(int(statusCode)), base.NoThrottleReasonHint
}
// Replication lag throttle
maxLagMillisecondsThrottleThreshold := atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold)
lag := atomic.LoadInt64(&this.migrationContext.CurrentLag)
@@ -120,7 +129,7 @@ func parseChangelogHeartbeat(heartbeatValue string) (lag time.Duration, err erro
// parseChangelogHeartbeat parses a string timestamp and deduces replication lag
func (this *Throttler) parseChangelogHeartbeat(heartbeatValue string) (err error) {
if lag, err := parseChangelogHeartbeat(heartbeatValue); err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
} else {
atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag))
return nil
@@ -142,13 +151,13 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
// This means we will always get a good heartbeat value.
// When running on replica, we should instead check the `SHOW SLAVE STATUS` output.
if lag, err := mysql.GetReplicationLagFromSlaveStatus(this.inspector.informationSchemaDb); err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
} else {
atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag))
}
} else {
if heartbeatValue, err := this.inspector.readChangelogState("heartbeat"); err != nil {
return log.Errore(err)
return this.migrationContext.Log.Errore(err)
} else {
this.parseChangelogHeartbeat(heartbeatValue)
}
@@ -159,8 +168,9 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
collectFunc()
firstThrottlingCollected <- true
ticker := time.Tick(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
for range ticker {
ticker := time.NewTicker(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
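
Throughout this file, time.Tick is replaced by time.NewTicker with a deferred Stop(). The ticker behind time.Tick can never be stopped or collected, so every loop that returns on finishedMigrating used to leak one; the new pattern releases it when the goroutine exits. A minimal sketch of the pattern, with a done flag standing in for finishedMigrating:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var done int64

	go func() {
		// time.Tick would leak its ticker once this returns; NewTicker + Stop does not.
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		for range ticker.C {
			if atomic.LoadInt64(&done) > 0 {
				return
			}
			fmt.Println("collecting metrics...")
		}
	}()

	time.Sleep(350 * time.Millisecond)
	atomic.StoreInt64(&done, 1) // signal shutdown, as Teardown() does below
	time.Sleep(200 * time.Millisecond)
}
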
@ -170,7 +180,6 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo
// collectControlReplicasLag polls all the control replicas to get maximum lag value
func (this *Throttler) collectControlReplicasLag() {
if atomic.LoadInt64(&this.migrationContext.HibernateUntil) > 0 {
return
}
@ -186,9 +195,12 @@ func (this *Throttler) collectControlReplicasLag() {
dbUri := connectionConfig.GetDBUri("information_schema")
var heartbeatValue string
if db, _, err := mysql.GetDB(this.migrationContext.Uuid, dbUri); err != nil {
db, _, err := mysql.GetDB(this.migrationContext.Uuid, dbUri)
if err != nil {
return lag, err
} else if err = db.QueryRow(replicationLagQuery).Scan(&heartbeatValue); err != nil {
}
if err := db.QueryRow(replicationLagQuery).Scan(&heartbeatValue); err != nil {
return lag, err
}
@ -209,7 +221,6 @@ func (this *Throttler) collectControlReplicasLag() {
lagResult := &mysql.ReplicationLagResult{Key: connectionConfig.Key}
go func() {
lagResult.Lag, lagResult.Err = readReplicaLag(connectionConfig)
this.migrationContext.PanicAbortIfTableError(lagResult.Err)
lagResults <- lagResult
}()
}
@ -233,12 +244,14 @@ func (this *Throttler) collectControlReplicasLag() {
}
this.migrationContext.SetControlReplicasLagResult(readControlReplicasLag())
}
aggressiveTicker := time.Tick(100 * time.Millisecond)
relaxedFactor := 10
counter := 0
shouldReadLagAggressively := false
for range aggressiveTicker {
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@ -281,24 +294,53 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<-
if url == "" {
return true, nil
}
resp, err := http.Head(url)
ctx, cancel := context.WithTimeout(context.Background(), this.httpClientTimeout)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
if err != nil {
return false, err
}
req.Header.Set("User-Agent", fmt.Sprintf("gh-ost/%s", this.appVersion))
resp, err := this.httpClient.Do(req)
if err != nil {
return false, err
}
defer resp.Body.Close()
atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(resp.StatusCode))
return false, nil
}
collectFunc()
_, err := collectFunc()
if err != nil {
// If not told to ignore errors, we'll throttle on HTTP connection issues
if !this.migrationContext.IgnoreHTTPErrors {
atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(-1))
}
}
firstThrottlingCollected <- true
ticker := time.Tick(100 * time.Millisecond)
for range ticker {
collectInterval := time.Duration(this.migrationContext.ThrottleHTTPIntervalMillis) * time.Millisecond
ticker := time.NewTicker(collectInterval)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
if sleep, _ := collectFunc(); sleep {
sleep, err := collectFunc()
if err != nil {
// If not told to ignore errors, we'll throttle on HTTP connection issues
if !this.migrationContext.IgnoreHTTPErrors {
atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(-1))
}
}
if sleep {
time.Sleep(1 * time.Second)
}
}
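
The HEAD probe above now carries a per-request deadline and a gh-ost User-Agent instead of the bare http.Head call. A standalone sketch of that request pattern, assuming placeholder URL and version values:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// headStatus issues a HEAD request that is abandoned once timeout elapses,
// mirroring collectFunc above.
func headStatus(client *http.Client, url, appVersion string, timeout time.Duration) (int, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
	if err != nil {
		return 0, err
	}
	req.Header.Set("User-Agent", fmt.Sprintf("gh-ost/%s", appVersion))

	resp, err := client.Do(req)
	if err != nil {
		return -1, err // -1 maps to "Connection error" in httpStatusMessages
	}
	defer resp.Body.Close()
	return resp.StatusCode, nil
}

func main() {
	// Placeholder throttle URL and version string.
	status, err := headStatus(&http.Client{}, "http://localhost:9777/throttle", "1.1.0", 500*time.Millisecond)
	fmt.Println(status, err)
}
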
@ -318,7 +360,7 @@ func (this *Throttler) collectGeneralThrottleMetrics() error {
// Regardless of throttle, we take opportunity to check for panic-abort
if this.migrationContext.PanicFlagFile != "" {
if base.FileExists(this.migrationContext.PanicFlagFile) {
this.migrationContext.PanicAbortOnError(fmt.Errorf("Found panic-file %s. Aborting without cleanup", this.migrationContext.PanicFlagFile))
this.migrationContext.PanicAbort <- fmt.Errorf("Found panic-file %s. Aborting without cleanup", this.migrationContext.PanicFlagFile)
}
}
@ -331,7 +373,7 @@ func (this *Throttler) collectGeneralThrottleMetrics() error {
hibernateDuration := time.Duration(this.migrationContext.CriticalLoadHibernateSeconds) * time.Second
hibernateUntilTime := time.Now().Add(hibernateDuration)
atomic.StoreInt64(&this.migrationContext.HibernateUntil, hibernateUntilTime.UnixNano())
log.Errorf("critical-load met: %s=%d, >=%d. Will hibernate for the duration of %+v, until %+v", variableName, value, threshold, hibernateDuration, hibernateUntilTime)
this.migrationContext.Log.Errorf("critical-load met: %s=%d, >=%d. Will hibernate for the duration of %+v, until %+v", variableName, value, threshold, hibernateDuration, hibernateUntilTime)
go func() {
time.Sleep(hibernateDuration)
this.migrationContext.SetThrottleGeneralCheckResult(base.NewThrottleCheckResult(true, "leaving hibernation", base.LeavingHibernationThrottleReasonHint))
@ -341,15 +383,15 @@ func (this *Throttler) collectGeneralThrottleMetrics() error {
}
if criticalLoadMet && this.migrationContext.CriticalLoadIntervalMilliseconds == 0 {
this.migrationContext.PanicAbortOnError(fmt.Errorf("critical-load met: %s=%d, >=%d", variableName, value, threshold))
this.migrationContext.PanicAbort <- fmt.Errorf("critical-load met: %s=%d, >=%d", variableName, value, threshold)
}
if criticalLoadMet && this.migrationContext.CriticalLoadIntervalMilliseconds > 0 {
log.Errorf("critical-load met once: %s=%d, >=%d. Will check again in %d millis", variableName, value, threshold, this.migrationContext.CriticalLoadIntervalMilliseconds)
this.migrationContext.Log.Errorf("critical-load met once: %s=%d, >=%d. Will check again in %d millis", variableName, value, threshold, this.migrationContext.CriticalLoadIntervalMilliseconds)
go func() {
timer := time.NewTimer(time.Millisecond * time.Duration(this.migrationContext.CriticalLoadIntervalMilliseconds))
<-timer.C
if criticalLoadMetAgain, variableName, value, threshold, _ := this.criticalLoadIsMet(); criticalLoadMetAgain {
this.migrationContext.PanicAbortOnError(fmt.Errorf("critical-load met again after %d millis: %s=%d, >=%d", this.migrationContext.CriticalLoadIntervalMilliseconds, variableName, value, threshold))
this.migrationContext.PanicAbort <- fmt.Errorf("critical-load met again after %d millis: %s=%d, >=%d", this.migrationContext.CriticalLoadIntervalMilliseconds, variableName, value, threshold)
}
}()
}
@ -404,8 +446,9 @@ func (this *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan
this.collectGeneralThrottleMetrics()
firstThrottlingCollected <- true
throttlerMetricsTick := time.Tick(1 * time.Second)
for range throttlerMetricsTick {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return
}
@ -416,9 +459,7 @@ func (this *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan
}
// initiateThrottlerChecks initiates the throttle ticker and sets the basic behavior of throttling.
func (this *Throttler) initiateThrottlerChecks() error {
throttlerTick := time.Tick(100 * time.Millisecond)
func (this *Throttler) initiateThrottlerChecks() {
throttlerFunction := func() {
alreadyThrottling, currentReason, _ := this.migrationContext.IsThrottled()
shouldThrottle, throttleReason, throttleReasonHint := this.shouldThrottle()
@ -435,14 +476,15 @@ func (this *Throttler) initiateThrottlerChecks() error {
this.migrationContext.SetThrottled(shouldThrottle, throttleReason, throttleReasonHint)
}
throttlerFunction()
for range throttlerTick {
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for range ticker.C {
if atomic.LoadInt64(&this.finishedMigrating) > 0 {
return nil
return
}
throttlerFunction()
}
return nil
}
// throttle sees if throttling needs take place, and if so, continuously sleeps (blocks)
@ -462,6 +504,6 @@ func (this *Throttler) throttle(onThrottled func()) {
}
func (this *Throttler) Teardown() {
log.Debugf("Tearing down...")
this.migrationContext.Log.Debugf("Tearing down...")
atomic.StoreInt64(&this.finishedMigrating, 1)
}


@ -1,36 +1,21 @@
/*
Copyright 2015 Shlomi Noach, courtesy Booking.com
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package mysql
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
)
var detachPattern *regexp.Regexp
func init() {
detachPattern, _ = regexp.Compile(`//([^/:]+):([\d]+)`) // e.g. `//binlog.01234:567890`
}
type BinlogType int
const (
BinaryLog BinlogType = iota
RelayLog
)
// BinlogCoordinates describes binary log coordinates in the form of log file & log position.
type BinlogCoordinates struct {
LogFile string
LogPos int64
Type BinlogType
}
// ParseBinlogCoordinates will parse BinlogCoordinates from a string representation such as binlog.000123:456
@ -62,7 +47,7 @@ func (this *BinlogCoordinates) Equals(other *BinlogCoordinates) bool {
if other == nil {
return false
}
return this.LogFile == other.LogFile && this.LogPos == other.LogPos && this.Type == other.Type
return this.LogFile == other.LogFile && this.LogPos == other.LogPos
}
// IsEmpty returns true if the log file is empty, unnamed
@ -87,76 +72,5 @@ func (this *BinlogCoordinates) SmallerThanOrEquals(other *BinlogCoordinates) boo
if this.SmallerThan(other) {
return true
}
return this.LogFile == other.LogFile && this.LogPos == other.LogPos // No Type comparison
}
// FileSmallerThan returns true if this coordinate's file is strictly smaller than the other's.
func (this *BinlogCoordinates) FileSmallerThan(other *BinlogCoordinates) bool {
return this.LogFile < other.LogFile
}
// FileNumberDistance returns the numeric distance between this coordinate's file number and the other's.
// Effectively it means "how many rotates/FLUSHes would make this coordinate's file reach the other's"
func (this *BinlogCoordinates) FileNumberDistance(other *BinlogCoordinates) int {
thisNumber, _ := this.FileNumber()
otherNumber, _ := other.FileNumber()
return otherNumber - thisNumber
}
// FileNumber returns the numeric value of the file, and the length in characters representing the number in the filename.
// Example: FileNumber() of mysqld.log.000789 is (789, 6)
func (this *BinlogCoordinates) FileNumber() (int, int) {
tokens := strings.Split(this.LogFile, ".")
numPart := tokens[len(tokens)-1]
numLen := len(numPart)
fileNum, err := strconv.Atoi(numPart)
if err != nil {
return 0, 0
}
return fileNum, numLen
}
// PreviousFileCoordinatesBy guesses the filename of the previous binlog/relaylog, by given offset (number of files back)
func (this *BinlogCoordinates) PreviousFileCoordinatesBy(offset int) (BinlogCoordinates, error) {
result := BinlogCoordinates{LogPos: 0, Type: this.Type}
fileNum, numLen := this.FileNumber()
if fileNum == 0 {
return result, errors.New("Log file number is zero, cannot detect previous file")
}
newNumStr := fmt.Sprintf("%d", (fileNum - offset))
newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr
tokens := strings.Split(this.LogFile, ".")
tokens[len(tokens)-1] = newNumStr
result.LogFile = strings.Join(tokens, ".")
return result, nil
}
// PreviousFileCoordinates guesses the filename of the previous binlog/relaylog
func (this *BinlogCoordinates) PreviousFileCoordinates() (BinlogCoordinates, error) {
return this.PreviousFileCoordinatesBy(1)
}
// NextFileCoordinates guesses the filename of the next binlog/relaylog
func (this *BinlogCoordinates) NextFileCoordinates() (BinlogCoordinates, error) {
result := BinlogCoordinates{LogPos: 0, Type: this.Type}
fileNum, numLen := this.FileNumber()
newNumStr := fmt.Sprintf("%d", (fileNum + 1))
newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr
tokens := strings.Split(this.LogFile, ".")
tokens[len(tokens)-1] = newNumStr
result.LogFile = strings.Join(tokens, ".")
return result, nil
}
// DetachedCoordinates returns whether this coordinate is detached, and if so, the detached log file and position.
func (this *BinlogCoordinates) DetachedCoordinates() (isDetached bool, detachedLogFile string, detachedLogPos string) {
detachedCoordinatesSubmatch := detachPattern.FindStringSubmatch(this.LogFile)
if len(detachedCoordinatesSubmatch) == 0 {
return false, "", ""
}
return true, detachedCoordinatesSubmatch[1], detachedCoordinatesSubmatch[2]
return this.LogFile == other.LogFile && this.LogPos == other.LogPos
}


@ -8,8 +8,8 @@ package mysql
import (
"testing"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {
@ -37,57 +37,6 @@ func TestBinlogCoordinates(t *testing.T) {
test.S(t).ExpectTrue(c1.SmallerThanOrEquals(&c3))
}
func TestBinlogNext(t *testing.T) {
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
cres, err := c1.NextFileCoordinates()
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00018")
c2 := BinlogCoordinates{LogFile: "mysql-bin.00099", LogPos: 104}
cres, err = c2.NextFileCoordinates()
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00100")
c3 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00099", LogPos: 104}
cres, err = c3.NextFileCoordinates()
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql.00.prod.com.00100")
}
func TestBinlogPrevious(t *testing.T) {
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
cres, err := c1.PreviousFileCoordinates()
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00016")
c2 := BinlogCoordinates{LogFile: "mysql-bin.00100", LogPos: 104}
cres, err = c2.PreviousFileCoordinates()
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql-bin.00099")
c3 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00100", LogPos: 104}
cres, err = c3.PreviousFileCoordinates()
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(c1.Type, cres.Type)
test.S(t).ExpectEquals(cres.LogFile, "mysql.00.prod.com.00099")
c4 := BinlogCoordinates{LogFile: "mysql.00.prod.com.00000", LogPos: 104}
_, err = c4.PreviousFileCoordinates()
test.S(t).ExpectNotNil(err)
}
func TestBinlogCoordinatesAsKey(t *testing.T) {
m := make(map[BinlogCoordinates]bool)
@ -103,20 +52,3 @@ func TestBinlogCoordinatesAsKey(t *testing.T) {
test.S(t).ExpectEquals(len(m), 3)
}
func TestBinlogFileNumber(t *testing.T) {
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
c2 := BinlogCoordinates{LogFile: "mysql-bin.00022", LogPos: 104}
test.S(t).ExpectEquals(c1.FileNumberDistance(&c1), 0)
test.S(t).ExpectEquals(c1.FileNumberDistance(&c2), 5)
test.S(t).ExpectEquals(c2.FileNumberDistance(&c1), -5)
}
func TestBinlogFileNumberDistance(t *testing.T) {
c1 := BinlogCoordinates{LogFile: "mysql-bin.00017", LogPos: 104}
fileNum, numLen := c1.FileNumber()
test.S(t).ExpectEquals(fileNum, 17)
test.S(t).ExpectEquals(numLen, 5)
}


@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -12,6 +12,7 @@ import (
"fmt"
"io/ioutil"
"net"
"strings"
"github.com/go-sql-driver/mysql"
)
@ -22,11 +23,13 @@ const (
// ConnectionConfig is the minimal configuration required to connect to a MySQL server
type ConnectionConfig struct {
Key InstanceKey
User string
Password string
ImpliedKey *InstanceKey
tlsConfig *tls.Config
Key InstanceKey
User string
Password string
ImpliedKey *InstanceKey
tlsConfig *tls.Config
Timeout float64
TransactionIsolation string
}
func NewConnectionConfig() *ConnectionConfig {
@ -40,10 +43,12 @@ func NewConnectionConfig() *ConnectionConfig {
// DuplicateCredentials creates a new connection config with given key and with same credentials as this config
func (this *ConnectionConfig) DuplicateCredentials(key InstanceKey) *ConnectionConfig {
config := &ConnectionConfig{
Key: key,
User: this.User,
Password: this.Password,
tlsConfig: this.tlsConfig,
Key: key,
User: this.User,
Password: this.Password,
tlsConfig: this.tlsConfig,
Timeout: this.Timeout,
TransactionIsolation: this.TransactionIsolation,
}
config.ImpliedKey = &config.Key
return config
@ -90,6 +95,7 @@ func (this *ConnectionConfig) UseTLS(caCertificatePath, clientCertificate, clien
}
this.tlsConfig = &tls.Config{
ServerName: this.Key.Hostname,
Certificates: certs,
RootCAs: rootCertPool,
InsecureSkipVerify: allowInsecure,
@ -109,12 +115,23 @@ func (this *ConnectionConfig) GetDBUri(databaseName string) string {
// Wrap IPv6 literals in square brackets
hostname = fmt.Sprintf("[%s]", hostname)
}
interpolateParams := true
// go-mysql-driver defaults to false if tls param is not provided; explicitly setting here to
// simplify construction of the DSN below.
tlsOption := "false"
if this.tlsConfig != nil {
tlsOption = TLS_CONFIG_KEY
}
return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?interpolateParams=%t&autocommit=true&charset=utf8mb4,utf8,latin1&tls=%s", this.User, this.Password, hostname, this.Key.Port, databaseName, interpolateParams, tlsOption)
connectionParams := []string{
"autocommit=true",
"charset=utf8mb4,utf8,latin1",
"interpolateParams=true",
fmt.Sprintf("tls=%s", tlsOption),
fmt.Sprintf("transaction_isolation=%q", this.TransactionIsolation),
fmt.Sprintf("timeout=%fs", this.Timeout),
fmt.Sprintf("readTimeout=%fs", this.Timeout),
fmt.Sprintf("writeTimeout=%fs", this.Timeout),
}
return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?%s", this.User, this.Password, hostname, this.Key.Port, databaseName, strings.Join(connectionParams, "&"))
}
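
Because the DSN is now assembled from an ordered parameter slice, its layout is deterministic, which the updated test expectations below rely on. A quick sketch reproducing the output for a hypothetical config, with values matching the test fixtures:

package main

import (
	"fmt"
	"strings"
)

func main() {
	user, password, host, port, db := "gromit", "penguin", "myhost", 3306, "test"
	timeout := 1.2345 // seconds
	params := []string{
		"autocommit=true",
		"charset=utf8mb4,utf8,latin1",
		"interpolateParams=true",
		"tls=false",
		fmt.Sprintf("transaction_isolation=%q", "REPEATABLE-READ"),
		fmt.Sprintf("timeout=%fs", timeout),
		fmt.Sprintf("readTimeout=%fs", timeout),
		fmt.Sprintf("writeTimeout=%fs", timeout),
	}
	// Prints the exact URI the TestGetDBUri case below expects.
	fmt.Printf("%s:%s@tcp(%s:%d)/%s?%s\n", user, password, host, port, db, strings.Join(params, "&"))
}
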


@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -9,8 +9,12 @@ import (
"crypto/tls"
"testing"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
const (
transactionIsolation = "REPEATABLE-READ"
)
func init() {
@ -25,6 +29,7 @@ func TestNewConnectionConfig(t *testing.T) {
test.S(t).ExpectEquals(c.ImpliedKey.Port, 0)
test.S(t).ExpectEquals(c.User, "")
test.S(t).ExpectEquals(c.Password, "")
test.S(t).ExpectEquals(c.TransactionIsolation, "")
}
func TestDuplicateCredentials(t *testing.T) {
@ -36,6 +41,7 @@ func TestDuplicateCredentials(t *testing.T) {
InsecureSkipVerify: true,
ServerName: "feathers",
}
c.TransactionIsolation = transactionIsolation
dup := c.DuplicateCredentials(InstanceKey{Hostname: "otherhost", Port: 3310})
test.S(t).ExpectEquals(dup.Key.Hostname, "otherhost")
@ -45,6 +51,7 @@ func TestDuplicateCredentials(t *testing.T) {
test.S(t).ExpectEquals(dup.User, "gromit")
test.S(t).ExpectEquals(dup.Password, "penguin")
test.S(t).ExpectEquals(dup.tlsConfig, c.tlsConfig)
test.S(t).ExpectEquals(dup.TransactionIsolation, c.TransactionIsolation)
}
func TestDuplicate(t *testing.T) {
@ -52,6 +59,7 @@ func TestDuplicate(t *testing.T) {
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
c.TransactionIsolation = transactionIsolation
dup := c.Duplicate()
test.S(t).ExpectEquals(dup.Key.Hostname, "myhost")
@ -60,6 +68,7 @@ func TestDuplicate(t *testing.T) {
test.S(t).ExpectEquals(dup.ImpliedKey.Port, 3306)
test.S(t).ExpectEquals(dup.User, "gromit")
test.S(t).ExpectEquals(dup.Password, "penguin")
test.S(t).ExpectEquals(dup.TransactionIsolation, transactionIsolation)
}
func TestGetDBUri(t *testing.T) {
@ -67,9 +76,11 @@ func TestGetDBUri(t *testing.T) {
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
c.Timeout = 1.2345
c.TransactionIsolation = transactionIsolation
uri := c.GetDBUri("test")
test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1&tls=false")
test.S(t).ExpectEquals(uri, `gromit:penguin@tcp(myhost:3306)/test?autocommit=true&charset=utf8mb4,utf8,latin1&interpolateParams=true&tls=false&transaction_isolation="REPEATABLE-READ"&timeout=1.234500s&readTimeout=1.234500s&writeTimeout=1.234500s`)
}
func TestGetDBUriWithTLSSetup(t *testing.T) {
@ -77,8 +88,10 @@ func TestGetDBUriWithTLSSetup(t *testing.T) {
c.Key = InstanceKey{Hostname: "myhost", Port: 3306}
c.User = "gromit"
c.Password = "penguin"
c.Timeout = 1.2345
c.tlsConfig = &tls.Config{}
c.TransactionIsolation = transactionIsolation
uri := c.GetDBUri("test")
test.S(t).ExpectEquals(uri, "gromit:penguin@tcp(myhost:3306)/test?interpolateParams=true&autocommit=true&charset=utf8mb4,utf8,latin1&tls=ghost")
test.S(t).ExpectEquals(uri, `gromit:penguin@tcp(myhost:3306)/test?autocommit=true&charset=utf8mb4,utf8,latin1&interpolateParams=true&tls=ghost&transaction_isolation="REPEATABLE-READ"&timeout=1.234500s&readTimeout=1.234500s&writeTimeout=1.234500s`)
}


@ -1,5 +1,6 @@
/*
Copyright 2015 Shlomi Noach, courtesy Booking.com
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -7,12 +8,21 @@ package mysql
import (
"fmt"
"regexp"
"strconv"
"strings"
)
const (
DefaultInstancePort = 3306
const DefaultInstancePort = 3306
var (
ipv4HostPortRegexp = regexp.MustCompile("^([^:]+):([0-9]+)$")
ipv4HostRegexp = regexp.MustCompile("^([^:]+)$")
// e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308
ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") //nolint:gosimple
// e.g. 2001:db8:1f70::999:de8:7648:6e8
ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$")
)
// InstanceKey is an instance indicator, identified by hostname and port
@ -25,25 +35,34 @@ const detachHint = "//"
// NewRawInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306
func NewRawInstanceKey(hostPort string) (*InstanceKey, error) {
tokens := strings.SplitN(hostPort, ":", 2)
if len(tokens) != 2 {
return nil, fmt.Errorf("Cannot parse InstanceKey from %s. Expected format is host:port", hostPort)
var hostname, port string
if submatch := ipv4HostPortRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
hostname = submatch[1]
port = submatch[2]
} else if submatch := ipv4HostRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
hostname = submatch[1]
} else if submatch := ipv6HostPortRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
hostname = submatch[1]
port = submatch[2]
} else if submatch := ipv6HostRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 {
hostname = submatch[1]
} else {
return nil, fmt.Errorf("Cannot parse address: %s", hostPort)
}
instanceKey := &InstanceKey{Hostname: tokens[0]}
var err error
if instanceKey.Port, err = strconv.Atoi(tokens[1]); err != nil {
return instanceKey, fmt.Errorf("Invalid port: %s", tokens[1])
instanceKey := &InstanceKey{Hostname: hostname, Port: DefaultInstancePort}
if port != "" {
var err error
if instanceKey.Port, err = strconv.Atoi(port); err != nil {
return instanceKey, fmt.Errorf("Invalid port: %s", port)
}
}
return instanceKey, nil
}
// ParseRawInstanceKeyLoose will parse an InstanceKey from a string representation such as 127.0.0.1:3306.
// ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306.
// The port part is optional; there will be no name resolve
func ParseRawInstanceKeyLoose(hostPort string) (*InstanceKey, error) {
if !strings.Contains(hostPort, ":") {
return &InstanceKey{Hostname: hostPort, Port: DefaultInstancePort}, nil
}
func ParseInstanceKey(hostPort string) (*InstanceKey, error) {
return NewRawInstanceKey(hostPort)
}


@ -92,7 +92,7 @@ func (this *InstanceKeyMap) ReadCommaDelimitedList(list string) error {
}
tokens := strings.Split(list, ",")
for _, token := range tokens {
key, err := ParseRawInstanceKeyLoose(token)
key, err := ParseInstanceKey(token)
if err != nil {
return err
}


@ -0,0 +1,74 @@
/*
Copyright 2016 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
package mysql
import (
"testing"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {
log.SetLevel(log.ERROR)
}
func TestParseInstanceKey(t *testing.T) {
{
key, err := ParseInstanceKey("myhost:1234")
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(key.Hostname, "myhost")
test.S(t).ExpectEquals(key.Port, 1234)
}
{
key, err := ParseInstanceKey("myhost")
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(key.Hostname, "myhost")
test.S(t).ExpectEquals(key.Port, 3306)
}
{
key, err := ParseInstanceKey("10.0.0.3:3307")
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(key.Hostname, "10.0.0.3")
test.S(t).ExpectEquals(key.Port, 3307)
}
{
key, err := ParseInstanceKey("10.0.0.3")
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(key.Hostname, "10.0.0.3")
test.S(t).ExpectEquals(key.Port, 3306)
}
{
key, err := ParseInstanceKey("[2001:db8:1f70::999:de8:7648:6e8]:3308")
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(key.Hostname, "2001:db8:1f70::999:de8:7648:6e8")
test.S(t).ExpectEquals(key.Port, 3308)
}
{
key, err := ParseInstanceKey("::1")
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(key.Hostname, "::1")
test.S(t).ExpectEquals(key.Port, 3306)
}
{
key, err := ParseInstanceKey("0:0:0:0:0:0:0:0")
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(key.Hostname, "0:0:0:0:0:0:0:0")
test.S(t).ExpectEquals(key.Port, 3306)
}
{
_, err := ParseInstanceKey("[2001:xxxx:1f70::999:de8:7648:6e8]:3308")
test.S(t).ExpectNotNil(err)
}
{
_, err := ParseInstanceKey("10.0.0.4:")
test.S(t).ExpectNotNil(err)
}
{
_, err := ParseInstanceKey("10.0.0.4:5.6.7")
test.S(t).ExpectNotNil(err)
}
}


@ -14,18 +14,16 @@ import (
"github.com/github/gh-ost/go/sql"
"github.com/outbrain/golib/log"
"github.com/outbrain/golib/sqlutils"
"github.com/openark/golib/log"
"github.com/openark/golib/sqlutils"
)
const (
Error1017CantFindFile = "Error 1017:"
Error1146TableDoesntExist = "Error 1146:"
MaxTableNameLength = 64
MaxReplicationPasswordLength = 32
MaxDBPoolConnections = 3
)
const MaxTableNameLength = 64
const MaxReplicationPasswordLength = 32
type ReplicationLagResult struct {
Key InstanceKey
Lag time.Duration
@ -44,23 +42,22 @@ func (this *ReplicationLagResult) HasLag() bool {
var knownDBs map[string]*gosql.DB = make(map[string]*gosql.DB)
var knownDBsMutex = &sync.Mutex{}
func GetDB(migrationUuid string, mysql_uri string) (*gosql.DB, bool, error) {
func GetDB(migrationUuid string, mysql_uri string) (db *gosql.DB, exists bool, err error) {
cacheKey := migrationUuid + ":" + mysql_uri
knownDBsMutex.Lock()
defer func() {
knownDBsMutex.Unlock()
}()
defer knownDBsMutex.Unlock()
var exists bool
if _, exists = knownDBs[cacheKey]; !exists {
if db, err := gosql.Open("mysql", mysql_uri); err == nil {
knownDBs[cacheKey] = db
} else {
return db, exists, err
if db, exists = knownDBs[cacheKey]; !exists {
db, err = gosql.Open("mysql", mysql_uri)
if err != nil {
return nil, false, err
}
db.SetMaxOpenConns(MaxDBPoolConnections)
db.SetMaxIdleConns(MaxDBPoolConnections)
knownDBs[cacheKey] = db
}
return knownDBs[cacheKey], exists, nil
return db, exists, nil
}
// GetReplicationLagFromSlaveStatus returns replication lag for a given db; via SHOW SLAVE STATUS
@ -208,3 +205,9 @@ func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnL
}
return sql.NewColumnList(columnNames), sql.NewColumnList(virtualColumnNames), nil
}
// Kill executes a KILL QUERY by connection id
func Kill(db *gosql.DB, connectionID string) error {
_, err := db.Exec(fmt.Sprintf("KILL QUERY %s", connectionID))
return err
}
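
KILL statements cannot be prepared with a ? placeholder in MySQL, so the connection id has to be interpolated into the statement text. A hedged usage sketch, with a placeholder DSN, that fetches this connection's own id and then kills whatever query runs on it:

package main

import (
	gosql "database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Placeholder DSN; point it at a real server to try this out.
	db, err := gosql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/information_schema")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var connectionID string
	if err := db.QueryRow(`SELECT CONNECTION_ID()`).Scan(&connectionID); err != nil {
		log.Fatal(err)
	}
	// KILL is not preparable, hence fmt.Sprintf rather than a ? placeholder.
	if _, err := db.Exec(fmt.Sprintf("KILL QUERY %s", connectionID)); err != nil {
		log.Fatal(err)
	}
	fmt.Println("killed query on connection", connectionID)
}
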


@ -1,65 +0,0 @@
/*
Copyright 2014 Outbrain Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package os
import (
"github.com/outbrain/golib/log"
"io/ioutil"
"os"
"os/exec"
)
func execCmd(commandText string, arguments ...string) (*exec.Cmd, string, error) {
commandBytes := []byte(commandText)
tmpFile, err := ioutil.TempFile("", "gh-ost-process-cmd-")
if err != nil {
return nil, "", log.Errore(err)
}
ioutil.WriteFile(tmpFile.Name(), commandBytes, 0644)
log.Debugf("execCmd: %s", commandText)
shellArguments := append([]string{}, tmpFile.Name())
shellArguments = append(shellArguments, arguments...)
log.Debugf("%+v", shellArguments)
return exec.Command("bash", shellArguments...), tmpFile.Name(), nil
}
// CommandRun executes a command
func CommandRun(commandText string, arguments ...string) error {
cmd, tmpFileName, err := execCmd(commandText, arguments...)
defer os.Remove(tmpFileName)
if err != nil {
return log.Errore(err)
}
err = cmd.Run()
return log.Errore(err)
}
// RunCommandWithOutput executes a command and return output bytes
func RunCommandWithOutput(commandText string) ([]byte, error) {
cmd, tmpFileName, err := execCmd(commandText)
defer os.Remove(tmpFileName)
if err != nil {
return nil, log.Errore(err)
}
outputBytes, err := cmd.Output()
if err != nil {
return nil, log.Errore(err)
}
return outputBytes, nil
}


@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -33,11 +33,13 @@ func EscapeName(name string) string {
}
func buildColumnsPreparedValues(columns *ColumnList) []string {
values := make([]string, columns.Len(), columns.Len())
values := make([]string, columns.Len())
for i, column := range columns.Columns() {
var token string
if column.timezoneConversion != nil {
token = fmt.Sprintf("convert_tz(?, '%s', '%s')", column.timezoneConversion.ToTimezone, "+00:00")
} else if column.enumToTextConversion {
token = fmt.Sprintf("ELT(?, %s)", column.EnumValues)
} else if column.Type == JSONColumnType {
token = "convert(? using utf8mb4)"
} else {
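
The new enumToTextConversion branch leans on MySQL's ELT(n, str1, str2, ...), which returns the n-th string: binding the enum's numeric binlog value as the placeholder recovers its textual label. A small sketch of the token being built, using hypothetical enum values:

package main

import "fmt"

func main() {
	// Hypothetical column: color enum('red','green','blue','orange')
	enumValues := "'red','green','blue','orange'" // as extracted by ParseEnumValues
	token := fmt.Sprintf("ELT(?, %s)", enumValues)
	fmt.Println(token) // ELT(?, 'red','green','blue','orange')
	// Binding 2 for the placeholder evaluates to 'green' server-side.
}
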
@ -49,7 +51,7 @@ func buildColumnsPreparedValues(columns *ColumnList) []string {
}
func buildPreparedValues(length int) []string {
values := make([]string, length, length)
values := make([]string, length)
for i := 0; i < length; i++ {
values[i] = "?"
}
@ -57,7 +59,7 @@ func buildPreparedValues(length int) []string {
}
func duplicateNames(names []string) []string {
duplicate := make([]string, len(names), len(names))
duplicate := make([]string, len(names))
copy(duplicate, names)
return duplicate
}
@ -108,6 +110,8 @@ func BuildSetPreparedClause(columns *ColumnList) (result string, err error) {
var setToken string
if column.timezoneConversion != nil {
setToken = fmt.Sprintf("%s=convert_tz(?, '%s', '%s')", EscapeName(column.Name), column.timezoneConversion.ToTimezone, "+00:00")
} else if column.enumToTextConversion {
setToken = fmt.Sprintf("%s=ELT(?, %s)", EscapeName(column.Name), column.EnumValues)
} else if column.Type == JSONColumnType {
setToken = fmt.Sprintf("%s=convert(? using utf8mb4)", EscapeName(column.Name))
} else {
@ -163,7 +167,7 @@ func BuildRangeComparison(columns []string, values []string, args []interface{},
if includeEquals {
comparison, err := BuildEqualsComparison(columns, values)
if err != nil {
return "", explodedArgs, nil
return "", explodedArgs, err
}
comparisons = append(comparisons, comparison)
explodedArgs = append(explodedArgs, args...)
@ -257,8 +261,8 @@ func BuildUniqueKeyRangeEndPreparedQueryViaOffset(databaseName, tableName string
explodedArgs = append(explodedArgs, rangeExplodedArgs...)
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames))
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames))
for i, column := range uniqueKeyColumns.Columns() {
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
if column.Type == EnumColumnType {
@ -312,8 +316,8 @@ func BuildUniqueKeyRangeEndPreparedQueryViaTemptable(databaseName, tableName str
explodedArgs = append(explodedArgs, rangeExplodedArgs...)
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames))
uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames))
for i, column := range uniqueKeyColumns.Columns() {
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
if column.Type == EnumColumnType {
@ -364,7 +368,7 @@ func buildUniqueKeyMinMaxValuesPreparedQuery(databaseName, tableName string, uni
tableName = EscapeName(tableName)
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names())
uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames))
uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames))
for i, column := range uniqueKeyColumns.Columns() {
uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i])
if column.Type == EnumColumnType {
@ -396,7 +400,7 @@ func BuildDMLDeleteQuery(databaseName, tableName string, tableColumns, uniqueKey
}
for _, column := range uniqueKeyColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
arg := column.convertArg(args[tableOrdinal])
arg := column.convertArg(args[tableOrdinal], true)
uniqueKeyArgs = append(uniqueKeyArgs, arg)
}
databaseName = EscapeName(databaseName)
@ -433,7 +437,7 @@ func BuildDMLInsertQuery(databaseName, tableName string, tableColumns, sharedCol
for _, column := range sharedColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
arg := column.convertArg(args[tableOrdinal])
arg := column.convertArg(args[tableOrdinal], false)
sharedArgs = append(sharedArgs, arg)
}
@ -481,27 +485,33 @@ func BuildDMLUpdateQuery(databaseName, tableName string, tableColumns, sharedCol
for _, column := range sharedColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
arg := column.convertArg(valueArgs[tableOrdinal])
arg := column.convertArg(valueArgs[tableOrdinal], false)
sharedArgs = append(sharedArgs, arg)
}
for _, column := range uniqueKeyColumns.Columns() {
tableOrdinal := tableColumns.Ordinals[column.Name]
arg := column.convertArg(whereArgs[tableOrdinal])
arg := column.convertArg(whereArgs[tableOrdinal], true)
uniqueKeyArgs = append(uniqueKeyArgs, arg)
}
setClause, err := BuildSetPreparedClause(mappedSharedColumns)
if err != nil {
return "", sharedArgs, uniqueKeyArgs, err
}
equalsComparison, err := BuildEqualsPreparedComparison(uniqueKeyColumns.Names())
if err != nil {
return "", sharedArgs, uniqueKeyArgs, err
}
result = fmt.Sprintf(`
update /* gh-ost %s.%s */
%s.%s
update /* gh-ost %s.%s */
%s.%s
set
%s
where
%s
`, databaseName, tableName,
%s
`, databaseName, tableName,
databaseName, tableName,
setClause,
equalsComparison,


@ -12,8 +12,8 @@ import (
"regexp"
"strings"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
var (


@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -12,26 +12,57 @@ import (
)
var (
sanitizeQuotesRegexp = regexp.MustCompile("('[^']*')")
renameColumnRegexp = regexp.MustCompile(`(?i)\bchange\s+(column\s+|)([\S]+)\s+([\S]+)\s+`)
dropColumnRegexp = regexp.MustCompile(`(?i)\bdrop\s+(column\s+|)([\S]+)$`)
renameTableRegexp = regexp.MustCompile(`(?i)\brename\s+(to|as)\s+`)
sanitizeQuotesRegexp = regexp.MustCompile("('[^']*')")
renameColumnRegexp = regexp.MustCompile(`(?i)\bchange\s+(column\s+|)([\S]+)\s+([\S]+)\s+`)
dropColumnRegexp = regexp.MustCompile(`(?i)\bdrop\s+(column\s+|)([\S]+)$`)
renameTableRegexp = regexp.MustCompile(`(?i)\brename\s+(to|as)\s+`)
autoIncrementRegexp = regexp.MustCompile(`(?i)\bauto_increment[\s]*=[\s]*([0-9]+)`)
alterTableExplicitSchemaTableRegexps = []*regexp.Regexp{
// ALTER TABLE `scm`.`tbl` something
regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `[.]` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
// ALTER TABLE `scm`.tbl something
regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `[.]([\S]+)\s+(.*$)`),
// ALTER TABLE scm.`tbl` something
regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)[.]` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
// ALTER TABLE scm.tbl something
regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)[.]([\S]+)\s+(.*$)`),
}
alterTableExplicitTableRegexps = []*regexp.Regexp{
// ALTER TABLE `tbl` something
regexp.MustCompile(`(?i)\balter\s+table\s+` + "`" + `([^` + "`" + `]+)` + "`" + `\s+(.*$)`),
// ALTER TABLE tbl something
regexp.MustCompile(`(?i)\balter\s+table\s+([\S]+)\s+(.*$)`),
}
enumValuesRegexp = regexp.MustCompile("^enum[(](.*)[)]$")
)
type Parser struct {
columnRenameMap map[string]string
droppedColumns map[string]bool
isRenameTable bool
type AlterTableParser struct {
columnRenameMap map[string]string
droppedColumns map[string]bool
isRenameTable bool
isAutoIncrementDefined bool
alterStatementOptions string
alterTokens []string
explicitSchema string
explicitTable string
}
func NewParser() *Parser {
return &Parser{
func NewAlterTableParser() *AlterTableParser {
return &AlterTableParser{
columnRenameMap: make(map[string]string),
droppedColumns: make(map[string]bool),
}
}
func (this *Parser) tokenizeAlterStatement(alterStatement string) (tokens []string, err error) {
func NewParserFromAlterStatement(alterStatement string) *AlterTableParser {
parser := NewAlterTableParser()
parser.ParseAlterStatement(alterStatement)
return parser
}
func (this *AlterTableParser) tokenizeAlterStatement(alterStatement string) (tokens []string) {
terminatingQuote := rune(0)
f := func(c rune) bool {
switch {
@ -55,16 +86,16 @@ func (this *Parser) tokenizeAlterStatement(alterStatement string) (tokens []stri
for i := range tokens {
tokens[i] = strings.TrimSpace(tokens[i])
}
return tokens, nil
return tokens
}
func (this *Parser) sanitizeQuotesFromAlterStatement(alterStatement string) (strippedStatement string) {
func (this *AlterTableParser) sanitizeQuotesFromAlterStatement(alterStatement string) (strippedStatement string) {
strippedStatement = alterStatement
strippedStatement = sanitizeQuotesRegexp.ReplaceAllString(strippedStatement, "''")
return strippedStatement
}
func (this *Parser) parseAlterToken(alterToken string) (err error) {
func (this *AlterTableParser) parseAlterToken(alterToken string) {
{
// rename
allStringSubmatch := renameColumnRegexp.FindAllStringSubmatch(alterToken, -1)
@ -94,19 +125,40 @@ func (this *Parser) parseAlterToken(alterToken string) (err error) {
this.isRenameTable = true
}
}
return nil
{
// auto_increment
if autoIncrementRegexp.MatchString(alterToken) {
this.isAutoIncrementDefined = true
}
}
}
func (this *Parser) ParseAlterStatement(alterStatement string) (err error) {
alterTokens, _ := this.tokenizeAlterStatement(alterStatement)
for _, alterToken := range alterTokens {
func (this *AlterTableParser) ParseAlterStatement(alterStatement string) (err error) {
this.alterStatementOptions = alterStatement
for _, alterTableRegexp := range alterTableExplicitSchemaTableRegexps {
if submatch := alterTableRegexp.FindStringSubmatch(this.alterStatementOptions); len(submatch) > 0 {
this.explicitSchema = submatch[1]
this.explicitTable = submatch[2]
this.alterStatementOptions = submatch[3]
break
}
}
for _, alterTableRegexp := range alterTableExplicitTableRegexps {
if submatch := alterTableRegexp.FindStringSubmatch(this.alterStatementOptions); len(submatch) > 0 {
this.explicitTable = submatch[1]
this.alterStatementOptions = submatch[2]
break
}
}
for _, alterToken := range this.tokenizeAlterStatement(this.alterStatementOptions) {
alterToken = this.sanitizeQuotesFromAlterStatement(alterToken)
this.parseAlterToken(alterToken)
this.alterTokens = append(this.alterTokens, alterToken)
}
return nil
}
func (this *Parser) GetNonTrivialRenames() map[string]string {
func (this *AlterTableParser) GetNonTrivialRenames() map[string]string {
result := make(map[string]string)
for column, renamed := range this.columnRenameMap {
if column != renamed {
@ -116,14 +168,45 @@ func (this *Parser) GetNonTrivialRenames() map[string]string {
return result
}
func (this *Parser) HasNonTrivialRenames() bool {
func (this *AlterTableParser) HasNonTrivialRenames() bool {
return len(this.GetNonTrivialRenames()) > 0
}
func (this *Parser) DroppedColumnsMap() map[string]bool {
func (this *AlterTableParser) DroppedColumnsMap() map[string]bool {
return this.droppedColumns
}
func (this *Parser) IsRenameTable() bool {
func (this *AlterTableParser) IsRenameTable() bool {
return this.isRenameTable
}
func (this *AlterTableParser) IsAutoIncrementDefined() bool {
return this.isAutoIncrementDefined
}
func (this *AlterTableParser) GetExplicitSchema() string {
return this.explicitSchema
}
func (this *AlterTableParser) HasExplicitSchema() bool {
return this.GetExplicitSchema() != ""
}
func (this *AlterTableParser) GetExplicitTable() string {
return this.explicitTable
}
func (this *AlterTableParser) HasExplicitTable() bool {
return this.GetExplicitTable() != ""
}
func (this *AlterTableParser) GetAlterStatementOptions() string {
return this.alterStatementOptions
}
func ParseEnumValues(enumColumnType string) string {
if submatch := enumValuesRegexp.FindStringSubmatch(enumColumnType); len(submatch) > 0 {
return submatch[1]
}
return enumColumnType
}
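
A short usage sketch of the renamed AlterTableParser, exercising the same behavior the tests below assert (illustrative only):

package main

import (
	"fmt"

	"github.com/github/gh-ost/go/sql"
)

func main() {
	parser := sql.NewAlterTableParser()
	if err := parser.ParseAlterStatement("alter table scm.tbl drop column b, add index idx(i)"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(parser.GetExplicitSchema())        // scm
	fmt.Println(parser.GetExplicitTable())         // tbl
	fmt.Println(parser.GetAlterStatementOptions()) // drop column b, add index idx(i)
	fmt.Println(parser.DroppedColumnsMap())        // map[b:true]
}
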


@ -1,5 +1,5 @@
/*
Copyright 2016 GitHub Inc.
Copyright 2022 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
@ -9,8 +9,8 @@ import (
"reflect"
"testing"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {
@ -19,28 +19,53 @@ func init() {
func TestParseAlterStatement(t *testing.T) {
statement := "add column t int, engine=innodb"
parser := NewParser()
parser := NewAlterTableParser()
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
}
func TestParseAlterStatementTrivialRename(t *testing.T) {
statement := "add column t int, change ts ts timestamp, engine=innodb"
parser := NewParser()
parser := NewAlterTableParser()
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
test.S(t).ExpectEquals(len(parser.columnRenameMap), 1)
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
}
func TestParseAlterStatementWithAutoIncrement(t *testing.T) {
statements := []string{
"auto_increment=7",
"auto_increment = 7",
"AUTO_INCREMENT = 71",
"add column t int, change ts ts timestamp, auto_increment=7 engine=innodb",
"add column t int, change ts ts timestamp, auto_increment =7 engine=innodb",
"add column t int, change ts ts timestamp, AUTO_INCREMENT = 7 engine=innodb",
"add column t int, change ts ts timestamp, engine=innodb auto_increment=73425",
}
for _, statement := range statements {
parser := NewAlterTableParser()
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectTrue(parser.IsAutoIncrementDefined())
}
}
func TestParseAlterStatementTrivialRenames(t *testing.T) {
statement := "add column t int, change ts ts timestamp, CHANGE f `f` float, engine=innodb"
parser := NewParser()
parser := NewAlterTableParser()
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectFalse(parser.HasNonTrivialRenames())
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
test.S(t).ExpectEquals(len(parser.columnRenameMap), 2)
test.S(t).ExpectEquals(parser.columnRenameMap["ts"], "ts")
test.S(t).ExpectEquals(parser.columnRenameMap["f"], "f")
@ -58,9 +83,11 @@ func TestParseAlterStatementNonTrivial(t *testing.T) {
}
for _, statement := range statements {
parser := NewParser()
parser := NewAlterTableParser()
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectFalse(parser.IsAutoIncrementDefined())
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
renames := parser.GetNonTrivialRenames()
test.S(t).ExpectEquals(len(renames), 2)
test.S(t).ExpectEquals(renames["i"], "count")
@ -69,46 +96,46 @@ func TestParseAlterStatementNonTrivial(t *testing.T) {
}
func TestTokenizeAlterStatement(t *testing.T) {
parser := NewParser()
parser := NewAlterTableParser()
{
alterStatement := "add column t int"
tokens, _ := parser.tokenizeAlterStatement(alterStatement)
tokens := parser.tokenizeAlterStatement(alterStatement)
test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int"}))
}
{
alterStatement := "add column t int, change column i int"
tokens, _ := parser.tokenizeAlterStatement(alterStatement)
tokens := parser.tokenizeAlterStatement(alterStatement)
test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "change column i int"}))
}
{
alterStatement := "add column t int, change column i int 'some comment'"
tokens, _ := parser.tokenizeAlterStatement(alterStatement)
tokens := parser.tokenizeAlterStatement(alterStatement)
test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "change column i int 'some comment'"}))
}
{
alterStatement := "add column t int, change column i int 'some comment, with comma'"
tokens, _ := parser.tokenizeAlterStatement(alterStatement)
tokens := parser.tokenizeAlterStatement(alterStatement)
test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "change column i int 'some comment, with comma'"}))
}
{
alterStatement := "add column t int, add column d decimal(10,2)"
tokens, _ := parser.tokenizeAlterStatement(alterStatement)
tokens := parser.tokenizeAlterStatement(alterStatement)
test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "add column d decimal(10,2)"}))
}
{
alterStatement := "add column t int, add column e enum('a','b','c')"
tokens, _ := parser.tokenizeAlterStatement(alterStatement)
tokens := parser.tokenizeAlterStatement(alterStatement)
test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int", "add column e enum('a','b','c')"}))
}
{
alterStatement := "add column t int(11), add column e enum('a','b','c')"
tokens, _ := parser.tokenizeAlterStatement(alterStatement)
tokens := parser.tokenizeAlterStatement(alterStatement)
test.S(t).ExpectTrue(reflect.DeepEqual(tokens, []string{"add column t int(11)", "add column e enum('a','b','c')"}))
}
}
func TestSanitizeQuotesFromAlterStatement(t *testing.T) {
parser := NewParser()
parser := NewAlterTableParser()
{
alterStatement := "add column e enum('a','b','c')"
strippedStatement := parser.sanitizeQuotesFromAlterStatement(alterStatement)
@ -122,9 +149,8 @@ func TestSanitizeQuotesFromAlterStatement(t *testing.T) {
}
func TestParseAlterStatementDroppedColumns(t *testing.T) {
{
parser := NewParser()
parser := NewAlterTableParser()
statement := "drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
@ -132,16 +158,17 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) {
test.S(t).ExpectTrue(parser.droppedColumns["b"])
}
{
parser := NewParser()
parser := NewAlterTableParser()
statement := "drop column b, drop key c_idx, drop column `d`"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectEquals(len(parser.droppedColumns), 2)
test.S(t).ExpectTrue(parser.droppedColumns["b"])
test.S(t).ExpectTrue(parser.droppedColumns["d"])
}
{
parser := NewParser()
parser := NewAlterTableParser()
statement := "drop column b, drop key c_idx, drop column `d`, drop `e`, drop primary key, drop foreign key fk_1"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
@ -151,7 +178,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) {
test.S(t).ExpectTrue(parser.droppedColumns["e"])
}
{
parser := NewParser()
parser := NewAlterTableParser()
statement := "drop column b, drop bad statement, add column i int"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
@ -161,40 +188,151 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) {
}
func TestParseAlterStatementRenameTable(t *testing.T) {
{
parser := NewParser()
parser := NewAlterTableParser()
statement := "drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectFalse(parser.isRenameTable)
}
{
parser := NewParser()
parser := NewAlterTableParser()
statement := "rename as something_else"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectTrue(parser.isRenameTable)
}
{
parser := NewParser()
parser := NewAlterTableParser()
statement := "drop column b, rename as something_else"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.alterStatementOptions, statement)
test.S(t).ExpectTrue(parser.isRenameTable)
}
{
parser := NewParser()
parser := NewAlterTableParser()
statement := "engine=innodb rename as something_else"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectTrue(parser.isRenameTable)
}
{
parser := NewParser()
parser := NewAlterTableParser()
statement := "rename as something_else, engine=innodb"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectTrue(parser.isRenameTable)
}
}
func TestParseAlterStatementExplicitTable(t *testing.T) {
{
parser := NewAlterTableParser()
statement := "drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "")
test.S(t).ExpectEquals(parser.explicitTable, "")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table tbl drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "")
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table `tbl` drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "")
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table `scm with spaces`.`tbl` drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "scm with spaces")
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table `scm`.`tbl with spaces` drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
test.S(t).ExpectEquals(parser.explicitTable, "tbl with spaces")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table `scm`.tbl drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table scm.`tbl` drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table scm.tbl drop column b"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b"}))
}
{
parser := NewAlterTableParser()
statement := "alter table scm.tbl drop column b, add index idx(i)"
err := parser.ParseAlterStatement(statement)
test.S(t).ExpectNil(err)
test.S(t).ExpectEquals(parser.explicitSchema, "scm")
test.S(t).ExpectEquals(parser.explicitTable, "tbl")
test.S(t).ExpectEquals(parser.alterStatementOptions, "drop column b, add index idx(i)")
test.S(t).ExpectTrue(reflect.DeepEqual(parser.alterTokens, []string{"drop column b", "add index idx(i)"}))
}
}
func TestParseEnumValues(t *testing.T) {
{
s := "enum('red','green','blue','orange')"
values := ParseEnumValues(s)
test.S(t).ExpectEquals(values, "'red','green','blue','orange'")
}
{
s := "('red','green','blue','orange')"
values := ParseEnumValues(s)
test.S(t).ExpectEquals(values, "('red','green','blue','orange')")
}
{
s := "zzz"
values := ParseEnumValues(s)
test.S(t).ExpectEquals(values, "zzz")
}
}


@ -6,6 +6,7 @@
package sql
import (
"bytes"
"fmt"
"reflect"
"strconv"
@ -22,6 +23,7 @@ const (
MediumIntColumnType
JSONColumnType
FloatColumnType
BinaryColumnType
)
const maxMediumintUnsigned int32 = 16777215
@ -30,20 +32,48 @@ type TimezoneConversion struct {
ToTimezone string
}
type Column struct {
Name string
IsUnsigned bool
Charset string
Type ColumnType
timezoneConversion *TimezoneConversion
type CharacterSetConversion struct {
ToCharset string
FromCharset string
}
func (this *Column) convertArg(arg interface{}) interface{} {
type Column struct {
Name string
IsUnsigned bool
Charset string
Type ColumnType
EnumValues string
timezoneConversion *TimezoneConversion
enumToTextConversion bool
// BinaryOctetLength records the declared octet length of binary-type columns, to fix bytes with a trailing "00" suffix getting clipped in the mysql binlog.
// https://github.com/github/gh-ost/issues/909
BinaryOctetLength uint
charsetConversion *CharacterSetConversion
}
func (this *Column) convertArg(arg interface{}, isUniqueKeyColumn bool) interface{} {
if s, ok := arg.(string); ok {
// string, charset conversion
if encoding, ok := charsetEncodingMap[this.Charset]; ok {
arg, _ = encoding.NewDecoder().String(s)
arg2Bytes := []byte(s)
// convert to bytes if this is a character string without a charset conversion.
if this.Charset != "" && this.charsetConversion == nil {
arg = arg2Bytes
} else {
if encoding, ok := charsetEncodingMap[this.Charset]; ok {
arg, _ = encoding.NewDecoder().String(s)
}
}
if this.Type == BinaryColumnType && isUniqueKeyColumn {
size := len(arg2Bytes)
if uint(size) < this.BinaryOctetLength {
buf := bytes.NewBuffer(arg2Bytes)
for i := uint(0); i < (this.BinaryOctetLength - uint(size)); i++ {
buf.Write([]byte{0})
}
arg = buf.String()
}
}
return arg
}
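
The BinaryColumnType branch above right-pads unique-key values with 0x00 bytes to the column's declared octet length, because the binlog drops trailing zero bytes from fixed-width BINARY values (issue #909, linked above). The padding step in isolation:

package main

import (
	"bytes"
	"fmt"
)

// padBinary mirrors the convertArg padding: a BINARY(4) value arriving from
// the binlog as "a\x00" must compare equal to the stored "a\x00\x00\x00".
func padBinary(value string, octetLength uint) string {
	raw := []byte(value)
	if uint(len(raw)) >= octetLength {
		return value
	}
	buf := bytes.NewBuffer(raw)
	for i := uint(len(raw)); i < octetLength; i++ {
		buf.Write([]byte{0})
	}
	return buf.String()
}

func main() {
	fmt.Printf("%q\n", padBinary("a\x00", 4)) // "a\x00\x00\x00"
}
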
@ -179,6 +209,18 @@ func (this *ColumnList) HasTimezoneConversion(columnName string) bool {
return this.GetColumn(columnName).timezoneConversion != nil
}
func (this *ColumnList) SetEnumToTextConversion(columnName string) {
this.GetColumn(columnName).enumToTextConversion = true
}
func (this *ColumnList) IsEnumToTextConversion(columnName string) bool {
return this.GetColumn(columnName).enumToTextConversion
}
func (this *ColumnList) SetEnumValues(columnName string, enumValues string) {
this.GetColumn(columnName).EnumValues = enumValues
}
func (this *ColumnList) String() string {
return strings.Join(this.Names(), ",")
}
@ -206,6 +248,10 @@ func (this *ColumnList) Len() int {
return len(this.columns)
}
func (this *ColumnList) SetCharsetConversion(columnName string, fromCharset string, toCharset string) {
this.GetColumn(columnName).charsetConversion = &CharacterSetConversion{FromCharset: fromCharset, ToCharset: toCharset}
}
// UniqueKey is the combination of a key's name and columns
type UniqueKey struct {
Name string


@ -10,8 +10,8 @@ import (
"reflect"
"github.com/outbrain/golib/log"
test "github.com/outbrain/golib/tests"
"github.com/openark/golib/log"
test "github.com/openark/golib/tests"
)
func init() {


@ -0,0 +1,13 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  i int not null,
  color varchar(32),
  primary key(id)
) auto_increment=1;

drop event if exists gh_ost_test;

insert into gh_ost_test values (null, 11, 'red');
insert into gh_ost_test values (null, 13, 'green');
insert into gh_ost_test values (null, 17, 'blue');

View File

@ -0,0 +1 @@
--attempt-instant-ddl

View File

@ -0,0 +1,17 @@
drop event if exists gh_ost_test;

drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  i int not null,
  primary key(id)
) auto_increment=1;

insert into gh_ost_test values (NULL, 11);
insert into gh_ost_test values (NULL, 13);
insert into gh_ost_test values (NULL, 17);
insert into gh_ost_test values (NULL, 23);
insert into gh_ost_test values (NULL, 29);
insert into gh_ost_test values (NULL, 31);
insert into gh_ost_test values (NULL, 37);
delete from gh_ost_test where id>=5;

View File

@ -0,0 +1 @@
AUTO_INCREMENT=7

View File

@ -0,0 +1 @@
--alter='AUTO_INCREMENT=7'

View File

@ -0,0 +1,17 @@
drop event if exists gh_ost_test;

drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  i int not null,
  primary key(id)
) auto_increment=1;

insert into gh_ost_test values (NULL, 11);
insert into gh_ost_test values (NULL, 13);
insert into gh_ost_test values (NULL, 17);
insert into gh_ost_test values (NULL, 23);
insert into gh_ost_test values (NULL, 29);
insert into gh_ost_test values (NULL, 31);
insert into gh_ost_test values (NULL, 37);
delete from gh_ost_test where id>=5;

View File

@ -0,0 +1 @@
AUTO_INCREMENT=8

View File

@ -0,0 +1,13 @@
drop event if exists gh_ost_test;

drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  i int not null,
  primary key(id)
) auto_increment=1;

insert into gh_ost_test values (NULL, 11);
insert into gh_ost_test values (NULL, 13);
insert into gh_ost_test values (NULL, 17);
insert into gh_ost_test values (NULL, 23);

View File

@ -0,0 +1 @@
AUTO_INCREMENT=5

View File

@ -0,0 +1,21 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
  id bigint auto_increment,
  val bigint not null,
  primary key(id)
) auto_increment=1;

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
  on schedule every 1 second
  starts current_timestamp
  ends current_timestamp + interval 60 second
  on completion not preserve
  enable
  do
begin
  insert into gh_ost_test values (null, 18446744073709551615);
  insert into gh_ost_test values (null, 18446744073709551614);
  insert into gh_ost_test values (null, 18446744073709551613);
end ;;

View File

@ -0,0 +1 @@
--alter="change val val bigint"

View File

@ -0,0 +1,40 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  i int not null,
  ts0 timestamp(6) default current_timestamp(6),
  updated tinyint unsigned default 0,
  primary key(id, ts0)
) auto_increment=1;

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
  on schedule every 1 second
  starts current_timestamp
  ends current_timestamp + interval 60 second
  on completion not preserve
  enable
  do
begin
  insert into gh_ost_test values (null, 11, sysdate(6), 0);
  update gh_ost_test set updated = 1 where i = 11 order by id desc limit 1;
  insert into gh_ost_test values (null, 13, sysdate(6), 0);
  update gh_ost_test set updated = 1 where i = 13 order by id desc limit 1;
  insert into gh_ost_test values (null, 17, sysdate(6), 0);
  update gh_ost_test set updated = 1 where i = 17 order by id desc limit 1;
  insert into gh_ost_test values (null, 19, sysdate(6), 0);
  update gh_ost_test set updated = 1 where i = 19 order by id desc limit 1;
  insert into gh_ost_test values (null, 23, sysdate(6), 0);
  update gh_ost_test set updated = 1 where i = 23 order by id desc limit 1;
  insert into gh_ost_test values (null, 29, sysdate(6), 0);
  insert into gh_ost_test values (null, 31, sysdate(6), 0);
  insert into gh_ost_test values (null, 37, sysdate(6), 0);
  insert into gh_ost_test values (null, 41, sysdate(6), 0);
  delete from gh_ost_test where i = 31 order by id desc limit 1;
end ;;

View File

@ -0,0 +1,40 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  i int not null,
  v varchar(128),
  updated tinyint unsigned default 0,
  primary key(id, v)
) auto_increment=1;

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
  on schedule every 1 second
  starts current_timestamp
  ends current_timestamp + interval 60 second
  on completion not preserve
  enable
  do
begin
  insert into gh_ost_test values (null, 11, 'eleven', 0);
  update gh_ost_test set updated = 1 where i = 11 order by id desc limit 1;
  insert into gh_ost_test values (null, 13, 'thirteen', 0);
  update gh_ost_test set updated = 1 where i = 13 order by id desc limit 1;
  insert into gh_ost_test values (null, 17, 'seventeen', 0);
  update gh_ost_test set updated = 1 where i = 17 order by id desc limit 1;
  insert into gh_ost_test values (null, 19, 'nineteen', 0);
  update gh_ost_test set updated = 1 where i = 19 order by id desc limit 1;
  insert into gh_ost_test values (null, 23, 'twenty three', 0);
  update gh_ost_test set updated = 1 where i = 23 order by id desc limit 1;
  insert into gh_ost_test values (null, 29, 'twenty nine', 0);
  insert into gh_ost_test values (null, 31, 'thirty one', 0);
  insert into gh_ost_test values (null, 37, 'thirty seven', 0);
  insert into gh_ost_test values (null, 41, 'forty one', 0);
  delete from gh_ost_test where i = 31 order by id desc limit 1;
end ;;

View File

@ -7,9 +7,6 @@ create table gh_ost_test (
  primary key(id)
) auto_increment=1;
insert into gh_ost_test values (null, 'átesting');
insert into gh_ost_test values (null, 'Hello world, Καλημέρα κόσμε, コンニチハ', 'átesting0', 'initial');
drop event if exists gh_ost_test;

View File

@ -1 +0,0 @@
(5.5)

View File

@ -1,13 +1,11 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  id int unsigned auto_increment,
  i int not null,
  color varchar(32),
  dt datetime,
  primary key(id)
) auto_increment=1;
insert into gh_ost_test values (null, 1, 'blue');
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
@ -18,5 +16,5 @@ create event gh_ost_test
  enable
  do
begin
  drop table if exists _gh_ost_test_gho;
  insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
end ;;

View File

@ -0,0 +1 @@
--allow-zero-in-date --alter="change column dt dt datetime not null default '1970-00-00 00:00:00'"

View File

@ -1 +0,0 @@
(5.5)

View File

@ -0,0 +1 @@
Percona

View File

@ -0,0 +1,26 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  i int not null,
  e enum('red', 'green', 'blue', 'orange') null default null collate 'utf8_bin',
  primary key(id)
) auto_increment=1;
insert into gh_ost_test values (null, 7, 'red');

drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
  on schedule every 1 second
  starts current_timestamp
  ends current_timestamp + interval 60 second
  on completion not preserve
  enable
  do
begin
  insert into gh_ost_test values (null, 11, 'red');
  insert into gh_ost_test values (null, 13, 'green');
  insert into gh_ost_test values (null, 17, 'blue');
  set @last_insert_id := last_insert_id();
  update gh_ost_test set e='orange' where id = @last_insert_id;
end ;;

View File

@ -0,0 +1 @@
--alter="change e e varchar(32) not null default ''"

View File

@ -1,23 +1,21 @@
set session sql_mode='';
drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  id int unsigned auto_increment,
  i int not null,
  color varchar(32),
  dt datetime not null default '1970-00-00 00:00:00',
  primary key(id)
) auto_increment=1;
insert into gh_ost_test values (null, 1, 'red');
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
  on schedule every 1 second
  starts current_timestamp + interval 3 second
  starts current_timestamp
  ends current_timestamp + interval 60 second
  on completion not preserve
  enable
  do
begin
  insert into gh_ost_test values (null, 1, 'blue');
  drop table if exists _gh_ost_test_ghc;
  insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
end ;;

View File

@ -0,0 +1 @@
--allow-zero-in-date --alter="engine=innodb"

View File

@ -1,12 +1,11 @@
drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  id int unsigned auto_increment,
  i int not null,
  color varchar(32),
  dt datetime,
  primary key(id)
) auto_increment=1;
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
@ -17,6 +16,5 @@ create event gh_ost_test
  enable
  do
begin
  insert into gh_ost_test values (null, 1, 'blue');
  drop table if exists _gh_ost_test_gho;
  insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
end ;;

View File

@ -0,0 +1 @@
Invalid default value for 'dt'

View File

@ -0,0 +1 @@
--alter="change column dt dt datetime not null default '1970-00-00 00:00:00'"

View File

@ -1 +0,0 @@
Error 1146: Table 'test._gh_ost_test_ghc' doesn't exist

View File

@ -1 +0,0 @@
--throttle-query='select sleep(1)'

View File

@ -1 +0,0 @@
Error 1146: Table 'test._gh_ost_test_ghc' doesn't exist

View File

@ -1 +0,0 @@
Error 1146: Table 'test._gh_ost_test_gho' doesn't exist

View File

@ -1 +0,0 @@
--throttle-query='select timestampdiff(second, min(last_update), now()) < 5 from _gh_ost_test_ghc'

View File

@ -1 +0,0 @@
Error 1146: Table 'test._gh_ost_test_gho' doesn't exist

View File

@ -1,23 +1,21 @@
set session sql_mode='';
drop table if exists gh_ost_test;
create table gh_ost_test (
  id int auto_increment,
  id int unsigned auto_increment,
  i int not null,
  color varchar(32),
  dt datetime not null default '1970-00-00 00:00:00',
  primary key(id)
) auto_increment=1;
insert into gh_ost_test values (null, 1, 'red');
drop event if exists gh_ost_test;
delimiter ;;
create event gh_ost_test
  on schedule every 1 second
  starts current_timestamp + interval 3 second
  starts current_timestamp
  ends current_timestamp + interval 60 second
  on completion not preserve
  enable
  do
begin
  insert into gh_ost_test values (null, 1, 'blue');
  drop table if exists _gh_ost_test_ghc;
  insert into gh_ost_test values (null, 7, '2010-10-20 10:20:30');
end ;;

View File

@ -0,0 +1 @@
Invalid default value for 'dt'

View File

@ -0,0 +1 @@
--alter="engine=innodb"

View File

@ -0,0 +1 @@
Percona

View File

@ -0,0 +1 @@
Percona

Some files were not shown because too many files have changed in this diff Show More