From d8ccc1152ac0fa82cf9e56217de8452e6adce0b5 Mon Sep 17 00:00:00 2001 From: Ed Toro Date: Tue, 27 Jul 2021 11:38:50 -0400 Subject: [PATCH 01/24] v1.1.3 --- RELEASE_VERSION | 2 +- build.sh | 15 +++++++++++---- doc/command-line-flags.md | 16 +++++++++++----- doc/hooks.md | 1 + doc/triggerless-design.md | 2 +- go/cmd/gh-ost/main.go | 8 +++++++- script/bootstrap | 1 - script/ensure-go-installed | 4 ++-- .../siddontang/go-mysql/client/resp.go | 1 - .../siddontang/go-mysql/packet/conn.go | 7 +++---- 10 files changed, 37 insertions(+), 20 deletions(-) diff --git a/RELEASE_VERSION b/RELEASE_VERSION index 45a1b3f44..781dcb07c 100644 --- a/RELEASE_VERSION +++ b/RELEASE_VERSION @@ -1 +1 @@ -1.1.2 +1.1.3 diff --git a/build.sh b/build.sh index ef24e81f4..653aeee99 100755 --- a/build.sh +++ b/build.sh @@ -18,7 +18,7 @@ function build { GOOS=$3 GOARCH=$4 - if ! go version | egrep -q 'go(1\.1[56])' ; then + if ! go version | egrep -q 'go1\.(1[5-9]|[2-9][0-9]{1})' ; then echo "go version must be 1.15 or above" exit 1 fi @@ -41,8 +41,9 @@ function build { builddir=$(setuptree) cp $buildpath/$target $builddir/gh-ost/usr/bin cd $buildpath - fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach ' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm --rpm-rpmbuild-define "_build_id_links none" . - fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'shlomi-noach ' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files . 
+ fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm --rpm-rpmbuild-define "_build_id_links none" . + fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files . + cd - fi } @@ -63,10 +64,16 @@ main() { mkdir -p ${buildpath} rm -rf ${buildpath:?}/* build GNU/Linux linux linux amd64 - # build macOS osx darwin amd64 + build macOS osx darwin amd64 echo "Binaries found in:" find $buildpath/gh-ost* -type f -maxdepth 1 + + echo "Checksums:" + (cd $buildpath && shasum -a256 gh-ost* 2>/dev/null) } +. script/bootstrap +cd .gopath/src/github.com/github/gh-ost + main "$@" diff --git a/doc/command-line-flags.md b/doc/command-line-flags.md index 62d3d111f..2818d1091 100644 --- a/doc/command-line-flags.md +++ b/doc/command-line-flags.md @@ -22,7 +22,7 @@ If, for some reason, you do not wish `gh-ost` to connect to a replica, you may c ### approve-renamed-columns -When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise the new structure may also look like some column was dropped and another was added. +When your migration issues a column rename (`change column old_name new_name ...`) `gh-ost` analyzes the statement to try and associate the old column name with new column name. Otherwise, the new structure may also look like some column was dropped and another was added. `gh-ost` will print out what it thinks the _rename_ implied, but will not issue the migration unless you provide with `--approve-renamed-columns`. 
@@ -32,7 +32,7 @@ If you think `gh-ost` is mistaken and that there's actually no _rename_ involved `gh-ost` infers the identity of the master server by crawling up the replication topology. You may explicitly tell `gh-ost` the identity of the master host via `--assume-master-host=the.master.com`. This is useful in: -- _master-master_ topologies (together with [`--allow-master-master`](#allow-master-master)), where `gh-ost` can arbitrarily pick one of the co-masters and you prefer that it picks a specific one +- _master-master_ topologies (together with [`--allow-master-master`](#allow-master-master)), where `gh-ost` can arbitrarily pick one of the co-masters, and you prefer that it picks a specific one - _tungsten replicator_ topologies (together with [`--tungsten`](#tungsten)), where `gh-ost` is unable to crawl and detect the master ### assume-rbr @@ -61,7 +61,13 @@ Comma delimited status-name=threshold, same format as [`--max-load`](#max-load). `--critical-load` defines a threshold that, when met, `gh-ost` panics and bails out. The default behavior is to bail out immediately when meeting this threshold. -This may sometimes lead to migrations bailing out on a very short spike, that, while in itself is impacting production and is worth investigating, isn't reason enough to kill a 10 hour migration. +This may sometimes lead to migrations bailing out on a very short spike, that, while in itself is impacting production and is worth investigating, isn't reason enough to kill a 10-hour migration. + +### critical-load-hibernate-seconds + +When `--critical-load-hibernate-seconds` is non-zero (e.g. `--critical-load-hibernate-seconds=300`), `critical-load` does not panic and bail out; instead, `gh-ost` goes into hibernation for the specified duration. It will not read/write anything from/to any server during this time. Execution then continues upon waking from hibernation. + +If `critical-load` is met again, `gh-ost` will repeat this cycle, and never panic and bail out. 
### critical-load-interval-millis @@ -98,7 +104,7 @@ Noteworthy is that setting `--dml-batch-size` to higher value _does not_ mean `g ### exact-rowcount -A `gh-ost` execution need to copy whatever rows you have in your existing table onto the ghost table. This can, and often be, a large number. Exactly what that number is? +A `gh-ost` execution need to copy whatever rows you have in your existing table onto the ghost table. This can and often will be, a large number. Exactly what that number is? `gh-ost` initially estimates the number of rows in your table by issuing an `explain select * from your_table`. This will use statistics on your table and return with a rough estimate. How rough? It might go as low as half or as high as double the actual number of rows in your table. This is the same method as used in [`pt-online-schema-change`](https://www.percona.com/doc/percona-toolkit/2.2/pt-online-schema-change.html). `gh-ost` also supports the `--exact-rowcount` flag. When this flag is given, two things happen: @@ -230,7 +236,7 @@ Provide a command delimited list of replicas; `gh-ost` will throttle when any of ### throttle-http -Provide a HTTP endpoint; `gh-ost` will issue `HEAD` requests on given URL and throttle whenever response status code is not `200`. The URL can be queried and updated dynamically via [interactive commands](interactive-commands.md). Empty URL disables the HTTP check. +Provide an HTTP endpoint; `gh-ost` will issue `HEAD` requests on given URL and throttle whenever response status code is not `200`. The URL can be queried and updated dynamically via [interactive commands](interactive-commands.md). Empty URL disables the HTTP check. 
### timestamp-old-table diff --git a/doc/hooks.md b/doc/hooks.md index 91e131110..c1fe59453 100644 --- a/doc/hooks.md +++ b/doc/hooks.md @@ -68,6 +68,7 @@ The following variables are available on all hooks: - `GH_OST_INSPECTED_LAG` - lag in seconds (floating point) of inspected server - `GH_OST_HEARTBEAT_LAG` - lag in seconds (floating point) of heartbeat - `GH_OST_PROGRESS` - progress pct ([0..100], floating point) of migration +- `GH_OST_ETA_SECONDS` - estimated duration until migration finishes in seconds - `GH_OST_MIGRATED_HOST` - `GH_OST_INSPECTED_HOST` - `GH_OST_EXECUTING_HOST` diff --git a/doc/triggerless-design.md b/doc/triggerless-design.md index 510a301fc..10a4203fe 100644 --- a/doc/triggerless-design.md +++ b/doc/triggerless-design.md @@ -112,7 +112,7 @@ It is also interesting to observe that `gh-ost` is the only application writing When `gh-ost` pauses (throttles), it issues no writes on the ghost table. Because there are no triggers, write workload is decoupled from the `gh-ost` write workload. And because we're using an asynchronous approach, the algorithm already handles a time difference between a master write time and the ghost apply time. A difference of a few microseconds is no different from a difference of minutes or hours. -When `gh-ost` [throttles](throttle.md), either by replication lag, `max-load` setting or and explicit [interactive user command](interactive-commands.md), the master is back to normal. It sees no more writes on the ghost table. +When `gh-ost` [throttles](throttle.md), either by replication lag, `max-load` setting or an explicit [interactive user command](interactive-commands.md), the master is back to normal. It sees no more writes on the ghost table. An exception is the ongoing heartbeat writes onto the changelog table, which we consider to be negligible. 
#### Testability diff --git a/go/cmd/gh-ost/main.go b/go/cmd/gh-ost/main.go index b8557f9ce..04d4eebf0 100644 --- a/go/cmd/gh-ost/main.go +++ b/go/cmd/gh-ost/main.go @@ -8,6 +8,7 @@ package main import ( "flag" "fmt" + "net/url" "os" "os/signal" "syscall" @@ -130,7 +131,7 @@ func main() { maxLoad := flag.String("max-load", "", "Comma delimited status-name=threshold. e.g: 'Threads_running=100,Threads_connected=500'. When status exceeds threshold, app throttles writes") criticalLoad := flag.String("critical-load", "", "Comma delimited status-name=threshold, same format as --max-load. When status exceeds threshold, app panics and quits") flag.Int64Var(&migrationContext.CriticalLoadIntervalMilliseconds, "critical-load-interval-millis", 0, "When 0, migration immediately bails out upon meeting critical-load. When non-zero, a second check is done after given interval, and migration only bails out if 2nd check still meets critical load") - flag.Int64Var(&migrationContext.CriticalLoadHibernateSeconds, "critical-load-hibernate-seconds", 0, "When nonzero, critical-load does not panic and bail out; instead, gh-ost goes into hibernate for the specified duration. It will not read/write anything to from/to any server") + flag.Int64Var(&migrationContext.CriticalLoadHibernateSeconds, "critical-load-hibernate-seconds", 0, "When non-zero, critical-load does not panic and bail out; instead, gh-ost goes into hibernation for the specified duration. 
It will not read/write anything from/to any server") quiet := flag.Bool("quiet", false, "quiet") verbose := flag.Bool("verbose", false, "verbose") debug := flag.Bool("debug", false, "debug mode (very verbose)") @@ -188,6 +189,11 @@ func main() { log.Fatalf("--database must be provided and database name must not be empty, or --alter must specify database name") } } + + if err := flag.Set("database", url.QueryEscape(migrationContext.DatabaseName)); err != nil { + migrationContext.Log.Fatale(err) + } + if migrationContext.OriginalTableName == "" { if parser.HasExplicitTable() { migrationContext.OriginalTableName = parser.GetExplicitTable() diff --git a/script/bootstrap b/script/bootstrap index 573313a75..2f5cd08eb 100755 --- a/script/bootstrap +++ b/script/bootstrap @@ -10,7 +10,6 @@ set -e # Since we want to be able to build this outside of GOPATH, we set it # up so it points back to us and go is none the wiser -set -x rm -rf .gopath mkdir -p .gopath/src/github.com/github ln -s "$PWD" .gopath/src/github.com/github/gh-ost diff --git a/script/ensure-go-installed b/script/ensure-go-installed index 473ba3828..14c33ff01 100755 --- a/script/ensure-go-installed +++ b/script/ensure-go-installed @@ -1,7 +1,7 @@ #!/bin/bash PREFERRED_GO_VERSION=go1.16.4 -SUPPORTED_GO_VERSIONS='go1.1[56]' +SUPPORTED_GO_VERSIONS='go1.1[567]' GO_PKG_DARWIN=${PREFERRED_GO_VERSION}.darwin-amd64.pkg GO_PKG_DARWIN_SHA=0f215de06019a054a3da46a0722989986c956d719c7a0a8fc38a5f3c216d6f6b @@ -35,7 +35,7 @@ if [ -z "$(which go)" ] || [ -z "$(go version | grep "$SUPPORTED_GO_VERSIONS")" curl -L -O https://dl.google.com/go/$GO_PKG_DARWIN shasum -a256 $GO_PKG_DARWIN | grep $GO_PKG_DARWIN_SHA xar -xf $GO_PKG_DARWIN - cpio -i < com.googlecode.go.pkg/Payload + cpio -i < org.golang.go.pkg/Payload else curl -L -O https://dl.google.com/go/$GO_PKG_LINUX shasum -a256 $GO_PKG_LINUX | grep $GO_PKG_LINUX_SHA diff --git a/vendor/github.com/siddontang/go-mysql/client/resp.go 
b/vendor/github.com/siddontang/go-mysql/client/resp.go index 71aa1bcd4..0cdff1d32 100644 --- a/vendor/github.com/siddontang/go-mysql/client/resp.go +++ b/vendor/github.com/siddontang/go-mysql/client/resp.go @@ -1,6 +1,5 @@ package client -import "C" import ( "encoding/binary" diff --git a/vendor/github.com/siddontang/go-mysql/packet/conn.go b/vendor/github.com/siddontang/go-mysql/packet/conn.go index 41b1bf1c7..3785261ae 100644 --- a/vendor/github.com/siddontang/go-mysql/packet/conn.go +++ b/vendor/github.com/siddontang/go-mysql/packet/conn.go @@ -1,6 +1,5 @@ package packet -import "C" import ( "bytes" "io" @@ -127,7 +126,7 @@ func (c *Conn) WritePacket(data []byte) error { func (c *Conn) WriteClearAuthPacket(password string) error { // Calculate the packet length and add a tailing 0 pktLen := len(password) + 1 - data := make([]byte, 4 + pktLen) + data := make([]byte, 4+pktLen) // Add the clear password [null terminated string] copy(data[4:], password) @@ -140,7 +139,7 @@ func (c *Conn) WriteClearAuthPacket(password string) error { // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse func (c *Conn) WritePublicKeyAuthPacket(password string, cipher []byte) error { // request public key - data := make([]byte, 4 + 1) + data := make([]byte, 4+1) data[4] = 2 // cachingSha2PasswordRequestPublicKey c.WritePacket(data) @@ -163,7 +162,7 @@ func (c *Conn) WritePublicKeyAuthPacket(password string, cipher []byte) error { } sha1v := sha1.New() enc, _ := rsa.EncryptOAEP(sha1v, rand.Reader, pub.(*rsa.PublicKey), plain, nil) - data = make([]byte, 4 + len(enc)) + data = make([]byte, 4+len(enc)) copy(data[4:], enc) return c.WritePacket(data) } From 0a87c7af58974a9103dc3bd7e5b4a1a37665b106 Mon Sep 17 00:00:00 2001 From: Tyler Knodell Date: Tue, 1 Feb 2022 08:54:55 -0700 Subject: [PATCH 02/24] Add flag to customize the interval which the onStatus hook is called --- go/base/context.go | 1 + go/cmd/gh-ost/main.go | 1 + 
go/logic/migrator.go | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go/base/context.go b/go/base/context.go index b9badc43b..fd42e1da4 100644 --- a/go/base/context.go +++ b/go/base/context.go @@ -141,6 +141,7 @@ type MigrationContext struct { HooksHintMessage string HooksHintOwner string HooksHintToken string + HooksStatusIntervalSec int64 DropServeSocket bool ServeSocketFile string diff --git a/go/cmd/gh-ost/main.go b/go/cmd/gh-ost/main.go index 04d4eebf0..491a0bb1e 100644 --- a/go/cmd/gh-ost/main.go +++ b/go/cmd/gh-ost/main.go @@ -125,6 +125,7 @@ func main() { flag.StringVar(&migrationContext.HooksHintMessage, "hooks-hint", "", "arbitrary message to be injected to hooks via GH_OST_HOOKS_HINT, for your convenience") flag.StringVar(&migrationContext.HooksHintOwner, "hooks-hint-owner", "", "arbitrary name of owner to be injected to hooks via GH_OST_HOOKS_HINT_OWNER, for your convenience") flag.StringVar(&migrationContext.HooksHintToken, "hooks-hint-token", "", "arbitrary token to be injected to hooks via GH_OST_HOOKS_HINT_TOKEN, for your convenience") + flag.Int64Var(&migrationContext.HooksStatusIntervalSec, "hooks-status-interval", 60, "how many seconds to wait between calling onStatus hook") flag.UintVar(&migrationContext.ReplicaServerId, "replica-server-id", 99999, "server id used by gh-ost process. Default: 99999") diff --git a/go/logic/migrator.go b/go/logic/migrator.go index c12c21fc3..ceb6e6704 100644 --- a/go/logic/migrator.go +++ b/go/logic/migrator.go @@ -1016,7 +1016,7 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) { w := io.MultiWriter(writers...) 
fmt.Fprintln(w, status) - if elapsedSeconds%60 == 0 { + if elapsedSeconds%this.migrationContext.HooksStatusIntervalSec == 0 { this.hooksExecutor.onStatus(status) } } From dd919de4e3d8c46a1fa23fb5c6b8f3e848aa8003 Mon Sep 17 00:00:00 2001 From: dm-2 <45519614+dm-2@users.noreply.github.com> Date: Mon, 7 Feb 2022 15:35:14 +0000 Subject: [PATCH 03/24] Add docs for `hooks-status-interval` --- doc/command-line-flags.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/command-line-flags.md b/doc/command-line-flags.md index 2818d1091..417255a41 100644 --- a/doc/command-line-flags.md +++ b/doc/command-line-flags.md @@ -141,6 +141,10 @@ Add this flag when executing on a 1st generation Google Cloud Platform (GCP). Default 100. See [`subsecond-lag`](subsecond-lag.md) for details. +### hooks-status-interval + +Defaults to 60 seconds. Configures how often the `gh-ost-on-status` hook is called, see [`hooks`](hooks.md) for full details on how to use hooks. + ### initially-drop-ghost-table `gh-ost` maintains two tables while migrating: the _ghost_ table (which is synced from your original table and finally replaces it) and a changelog table, which is used internally for bookkeeping. By default, it panics and aborts if it sees those tables upon startup. Provide `--initially-drop-ghost-table` and `--initially-drop-old-table` to let `gh-ost` know it's OK to drop them beforehand. From 62ac8974f5c5dd9363898701e0db828c40e10d6a Mon Sep 17 00:00:00 2001 From: Arthur Schreiber Date: Mon, 7 Feb 2022 11:23:39 +0000 Subject: [PATCH 04/24] Reduce the minimal chunk size from `100` to `10`. 
--- go/base/context.go | 4 ++-- go/cmd/gh-ost/main.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go/base/context.go b/go/base/context.go index fd42e1da4..7b01cd908 100644 --- a/go/base/context.go +++ b/go/base/context.go @@ -553,8 +553,8 @@ func (this *MigrationContext) SetMaxLagMillisecondsThrottleThreshold(maxLagMilli } func (this *MigrationContext) SetChunkSize(chunkSize int64) { - if chunkSize < 100 { - chunkSize = 100 + if chunkSize < 10 { + chunkSize = 10 } if chunkSize > 100000 { chunkSize = 100000 diff --git a/go/cmd/gh-ost/main.go b/go/cmd/gh-ost/main.go index 491a0bb1e..2afe9abc4 100644 --- a/go/cmd/gh-ost/main.go +++ b/go/cmd/gh-ost/main.go @@ -99,7 +99,7 @@ func main() { flag.BoolVar(&migrationContext.AssumeRBR, "assume-rbr", false, "set to 'true' when you know for certain your server uses 'ROW' binlog_format. gh-ost is unable to tell, event after reading binlog_format, whether the replication process does indeed use 'ROW', and restarts replication to be certain RBR setting is applied. Such operation requires SUPER privileges which you might not have. Setting this flag avoids restarting replication and you can proceed to use gh-ost without SUPER privileges") flag.BoolVar(&migrationContext.CutOverExponentialBackoff, "cut-over-exponential-backoff", false, "Wait exponentially longer intervals between failed cut-over attempts. 
Wait intervals obey a maximum configurable with 'exponential-backoff-max-interval').") exponentialBackoffMaxInterval := flag.Int64("exponential-backoff-max-interval", 64, "Maximum number of seconds to wait between attempts when performing various operations with exponential backoff.") - chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 100-100,000)") + chunkSize := flag.Int64("chunk-size", 1000, "amount of rows to handle in each iteration (allowed range: 10-100,000)") dmlBatchSize := flag.Int64("dml-batch-size", 10, "batch size for DML events to apply in a single transaction (range 1-100)") defaultRetries := flag.Int64("default-retries", 60, "Default number of retries for various operations before panicking") cutOverLockTimeoutSeconds := flag.Int64("cut-over-lock-timeout-seconds", 3, "Max number of seconds to hold locks on tables while attempting to cut-over (retry attempted when lock exceeds timeout)") From 7c9c1f04ef1d44f18b4f276ae5ab569c4e172b42 Mon Sep 17 00:00:00 2001 From: dm-2 <45519614+dm-2@users.noreply.github.com> Date: Mon, 7 Feb 2022 13:22:30 +0000 Subject: [PATCH 05/24] v1.1.4 --- RELEASE_VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASE_VERSION b/RELEASE_VERSION index 781dcb07c..65087b4f5 100644 --- a/RELEASE_VERSION +++ b/RELEASE_VERSION @@ -1 +1 @@ -1.1.3 +1.1.4 From 0ba4762e2d565c023844042636ae7f131663f963 Mon Sep 17 00:00:00 2001 From: dm-2 <45519614+dm-2@users.noreply.github.com> Date: Fri, 25 Feb 2022 14:10:42 +0000 Subject: [PATCH 06/24] fix: update build script to explicitly build RPMs for linux --- build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.sh b/build.sh index 653aeee99..6b4afd75f 100755 --- a/build.sh +++ b/build.sh @@ -41,7 +41,7 @@ function build { builddir=$(setuptree) cp $buildpath/$target $builddir/gh-ost/usr/bin cd $buildpath - fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description 
"GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm --rpm-rpmbuild-define "_build_id_links none" . + fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t rpm --rpm-rpmbuild-define "_build_id_links none" --rpm-os linux . fpm -v "${RELEASE_VERSION}" --epoch 1 -f -s dir -n gh-ost -m 'GitHub' --description "GitHub's Online Schema Migrations for MySQL " --url "https://github.com/github/gh-ost" --vendor "GitHub" --license "Apache 2.0" -C $builddir/gh-ost --prefix=/ -t deb --deb-no-default-config-files . cd - fi From 9b27d9110e28395e071c44cbafb1b6d128fdb3c9 Mon Sep 17 00:00:00 2001 From: Tim Vaillancourt Date: Wed, 23 Feb 2022 23:28:03 +0100 Subject: [PATCH 07/24] Use `.String()` for logging connection-config `InstanceKey` --- go/logic/inspect.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/go/logic/inspect.go b/go/logic/inspect.go index fb473b830..3264a6c86 100644 --- a/go/logic/inspect.go +++ b/go/logic/inspect.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. 
See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -283,7 +283,7 @@ func (this *Inspector) validateGrants() error { // It is entirely possible, for example, that the replication is using 'STATEMENT' // binlog format even as the variable says 'ROW' func (this *Inspector) restartReplication() error { - this.migrationContext.Log.Infof("Restarting replication on %s:%d to make sure binlog settings apply to replication thread", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + this.migrationContext.Log.Infof("Restarting replication on %s to make sure binlog settings apply to replication thread", this.connectionConfig.Key.String()) masterKey, _ := mysql.GetMasterKeyFromSlaveStatus(this.connectionConfig) if masterKey == nil { @@ -342,11 +342,11 @@ func (this *Inspector) validateBinlogs() error { return err } if !hasBinaryLogs { - return fmt.Errorf("%s:%d must have binary logs enabled", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + return fmt.Errorf("%s must have binary logs enabled", this.connectionConfig.Key.String()) } if this.migrationContext.RequiresBinlogFormatChange() { if !this.migrationContext.SwitchToRowBinlogFormat { - return fmt.Errorf("You must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s:%d doesn't have replicas", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + return fmt.Errorf("You must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s doesn't have replicas", this.connectionConfig.Key.String()) } query := fmt.Sprintf(`show /* gh-ost */ slave hosts`) countReplicas := 0 @@ -358,9 +358,9 @@ func (this *Inspector) validateBinlogs() error { return err } if countReplicas > 0 { - return fmt.Errorf("%s:%d has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. 
Bailing out", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat) + return fmt.Errorf("%s has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat) } - this.migrationContext.Log.Infof("%s:%d has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogFormat) + this.migrationContext.Log.Infof("%s has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat) } query = `select @@global.binlog_row_image` if err := this.db.QueryRow(query).Scan(&this.migrationContext.OriginalBinlogRowImage); err != nil { @@ -369,10 +369,10 @@ func (this *Inspector) validateBinlogs() error { } this.migrationContext.OriginalBinlogRowImage = strings.ToUpper(this.migrationContext.OriginalBinlogRowImage) if this.migrationContext.OriginalBinlogRowImage != "FULL" { - return fmt.Errorf("%s:%d has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port, this.migrationContext.OriginalBinlogRowImage) + return fmt.Errorf("%s has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. 
You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogRowImage) } - this.migrationContext.Log.Infof("binary logs validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + this.migrationContext.Log.Infof("binary logs validated on %s", this.connectionConfig.Key.String()) return nil } @@ -385,25 +385,25 @@ func (this *Inspector) validateLogSlaveUpdates() error { } if logSlaveUpdates { - this.migrationContext.Log.Infof("log_slave_updates validated on %s:%d", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + this.migrationContext.Log.Infof("log_slave_updates validated on %s", this.connectionConfig.Key.String()) return nil } if this.migrationContext.IsTungsten { - this.migrationContext.Log.Warningf("log_slave_updates not found on %s:%d, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.String()) return nil } if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica { - return fmt.Errorf("%s:%d must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + return fmt.Errorf("%s must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.String()) } if this.migrationContext.InspectorIsAlsoApplier() { - this.migrationContext.Log.Warningf("log_slave_updates not found on %s:%d, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.String()) return nil } - return fmt.Errorf("%s:%d must have 
log_slave_updates enabled for executing migration", this.connectionConfig.Key.Hostname, this.connectionConfig.Key.Port) + return fmt.Errorf("%s must have log_slave_updates enabled for executing migration", this.connectionConfig.Key.String()) } // validateTable makes sure the table we need to operate on actually exists From 869ed92400182d934db6918f0967d6718292612f Mon Sep 17 00:00:00 2001 From: Tim Vaillancourt Date: Thu, 24 Feb 2022 01:30:29 +0100 Subject: [PATCH 08/24] Fix needless `fmt.Sprintf` call in `go/logic/inspector.go` --- go/logic/inspect.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/logic/inspect.go b/go/logic/inspect.go index 3264a6c86..db2e9c294 100644 --- a/go/logic/inspect.go +++ b/go/logic/inspect.go @@ -348,7 +348,7 @@ func (this *Inspector) validateBinlogs() error { if !this.migrationContext.SwitchToRowBinlogFormat { return fmt.Errorf("You must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s doesn't have replicas", this.connectionConfig.Key.String()) } - query := fmt.Sprintf(`show /* gh-ost */ slave hosts`) + query := `show /* gh-ost */ slave hosts` countReplicas := 0 err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error { countReplicas++ From 63c171dc7a9ecb9795d003494fd70c9f18cd1203 Mon Sep 17 00:00:00 2001 From: jecepeda Date: Fri, 11 Mar 2022 14:10:58 +0100 Subject: [PATCH 09/24] Add binaries for arm64 architectures --- build.sh | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/build.sh b/build.sh index 6b4afd75f..3a29360e3 100755 --- a/build.sh +++ b/build.sh @@ -23,18 +23,17 @@ function build { exit 1 fi - # TODO: remove GO111MODULE once gh-ost uses Go modules - echo "Building ${osname} binary" + echo "Building ${osname}-${GOARCH} binary" export GOOS export GOARCH GO111MODULE=off go build -ldflags "$ldflags" -o $buildpath/$target go/cmd/gh-ost/main.go if [ $? 
-ne 0 ]; then - echo "Build failed for ${osname}" + echo "Build failed for ${osname} ${GOARCH}." exit 1 fi - (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${timestamp}.tar.gz $target) + (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${GOARCH}-${timestamp}.tar.gz $target) if [ "$GOOS" == "linux" ] ; then echo "Creating Distro full packages" @@ -64,7 +63,9 @@ main() { mkdir -p ${buildpath} rm -rf ${buildpath:?}/* build GNU/Linux linux linux amd64 + build GNU/Linux linux linux arm64 build macOS osx darwin amd64 + build macOS osx darwin arm64 echo "Binaries found in:" find $buildpath/gh-ost* -type f -maxdepth 1 From 8dd1571f02b9f05377d86577be4792e5f258675c Mon Sep 17 00:00:00 2001 From: Tim Vaillancourt Date: Tue, 31 May 2022 21:23:39 +0200 Subject: [PATCH 10/24] Add `golangci-lint` CI action, fix `gosimple`, `govet` + `unused` lint errors (#1127) * Add `golangci-lint`, fix `gosimple`, `govet` and `unused` linter complaints * Go 1.16 * Update copyright dates --- .github/workflows/golangci-lint.yml | 21 +++++++++ .golangci.yml | 12 ++++++ go/base/context.go | 4 +- go/base/context_test.go | 66 ++++++++++++++++++++++++++++- go/base/default_logger.go | 11 +++-- go/base/utils.go | 6 +-- go/binlog/gomysql_reader.go | 7 ++- go/logic/hooks.go | 7 +-- go/logic/inspect.go | 1 - go/logic/migrator.go | 60 +++++++++++--------------- go/logic/streamer.go | 3 +- go/logic/throttler.go | 2 +- go/mysql/instance_key.go | 12 +++--- go/sql/builder.go | 18 ++++---- 14 files changed, 156 insertions(+), 74 deletions(-) create mode 100644 .github/workflows/golangci-lint.yml create mode 100644 .golangci.yml diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 000000000..685afbeda --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,21 @@ +name: golangci-lint +on: + push: + branches: + - master + pull_request: +permissions: + contents: read + # Optional: allow read access to pull request. 
Use with `only-new-issues` option. + # pull-requests: read +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v3 + with: + go-version: 1.16 + - uses: actions/checkout@v3 + - name: golangci-lint + uses: golangci/golangci-lint-action@v3 diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 000000000..4a487bd97 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,12 @@ +run: + timeout: 5m + modules-download-mode: readonly + +linters: + disable: + - errcheck + - staticcheck + enable: + - gosimple + - govet + - unused diff --git a/go/base/context.go b/go/base/context.go index 7b01cd908..9e254bb72 100644 --- a/go/base/context.go +++ b/go/base/context.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -15,7 +15,7 @@ import ( "sync/atomic" "time" - "github.com/satori/go.uuid" + uuid "github.com/satori/go.uuid" "github.com/github/gh-ost/go/mysql" "github.com/github/gh-ost/go/sql" diff --git a/go/base/context_test.go b/go/base/context_test.go index 8a9c6a5b1..ac45c260d 100644 --- a/go/base/context_test.go +++ b/go/base/context_test.go @@ -1,11 +1,13 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. 
See https://github.com/github/gh-ost/blob/master/LICENSE */ package base import ( + "io/ioutil" + "os" "testing" "time" @@ -56,3 +58,65 @@ func TestGetTableNames(t *testing.T) { test.S(t).ExpectEquals(context.GetChangelogTableName(), "_tmp_ghc") } } + +func TestReadConfigFile(t *testing.T) { + { + context := NewMigrationContext() + context.ConfigFile = "/does/not/exist" + if err := context.ReadConfigFile(); err == nil { + t.Fatal("Expected .ReadConfigFile() to return an error, got nil") + } + } + { + f, err := ioutil.TempFile("", t.Name()) + if err != nil { + t.Fatalf("Failed to create tmp file: %v", err) + } + defer os.Remove(f.Name()) + + f.Write([]byte("[client]")) + context := NewMigrationContext() + context.ConfigFile = f.Name() + if err := context.ReadConfigFile(); err != nil { + t.Fatalf(".ReadConfigFile() failed: %v", err) + } + } + { + f, err := ioutil.TempFile("", t.Name()) + if err != nil { + t.Fatalf("Failed to create tmp file: %v", err) + } + defer os.Remove(f.Name()) + + f.Write([]byte("[client]\nuser=test\npassword=123456")) + context := NewMigrationContext() + context.ConfigFile = f.Name() + if err := context.ReadConfigFile(); err != nil { + t.Fatalf(".ReadConfigFile() failed: %v", err) + } + + if context.config.Client.User != "test" { + t.Fatalf("Expected client user %q, got %q", "test", context.config.Client.User) + } else if context.config.Client.Password != "123456" { + t.Fatalf("Expected client password %q, got %q", "123456", context.config.Client.Password) + } + } + { + f, err := ioutil.TempFile("", t.Name()) + if err != nil { + t.Fatalf("Failed to create tmp file: %v", err) + } + defer os.Remove(f.Name()) + + f.Write([]byte("[osc]\nmax_load=10")) + context := NewMigrationContext() + context.ConfigFile = f.Name() + if err := context.ReadConfigFile(); err != nil { + t.Fatalf(".ReadConfigFile() failed: %v", err) + } + + if context.config.Osc.Max_Load != "10" { + t.Fatalf("Expected osc 'max_load' %q, got %q", "10", context.config.Osc.Max_Load) + 
} + } +} diff --git a/go/base/default_logger.go b/go/base/default_logger.go index be6b1f221..435b5ed65 100644 --- a/go/base/default_logger.go +++ b/go/base/default_logger.go @@ -1,3 +1,8 @@ +/* + Copyright 2022 GitHub Inc. + See https://github.com/github/gh-ost/blob/master/LICENSE +*/ + package base import ( @@ -12,22 +17,18 @@ func NewDefaultLogger() *simpleLogger { func (*simpleLogger) Debug(args ...interface{}) { log.Debug(args[0].(string), args[1:]) - return } func (*simpleLogger) Debugf(format string, args ...interface{}) { log.Debugf(format, args...) - return } func (*simpleLogger) Info(args ...interface{}) { log.Info(args[0].(string), args[1:]) - return } func (*simpleLogger) Infof(format string, args ...interface{}) { log.Infof(format, args...) - return } func (*simpleLogger) Warning(args ...interface{}) error { @@ -64,10 +65,8 @@ func (*simpleLogger) Fatale(err error) error { func (*simpleLogger) SetLevel(level log.LogLevel) { log.SetLevel(level) - return } func (*simpleLogger) SetPrintStackTrace(printStackTraceFlag bool) { log.SetPrintStackTrace(printStackTraceFlag) - return } diff --git a/go/base/utils.go b/go/base/utils.go index ed1451402..c0e32933a 100644 --- a/go/base/utils.go +++ b/go/base/utils.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -25,9 +25,7 @@ func PrettifyDurationOutput(d time.Duration) string { if d < time.Second { return "0s" } - result := fmt.Sprintf("%s", d) - result = prettifyDurationRegexp.ReplaceAllString(result, "") - return result + return prettifyDurationRegexp.ReplaceAllString(d.String(), "") } func FileExists(fileName string) bool { diff --git a/go/binlog/gomysql_reader.go b/go/binlog/gomysql_reader.go index bc80cb5b0..142c641ef 100644 --- a/go/binlog/gomysql_reader.go +++ b/go/binlog/gomysql_reader.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. 
See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -64,7 +64,10 @@ func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordin this.currentCoordinates = coordinates this.migrationContext.Log.Infof("Connecting binlog streamer at %+v", this.currentCoordinates) // Start sync with specified binlog file and position - this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{this.currentCoordinates.LogFile, uint32(this.currentCoordinates.LogPos)}) + this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{ + Name: this.currentCoordinates.LogFile, + Pos: uint32(this.currentCoordinates.LogPos), + }) return err } diff --git a/go/logic/hooks.go b/go/logic/hooks.go index 2275ede51..a984c67dc 100644 --- a/go/logic/hooks.go +++ b/go/logic/hooks.go @@ -1,6 +1,5 @@ /* -/* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -72,9 +71,7 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [ env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_TOKEN=%s", this.migrationContext.HooksHintToken)) env = append(env, fmt.Sprintf("GH_OST_DRY_RUN=%t", this.migrationContext.Noop)) - for _, variable := range extraVariables { - env = append(env, variable) - } + env = append(env, extraVariables...) return env } diff --git a/go/logic/inspect.go b/go/logic/inspect.go index db2e9c294..4a4ce37e2 100644 --- a/go/logic/inspect.go +++ b/go/logic/inspect.go @@ -805,5 +805,4 @@ func (this *Inspector) getReplicationLag() (replicationLag time.Duration, err er func (this *Inspector) Teardown() { this.db.Close() this.informationSchemaDb.Close() - return } diff --git a/go/logic/migrator.go b/go/logic/migrator.go index ceb6e6704..c5cb24432 100644 --- a/go/logic/migrator.go +++ b/go/logic/migrator.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. 
See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -176,16 +176,6 @@ func (this *Migrator) retryOperationWithExponentialBackoff(operation func() erro return err } -// executeAndThrottleOnError executes a given function. If it errors, it -// throttles. -func (this *Migrator) executeAndThrottleOnError(operation func() error) (err error) { - if err := operation(); err != nil { - this.throttler.throttle(nil) - return err - } - return nil -} - // consumeRowCopyComplete blocks on the rowCopyComplete channel once, and then // consumes and drops any further incoming events that may be left hanging. func (this *Migrator) consumeRowCopyComplete() { @@ -823,57 +813,57 @@ func (this *Migrator) initiateStatus() error { // migration, and as response to the "status" interactive command. func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) { w := io.MultiWriter(writers...) - fmt.Fprintln(w, fmt.Sprintf("# Migrating %s.%s; Ghost table is %s.%s", + fmt.Fprintf(w, "# Migrating %s.%s; Ghost table is %s.%s\n", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName), sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.GetGhostTableName()), - )) - fmt.Fprintln(w, fmt.Sprintf("# Migrating %+v; inspecting %+v; executing on %+v", + ) + fmt.Fprintf(w, "# Migrating %+v; inspecting %+v; executing on %+v\n", *this.applier.connectionConfig.ImpliedKey, *this.inspector.connectionConfig.ImpliedKey, this.migrationContext.Hostname, - )) - fmt.Fprintln(w, fmt.Sprintf("# Migration started at %+v", + ) + fmt.Fprintf(w, "# Migration started at %+v\n", this.migrationContext.StartTime.Format(time.RubyDate), - )) + ) maxLoad := this.migrationContext.GetMaxLoad() criticalLoad := this.migrationContext.GetCriticalLoad() - fmt.Fprintln(w, fmt.Sprintf("# chunk-size: %+v; max-lag-millis: %+vms; dml-batch-size: %+v; max-load: %s; critical-load: %s; nice-ratio: %f", + fmt.Fprintf(w, "# chunk-size: 
%+v; max-lag-millis: %+vms; dml-batch-size: %+v; max-load: %s; critical-load: %s; nice-ratio: %f\n", atomic.LoadInt64(&this.migrationContext.ChunkSize), atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold), atomic.LoadInt64(&this.migrationContext.DMLBatchSize), maxLoad.String(), criticalLoad.String(), this.migrationContext.GetNiceRatio(), - )) + ) if this.migrationContext.ThrottleFlagFile != "" { setIndicator := "" if base.FileExists(this.migrationContext.ThrottleFlagFile) { setIndicator = "[set]" } - fmt.Fprintln(w, fmt.Sprintf("# throttle-flag-file: %+v %+v", + fmt.Fprintf(w, "# throttle-flag-file: %+v %+v\n", this.migrationContext.ThrottleFlagFile, setIndicator, - )) + ) } if this.migrationContext.ThrottleAdditionalFlagFile != "" { setIndicator := "" if base.FileExists(this.migrationContext.ThrottleAdditionalFlagFile) { setIndicator = "[set]" } - fmt.Fprintln(w, fmt.Sprintf("# throttle-additional-flag-file: %+v %+v", + fmt.Fprintf(w, "# throttle-additional-flag-file: %+v %+v\n", this.migrationContext.ThrottleAdditionalFlagFile, setIndicator, - )) + ) } if throttleQuery := this.migrationContext.GetThrottleQuery(); throttleQuery != "" { - fmt.Fprintln(w, fmt.Sprintf("# throttle-query: %+v", + fmt.Fprintf(w, "# throttle-query: %+v\n", throttleQuery, - )) + ) } if throttleControlReplicaKeys := this.migrationContext.GetThrottleControlReplicaKeys(); throttleControlReplicaKeys.Len() > 0 { - fmt.Fprintln(w, fmt.Sprintf("# throttle-control-replicas count: %+v", + fmt.Fprintf(w, "# throttle-control-replicas count: %+v\n", throttleControlReplicaKeys.Len(), - )) + ) } if this.migrationContext.PostponeCutOverFlagFile != "" { @@ -881,20 +871,20 @@ func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) { if base.FileExists(this.migrationContext.PostponeCutOverFlagFile) { setIndicator = "[set]" } - fmt.Fprintln(w, fmt.Sprintf("# postpone-cut-over-flag-file: %+v %+v", + fmt.Fprintf(w, "# postpone-cut-over-flag-file: %+v %+v\n", 
this.migrationContext.PostponeCutOverFlagFile, setIndicator, - )) + ) } if this.migrationContext.PanicFlagFile != "" { - fmt.Fprintln(w, fmt.Sprintf("# panic-flag-file: %+v", + fmt.Fprintf(w, "# panic-flag-file: %+v\n", this.migrationContext.PanicFlagFile, - )) + ) } - fmt.Fprintln(w, fmt.Sprintf("# Serving on unix socket: %+v", + fmt.Fprintf(w, "# Serving on unix socket: %+v\n", this.migrationContext.ServeSocketFile, - )) + ) if this.migrationContext.ServeTCPPort != 0 { - fmt.Fprintln(w, fmt.Sprintf("# Serving on TCP port: %+v", this.migrationContext.ServeTCPPort)) + fmt.Fprintf(w, "# Serving on TCP port: %+v\n", this.migrationContext.ServeTCPPort) } } @@ -1195,7 +1185,6 @@ func (this *Migrator) iterateChunks() error { // Enqueue copy operation; to be executed by executeWriteFuncs() this.copyRowsQueue <- copyRowsFunc } - return nil } func (this *Migrator) onApplyEventStruct(eventStruct *applyEventStruct) error { @@ -1301,7 +1290,6 @@ func (this *Migrator) executeWriteFuncs() error { } } } - return nil } // finalCleanup takes actions at very end of migration, dropping tables etc. diff --git a/go/logic/streamer.go b/go/logic/streamer.go index a07240caf..3fd4e8a8e 100644 --- a/go/logic/streamer.go +++ b/go/logic/streamer.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -220,5 +220,4 @@ func (this *EventsStreamer) Close() (err error) { func (this *EventsStreamer) Teardown() { this.db.Close() - return } diff --git a/go/logic/throttler.go b/go/logic/throttler.go index abe8669e1..9c120b3e5 100644 --- a/go/logic/throttler.go +++ b/go/logic/throttler.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. 
See https://github.com/github/gh-ost/blob/master/LICENSE */ diff --git a/go/mysql/instance_key.go b/go/mysql/instance_key.go index eb108d84a..679bdc9f0 100644 --- a/go/mysql/instance_key.go +++ b/go/mysql/instance_key.go @@ -1,5 +1,6 @@ /* Copyright 2015 Shlomi Noach, courtesy Booking.com + Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -12,15 +13,16 @@ import ( "strings" ) -const ( - DefaultInstancePort = 3306 -) +const DefaultInstancePort = 3306 var ( ipv4HostPortRegexp = regexp.MustCompile("^([^:]+):([0-9]+)$") ipv4HostRegexp = regexp.MustCompile("^([^:]+)$") - ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") // e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308 - ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$") // e.g. 2001:db8:1f70::999:de8:7648:6e8 + + // e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308 + ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") //nolint:gosimple + // e.g. 2001:db8:1f70::999:de8:7648:6e8 + ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$") ) // InstanceKey is an instance indicator, identified by hostname and port diff --git a/go/sql/builder.go b/go/sql/builder.go index 7fe366c6f..15199ffca 100644 --- a/go/sql/builder.go +++ b/go/sql/builder.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. 
See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -33,7 +33,7 @@ func EscapeName(name string) string { } func buildColumnsPreparedValues(columns *ColumnList) []string { - values := make([]string, columns.Len(), columns.Len()) + values := make([]string, columns.Len()) for i, column := range columns.Columns() { var token string if column.timezoneConversion != nil { @@ -51,7 +51,7 @@ func buildColumnsPreparedValues(columns *ColumnList) []string { } func buildPreparedValues(length int) []string { - values := make([]string, length, length) + values := make([]string, length) for i := 0; i < length; i++ { values[i] = "?" } @@ -59,7 +59,7 @@ func buildPreparedValues(length int) []string { } func duplicateNames(names []string) []string { - duplicate := make([]string, len(names), len(names)) + duplicate := make([]string, len(names)) copy(duplicate, names) return duplicate } @@ -261,8 +261,8 @@ func BuildUniqueKeyRangeEndPreparedQueryViaOffset(databaseName, tableName string explodedArgs = append(explodedArgs, rangeExplodedArgs...) uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names()) - uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames)) - uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames)) + uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames)) + uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames)) for i, column := range uniqueKeyColumns.Columns() { uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i]) if column.Type == EnumColumnType { @@ -316,8 +316,8 @@ func BuildUniqueKeyRangeEndPreparedQueryViaTemptable(databaseName, tableName str explodedArgs = append(explodedArgs, rangeExplodedArgs...) 
uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names()) - uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames)) - uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames)) + uniqueKeyColumnAscending := make([]string, len(uniqueKeyColumnNames)) + uniqueKeyColumnDescending := make([]string, len(uniqueKeyColumnNames)) for i, column := range uniqueKeyColumns.Columns() { uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i]) if column.Type == EnumColumnType { @@ -368,7 +368,7 @@ func buildUniqueKeyMinMaxValuesPreparedQuery(databaseName, tableName string, uni tableName = EscapeName(tableName) uniqueKeyColumnNames := duplicateNames(uniqueKeyColumns.Names()) - uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames), len(uniqueKeyColumnNames)) + uniqueKeyColumnOrder := make([]string, len(uniqueKeyColumnNames)) for i, column := range uniqueKeyColumns.Columns() { uniqueKeyColumnNames[i] = EscapeName(uniqueKeyColumnNames[i]) if column.Type == EnumColumnType { From 6598b345ca7ce7ca57b80306f8cb38ff85dc2a5f Mon Sep 17 00:00:00 2001 From: Tim Vaillancourt Date: Thu, 23 Jun 2022 03:37:18 +0200 Subject: [PATCH 11/24] Fix `integer divide by zero` panic in migrator --- go/logic/migrator.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/go/logic/migrator.go b/go/logic/migrator.go index c5cb24432..b5c405a98 100644 --- a/go/logic/migrator.go +++ b/go/logic/migrator.go @@ -1006,7 +1006,8 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) { w := io.MultiWriter(writers...) 
fmt.Fprintln(w, status) - if elapsedSeconds%this.migrationContext.HooksStatusIntervalSec == 0 { + hooksStatusIntervalSec := this.migrationContext.HooksStatusIntervalSec + if hooksStatusIntervalSec > 0 && elapsedSeconds%hooksStatusIntervalSec == 0 { this.hooksExecutor.onStatus(status) } } From e0d31bad49e751543d0276445eda1a7b710db357 Mon Sep 17 00:00:00 2001 From: shaohk Date: Fri, 24 Jun 2022 11:38:05 +0800 Subject: [PATCH 12/24] fix(lost data in mysql two-phase commit): lost data in mysql two-phase commit --- go/logic/applier.go | 22 +++++++++++++++++++++- go/logic/migrator.go | 3 +++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/go/logic/applier.go b/go/logic/applier.go index 67b519eb7..3ef09125b 100644 --- a/go/logic/applier.go +++ b/go/logic/applier.go @@ -420,8 +420,28 @@ func (this *Applier) ReadMigrationMaxValues(uniqueKey *sql.UniqueKey) error { return err } -// ReadMigrationRangeValues reads min/max values that will be used for rowcopy +// ReadMigrationRangeValues reads min/max values that will be used for rowcopy. +// Before read min/max, write a changelog state into the ghc table to avoid lost data in mysql two-phase commit. +/* +Detail description of the lost data in mysql two-phase commit issue by @Fanduzi: + When using semi-sync and setting rpl_semi_sync_master_wait_point=AFTER_SYNC, + if an INSERT statement is being committed but blocks due to an unmet ack count, + the data inserted by the transaction is not visible to ReadMigrationRangeValues, + so the copy of the existing data in the table does not include the new row inserted by the transaction. + However, the binlog event for the transaction is already written to the binlog, + so the addDMLEventsListener only captures the binlog event after the transaction, + and thus the transaction's binlog event is not captured, resulting in data loss. 
+ + If write a changelog into ghc table before ReadMigrationRangeValues, and the transaction commit blocks + because the ack is not met, then the changelog will not be able to write, so the ReadMigrationRangeValues + will not be run. When the changelog writes successfully, the ReadMigrationRangeValues will read the + newly inserted data, thus Avoiding data loss due to the above problem. +*/ func (this *Applier) ReadMigrationRangeValues() error { + if _, err := this.WriteChangelogState(string(ReadMigrationRangeValues)); err != nil { + return err + } + if err := this.ReadMigrationMinValues(this.migrationContext.UniqueKey); err != nil { return err } diff --git a/go/logic/migrator.go b/go/logic/migrator.go index b5c405a98..6956149a6 100644 --- a/go/logic/migrator.go +++ b/go/logic/migrator.go @@ -26,6 +26,7 @@ type ChangelogState string const ( GhostTableMigrated ChangelogState = "GhostTableMigrated" AllEventsUpToLockProcessed = "AllEventsUpToLockProcessed" + ReadMigrationRangeValues = "ReadMigrationRangeValues" ) func ReadChangelogState(s string) ChangelogState { @@ -234,6 +235,8 @@ func (this *Migrator) onChangelogStateEvent(dmlEvent *binlog.BinlogDMLEvent) (er this.applyEventsQueue <- newApplyEventStructByFunc(&applyEventFunc) }() } + case ReadMigrationRangeValues: + // no-op event default: { return fmt.Errorf("Unknown changelog state: %+v", changelogState) From fcda553ccba74b018a6fcc31374c3fbf4a13cd33 Mon Sep 17 00:00:00 2001 From: dm-2 <45519614+dm-2@users.noreply.github.com> Date: Wed, 6 Jul 2022 16:23:15 +0100 Subject: [PATCH 13/24] Run CodeQL analysis on PRs --- .github/workflows/codeql.yml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 .github/workflows/codeql.yml diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 000000000..abce58a3b --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,25 @@ +name: "CodeQL analysis" + +on: + push: + pull_request: + schedule: + - cron: 
'0 0 * * 0' + +jobs: + codeql: + + strategy: + fail-fast: false + + runs-on: ubuntu-latest # windows-latest and ubuntu-latest are supported. macos-latest is not supported at this time. + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 From 9e0808afbed2c169eab000d590ba35ea95065443 Mon Sep 17 00:00:00 2001 From: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> Date: Thu, 7 Jul 2022 00:22:21 +0300 Subject: [PATCH 14/24] compound pk tests (#387) * compound pk tests * more details in failure diff * more elaborate test; the pk-ts one consistently fails * Fix merge conflict Co-authored-by: Shlomi Noach Co-authored-by: Tim Vaillancourt Co-authored-by: Tim Vaillancourt --- localtests/compound-pk-ts/create.sql | 40 ++++++++++++++++++++++++++++ localtests/compound-pk/create.sql | 40 ++++++++++++++++++++++++++++ localtests/test.sh | 2 ++ 3 files changed, 82 insertions(+) create mode 100644 localtests/compound-pk-ts/create.sql create mode 100644 localtests/compound-pk/create.sql diff --git a/localtests/compound-pk-ts/create.sql b/localtests/compound-pk-ts/create.sql new file mode 100644 index 000000000..1bab87a42 --- /dev/null +++ b/localtests/compound-pk-ts/create.sql @@ -0,0 +1,40 @@ +drop table if exists gh_ost_test; +create table gh_ost_test ( + id int auto_increment, + i int not null, + ts0 timestamp(6) default current_timestamp(6), + updated tinyint unsigned default 0, + primary key(id, ts0) +) auto_increment=1; + +drop event if exists gh_ost_test; +delimiter ;; +create event gh_ost_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into gh_ost_test values (null, 11, sysdate(6), 0); + update gh_ost_test set updated = 1 where i = 11 order by id desc limit 1; + + insert into gh_ost_test 
values (null, 13, sysdate(6), 0); + update gh_ost_test set updated = 1 where i = 13 order by id desc limit 1; + + insert into gh_ost_test values (null, 17, sysdate(6), 0); + update gh_ost_test set updated = 1 where i = 17 order by id desc limit 1; + + insert into gh_ost_test values (null, 19, sysdate(6), 0); + update gh_ost_test set updated = 1 where i = 19 order by id desc limit 1; + + insert into gh_ost_test values (null, 23, sysdate(6), 0); + update gh_ost_test set updated = 1 where i = 23 order by id desc limit 1; + + insert into gh_ost_test values (null, 29, sysdate(6), 0); + insert into gh_ost_test values (null, 31, sysdate(6), 0); + insert into gh_ost_test values (null, 37, sysdate(6), 0); + insert into gh_ost_test values (null, 41, sysdate(6), 0); + delete from gh_ost_test where i = 31 order by id desc limit 1; +end ;; diff --git a/localtests/compound-pk/create.sql b/localtests/compound-pk/create.sql new file mode 100644 index 000000000..cf838d0a5 --- /dev/null +++ b/localtests/compound-pk/create.sql @@ -0,0 +1,40 @@ +drop table if exists gh_ost_test; +create table gh_ost_test ( + id int auto_increment, + i int not null, + v varchar(128), + updated tinyint unsigned default 0, + primary key(id, v) +) auto_increment=1; + +drop event if exists gh_ost_test; +delimiter ;; +create event gh_ost_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into gh_ost_test values (null, 11, 'eleven', 0); + update gh_ost_test set updated = 1 where i = 11 order by id desc limit 1; + + insert into gh_ost_test values (null, 13, 'thirteen', 0); + update gh_ost_test set updated = 1 where i = 13 order by id desc limit 1; + + insert into gh_ost_test values (null, 17, 'seventeen', 0); + update gh_ost_test set updated = 1 where i = 17 order by id desc limit 1; + + insert into gh_ost_test values (null, 19, 'nineteen', 0); + update gh_ost_test set updated = 1 where i = 19 
order by id desc limit 1; + + insert into gh_ost_test values (null, 23, 'twenty three', 0); + update gh_ost_test set updated = 1 where i = 23 order by id desc limit 1; + + insert into gh_ost_test values (null, 29, 'twenty nine', 0); + insert into gh_ost_test values (null, 31, 'thirty one', 0); + insert into gh_ost_test values (null, 37, 'thirty seven', 0); + insert into gh_ost_test values (null, 41, 'forty one', 0); + delete from gh_ost_test where i = 31 order by id desc limit 1; +end ;; diff --git a/localtests/test.sh b/localtests/test.sh index 5e96e28d6..5fe004d00 100755 --- a/localtests/test.sh +++ b/localtests/test.sh @@ -224,6 +224,8 @@ test_single() { ghost_checksum=$(cat $ghost_content_output_file | md5sum) if [ "$orig_checksum" != "$ghost_checksum" ] ; then + gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${orig_columns} from gh_ost_test" -ss > $orig_content_output_file + gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${ghost_columns} from _gh_ost_test_gho" -ss > $ghost_content_output_file echo "ERROR $test_name: checksum mismatch" echo "---" diff $orig_content_output_file $ghost_content_output_file From 68b4085343d0a7edfe2f70a4a42aca3fb0905c37 Mon Sep 17 00:00:00 2001 From: Tim Vaillancourt Date: Wed, 6 Jul 2022 23:32:15 +0200 Subject: [PATCH 15/24] Ensure mysql rows responses are closed (#1132) Co-authored-by: dm-2 <45519614+dm-2@users.noreply.github.com> --- .golangci.yml | 2 ++ go/logic/applier.go | 15 +++++++++++---- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 4a487bd97..af04b55a1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -9,4 +9,6 @@ linters: enable: - gosimple - govet + - rowserrcheck + - sqlclosecheck - unused diff --git a/go/logic/applier.go b/go/logic/applier.go index 3ef09125b..21addf363 100644 --- a/go/logic/applier.go +++ b/go/logic/applier.go @@ -381,10 +381,13 @@ func (this *Applier) ReadMigrationMinValues(uniqueKey 
*sql.UniqueKey) error { if err != nil { return err } + rows, err := this.db.Query(query) if err != nil { return err } + defer rows.Close() + for rows.Next() { this.migrationContext.MigrationRangeMinValues = sql.NewColumnValues(uniqueKey.Len()) if err = rows.Scan(this.migrationContext.MigrationRangeMinValues.ValuesPointers...); err != nil { @@ -393,8 +396,7 @@ func (this *Applier) ReadMigrationMinValues(uniqueKey *sql.UniqueKey) error { } this.migrationContext.Log.Infof("Migration min values: [%s]", this.migrationContext.MigrationRangeMinValues) - err = rows.Err() - return err + return rows.Err() } // ReadMigrationMaxValues returns the maximum values to be iterated on rowcopy @@ -404,10 +406,13 @@ func (this *Applier) ReadMigrationMaxValues(uniqueKey *sql.UniqueKey) error { if err != nil { return err } + rows, err := this.db.Query(query) if err != nil { return err } + defer rows.Close() + for rows.Next() { this.migrationContext.MigrationRangeMaxValues = sql.NewColumnValues(uniqueKey.Len()) if err = rows.Scan(this.migrationContext.MigrationRangeMaxValues.ValuesPointers...); err != nil { @@ -416,8 +421,7 @@ func (this *Applier) ReadMigrationMaxValues(uniqueKey *sql.UniqueKey) error { } this.migrationContext.Log.Infof("Migration max values: [%s]", this.migrationContext.MigrationRangeMaxValues) - err = rows.Err() - return err + return rows.Err() } // ReadMigrationRangeValues reads min/max values that will be used for rowcopy. @@ -478,10 +482,13 @@ func (this *Applier) CalculateNextIterationRangeEndValues() (hasFurtherRange boo if err != nil { return hasFurtherRange, err } + rows, err := this.db.Query(query, explodedArgs...) 
if err != nil { return hasFurtherRange, err } + defer rows.Close() + iterationRangeMaxValues := sql.NewColumnValues(this.migrationContext.UniqueKey.Len()) for rows.Next() { if err = rows.Scan(iterationRangeMaxValues.ValuesPointers...); err != nil { From b4566dec4045f22a819d29c43c064cacb27d17f1 Mon Sep 17 00:00:00 2001 From: Tim Vaillancourt Date: Wed, 6 Jul 2022 23:45:26 +0200 Subject: [PATCH 16/24] Use `switch` statements for readability, simplify `.NewGoMySQLReader()` (#1135) * Use `switch` statements for readability * Simplify initBinlogReader() --- go/binlog/gomysql_reader.go | 47 +++++++++++++++++-------------------- go/logic/migrator.go | 20 ++++++++-------- go/logic/streamer.go | 5 +--- 3 files changed, 32 insertions(+), 40 deletions(-) diff --git a/go/binlog/gomysql_reader.go b/go/binlog/gomysql_reader.go index 142c641ef..90ad55a4c 100644 --- a/go/binlog/gomysql_reader.go +++ b/go/binlog/gomysql_reader.go @@ -28,31 +28,24 @@ type GoMySQLReader struct { LastAppliedRowsEventHint mysql.BinlogCoordinates } -func NewGoMySQLReader(migrationContext *base.MigrationContext) (binlogReader *GoMySQLReader, err error) { - binlogReader = &GoMySQLReader{ +func NewGoMySQLReader(migrationContext *base.MigrationContext) *GoMySQLReader { + connectionConfig := migrationContext.InspectorConnectionConfig + return &GoMySQLReader{ migrationContext: migrationContext, - connectionConfig: migrationContext.InspectorConnectionConfig, + connectionConfig: connectionConfig, currentCoordinates: mysql.BinlogCoordinates{}, currentCoordinatesMutex: &sync.Mutex{}, - binlogSyncer: nil, - binlogStreamer: nil, + binlogSyncer: replication.NewBinlogSyncer(replication.BinlogSyncerConfig{ + ServerID: uint32(migrationContext.ReplicaServerId), + Flavor: gomysql.MySQLFlavor, + Host: connectionConfig.Key.Hostname, + Port: uint16(connectionConfig.Key.Port), + User: connectionConfig.User, + Password: connectionConfig.Password, + TLSConfig: connectionConfig.TLSConfig(), + UseDecimal: true, + }), } - - 
serverId := uint32(migrationContext.ReplicaServerId) - - binlogSyncerConfig := replication.BinlogSyncerConfig{ - ServerID: serverId, - Flavor: "mysql", - Host: binlogReader.connectionConfig.Key.Hostname, - Port: uint16(binlogReader.connectionConfig.Key.Port), - User: binlogReader.connectionConfig.User, - Password: binlogReader.connectionConfig.Password, - TLSConfig: binlogReader.connectionConfig.TLSConfig(), - UseDecimal: true, - } - binlogReader.binlogSyncer = replication.NewBinlogSyncer(binlogSyncerConfig) - - return binlogReader, err } // ConnectBinlogStreamer @@ -145,15 +138,17 @@ func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesCha defer this.currentCoordinatesMutex.Unlock() this.currentCoordinates.LogPos = int64(ev.Header.LogPos) }() - if rotateEvent, ok := ev.Event.(*replication.RotateEvent); ok { + + switch binlogEvent := ev.Event.(type) { + case *replication.RotateEvent: func() { this.currentCoordinatesMutex.Lock() defer this.currentCoordinatesMutex.Unlock() - this.currentCoordinates.LogFile = string(rotateEvent.NextLogName) + this.currentCoordinates.LogFile = string(binlogEvent.NextLogName) }() - this.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), rotateEvent.NextLogName) - } else if rowsEvent, ok := ev.Event.(*replication.RowsEvent); ok { - if err := this.handleRowsEvent(ev, rowsEvent, entriesChannel); err != nil { + this.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", this.currentCoordinates.LogFile, int64(ev.Header.LogPos), binlogEvent.NextLogName) + case *replication.RowsEvent: + if err := this.handleRowsEvent(ev, binlogEvent, entriesChannel); err != nil { return err } } diff --git a/go/logic/migrator.go b/go/logic/migrator.go index 6956149a6..747750cea 100644 --- a/go/logic/migrator.go +++ b/go/logic/migrator.go @@ -537,19 +537,19 @@ func (this *Migrator) cutOver() (err error) { } } } - if this.migrationContext.CutOverType == 
base.CutOverAtomic { + + switch this.migrationContext.CutOverType { + case base.CutOverAtomic: // Atomic solution: we use low timeout and multiple attempts. But for // each failed attempt, we throttle until replication lag is back to normal - err := this.atomicCutOver() - this.handleCutOverResult(err) - return err - } - if this.migrationContext.CutOverType == base.CutOverTwoStep { - err := this.cutOverTwoStep() - this.handleCutOverResult(err) - return err + err = this.atomicCutOver() + case base.CutOverTwoStep: + err = this.cutOverTwoStep() + default: + return this.migrationContext.Log.Fatalf("Unknown cut-over type: %d; should never get here!", this.migrationContext.CutOverType) } - return this.migrationContext.Log.Fatalf("Unknown cut-over type: %d; should never get here!", this.migrationContext.CutOverType) + this.handleCutOverResult(err) + return err } // Inject the "AllEventsUpToLockProcessed" state hint, wait for it to appear in the binary logs, diff --git a/go/logic/streamer.go b/go/logic/streamer.go index 3fd4e8a8e..8c7e2bfdb 100644 --- a/go/logic/streamer.go +++ b/go/logic/streamer.go @@ -123,10 +123,7 @@ func (this *EventsStreamer) InitDBConnections() (err error) { // initBinlogReader creates and connects the reader: we hook up to a MySQL server as a replica func (this *EventsStreamer) initBinlogReader(binlogCoordinates *mysql.BinlogCoordinates) error { - goMySQLReader, err := binlog.NewGoMySQLReader(this.migrationContext) - if err != nil { - return err - } + goMySQLReader := binlog.NewGoMySQLReader(this.migrationContext) if err := goMySQLReader.ConnectBinlogStreamer(*binlogCoordinates); err != nil { return err } From 614b3795285fbe30ef2c55a0fca26e7375b280fe Mon Sep 17 00:00:00 2001 From: Tim Vaillancourt Date: Wed, 6 Jul 2022 23:56:07 +0200 Subject: [PATCH 17/24] Add context/timeout to HTTP throttle check (#1131) * Add context/timeout to HTTP throttle check * Dont run `.GetThrottleHTTPInterval()` on every loop * Update help message * Var rename * 2022 * 
Add timeout flag * Add unix/tcp server commands, use ParseInt() for string->int64 * Var rename * Re-check http timeout on every loop iteration * Remove stale comment * Make throttle interval idempotent * var rename * Usage grammar * Make http timeout idempotent too * Parse time.Duration once * Move timeout to NewThrottler * Help update * Set User-Agent header * Re-add newline Co-authored-by: dm-2 <45519614+dm-2@users.noreply.github.com> --- .golangci.yml | 2 ++ go/base/context.go | 2 ++ go/cmd/gh-ost/main.go | 6 ++++-- go/logic/migrator.go | 6 ++++-- go/logic/throttler.go | 24 +++++++++++++++++++++--- 5 files changed, 33 insertions(+), 7 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index af04b55a1..a71dcdd31 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -9,6 +9,8 @@ linters: enable: - gosimple - govet + - noctx - rowserrcheck - sqlclosecheck - unused + diff --git a/go/base/context.go b/go/base/context.go index 9e254bb72..5c2cd37d2 100644 --- a/go/base/context.go +++ b/go/base/context.go @@ -185,7 +185,9 @@ type MigrationContext struct { CurrentLag int64 currentProgress uint64 etaNanoseonds int64 + ThrottleHTTPIntervalMillis int64 ThrottleHTTPStatusCode int64 + ThrottleHTTPTimeoutMillis int64 controlReplicasLagResult mysql.ReplicationLagResult TotalRowsCopied int64 TotalDMLEventsApplied int64 diff --git a/go/cmd/gh-ost/main.go b/go/cmd/gh-ost/main.go index 2afe9abc4..c1279974c 100644 --- a/go/cmd/gh-ost/main.go +++ b/go/cmd/gh-ost/main.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2022 GitHub Inc. See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -110,6 +110,8 @@ func main() { throttleControlReplicas := flag.String("throttle-control-replicas", "", "List of replicas on which to check for lag; comma delimited. Example: myhost1.com:3306,myhost2.com,myhost3.com:3307") throttleQuery := flag.String("throttle-query", "", "when given, issued (every second) to check if operation should throttle. 
Expecting to return zero for no-throttle, >0 for throttle. Query is issued on the migrated server. Make sure this query is lightweight") throttleHTTP := flag.String("throttle-http", "", "when given, gh-ost checks given URL via HEAD request; any response code other than 200 (OK) causes throttling; make sure it has low latency response") + flag.Int64Var(&migrationContext.ThrottleHTTPIntervalMillis, "throttle-http-interval-millis", 100, "Number of milliseconds to wait before triggering another HTTP throttle check") + flag.Int64Var(&migrationContext.ThrottleHTTPTimeoutMillis, "throttle-http-timeout-millis", 1000, "Number of milliseconds to use as an HTTP throttle check timeout") ignoreHTTPErrors := flag.Bool("ignore-http-errors", false, "ignore HTTP connection errors during throttle check") heartbeatIntervalMillis := flag.Int64("heartbeat-interval-millis", 100, "how frequently would gh-ost inject a heartbeat value") flag.StringVar(&migrationContext.ThrottleFlagFile, "throttle-flag-file", "", "operation pauses when this file exists; hint: use a file that is specific to the table being altered") @@ -297,7 +299,7 @@ func main() { log.Infof("starting gh-ost %+v", AppVersion) acceptSignals(migrationContext) - migrator := logic.NewMigrator(migrationContext) + migrator := logic.NewMigrator(migrationContext, AppVersion) err := migrator.Migrate() if err != nil { migrator.ExecOnFailureHook() diff --git a/go/logic/migrator.go b/go/logic/migrator.go index 747750cea..e1fe7d147 100644 --- a/go/logic/migrator.go +++ b/go/logic/migrator.go @@ -62,6 +62,7 @@ const ( // Migrator is the main schema migration flow manager. 
type Migrator struct { + appVersion string parser *sql.AlterTableParser inspector *Inspector applier *Applier @@ -87,8 +88,9 @@ type Migrator struct { finishedMigrating int64 } -func NewMigrator(context *base.MigrationContext) *Migrator { +func NewMigrator(context *base.MigrationContext, appVersion string) *Migrator { migrator := &Migrator{ + appVersion: appVersion, migrationContext: context, parser: sql.NewAlterTableParser(), ghostTableMigrated: make(chan bool), @@ -1068,7 +1070,7 @@ func (this *Migrator) addDMLEventsListener() error { // initiateThrottler kicks in the throttling collection and the throttling checks. func (this *Migrator) initiateThrottler() error { - this.throttler = NewThrottler(this.migrationContext, this.applier, this.inspector) + this.throttler = NewThrottler(this.migrationContext, this.applier, this.inspector, this.appVersion) go this.throttler.initiateThrottlerCollection(this.firstThrottlingCollected) this.migrationContext.Log.Infof("Waiting for first throttle metrics to be collected") diff --git a/go/logic/throttler.go b/go/logic/throttler.go index 9c120b3e5..8f3d0db10 100644 --- a/go/logic/throttler.go +++ b/go/logic/throttler.go @@ -6,6 +6,7 @@ package logic import ( + "context" "fmt" "net/http" "strings" @@ -42,16 +43,22 @@ const frenoMagicHint = "freno" // Throttler collects metrics related to throttling and makes informed decision // whether throttling should take place. 
type Throttler struct { + appVersion string migrationContext *base.MigrationContext applier *Applier + httpClient *http.Client + httpClientTimeout time.Duration inspector *Inspector finishedMigrating int64 } -func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector) *Throttler { +func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, inspector *Inspector, appVersion string) *Throttler { return &Throttler{ + appVersion: appVersion, migrationContext: migrationContext, applier: applier, + httpClient: &http.Client{}, + httpClientTimeout: time.Duration(migrationContext.ThrottleHTTPTimeoutMillis) * time.Millisecond, inspector: inspector, finishedMigrating: 0, } @@ -285,7 +292,17 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<- if url == "" { return true, nil } - resp, err := http.Head(url) + + ctx, cancel := context.WithTimeout(context.Background(), this.httpClientTimeout) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil) + if err != nil { + return false, err + } + req.Header.Set("User-Agent", fmt.Sprintf("gh-ost/%s", this.appVersion)) + + resp, err := this.httpClient.Do(req) if err != nil { return false, err } @@ -303,7 +320,8 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<- firstThrottlingCollected <- true - ticker := time.Tick(100 * time.Millisecond) + collectInterval := time.Duration(this.migrationContext.ThrottleHTTPIntervalMillis) * time.Millisecond + ticker := time.Tick(collectInterval) for range ticker { if atomic.LoadInt64(&this.finishedMigrating) > 0 { return From 3e72f1bd65e21d9dd09598c5f9c7a0a00c2dd8aa Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Wed, 6 Jul 2022 18:23:23 -0400 Subject: [PATCH 18/24] Cancel any row count queries before attempting to cut over (#846) * Cancel any row count queries before attempting to cut over Closes #830. 
Switches from using `QueryRow` to `QueryRowContext`, and stores a context.CancelFunc in the migration context, which is called to halt any running row count query before beginning the cut over. * Make it threadsafe * Kill the count query on the database side as well * Explicitly grab a connection to run the count, store its connection id * When the query context is canceled, run a `KILL QUERY ?` on that connection id * Rewrite these to use the threadsafe functions, stop exporting the cancel func * Update logger * Update logger Co-authored-by: Tim Vaillancourt Co-authored-by: Tim Vaillancourt Co-authored-by: dm-2 <45519614+dm-2@users.noreply.github.com> --- go/base/context.go | 32 ++++++++++++++++++++++++++++++++ go/logic/inspect.go | 37 ++++++++++++++++++++++++++++++++++--- go/logic/migrator.go | 18 ++++++++++++++---- 3 files changed, 80 insertions(+), 7 deletions(-) diff --git a/go/base/context.go b/go/base/context.go index 5c2cd37d2..f7bab5db3 100644 --- a/go/base/context.go +++ b/go/base/context.go @@ -83,6 +83,8 @@ type MigrationContext struct { AlterStatement string AlterStatementOptions string // anything following the 'ALTER TABLE [schema.]table' from AlterStatement + countMutex sync.Mutex + countTableRowsCancelFunc func() CountTableRows bool ConcurrentCountTableRows bool AllowedRunningOnMaster bool @@ -429,6 +431,36 @@ func (this *MigrationContext) IsTransactionalTable() bool { return false } +// SetCountTableRowsCancelFunc sets the cancel function for the CountTableRows query context +func (this *MigrationContext) SetCountTableRowsCancelFunc(f func()) { + this.countMutex.Lock() + defer this.countMutex.Unlock() + + this.countTableRowsCancelFunc = f +} + +// IsCountingTableRows returns true if the migration has a table count query running +func (this *MigrationContext) IsCountingTableRows() bool { + this.countMutex.Lock() + defer this.countMutex.Unlock() + + return this.countTableRowsCancelFunc != nil +} + +// CancelTableRowsCount cancels the CountTableRows 
query context. It is safe to +// call function even when IsCountingTableRows is false. +func (this *MigrationContext) CancelTableRowsCount() { + this.countMutex.Lock() + defer this.countMutex.Unlock() + + if this.countTableRowsCancelFunc == nil { + return + } + + this.countTableRowsCancelFunc() + this.countTableRowsCancelFunc = nil +} + // ElapsedTime returns time since very beginning of the process func (this *MigrationContext) ElapsedTime() time.Duration { return time.Since(this.StartTime) diff --git a/go/logic/inspect.go b/go/logic/inspect.go index 4a4ce37e2..252465765 100644 --- a/go/logic/inspect.go +++ b/go/logic/inspect.go @@ -6,6 +6,7 @@ package logic import ( + "context" gosql "database/sql" "fmt" "reflect" @@ -533,18 +534,48 @@ func (this *Inspector) estimateTableRowsViaExplain() error { return nil } +// Kill kills a query for connectionID. +// - @amason: this should go somewhere _other_ than `logic`, but I couldn't decide +// between `base`, `sql`, or `mysql`. +func Kill(db *gosql.DB, connectionID string) error { + _, err := db.Exec(`KILL QUERY %s`, connectionID) + return err +} + // CountTableRows counts exact number of rows on the original table -func (this *Inspector) CountTableRows() error { +func (this *Inspector) CountTableRows(ctx context.Context) error { atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 1) defer atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 0) this.migrationContext.Log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. 
This may take a while") + conn, err := this.db.Conn(ctx) + if err != nil { + return err + } + defer conn.Close() + + var connectionID string + if err := conn.QueryRowContext(ctx, `SELECT /* gh-ost */ CONNECTION_ID()`).Scan(&connectionID); err != nil { + return err + } + query := fmt.Sprintf(`select /* gh-ost */ count(*) as count_rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) var rowsEstimate int64 - if err := this.db.QueryRow(query).Scan(&rowsEstimate); err != nil { - return err + if err := conn.QueryRowContext(ctx, query).Scan(&rowsEstimate); err != nil { + switch err { + case context.Canceled, context.DeadlineExceeded: + this.migrationContext.Log.Infof("exact row count cancelled (%s), likely because I'm about to cut over. I'm going to kill that query.", ctx.Err()) + return Kill(this.db, connectionID) + default: + return err + } } + + // row count query finished. nil out the cancel func, so the main migration thread + // doesn't bother calling it after row copy is done. 
+ this.migrationContext.SetCountTableRowsCancelFunc(nil) + atomic.StoreInt64(&this.migrationContext.RowsEstimate, rowsEstimate) this.migrationContext.UsedRowsEstimateMethod = base.CountRowsEstimate diff --git a/go/logic/migrator.go b/go/logic/migrator.go index e1fe7d147..bc2a03fe4 100644 --- a/go/logic/migrator.go +++ b/go/logic/migrator.go @@ -6,6 +6,7 @@ package logic import ( + "context" "fmt" "io" "math" @@ -295,8 +296,8 @@ func (this *Migrator) countTableRows() (err error) { return nil } - countRowsFunc := func() error { - if err := this.inspector.CountTableRows(); err != nil { + countRowsFunc := func(ctx context.Context) error { + if err := this.inspector.CountTableRows(ctx); err != nil { return err } if err := this.hooksExecutor.onRowCountComplete(); err != nil { @@ -306,12 +307,17 @@ func (this *Migrator) countTableRows() (err error) { } if this.migrationContext.ConcurrentCountTableRows { + // store a cancel func so we can stop this query before a cut over + rowCountContext, rowCountCancel := context.WithCancel(context.Background()) + this.migrationContext.SetCountTableRowsCancelFunc(rowCountCancel) + this.migrationContext.Log.Infof("As instructed, counting rows in the background; meanwhile I will use an estimated count, and will update it later on") - go countRowsFunc() + go countRowsFunc(rowCountContext) + // and we ignore errors, because this turns to be a background job return nil } - return countRowsFunc() + return countRowsFunc(context.Background()) } func (this *Migrator) createFlagFiles() (err error) { @@ -415,6 +421,10 @@ func (this *Migrator) Migrate() (err error) { } this.printStatus(ForcePrintStatusRule) + if this.migrationContext.IsCountingTableRows() { + this.migrationContext.Log.Info("stopping query for exact row count, because that can accidentally lock out the cut over") + this.migrationContext.CancelTableRowsCount() + } if err := this.hooksExecutor.onBeforeCutOver(); err != nil { return err } From 0adb69781f4c97c53cd56df04c286ed32633ffd3 
Mon Sep 17 00:00:00 2001 From: dm-2 <45519614+dm-2@users.noreply.github.com> Date: Thu, 7 Jul 2022 11:10:32 +0100 Subject: [PATCH 19/24] v1.1.5 --- RELEASE_VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASE_VERSION b/RELEASE_VERSION index 65087b4f5..e25d8d9f3 100644 --- a/RELEASE_VERSION +++ b/RELEASE_VERSION @@ -1 +1 @@ -1.1.4 +1.1.5 From f29e63bc714cd02b6fa35ea0185f0f4edebf731d Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Wed, 14 Jul 2021 10:48:03 -0400 Subject: [PATCH 20/24] Check RowsAffected when applying DML events to get more accurate statistics (#844) * Check RowsAffected when applying DML events to get more accurate statistics Addresses #600. When applying a DML event, check the RowsAffected on the `Result` struct. Since all DML event queries are point queries, this will only ever be 0 or 1. The applier then takes this value and multiplies by the `rowsDelta` of the event, resulting in a properly-signed, accurate row delta to use in the statistics. If an error occurs here, log it, but do not surface this as an actual error .. simply assume the DML affected a row and move on. It will be inaccurate, but this is already the case. * Fix import * update wording to warning log message Co-authored-by: Tim Vaillancourt --- go/logic/applier.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/go/logic/applier.go b/go/logic/applier.go index 21addf363..aa6dda336 100644 --- a/go/logic/applier.go +++ b/go/logic/applier.go @@ -1,5 +1,5 @@ /* - Copyright 2016 GitHub Inc. + Copyright 2021 GitHub Inc. 
See https://github.com/github/gh-ost/blob/master/LICENSE */ @@ -8,6 +8,7 @@ package logic import ( gosql "database/sql" "fmt" + "sync" "sync/atomic" "time" @@ -16,8 +17,8 @@ import ( "github.com/github/gh-ost/go/mysql" "github.com/github/gh-ost/go/sql" - "github.com/outbrain/golib/sqlutils" - "sync" + "github.com/openark/golib/log" + "github.com/openark/golib/sqlutils" ) const ( @@ -1070,11 +1071,20 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) if buildResult.err != nil { return rollback(buildResult.err) } - if _, err := tx.Exec(buildResult.query, buildResult.args...); err != nil { + result, err := tx.Exec(buildResult.query, buildResult.args...) + if err != nil { err = fmt.Errorf("%s; query=%s; args=%+v", err.Error(), buildResult.query, buildResult.args) return rollback(err) } - totalDelta += buildResult.rowsDelta + + rowsAffected, err := result.RowsAffected() + if err != nil { + log.Warningf("error getting rows affected from DML event query: %s. i'm going to assume that the DML affected a single row, but this may result in inaccurate statistics", err) + rowsAffected = 1 + } + // each DML is either a single insert (delta +1), update (delta +0) or delete (delta -1). 
+ // multiplying by the rows actually affected (either 0 or 1) will give an accurate row delta for this DML event + totalDelta += buildResult.rowsDelta * rowsAffected } } if err := tx.Commit(); err != nil { From f2c203382bb8da61081e72bbe45d5fab9b8a8f94 Mon Sep 17 00:00:00 2001 From: dm-2 <45519614+dm-2@users.noreply.github.com> Date: Thu, 7 Jul 2022 13:09:36 +0100 Subject: [PATCH 21/24] vendor github.com/openark/golib --- vendor/github.com/openark/golib/LICENSE | 201 +++++++++ vendor/github.com/openark/golib/README.md | 9 + vendor/github.com/openark/golib/go.mod | 3 + vendor/github.com/openark/golib/go.sum | 0 vendor/github.com/openark/golib/log/log.go | 268 +++++++++++ vendor/github.com/openark/golib/math/math.go | 119 +++++ .../openark/golib/sqlutils/dialect.go | 49 ++ .../openark/golib/sqlutils/sqlite_dialect.go | 130 ++++++ .../golib/sqlutils/sqlite_dialect_test.go | 242 ++++++++++ .../openark/golib/sqlutils/sqlutils.go | 427 ++++++++++++++++++ vendor/github.com/openark/golib/tests/spec.go | 76 ++++ vendor/github.com/openark/golib/util/text.go | 103 +++++ .../openark/golib/util/text_test.go | 88 ++++ 13 files changed, 1715 insertions(+) create mode 100644 vendor/github.com/openark/golib/LICENSE create mode 100644 vendor/github.com/openark/golib/README.md create mode 100644 vendor/github.com/openark/golib/go.mod create mode 100644 vendor/github.com/openark/golib/go.sum create mode 100644 vendor/github.com/openark/golib/log/log.go create mode 100644 vendor/github.com/openark/golib/math/math.go create mode 100644 vendor/github.com/openark/golib/sqlutils/dialect.go create mode 100644 vendor/github.com/openark/golib/sqlutils/sqlite_dialect.go create mode 100644 vendor/github.com/openark/golib/sqlutils/sqlite_dialect_test.go create mode 100644 vendor/github.com/openark/golib/sqlutils/sqlutils.go create mode 100644 vendor/github.com/openark/golib/tests/spec.go create mode 100644 vendor/github.com/openark/golib/util/text.go create mode 100644 
vendor/github.com/openark/golib/util/text_test.go diff --git a/vendor/github.com/openark/golib/LICENSE b/vendor/github.com/openark/golib/LICENSE new file mode 100644 index 000000000..6875dcac6 --- /dev/null +++ b/vendor/github.com/openark/golib/LICENSE @@ -0,0 +1,201 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. For the purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a 
NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. + +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "{}" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. + +Copyright 2014 Outbrain Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/openark/golib/README.md b/vendor/github.com/openark/golib/README.md new file mode 100644 index 000000000..ee0907e94 --- /dev/null +++ b/vendor/github.com/openark/golib/README.md @@ -0,0 +1,9 @@ +Common Go libraries + +To import & use: +``` +go get "github.com/openark/golib/math" +go get "github.com/openark/golib/sqlutils" +go get "github.com/openark/golib/tests" +... 
+``` diff --git a/vendor/github.com/openark/golib/go.mod b/vendor/github.com/openark/golib/go.mod new file mode 100644 index 000000000..d2096e2ff --- /dev/null +++ b/vendor/github.com/openark/golib/go.mod @@ -0,0 +1,3 @@ +module github.com/openark/golib + +go 1.16 diff --git a/vendor/github.com/openark/golib/go.sum b/vendor/github.com/openark/golib/go.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/openark/golib/log/log.go b/vendor/github.com/openark/golib/log/log.go new file mode 100644 index 000000000..26d33c997 --- /dev/null +++ b/vendor/github.com/openark/golib/log/log.go @@ -0,0 +1,268 @@ +/* + Copyright 2014 Outbrain Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package log + +import ( + "errors" + "fmt" + "log/syslog" + "os" + "runtime/debug" + "time" +) + +// LogLevel indicates the severity of a log entry +type LogLevel int + +func (this LogLevel) String() string { + switch this { + case FATAL: + return "FATAL" + case CRITICAL: + return "CRITICAL" + case ERROR: + return "ERROR" + case WARNING: + return "WARNING" + case NOTICE: + return "NOTICE" + case INFO: + return "INFO" + case DEBUG: + return "DEBUG" + } + return "unknown" +} + +func LogLevelFromString(logLevelName string) (LogLevel, error) { + switch logLevelName { + case "FATAL": + return FATAL, nil + case "CRITICAL": + return CRITICAL, nil + case "ERROR": + return ERROR, nil + case "WARNING": + return WARNING, nil + case "NOTICE": + return NOTICE, nil + case "INFO": + return INFO, nil + case "DEBUG": + return DEBUG, nil + } + return 0, fmt.Errorf("Unknown LogLevel name: %+v", logLevelName) +} + +const ( + FATAL LogLevel = iota + CRITICAL + ERROR + WARNING + NOTICE + INFO + DEBUG +) + +const TimeFormat = "2006-01-02 15:04:05" + +// globalLogLevel indicates the global level filter for all logs (only entries with level equals or higher +// than this value will be logged) +var globalLogLevel LogLevel = DEBUG +var printStackTrace bool = false + +// syslogWriter is optional, and defaults to nil (disabled) +var syslogLevel LogLevel = ERROR +var syslogWriter *syslog.Writer + +// SetPrintStackTrace enables/disables dumping the stack upon error logging +func SetPrintStackTrace(shouldPrintStackTrace bool) { + printStackTrace = shouldPrintStackTrace +} + +// SetLevel sets the global log level. Only entries with level equals or higher than +// this value will be logged +func SetLevel(logLevel LogLevel) { + globalLogLevel = logLevel +} + +// GetLevel returns current global log level +func GetLevel() LogLevel { + return globalLogLevel +} + +// EnableSyslogWriter enables, if possible, writes to syslog. 
These will execute _in addition_ to normal logging +func EnableSyslogWriter(tag string) (err error) { + syslogWriter, err = syslog.New(syslog.LOG_ERR, tag) + if err != nil { + syslogWriter = nil + } + return err +} + +// SetSyslogLevel sets the minimal syslog level. Only entries with level equals or higher than +// this value will be logged. However, this is also capped by the global log level. That is, +// messages with lower level than global-log-level will be discarded at any case. +func SetSyslogLevel(logLevel LogLevel) { + syslogLevel = logLevel +} + +// logFormattedEntry nicely formats and emits a log entry +func logFormattedEntry(logLevel LogLevel, message string, args ...interface{}) string { + if logLevel > globalLogLevel { + return "" + } + // if TZ env variable is set, update the timestamp timezone + localizedTime := time.Now() + tzLocation := os.Getenv("TZ") + if tzLocation != "" { + location, err := time.LoadLocation(tzLocation) + if err == nil { // if invalid tz location was provided, just leave it as the default + localizedTime = time.Now().In(location) + } + } + + msgArgs := fmt.Sprintf(message, args...) 
+ entryString := fmt.Sprintf("%s %s %s", localizedTime.Format(TimeFormat), logLevel, msgArgs) + fmt.Fprintln(os.Stderr, entryString) + + if syslogWriter != nil { + go func() error { + if logLevel > syslogLevel { + return nil + } + switch logLevel { + case FATAL: + return syslogWriter.Emerg(msgArgs) + case CRITICAL: + return syslogWriter.Crit(msgArgs) + case ERROR: + return syslogWriter.Err(msgArgs) + case WARNING: + return syslogWriter.Warning(msgArgs) + case NOTICE: + return syslogWriter.Notice(msgArgs) + case INFO: + return syslogWriter.Info(msgArgs) + case DEBUG: + return syslogWriter.Debug(msgArgs) + } + return nil + }() + } + return entryString +} + +// logEntry emits a formatted log entry +func logEntry(logLevel LogLevel, message string, args ...interface{}) string { + entryString := message + for _, s := range args { + entryString += fmt.Sprintf(" %s", s) + } + return logFormattedEntry(logLevel, entryString) +} + +// logErrorEntry emits a log entry based on given error object +func logErrorEntry(logLevel LogLevel, err error) error { + if err == nil { + // No error + return nil + } + entryString := fmt.Sprintf("%+v", err) + logEntry(logLevel, entryString) + if printStackTrace { + debug.PrintStack() + } + return err +} + +func Debug(message string, args ...interface{}) string { + return logEntry(DEBUG, message, args...) +} + +func Debugf(message string, args ...interface{}) string { + return logFormattedEntry(DEBUG, message, args...) +} + +func Info(message string, args ...interface{}) string { + return logEntry(INFO, message, args...) +} + +func Infof(message string, args ...interface{}) string { + return logFormattedEntry(INFO, message, args...) +} + +func Notice(message string, args ...interface{}) string { + return logEntry(NOTICE, message, args...) +} + +func Noticef(message string, args ...interface{}) string { + return logFormattedEntry(NOTICE, message, args...) 
+} + +func Warning(message string, args ...interface{}) error { + return errors.New(logEntry(WARNING, message, args...)) +} + +func Warningf(message string, args ...interface{}) error { + return errors.New(logFormattedEntry(WARNING, message, args...)) +} + +func Error(message string, args ...interface{}) error { + return errors.New(logEntry(ERROR, message, args...)) +} + +func Errorf(message string, args ...interface{}) error { + return errors.New(logFormattedEntry(ERROR, message, args...)) +} + +func Errore(err error) error { + return logErrorEntry(ERROR, err) +} + +func Critical(message string, args ...interface{}) error { + return errors.New(logEntry(CRITICAL, message, args...)) +} + +func Criticalf(message string, args ...interface{}) error { + return errors.New(logFormattedEntry(CRITICAL, message, args...)) +} + +func Criticale(err error) error { + return logErrorEntry(CRITICAL, err) +} + +// Fatal emits a FATAL level entry and exits the program +func Fatal(message string, args ...interface{}) error { + logEntry(FATAL, message, args...) + os.Exit(1) + return errors.New(logEntry(CRITICAL, message, args...)) +} + +// Fatalf emits a FATAL level entry and exits the program +func Fatalf(message string, args ...interface{}) error { + logFormattedEntry(FATAL, message, args...) + os.Exit(1) + return errors.New(logFormattedEntry(CRITICAL, message, args...)) +} + +// Fatale emits a FATAL level entry and exits the program +func Fatale(err error) error { + logErrorEntry(FATAL, err) + os.Exit(1) + return err +} diff --git a/vendor/github.com/openark/golib/math/math.go b/vendor/github.com/openark/golib/math/math.go new file mode 100644 index 000000000..f1f2068e4 --- /dev/null +++ b/vendor/github.com/openark/golib/math/math.go @@ -0,0 +1,119 @@ +/* + Copyright 2014 Shlomi Noach. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package math + +func MinInt(i1, i2 int) int { + if i1 < i2 { + return i1 + } + return i2 +} + +func MaxInt(i1, i2 int) int { + if i1 > i2 { + return i1 + } + return i2 +} + +func MinInt64(i1, i2 int64) int64 { + if i1 < i2 { + return i1 + } + return i2 +} + +func MaxInt64(i1, i2 int64) int64 { + if i1 > i2 { + return i1 + } + return i2 +} + +func MinUInt(i1, i2 uint) uint { + if i1 < i2 { + return i1 + } + return i2 +} + +func MaxUInt(i1, i2 uint) uint { + if i1 > i2 { + return i1 + } + return i2 +} + +func MinUInt64(i1, i2 uint64) uint64 { + if i1 < i2 { + return i1 + } + return i2 +} + +func MaxUInt64(i1, i2 uint64) uint64 { + if i1 > i2 { + return i1 + } + return i2 +} + +func MinString(i1, i2 string) string { + if i1 < i2 { + return i1 + } + return i2 +} + +func MaxString(i1, i2 string) string { + if i1 > i2 { + return i1 + } + return i2 +} + +// TernaryString acts like a "? :" C-style ternary operator for strings +func TernaryString(condition bool, resTrue string, resFalse string) string { + if condition { + return resTrue + } + return resFalse +} + +// TernaryString acts like a "? 
:" C-style ternary operator for ints +func TernaryInt(condition bool, resTrue int, resFalse int) int { + if condition { + return resTrue + } + return resFalse +} + +// AbsInt is an ABS function for int type +func AbsInt(i int) int { + if i >= 0 { + return i + } + return -i +} + +// AbsInt64 is an ABS function for int64 type +func AbsInt64(i int64) int64 { + if i >= 0 { + return i + } + return -i +} diff --git a/vendor/github.com/openark/golib/sqlutils/dialect.go b/vendor/github.com/openark/golib/sqlutils/dialect.go new file mode 100644 index 000000000..19cb55d25 --- /dev/null +++ b/vendor/github.com/openark/golib/sqlutils/dialect.go @@ -0,0 +1,49 @@ +/* + Copyright 2017 GitHub Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package sqlutils + +import ( + "regexp" + "strings" +) + +type regexpMap struct { + r *regexp.Regexp + replacement string +} + +func (this *regexpMap) process(text string) (result string) { + return this.r.ReplaceAllString(text, this.replacement) +} + +func rmap(regexpExpression string, replacement string) regexpMap { + return regexpMap{ + r: regexp.MustCompile(regexpSpaces(regexpExpression)), + replacement: replacement, + } +} + +func regexpSpaces(statement string) string { + return strings.Replace(statement, " ", `[\s]+`, -1) +} + +func applyConversions(statement string, conversions []regexpMap) string { + for _, rmap := range conversions { + statement = rmap.process(statement) + } + return statement +} diff --git a/vendor/github.com/openark/golib/sqlutils/sqlite_dialect.go b/vendor/github.com/openark/golib/sqlutils/sqlite_dialect.go new file mode 100644 index 000000000..5937aa42a --- /dev/null +++ b/vendor/github.com/openark/golib/sqlutils/sqlite_dialect.go @@ -0,0 +1,130 @@ +/* + Copyright 2017 GitHub Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// What's this about? +// This is a brute-force regular-expression based conversion from MySQL syntax to sqlite3 syntax. +// It is NOT meant to be a general purpose solution and is only expected & confirmed to run on +// queries issued by orchestrator. There are known limitations to this design. +// It's not even pretty. +// In fact... +// Well, it gets the job done at this time. Call it debt. 
+ +package sqlutils + +import ( + "regexp" +) + +var sqlite3CreateTableConversions = []regexpMap{ + rmap(`(?i) (character set|charset) [\S]+`, ``), + rmap(`(?i)int unsigned`, `int`), + rmap(`(?i)int[\s]*[(][\s]*([0-9]+)[\s]*[)] unsigned`, `int`), + rmap(`(?i)engine[\s]*=[\s]*(innodb|myisam|ndb|memory|tokudb)`, ``), + rmap(`(?i)DEFAULT CHARSET[\s]*=[\s]*[\S]+`, ``), + rmap(`(?i)[\S]*int( not null|) auto_increment`, `integer`), + rmap(`(?i)comment '[^']*'`, ``), + rmap(`(?i)after [\S]+`, ``), + rmap(`(?i)alter table ([\S]+) add (index|key) ([\S]+) (.+)`, `create index ${3}_${1} on $1 $4`), + rmap(`(?i)alter table ([\S]+) add unique (index|key) ([\S]+) (.+)`, `create unique index ${3}_${1} on $1 $4`), + rmap(`(?i)([\S]+) enum[\s]*([(].*?[)])`, `$1 text check($1 in $2)`), + rmap(`(?i)([\s\S]+[/][*] sqlite3-skip [*][/][\s\S]+)`, ``), + rmap(`(?i)timestamp default current_timestamp`, `timestamp default ('')`), + rmap(`(?i)timestamp not null default current_timestamp`, `timestamp not null default ('')`), + + rmap(`(?i)add column (.*int) not null[\s]*$`, `add column $1 not null default 0`), + rmap(`(?i)add column (.* text) not null[\s]*$`, `add column $1 not null default ''`), + rmap(`(?i)add column (.* varchar.*) not null[\s]*$`, `add column $1 not null default ''`), +} + +var sqlite3InsertConversions = []regexpMap{ + rmap(`(?i)insert ignore ([\s\S]+) on duplicate key update [\s\S]+`, `insert or ignore $1`), + rmap(`(?i)insert ignore`, `insert or ignore`), + rmap(`(?i)now[(][)]`, `datetime('now')`), + rmap(`(?i)insert into ([\s\S]+) on duplicate key update [\s\S]+`, `replace into $1`), +} + +var sqlite3GeneralConversions = []regexpMap{ + rmap(`(?i)now[(][)][\s]*[-][\s]*interval [?] ([\w]+)`, `datetime('now', printf('-%d $1', ?))`), + rmap(`(?i)now[(][)][\s]*[+][\s]*interval [?] 
([\w]+)`, `datetime('now', printf('+%d $1', ?))`), + rmap(`(?i)now[(][)][\s]*[-][\s]*interval ([0-9.]+) ([\w]+)`, `datetime('now', '-${1} $2')`), + rmap(`(?i)now[(][)][\s]*[+][\s]*interval ([0-9.]+) ([\w]+)`, `datetime('now', '+${1} $2')`), + + rmap(`(?i)[=<>\s]([\S]+[.][\S]+)[\s]*[-][\s]*interval [?] ([\w]+)`, ` datetime($1, printf('-%d $2', ?))`), + rmap(`(?i)[=<>\s]([\S]+[.][\S]+)[\s]*[+][\s]*interval [?] ([\w]+)`, ` datetime($1, printf('+%d $2', ?))`), + + rmap(`(?i)unix_timestamp[(][)]`, `strftime('%s', 'now')`), + rmap(`(?i)unix_timestamp[(]([^)]+)[)]`, `strftime('%s', $1)`), + rmap(`(?i)now[(][)]`, `datetime('now')`), + rmap(`(?i)cast[(][\s]*([\S]+) as signed[\s]*[)]`, `cast($1 as integer)`), + + rmap(`(?i)\bconcat[(][\s]*([^,)]+)[\s]*,[\s]*([^,)]+)[\s]*[)]`, `($1 || $2)`), + rmap(`(?i)\bconcat[(][\s]*([^,)]+)[\s]*,[\s]*([^,)]+)[\s]*,[\s]*([^,)]+)[\s]*[)]`, `($1 || $2 || $3)`), + + rmap(`(?i) rlike `, ` like `), + + rmap(`(?i)create index([\s\S]+)[(][\s]*[0-9]+[\s]*[)]([\s\S]+)`, `create index ${1}${2}`), + rmap(`(?i)drop index ([\S]+) on ([\S]+)`, `drop index if exists $1`), +} + +var ( + sqlite3IdentifyCreateTableStatement = regexp.MustCompile(regexpSpaces(`(?i)^[\s]*create table`)) + sqlite3IdentifyCreateIndexStatement = regexp.MustCompile(regexpSpaces(`(?i)^[\s]*create( unique|) index`)) + sqlite3IdentifyDropIndexStatement = regexp.MustCompile(regexpSpaces(`(?i)^[\s]*drop index`)) + sqlite3IdentifyAlterTableStatement = regexp.MustCompile(regexpSpaces(`(?i)^[\s]*alter table`)) + sqlite3IdentifyInsertStatement = regexp.MustCompile(regexpSpaces(`(?i)^[\s]*(insert|replace)`)) +) + +func IsInsert(statement string) bool { + return sqlite3IdentifyInsertStatement.MatchString(statement) +} + +func IsCreateTable(statement string) bool { + return sqlite3IdentifyCreateTableStatement.MatchString(statement) +} + +func IsCreateIndex(statement string) bool { + return sqlite3IdentifyCreateIndexStatement.MatchString(statement) +} + +func IsDropIndex(statement string) bool 
{ + return sqlite3IdentifyDropIndexStatement.MatchString(statement) +} + +func IsAlterTable(statement string) bool { + return sqlite3IdentifyAlterTableStatement.MatchString(statement) +} + +func ToSqlite3CreateTable(statement string) string { + return applyConversions(statement, sqlite3CreateTableConversions) +} + +func ToSqlite3Insert(statement string) string { + return applyConversions(statement, sqlite3InsertConversions) +} + +func ToSqlite3Dialect(statement string) (translated string) { + if IsCreateTable(statement) { + return ToSqlite3CreateTable(statement) + } + if IsAlterTable(statement) { + return ToSqlite3CreateTable(statement) + } + statement = applyConversions(statement, sqlite3GeneralConversions) + if IsInsert(statement) { + return ToSqlite3Insert(statement) + } + return statement +} diff --git a/vendor/github.com/openark/golib/sqlutils/sqlite_dialect_test.go b/vendor/github.com/openark/golib/sqlutils/sqlite_dialect_test.go new file mode 100644 index 000000000..a3eea7120 --- /dev/null +++ b/vendor/github.com/openark/golib/sqlutils/sqlite_dialect_test.go @@ -0,0 +1,242 @@ +/* + Copyright 2017 GitHub Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package sqlutils + +import ( + "regexp" + "strings" + "testing" + + test "github.com/openark/golib/tests" +) + +var spacesRegexp = regexp.MustCompile(`[\s]+`) + +func init() { +} + +func stripSpaces(statement string) string { + statement = strings.TrimSpace(statement) + statement = spacesRegexp.ReplaceAllString(statement, " ") + return statement +} + +func TestIsCreateTable(t *testing.T) { + test.S(t).ExpectTrue(IsCreateTable("create table t(id int)")) + test.S(t).ExpectTrue(IsCreateTable(" create table t(id int)")) + test.S(t).ExpectTrue(IsCreateTable("CREATE TABLE t(id int)")) + test.S(t).ExpectTrue(IsCreateTable(` + create table t(id int) + `)) + test.S(t).ExpectFalse(IsCreateTable("where create table t(id int)")) + test.S(t).ExpectFalse(IsCreateTable("insert")) +} + +func TestToSqlite3CreateTable(t *testing.T) { + { + statement := "create table t(id int)" + result := ToSqlite3CreateTable(statement) + test.S(t).ExpectEquals(result, statement) + } + { + statement := "create table t(id int, v varchar(123) CHARACTER SET ascii NOT NULL default '')" + result := ToSqlite3CreateTable(statement) + test.S(t).ExpectEquals(result, "create table t(id int, v varchar(123) NOT NULL default '')") + } + { + statement := "create table t(id int, v varchar ( 123 ) CHARACTER SET ascii NOT NULL default '')" + result := ToSqlite3CreateTable(statement) + test.S(t).ExpectEquals(result, "create table t(id int, v varchar ( 123 ) NOT NULL default '')") + } + { + statement := "create table t(i smallint unsigned)" + result := ToSqlite3CreateTable(statement) + test.S(t).ExpectEquals(result, "create table t(i smallint)") + } + { + statement := "create table t(i smallint(5) unsigned)" + result := ToSqlite3CreateTable(statement) + test.S(t).ExpectEquals(result, "create table t(i smallint)") + } + { + statement := "create table t(i smallint ( 5 ) unsigned)" + result := ToSqlite3CreateTable(statement) + test.S(t).ExpectEquals(result, "create table t(i smallint)") + } +} + +func 
TestToSqlite3AlterTable(t *testing.T) { + { + statement := ` + ALTER TABLE + database_instance + ADD COLUMN sql_delay INT UNSIGNED NOT NULL AFTER slave_lag_seconds + ` + result := stripSpaces(ToSqlite3Dialect(statement)) + test.S(t).ExpectEquals(result, stripSpaces(` + ALTER TABLE + database_instance + add column sql_delay int not null default 0 + `)) + } + { + statement := ` + ALTER TABLE + database_instance + ADD INDEX master_host_port_idx (master_host, master_port) + ` + result := stripSpaces(ToSqlite3Dialect(statement)) + test.S(t).ExpectEquals(result, stripSpaces(` + create index + master_host_port_idx_database_instance + on database_instance (master_host, master_port) + `)) + } + { + statement := ` + ALTER TABLE + topology_recovery + ADD KEY last_detection_idx (last_detection_id) + ` + result := stripSpaces(ToSqlite3Dialect(statement)) + test.S(t).ExpectEquals(result, stripSpaces(` + create index + last_detection_idx_topology_recovery + on topology_recovery (last_detection_id) + `)) + } + +} + +func TestCreateIndex(t *testing.T) { + { + statement := ` + create index + master_host_port_idx_database_instance + on database_instance (master_host(128), master_port) + ` + result := stripSpaces(ToSqlite3Dialect(statement)) + test.S(t).ExpectEquals(result, stripSpaces(` + create index + master_host_port_idx_database_instance + on database_instance (master_host, master_port) + `)) + } +} + +func TestIsInsert(t *testing.T) { + test.S(t).ExpectTrue(IsInsert("insert into t")) + test.S(t).ExpectTrue(IsInsert("insert ignore into t")) + test.S(t).ExpectTrue(IsInsert(` + insert ignore into t + `)) + test.S(t).ExpectFalse(IsInsert("where create table t(id int)")) + test.S(t).ExpectFalse(IsInsert("create table t(id int)")) + test.S(t).ExpectTrue(IsInsert(` + insert into + cluster_domain_name (cluster_name, domain_name, last_registered) + values + (?, ?, datetime('now')) + on duplicate key update + domain_name=values(domain_name), + last_registered=values(last_registered) + `)) 
+} + +func TestToSqlite3Insert(t *testing.T) { + { + statement := ` + insert into + cluster_domain_name (cluster_name, domain_name, last_registered) + values + (?, ?, datetime('now')) + on duplicate key update + domain_name=values(domain_name), + last_registered=values(last_registered) + ` + result := stripSpaces(ToSqlite3Dialect(statement)) + test.S(t).ExpectEquals(result, stripSpaces(` + replace into + cluster_domain_name (cluster_name, domain_name, last_registered) + values + (?, ?, datetime('now')) + `)) + } +} + +func TestToSqlite3GeneralConversions(t *testing.T) { + { + statement := "select now()" + result := ToSqlite3Dialect(statement) + test.S(t).ExpectEquals(result, "select datetime('now')") + } + { + statement := "select now() - interval ? second" + result := ToSqlite3Dialect(statement) + test.S(t).ExpectEquals(result, "select datetime('now', printf('-%d second', ?))") + } + { + statement := "select now() + interval ? minute" + result := ToSqlite3Dialect(statement) + test.S(t).ExpectEquals(result, "select datetime('now', printf('+%d minute', ?))") + } + { + statement := "select now() + interval 5 minute" + result := ToSqlite3Dialect(statement) + test.S(t).ExpectEquals(result, "select datetime('now', '+5 minute')") + } + { + statement := "select some_table.some_column + interval ? minute" + result := ToSqlite3Dialect(statement) + test.S(t).ExpectEquals(result, "select datetime(some_table.some_column, printf('+%d minute', ?))") + } + { + statement := "AND master_instance.last_attempted_check <= master_instance.last_seen + interval ? 
minute" + result := ToSqlite3Dialect(statement) + test.S(t).ExpectEquals(result, "AND master_instance.last_attempted_check <= datetime(master_instance.last_seen, printf('+%d minute', ?))") + } + { + statement := "select concat(master_instance.port, '') as port" + result := ToSqlite3Dialect(statement) + test.S(t).ExpectEquals(result, "select (master_instance.port || '') as port") + } + { + statement := "select concat( 'abc' , 'def') as s" + result := ToSqlite3Dialect(statement) + test.S(t).ExpectEquals(result, "select ('abc' || 'def') as s") + } + { + statement := "select concat( 'abc' , 'def', last.col) as s" + result := ToSqlite3Dialect(statement) + test.S(t).ExpectEquals(result, "select ('abc' || 'def' || last.col) as s") + } + { + statement := "select concat(myself.only) as s" + result := ToSqlite3Dialect(statement) + test.S(t).ExpectEquals(result, "select concat(myself.only) as s") + } + { + statement := "select concat(1, '2', 3, '4') as s" + result := ToSqlite3Dialect(statement) + test.S(t).ExpectEquals(result, "select concat(1, '2', 3, '4') as s") + } + { + statement := "select group_concat( 'abc' , 'def') as s" + result := ToSqlite3Dialect(statement) + test.S(t).ExpectEquals(result, "select group_concat( 'abc' , 'def') as s") + } +} diff --git a/vendor/github.com/openark/golib/sqlutils/sqlutils.go b/vendor/github.com/openark/golib/sqlutils/sqlutils.go new file mode 100644 index 000000000..0a2eda262 --- /dev/null +++ b/vendor/github.com/openark/golib/sqlutils/sqlutils.go @@ -0,0 +1,427 @@ +/* + Copyright 2014 Outbrain Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sqlutils + +import ( + "database/sql" + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/openark/golib/log" +) + +const DateTimeFormat = "2006-01-02 15:04:05.999999" + +// RowMap represents one row in a result set. Its objective is to allow +// for easy, typed getters by column name. +type RowMap map[string]CellData + +// Cell data is the result of a single (atomic) column in a single row +type CellData sql.NullString + +func (this *CellData) MarshalJSON() ([]byte, error) { + if this.Valid { + return json.Marshal(this.String) + } else { + return json.Marshal(nil) + } +} + +// UnmarshalJSON reds this object from JSON +func (this *CellData) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + (*this).String = s + (*this).Valid = true + + return nil +} + +func (this *CellData) NullString() *sql.NullString { + return (*sql.NullString)(this) +} + +// RowData is the result of a single row, in positioned array format +type RowData []CellData + +// MarshalJSON will marshal this map as JSON +func (this *RowData) MarshalJSON() ([]byte, error) { + cells := make([](*CellData), len(*this), len(*this)) + for i, val := range *this { + d := CellData(val) + cells[i] = &d + } + return json.Marshal(cells) +} + +func (this *RowData) Args() []interface{} { + result := make([]interface{}, len(*this)) + for i := range *this { + result[i] = (*(*this)[i].NullString()) + } + return result +} + +// ResultData is an ordered row set of RowData +type ResultData []RowData +type NamedResultData struct { + Columns []string + Data ResultData +} + +var EmptyResultData = ResultData{} + +func (this *RowMap) GetString(key string) string { + return (*this)[key].String +} + +// GetStringD returns a string from the map, or a default value if the key does not exist +func (this *RowMap) 
GetStringD(key string, def string) string { + if cell, ok := (*this)[key]; ok { + return cell.String + } + return def +} + +func (this *RowMap) GetInt64(key string) int64 { + res, _ := strconv.ParseInt(this.GetString(key), 10, 0) + return res +} + +func (this *RowMap) GetNullInt64(key string) sql.NullInt64 { + i, err := strconv.ParseInt(this.GetString(key), 10, 0) + if err == nil { + return sql.NullInt64{Int64: i, Valid: true} + } else { + return sql.NullInt64{Valid: false} + } +} + +func (this *RowMap) GetInt(key string) int { + res, _ := strconv.Atoi(this.GetString(key)) + return res +} + +func (this *RowMap) GetIntD(key string, def int) int { + res, err := strconv.Atoi(this.GetString(key)) + if err != nil { + return def + } + return res +} + +func (this *RowMap) GetUint(key string) uint { + res, _ := strconv.ParseUint(this.GetString(key), 10, 0) + return uint(res) +} + +func (this *RowMap) GetUintD(key string, def uint) uint { + res, err := strconv.Atoi(this.GetString(key)) + if err != nil { + return def + } + return uint(res) +} + +func (this *RowMap) GetUint64(key string) uint64 { + res, _ := strconv.ParseUint(this.GetString(key), 10, 0) + return res +} + +func (this *RowMap) GetUint64D(key string, def uint64) uint64 { + res, err := strconv.ParseUint(this.GetString(key), 10, 0) + if err != nil { + return def + } + return uint64(res) +} + +func (this *RowMap) GetBool(key string) bool { + return this.GetInt(key) != 0 +} + +func (this *RowMap) GetTime(key string) time.Time { + if t, err := time.Parse(DateTimeFormat, this.GetString(key)); err == nil { + return t + } + return time.Time{} +} + +// knownDBs is a DB cache by uri +var knownDBs map[string]*sql.DB = make(map[string]*sql.DB) +var knownDBsMutex = &sync.Mutex{} + +// GetDB returns a DB instance based on uri. 
+// bool result indicates whether the DB was returned from cache; err is non-nil upon failure +func GetGenericDB(driverName, dataSourceName string) (*sql.DB, bool, error) { + knownDBsMutex.Lock() + defer func() { + knownDBsMutex.Unlock() + }() + + var exists bool + if _, exists = knownDBs[dataSourceName]; !exists { + if db, err := sql.Open(driverName, dataSourceName); err == nil { + knownDBs[dataSourceName] = db + } else { + return db, exists, err + } + } + return knownDBs[dataSourceName], exists, nil +} + +// GetDB returns a MySQL DB instance based on uri. +// bool result indicates whether the DB was returned from cache; err is non-nil upon failure +func GetDB(mysql_uri string) (*sql.DB, bool, error) { + return GetGenericDB("mysql", mysql_uri) +} + +// GetSQLiteDB returns a SQLite DB instance based on DB file name. +// bool result indicates whether the DB was returned from cache; err is non-nil upon failure +func GetSQLiteDB(dbFile string) (*sql.DB, bool, error) { + return GetGenericDB("sqlite3", dbFile) +} + +// RowToArray is a convenience function, typically not called directly, which maps a +// single read database row into a NullString +func RowToArray(rows *sql.Rows, columns []string) []CellData { + buff := make([]interface{}, len(columns)) + data := make([]CellData, len(columns)) + for i, _ := range buff { + buff[i] = data[i].NullString() + } + rows.Scan(buff...) 
+ return data +} + +// ScanRowsToArrays is a convenience function, typically not called directly, which maps rows +// already read from the database into arrays of NullString +func ScanRowsToArrays(rows *sql.Rows, on_row func([]CellData) error) error { + columns, _ := rows.Columns() + for rows.Next() { + arr := RowToArray(rows, columns) + err := on_row(arr) + if err != nil { + return err + } + } + return nil +} + +func rowToMap(row []CellData, columns []string) map[string]CellData { + m := make(map[string]CellData) + for k, data_col := range row { + m[columns[k]] = data_col + } + return m +} + +// ScanRowsToMaps is a convenience function, typically not called directly, which maps rows +// already read from the database into RowMap entries. +func ScanRowsToMaps(rows *sql.Rows, on_row func(RowMap) error) error { + columns, _ := rows.Columns() + err := ScanRowsToArrays(rows, func(arr []CellData) error { + m := rowToMap(arr, columns) + err := on_row(m) + if err != nil { + return err + } + return nil + }) + return err +} + +// QueryRowsMap is a convenience function allowing querying a result set while providing a callback +// function activated per read row. +func QueryRowsMap(db *sql.DB, query string, on_row func(RowMap) error, args ...interface{}) (err error) { + defer func() { + if derr := recover(); derr != nil { + err = fmt.Errorf("QueryRowsMap unexpected error: %+v", derr) + } + }() + + var rows *sql.Rows + rows, err = db.Query(query, args...) 
+ if rows != nil { + defer rows.Close() + } + if err != nil && err != sql.ErrNoRows { + return log.Errore(err) + } + err = ScanRowsToMaps(rows, on_row) + return +} + +// queryResultData returns a raw array of rows for a given query, optionally reading and returning column names +func queryResultData(db *sql.DB, query string, retrieveColumns bool, args ...interface{}) (resultData ResultData, columns []string, err error) { + defer func() { + if derr := recover(); derr != nil { + err = errors.New(fmt.Sprintf("QueryRowsMap unexpected error: %+v", derr)) + } + }() + + var rows *sql.Rows + rows, err = db.Query(query, args...) + defer rows.Close() + if err != nil && err != sql.ErrNoRows { + return EmptyResultData, columns, err + } + if retrieveColumns { + // Don't pay if you don't want to + columns, _ = rows.Columns() + } + resultData = ResultData{} + err = ScanRowsToArrays(rows, func(rowData []CellData) error { + resultData = append(resultData, rowData) + return nil + }) + return resultData, columns, err +} + +// QueryResultData returns a raw array of rows +func QueryResultData(db *sql.DB, query string, args ...interface{}) (ResultData, error) { + resultData, _, err := queryResultData(db, query, false, args...) + return resultData, err +} + +// QueryResultDataNamed returns a raw array of rows, with column names +func QueryNamedResultData(db *sql.DB, query string, args ...interface{}) (NamedResultData, error) { + resultData, columns, err := queryResultData(db, query, true, args...) + return NamedResultData{Columns: columns, Data: resultData}, err +} + +// QueryRowsMapBuffered reads data from the database into a buffer, and only then applies the given function per row. +// This allows the application to take its time with processing the data, albeit consuming as much memory as required by +// the result set. 
+func QueryRowsMapBuffered(db *sql.DB, query string, on_row func(RowMap) error, args ...interface{}) error { + resultData, columns, err := queryResultData(db, query, true, args...) + if err != nil { + // Already logged + return err + } + for _, row := range resultData { + err = on_row(rowToMap(row, columns)) + if err != nil { + return err + } + } + return nil +} + +// ExecNoPrepare executes given query using given args on given DB, without using prepared statements. +func ExecNoPrepare(db *sql.DB, query string, args ...interface{}) (res sql.Result, err error) { + defer func() { + if derr := recover(); derr != nil { + err = errors.New(fmt.Sprintf("ExecNoPrepare unexpected error: %+v", derr)) + } + }() + + res, err = db.Exec(query, args...) + if err != nil { + log.Errore(err) + } + return res, err +} + +// execInternal executes given query using given args on given DB. It will safely prepare, execute and close +// the statement. +func execInternal(silent bool, db *sql.DB, query string, args ...interface{}) (res sql.Result, err error) { + defer func() { + if derr := recover(); derr != nil { + err = errors.New(fmt.Sprintf("execInternal unexpected error: %+v", derr)) + } + }() + var stmt *sql.Stmt + stmt, err = db.Prepare(query) + if err != nil { + return nil, err + } + defer stmt.Close() + res, err = stmt.Exec(args...) + if err != nil && !silent { + log.Errore(err) + } + return res, err +} + +// Exec executes given query using given args on given DB. It will safely prepare, execute and close +// the statement. +func Exec(db *sql.DB, query string, args ...interface{}) (sql.Result, error) { + return execInternal(false, db, query, args...) +} + +// ExecSilently acts like Exec but does not report any error +func ExecSilently(db *sql.DB, query string, args ...interface{}) (sql.Result, error) { + return execInternal(true, db, query, args...) 
+} + +func InClauseStringValues(terms []string) string { + quoted := []string{} + for _, s := range terms { + quoted = append(quoted, fmt.Sprintf("'%s'", strings.Replace(s, ",", "''", -1))) + } + return strings.Join(quoted, ", ") +} + +// Convert variable length arguments into arguments array +func Args(args ...interface{}) []interface{} { + return args +} + +func NilIfZero(i int64) interface{} { + if i == 0 { + return nil + } + return i +} + +func ScanTable(db *sql.DB, tableName string) (NamedResultData, error) { + query := fmt.Sprintf("select * from %s", tableName) + return QueryNamedResultData(db, query) +} + +func WriteTable(db *sql.DB, tableName string, data NamedResultData) (err error) { + if len(data.Data) == 0 { + return nil + } + if len(data.Columns) == 0 { + return nil + } + placeholders := make([]string, len(data.Columns)) + for i := range placeholders { + placeholders[i] = "?" + } + query := fmt.Sprintf( + `replace into %s (%s) values (%s)`, + tableName, + strings.Join(data.Columns, ","), + strings.Join(placeholders, ","), + ) + for _, rowData := range data.Data { + if _, execErr := db.Exec(query, rowData.Args()...); execErr != nil { + err = execErr + } + } + return err +} diff --git a/vendor/github.com/openark/golib/tests/spec.go b/vendor/github.com/openark/golib/tests/spec.go new file mode 100644 index 000000000..a52c7291a --- /dev/null +++ b/vendor/github.com/openark/golib/tests/spec.go @@ -0,0 +1,76 @@ +package tests + +import ( + "testing" +) + +// Spec is an access point to test Expections +type Spec struct { + t *testing.T +} + +// S generates a spec. 
You will want to use it once in a test file, once in a test or once per each check +func S(t *testing.T) *Spec { + return &Spec{t: t} +} + +// ExpectNil expects given value to be nil, or errors +func (spec *Spec) ExpectNil(actual interface{}) { + if actual == nil { + return + } + spec.t.Errorf("Expected %+v to be nil", actual) +} + +// ExpectNotNil expects given value to be not nil, or errors +func (spec *Spec) ExpectNotNil(actual interface{}) { + if actual != nil { + return + } + spec.t.Errorf("Expected %+v to be not nil", actual) +} + +// ExpectEquals expects given values to be equal (comparison via `==`), or errors +func (spec *Spec) ExpectEquals(actual, value interface{}) { + if actual == value { + return + } + spec.t.Errorf("Expected:\n[[[%+v]]]\n- got:\n[[[%+v]]]", value, actual) +} + +// ExpectNotEquals expects given values to be nonequal (comparison via `==`), or errors +func (spec *Spec) ExpectNotEquals(actual, value interface{}) { + if !(actual == value) { + return + } + spec.t.Errorf("Expected not %+v", value) +} + +// ExpectEqualsAny expects given actual to equal (comparison via `==`) at least one of given values, or errors +func (spec *Spec) ExpectEqualsAny(actual interface{}, values ...interface{}) { + for _, value := range values { + if actual == value { + return + } + } + spec.t.Errorf("Expected %+v to equal any of given values", actual) +} + +// ExpectNotEqualsAny expects given actual to be nonequal (comparison via `==`)tp any of given values, or errors +func (spec *Spec) ExpectNotEqualsAny(actual interface{}, values ...interface{}) { + for _, value := range values { + if actual == value { + spec.t.Errorf("Expected not %+v", value) + } + } +} + +// ExpectFalse expects given values to be false, or errors +func (spec *Spec) ExpectFalse(actual interface{}) { + spec.ExpectEquals(actual, false) +} + +// ExpectTrue expects given values to be true, or errors +func (spec *Spec) ExpectTrue(actual interface{}) { + spec.ExpectEquals(actual, true) +} diff 
--git a/vendor/github.com/openark/golib/util/text.go b/vendor/github.com/openark/golib/util/text.go new file mode 100644 index 000000000..2b0ae9a7e --- /dev/null +++ b/vendor/github.com/openark/golib/util/text.go @@ -0,0 +1,103 @@ +/* + Copyright 2015 Shlomi Noach. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package util + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +const ( + TabulateLeft = 0 + TabulateRight = 1 +) + +// ParseSimpleTime parses input in the format 7s, 55m, 3h, 31d, 4w (second, minute, hour, day, week) +// The time.ParseDuration() function should have done this, but it does not support "d" and "w" extensions. 
+func SimpleTimeToSeconds(simpleTime string) (int, error) { + if matched, _ := regexp.MatchString("^[0-9]+s$", simpleTime); matched { + i, _ := strconv.Atoi(simpleTime[0 : len(simpleTime)-1]) + return i, nil + } + if matched, _ := regexp.MatchString("^[0-9]+m$", simpleTime); matched { + i, _ := strconv.Atoi(simpleTime[0 : len(simpleTime)-1]) + return i * 60, nil + } + if matched, _ := regexp.MatchString("^[0-9]+h$", simpleTime); matched { + i, _ := strconv.Atoi(simpleTime[0 : len(simpleTime)-1]) + return i * 60 * 60, nil + } + if matched, _ := regexp.MatchString("^[0-9]+d$", simpleTime); matched { + i, _ := strconv.Atoi(simpleTime[0 : len(simpleTime)-1]) + return i * 60 * 60 * 24, nil + } + if matched, _ := regexp.MatchString("^[0-9]+w$", simpleTime); matched { + i, _ := strconv.Atoi(simpleTime[0 : len(simpleTime)-1]) + return i * 60 * 60 * 24 * 7, nil + } + return 0, errors.New(fmt.Sprintf("Cannot parse simple time: %s", simpleTime)) +} + +func Tabulate(lines []string, separator string, outputSeparator string, directionFlags ...int) (result []string) { + tokens := make([][]string, 0) + widths := make([][]int, 0) + countColumns := 0 + for _, line := range lines { + lineTokens := strings.Split(line, separator) + lineWidths := make([]int, len(lineTokens)) + for i := range lineTokens { + lineWidths[i] = len(lineTokens[i]) + } + tokens = append(tokens, lineTokens) + widths = append(widths, lineWidths) + if len(lineTokens) > countColumns { + countColumns = len(lineTokens) + } + } + columnWidths := make([]int, countColumns) + for _, lineTokens := range tokens { + for col, token := range lineTokens { + if len(token) > columnWidths[col] { + columnWidths[col] = len(token) + } + } + } + for _, lineTokens := range tokens { + resultRow := "" + for col := 0; col < countColumns; col++ { + token := "" + if col < len(lineTokens) { + token = lineTokens[col] + } + format := fmt.Sprintf("%%-%ds", columnWidths[col]) // format left + if col < len(directionFlags) && directionFlags[col] 
== TabulateRight { + format = fmt.Sprintf("%%%ds", columnWidths[col]) + } + formattedToken := fmt.Sprintf(format, token) + if col == 0 { + resultRow = formattedToken + } else { + resultRow = fmt.Sprintf("%s%s%s", resultRow, outputSeparator, formattedToken) + } + } + result = append(result, resultRow) + } + return result +} diff --git a/vendor/github.com/openark/golib/util/text_test.go b/vendor/github.com/openark/golib/util/text_test.go new file mode 100644 index 000000000..aa5052d18 --- /dev/null +++ b/vendor/github.com/openark/golib/util/text_test.go @@ -0,0 +1,88 @@ +/* + Copyright 2014 Outbrain Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package util + +import ( + "reflect" + "strings" + "testing" + + test "github.com/openark/golib/tests" +) + +func init() { +} + +func TestTabulate(t *testing.T) { + { + text := strings.TrimSpace(` +a,b,c +d,e,f +g,h,i + `) + + tabulated := Tabulate(strings.Split(text, "\n"), ",", ",") + expected := strings.Split(text, "\n") + test.S(t).ExpectTrue(reflect.DeepEqual(tabulated, expected)) + } + { + text := strings.TrimSpace(` +a,b,c +d,e,f +g,h,i + `) + + tabulated := Tabulate(strings.Split(text, "\n"), ",", "|") + expected := []string{ + "a|b|c", + "d|e|f", + "g|h|i", + } + test.S(t).ExpectTrue(reflect.DeepEqual(tabulated, expected)) + } + { + text := strings.TrimSpace(` +a,20,c +d,e,100 +0000,h,i + `) + + tabulated := Tabulate(strings.Split(text, "\n"), ",", "|") + expected := []string{ + "a |20|c ", + "d |e |100", + "0000|h |i ", + } + test.S(t).ExpectTrue(reflect.DeepEqual(tabulated, expected)) + } + { + text := strings.TrimSpace(` +a,20,c +d,1,100 +0000,3,i + `) + + tabulated := Tabulate(strings.Split(text, "\n"), ",", "|", TabulateLeft, TabulateRight, TabulateRight) + expected := []string{ + "a |20| c", + "d | 1|100", + "0000| 3| i", + } + + test.S(t).ExpectTrue(reflect.DeepEqual(tabulated, expected)) + } +} From 36d380057dbc6da92de3e08c12bd8514d8b33d52 Mon Sep 17 00:00:00 2001 From: dm-2 <45519614+dm-2@users.noreply.github.com> Date: Wed, 20 Jul 2022 14:47:51 +0100 Subject: [PATCH 22/24] Only build RPM and deb packages for amd64 --- build.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/build.sh b/build.sh index 3a29360e3..9ff37eac3 100755 --- a/build.sh +++ b/build.sh @@ -35,7 +35,8 @@ function build { (cd $buildpath && tar cfz ./gh-ost-binary-${osshort}-${GOARCH}-${timestamp}.tar.gz $target) - if [ "$GOOS" == "linux" ] ; then + # build RPM and deb for Linux, x86-64 only + if [ "$GOOS" == "linux" ] && [ "$GOARCH" == "amd64" ] ; then echo "Creating Distro full packages" builddir=$(setuptree) cp $buildpath/$target 
$builddir/gh-ost/usr/bin From 8cdd30a109deb36400b944810d2887956480f215 Mon Sep 17 00:00:00 2001 From: Rashiq Date: Thu, 5 Oct 2023 14:46:32 +0000 Subject: [PATCH 23/24] Focal build --- script/build-deploy-tarball | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/script/build-deploy-tarball b/script/build-deploy-tarball index dc28b43d5..327321c1e 100755 --- a/script/build-deploy-tarball +++ b/script/build-deploy-tarball @@ -29,7 +29,7 @@ mkdir -p "$BUILD_ARTIFACT_DIR"/gh-ost cp ${tarball}.gz "$BUILD_ARTIFACT_DIR"/gh-ost/ ### HACK HACK HACK HACK ### -# blame @carlosmn, @mattr and @timvaillancourt- -# Allow builds on buster to also be used for stretch -stretch_tarball_name=$(echo $(basename "${tarball}") | sed s/-buster-/-stretch-/) -cp ${tarball}.gz "$BUILD_ARTIFACT_DIR/gh-ost/${stretch_tarball_name}.gz" +# blame @carlosmn, @mattr, @timvaillancourt and @rashiq +# Allow builds on buster to also be used for focal +focal_tarball_name=$(echo $(basename "${tarball}") | sed s/-stretch-/-focal-/) +cp ${tarball}.gz "$BUILD_ARTIFACT_DIR/gh-ost/${focal_tarball_name}.gz" \ No newline at end of file From 49c1e332d492457480e7583b9f06bae695b0a8c3 Mon Sep 17 00:00:00 2001 From: Rashiq Date: Fri, 6 Oct 2023 15:12:43 +0000 Subject: [PATCH 24/24] Fix focal build The base image is `buster` instead of previously assumed `stretch` --- script/build-deploy-tarball | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/build-deploy-tarball b/script/build-deploy-tarball index 327321c1e..164a01964 100755 --- a/script/build-deploy-tarball +++ b/script/build-deploy-tarball @@ -31,5 +31,5 @@ cp ${tarball}.gz "$BUILD_ARTIFACT_DIR"/gh-ost/ ### HACK HACK HACK HACK ### # blame @carlosmn, @mattr, @timvaillancourt and @rashiq # Allow builds on buster to also be used for focal -focal_tarball_name=$(echo $(basename "${tarball}") | sed s/-stretch-/-focal-/) +focal_tarball_name=$(echo $(basename "${tarball}") | sed s/-buster-/-focal-/) cp ${tarball}.gz 
"$BUILD_ARTIFACT_DIR/gh-ost/${focal_tarball_name}.gz" \ No newline at end of file