diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 57a9b112f..83bb4c871 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -18,6 +18,6 @@ jobs: with: go-version-file: go.mod - name: golangci-lint - uses: golangci/golangci-lint-action@v6 + uses: golangci/golangci-lint-action@v9 with: - version: v1.61.0 + version: v2.11 diff --git a/.golangci.yml b/.golangci.yml index 8be65e2ab..b52429cd8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,8 +1,5 @@ -run: - timeout: 5m +version: "2" linters: - disable: - - errcheck enable: - bodyclose - containedctx @@ -11,18 +8,38 @@ linters: - durationcheck - errname - errorlint - - gofmt - misspell - nilerr - nilnil - - noctx - nolintlint - nosprintfhostport - - prealloc - rowserrcheck - sqlclosecheck - unconvert - unparam - - unused - wastedassign - whitespace + disable: + - errcheck + - noctx + - prealloc + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/go/base/context.go b/go/base/context.go index 2c8d28d56..c8eec7799 100644 --- a/go/base/context.go +++ b/go/base/context.go @@ -332,7 +332,7 @@ func NewMigrationContext() *MigrationContext { } } -func (this *MigrationContext) SetConnectionConfig(storageEngine string) error { +func (mctx *MigrationContext) SetConnectionConfig(storageEngine string) error { var transactionIsolation string switch storageEngine { case "rocksdb": @@ -340,18 +340,18 @@ func (this *MigrationContext) SetConnectionConfig(storageEngine string) error { default: transactionIsolation = "REPEATABLE-READ" } - this.InspectorConnectionConfig.TransactionIsolation = transactionIsolation - this.ApplierConnectionConfig.TransactionIsolation = transactionIsolation + 
mctx.InspectorConnectionConfig.TransactionIsolation = transactionIsolation + mctx.ApplierConnectionConfig.TransactionIsolation = transactionIsolation return nil } -func (this *MigrationContext) SetConnectionCharset(charset string) { +func (mctx *MigrationContext) SetConnectionCharset(charset string) { if charset == "" { charset = "utf8mb4,utf8,latin1" } - this.InspectorConnectionConfig.Charset = charset - this.ApplierConnectionConfig.Charset = charset + mctx.InspectorConnectionConfig.Charset = charset + mctx.ApplierConnectionConfig.Charset = charset } func getSafeTableName(baseName string, suffix string) string { @@ -365,33 +365,33 @@ func getSafeTableName(baseName string, suffix string) string { // GetGhostTableName generates the name of ghost table, based on original table name // or a given table name -func (this *MigrationContext) GetGhostTableName() string { - if this.Revert { +func (mctx *MigrationContext) GetGhostTableName() string { + if mctx.Revert { // When reverting the "ghost" table is the _del table from the original migration. - return this.OldTableName + return mctx.OldTableName } - if this.ForceTmpTableName != "" { - return getSafeTableName(this.ForceTmpTableName, "gho") + if mctx.ForceTmpTableName != "" { + return getSafeTableName(mctx.ForceTmpTableName, "gho") } else { - return getSafeTableName(this.OriginalTableName, "gho") + return getSafeTableName(mctx.OriginalTableName, "gho") } } // GetOldTableName generates the name of the "old" table, into which the original table is renamed. 
-func (this *MigrationContext) GetOldTableName() string { +func (mctx *MigrationContext) GetOldTableName() string { var tableName string - if this.ForceTmpTableName != "" { - tableName = this.ForceTmpTableName + if mctx.ForceTmpTableName != "" { + tableName = mctx.ForceTmpTableName } else { - tableName = this.OriginalTableName + tableName = mctx.OriginalTableName } suffix := "del" - if this.Revert { + if mctx.Revert { suffix = "rev_del" } - if this.TimestampOldTable { - t := this.StartTime + if mctx.TimestampOldTable { + t := mctx.StartTime timestamp := fmt.Sprintf("%d%02d%02d%02d%02d%02d", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) @@ -402,105 +402,105 @@ func (this *MigrationContext) GetOldTableName() string { // GetChangelogTableName generates the name of changelog table, based on original table name // or a given table name. -func (this *MigrationContext) GetChangelogTableName() string { - if this.ForceTmpTableName != "" { - return getSafeTableName(this.ForceTmpTableName, "ghc") +func (mctx *MigrationContext) GetChangelogTableName() string { + if mctx.ForceTmpTableName != "" { + return getSafeTableName(mctx.ForceTmpTableName, "ghc") } else { - return getSafeTableName(this.OriginalTableName, "ghc") + return getSafeTableName(mctx.OriginalTableName, "ghc") } } // GetCheckpointTableName generates the name of checkpoint table. -func (this *MigrationContext) GetCheckpointTableName() string { - if this.ForceTmpTableName != "" { - return getSafeTableName(this.ForceTmpTableName, "ghk") +func (mctx *MigrationContext) GetCheckpointTableName() string { + if mctx.ForceTmpTableName != "" { + return getSafeTableName(mctx.ForceTmpTableName, "ghk") } else { - return getSafeTableName(this.OriginalTableName, "ghk") + return getSafeTableName(mctx.OriginalTableName, "ghk") } } // GetVoluntaryLockName returns a name of a voluntary lock to be used throughout // the swap-tables process. 
-func (this *MigrationContext) GetVoluntaryLockName() string { - return fmt.Sprintf("%s.%s.lock", this.DatabaseName, this.OriginalTableName) +func (mctx *MigrationContext) GetVoluntaryLockName() string { + return fmt.Sprintf("%s.%s.lock", mctx.DatabaseName, mctx.OriginalTableName) } // RequiresBinlogFormatChange is `true` when the original binlog format isn't `ROW` -func (this *MigrationContext) RequiresBinlogFormatChange() bool { - return this.OriginalBinlogFormat != "ROW" +func (mctx *MigrationContext) RequiresBinlogFormatChange() bool { + return mctx.OriginalBinlogFormat != "ROW" } // GetApplierHostname is a safe access method to the applier hostname -func (this *MigrationContext) GetApplierHostname() string { - if this.ApplierConnectionConfig == nil { +func (mctx *MigrationContext) GetApplierHostname() string { + if mctx.ApplierConnectionConfig == nil { return "" } - if this.ApplierConnectionConfig.ImpliedKey == nil { + if mctx.ApplierConnectionConfig.ImpliedKey == nil { return "" } - return this.ApplierConnectionConfig.ImpliedKey.Hostname + return mctx.ApplierConnectionConfig.ImpliedKey.Hostname } // GetInspectorHostname is a safe access method to the inspector hostname -func (this *MigrationContext) GetInspectorHostname() string { - if this.InspectorConnectionConfig == nil { +func (mctx *MigrationContext) GetInspectorHostname() string { + if mctx.InspectorConnectionConfig == nil { return "" } - if this.InspectorConnectionConfig.ImpliedKey == nil { + if mctx.InspectorConnectionConfig.ImpliedKey == nil { return "" } - return this.InspectorConnectionConfig.ImpliedKey.Hostname + return mctx.InspectorConnectionConfig.ImpliedKey.Hostname } // InspectorIsAlsoApplier is `true` when the both inspector and applier are the // same database instance. This would be true when running directly on master or when // testing on replica. 
-func (this *MigrationContext) InspectorIsAlsoApplier() bool { - return this.InspectorConnectionConfig.Equals(this.ApplierConnectionConfig) +func (mctx *MigrationContext) InspectorIsAlsoApplier() bool { + return mctx.InspectorConnectionConfig.Equals(mctx.ApplierConnectionConfig) } // HasMigrationRange tells us whether there's a range to iterate for copying rows. // It will be `false` if the table is initially empty -func (this *MigrationContext) HasMigrationRange() bool { - return this.MigrationRangeMinValues != nil && this.MigrationRangeMaxValues != nil +func (mctx *MigrationContext) HasMigrationRange() bool { + return mctx.MigrationRangeMinValues != nil && mctx.MigrationRangeMaxValues != nil } -func (this *MigrationContext) SetCutOverLockTimeoutSeconds(timeoutSeconds int64) error { +func (mctx *MigrationContext) SetCutOverLockTimeoutSeconds(timeoutSeconds int64) error { if timeoutSeconds < 1 { - return fmt.Errorf("Minimal timeout is 1sec. Timeout remains at %d", this.CutOverLockTimeoutSeconds) + return fmt.Errorf("minimal timeout is 1sec. Timeout remains at %d", mctx.CutOverLockTimeoutSeconds) } if timeoutSeconds > 10 { - return fmt.Errorf("Maximal timeout is 10sec. Timeout remains at %d", this.CutOverLockTimeoutSeconds) + return fmt.Errorf("maximal timeout is 10sec. Timeout remains at %d", mctx.CutOverLockTimeoutSeconds) } - this.CutOverLockTimeoutSeconds = timeoutSeconds + mctx.CutOverLockTimeoutSeconds = timeoutSeconds return nil } -func (this *MigrationContext) SetExponentialBackoffMaxInterval(intervalSeconds int64) error { +func (mctx *MigrationContext) SetExponentialBackoffMaxInterval(intervalSeconds int64) error { if intervalSeconds < 2 { - return fmt.Errorf("Minimal maximum interval is 2sec. Timeout remains at %d", this.ExponentialBackoffMaxInterval) + return fmt.Errorf("minimal maximum interval is 2sec. 
Timeout remains at %d", mctx.ExponentialBackoffMaxInterval) } - this.ExponentialBackoffMaxInterval = intervalSeconds + mctx.ExponentialBackoffMaxInterval = intervalSeconds return nil } -func (this *MigrationContext) SetDefaultNumRetries(retries int64) { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() +func (mctx *MigrationContext) SetDefaultNumRetries(retries int64) { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() if retries > 0 { - this.defaultNumRetries = retries + mctx.defaultNumRetries = retries } } -func (this *MigrationContext) MaxRetries() int64 { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() - retries := this.defaultNumRetries +func (mctx *MigrationContext) MaxRetries() int64 { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() + retries := mctx.defaultNumRetries return retries } -func (this *MigrationContext) IsTransactionalTable() bool { - switch strings.ToLower(this.TableEngine) { +func (mctx *MigrationContext) IsTransactionalTable() bool { + switch strings.ToLower(mctx.TableEngine) { case "innodb": { return true @@ -518,96 +518,96 @@ func (this *MigrationContext) IsTransactionalTable() bool { } // SetCountTableRowsCancelFunc sets the cancel function for the CountTableRows query context -func (this *MigrationContext) SetCountTableRowsCancelFunc(f func()) { - this.countMutex.Lock() - defer this.countMutex.Unlock() +func (mctx *MigrationContext) SetCountTableRowsCancelFunc(f func()) { + mctx.countMutex.Lock() + defer mctx.countMutex.Unlock() - this.countTableRowsCancelFunc = f + mctx.countTableRowsCancelFunc = f } // IsCountingTableRows returns true if the migration has a table count query running -func (this *MigrationContext) IsCountingTableRows() bool { - this.countMutex.Lock() - defer this.countMutex.Unlock() +func (mctx *MigrationContext) IsCountingTableRows() bool { + mctx.countMutex.Lock() + defer mctx.countMutex.Unlock() - return this.countTableRowsCancelFunc != nil + return 
mctx.countTableRowsCancelFunc != nil } // CancelTableRowsCount cancels the CountTableRows query context. It is safe to // call function even when IsCountingTableRows is false. -func (this *MigrationContext) CancelTableRowsCount() { - this.countMutex.Lock() - defer this.countMutex.Unlock() +func (mctx *MigrationContext) CancelTableRowsCount() { + mctx.countMutex.Lock() + defer mctx.countMutex.Unlock() - if this.countTableRowsCancelFunc == nil { + if mctx.countTableRowsCancelFunc == nil { return } - this.countTableRowsCancelFunc() - this.countTableRowsCancelFunc = nil + mctx.countTableRowsCancelFunc() + mctx.countTableRowsCancelFunc = nil } // ElapsedTime returns time since very beginning of the process -func (this *MigrationContext) ElapsedTime() time.Duration { - return time.Since(this.StartTime) +func (mctx *MigrationContext) ElapsedTime() time.Duration { + return time.Since(mctx.StartTime) } // MarkRowCopyStartTime -func (this *MigrationContext) MarkRowCopyStartTime() { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() - this.RowCopyStartTime = time.Now() +func (mctx *MigrationContext) MarkRowCopyStartTime() { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() + mctx.RowCopyStartTime = time.Now() } // ElapsedRowCopyTime returns time since starting to copy chunks of rows -func (this *MigrationContext) ElapsedRowCopyTime() time.Duration { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() +func (mctx *MigrationContext) ElapsedRowCopyTime() time.Duration { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() - if this.RowCopyStartTime.IsZero() { + if mctx.RowCopyStartTime.IsZero() { // Row copy hasn't started yet return 0 } - if this.RowCopyEndTime.IsZero() { - return time.Since(this.RowCopyStartTime) + if mctx.RowCopyEndTime.IsZero() { + return time.Since(mctx.RowCopyStartTime) } - return this.RowCopyEndTime.Sub(this.RowCopyStartTime) + return mctx.RowCopyEndTime.Sub(mctx.RowCopyStartTime) } // ElapsedRowCopyTime 
returns time since starting to copy chunks of rows -func (this *MigrationContext) MarkRowCopyEndTime() { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() - this.RowCopyEndTime = time.Now() +func (mctx *MigrationContext) MarkRowCopyEndTime() { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() + mctx.RowCopyEndTime = time.Now() } -func (this *MigrationContext) TimeSinceLastHeartbeatOnChangelog() time.Duration { - return time.Since(this.GetLastHeartbeatOnChangelogTime()) +func (mctx *MigrationContext) TimeSinceLastHeartbeatOnChangelog() time.Duration { + return time.Since(mctx.GetLastHeartbeatOnChangelogTime()) } -func (this *MigrationContext) GetCurrentLagDuration() time.Duration { - return time.Duration(atomic.LoadInt64(&this.CurrentLag)) +func (mctx *MigrationContext) GetCurrentLagDuration() time.Duration { + return time.Duration(atomic.LoadInt64(&mctx.CurrentLag)) } -func (this *MigrationContext) GetProgressPct() float64 { - return math.Float64frombits(atomic.LoadUint64(&this.currentProgress)) +func (mctx *MigrationContext) GetProgressPct() float64 { + return math.Float64frombits(atomic.LoadUint64(&mctx.currentProgress)) } -func (this *MigrationContext) SetProgressPct(progressPct float64) { - atomic.StoreUint64(&this.currentProgress, math.Float64bits(progressPct)) +func (mctx *MigrationContext) SetProgressPct(progressPct float64) { + atomic.StoreUint64(&mctx.currentProgress, math.Float64bits(progressPct)) } -func (this *MigrationContext) GetETADuration() time.Duration { - return time.Duration(atomic.LoadInt64(&this.etaNanoseonds)) +func (mctx *MigrationContext) GetETADuration() time.Duration { + return time.Duration(atomic.LoadInt64(&mctx.etaNanoseonds)) } -func (this *MigrationContext) SetETADuration(etaDuration time.Duration) { - atomic.StoreInt64(&this.etaNanoseonds, etaDuration.Nanoseconds()) +func (mctx *MigrationContext) SetETADuration(etaDuration time.Duration) { + atomic.StoreInt64(&mctx.etaNanoseonds, 
etaDuration.Nanoseconds()) } -func (this *MigrationContext) GetETASeconds() int64 { - nano := atomic.LoadInt64(&this.etaNanoseonds) +func (mctx *MigrationContext) GetETASeconds() int64 { + nano := atomic.LoadInt64(&mctx.etaNanoseonds) if nano < 0 { return ETAUnknown } @@ -618,112 +618,112 @@ func (this *MigrationContext) GetETASeconds() int64 { // GetTotalRowsCopied returns the accurate number of rows being copied (affected) // This is not exactly the same as the rows being iterated via chunks, but potentially close enough -func (this *MigrationContext) GetTotalRowsCopied() int64 { - return atomic.LoadInt64(&this.TotalRowsCopied) +func (mctx *MigrationContext) GetTotalRowsCopied() int64 { + return atomic.LoadInt64(&mctx.TotalRowsCopied) } -func (this *MigrationContext) GetIteration() int64 { - return atomic.LoadInt64(&this.Iteration) +func (mctx *MigrationContext) GetIteration() int64 { + return atomic.LoadInt64(&mctx.Iteration) } -func (this *MigrationContext) SetNextIterationRangeMinValues() { - this.MigrationIterationRangeMinValues = this.MigrationIterationRangeMaxValues - if this.MigrationIterationRangeMinValues == nil { - this.MigrationIterationRangeMinValues = this.MigrationRangeMinValues +func (mctx *MigrationContext) SetNextIterationRangeMinValues() { + mctx.MigrationIterationRangeMinValues = mctx.MigrationIterationRangeMaxValues + if mctx.MigrationIterationRangeMinValues == nil { + mctx.MigrationIterationRangeMinValues = mctx.MigrationRangeMinValues } } -func (this *MigrationContext) MarkPointOfInterest() int64 { - this.pointOfInterestTimeMutex.Lock() - defer this.pointOfInterestTimeMutex.Unlock() +func (mctx *MigrationContext) MarkPointOfInterest() int64 { + mctx.pointOfInterestTimeMutex.Lock() + defer mctx.pointOfInterestTimeMutex.Unlock() - this.pointOfInterestTime = time.Now() - return atomic.LoadInt64(&this.Iteration) + mctx.pointOfInterestTime = time.Now() + return atomic.LoadInt64(&mctx.Iteration) } -func (this *MigrationContext) 
TimeSincePointOfInterest() time.Duration { - this.pointOfInterestTimeMutex.Lock() - defer this.pointOfInterestTimeMutex.Unlock() +func (mctx *MigrationContext) TimeSincePointOfInterest() time.Duration { + mctx.pointOfInterestTimeMutex.Lock() + defer mctx.pointOfInterestTimeMutex.Unlock() - return time.Since(this.pointOfInterestTime) + return time.Since(mctx.pointOfInterestTime) } -func (this *MigrationContext) SetLastHeartbeatOnChangelogTime(t time.Time) { - this.lastHeartbeatOnChangelogMutex.Lock() - defer this.lastHeartbeatOnChangelogMutex.Unlock() +func (mctx *MigrationContext) SetLastHeartbeatOnChangelogTime(t time.Time) { + mctx.lastHeartbeatOnChangelogMutex.Lock() + defer mctx.lastHeartbeatOnChangelogMutex.Unlock() - this.lastHeartbeatOnChangelogTime = t + mctx.lastHeartbeatOnChangelogTime = t } -func (this *MigrationContext) GetLastHeartbeatOnChangelogTime() time.Time { - this.lastHeartbeatOnChangelogMutex.Lock() - defer this.lastHeartbeatOnChangelogMutex.Unlock() +func (mctx *MigrationContext) GetLastHeartbeatOnChangelogTime() time.Time { + mctx.lastHeartbeatOnChangelogMutex.Lock() + defer mctx.lastHeartbeatOnChangelogMutex.Unlock() - return this.lastHeartbeatOnChangelogTime + return mctx.lastHeartbeatOnChangelogTime } -func (this *MigrationContext) SetHeartbeatIntervalMilliseconds(heartbeatIntervalMilliseconds int64) { +func (mctx *MigrationContext) SetHeartbeatIntervalMilliseconds(heartbeatIntervalMilliseconds int64) { if heartbeatIntervalMilliseconds < 100 { heartbeatIntervalMilliseconds = 100 } if heartbeatIntervalMilliseconds > 1000 { heartbeatIntervalMilliseconds = 1000 } - this.HeartbeatIntervalMilliseconds = heartbeatIntervalMilliseconds + mctx.HeartbeatIntervalMilliseconds = heartbeatIntervalMilliseconds } -func (this *MigrationContext) SetMaxLagMillisecondsThrottleThreshold(maxLagMillisecondsThrottleThreshold int64) { +func (mctx *MigrationContext) SetMaxLagMillisecondsThrottleThreshold(maxLagMillisecondsThrottleThreshold int64) { if 
maxLagMillisecondsThrottleThreshold < 100 { maxLagMillisecondsThrottleThreshold = 100 } - atomic.StoreInt64(&this.MaxLagMillisecondsThrottleThreshold, maxLagMillisecondsThrottleThreshold) + atomic.StoreInt64(&mctx.MaxLagMillisecondsThrottleThreshold, maxLagMillisecondsThrottleThreshold) } -func (this *MigrationContext) SetChunkSize(chunkSize int64) { +func (mctx *MigrationContext) SetChunkSize(chunkSize int64) { if chunkSize < 10 { chunkSize = 10 } if chunkSize > 100000 { chunkSize = 100000 } - atomic.StoreInt64(&this.ChunkSize, chunkSize) + atomic.StoreInt64(&mctx.ChunkSize, chunkSize) } -func (this *MigrationContext) SetDMLBatchSize(batchSize int64) { +func (mctx *MigrationContext) SetDMLBatchSize(batchSize int64) { if batchSize < 1 { batchSize = 1 } if batchSize > MaxEventsBatchSize { batchSize = MaxEventsBatchSize } - atomic.StoreInt64(&this.DMLBatchSize, batchSize) + atomic.StoreInt64(&mctx.DMLBatchSize, batchSize) } -func (this *MigrationContext) SetThrottleGeneralCheckResult(checkResult *ThrottleCheckResult) *ThrottleCheckResult { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() - this.throttleGeneralCheckResult = *checkResult +func (mctx *MigrationContext) SetThrottleGeneralCheckResult(checkResult *ThrottleCheckResult) *ThrottleCheckResult { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() + mctx.throttleGeneralCheckResult = *checkResult return checkResult } -func (this *MigrationContext) GetThrottleGeneralCheckResult() *ThrottleCheckResult { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() - result := this.throttleGeneralCheckResult +func (mctx *MigrationContext) GetThrottleGeneralCheckResult() *ThrottleCheckResult { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() + result := mctx.throttleGeneralCheckResult return &result } -func (this *MigrationContext) SetThrottled(throttle bool, reason string, reasonHint ThrottleReasonHint) { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() - 
this.isThrottled = throttle - this.throttleReason = reason - this.throttleReasonHint = reasonHint +func (mctx *MigrationContext) SetThrottled(throttle bool, reason string, reasonHint ThrottleReasonHint) { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() + mctx.isThrottled = throttle + mctx.throttleReason = reason + mctx.throttleReasonHint = reasonHint } -func (this *MigrationContext) IsThrottled() (bool, string, ThrottleReasonHint) { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() +func (mctx *MigrationContext) IsThrottled() (bool, string, ThrottleReasonHint) { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() // we don't throttle when cutting over. We _do_ throttle: // - during copy phase @@ -731,71 +731,71 @@ func (this *MigrationContext) IsThrottled() (bool, string, ThrottleReasonHint) { // - in between cut-over retries // When cutting over, we need to be aggressive. Cut-over holds table locks. // We need to release those asap. - if atomic.LoadInt64(&this.InCutOverCriticalSectionFlag) > 0 { + if atomic.LoadInt64(&mctx.InCutOverCriticalSectionFlag) > 0 { return false, "critical section", NoThrottleReasonHint } - return this.isThrottled, this.throttleReason, this.throttleReasonHint + return mctx.isThrottled, mctx.throttleReason, mctx.throttleReasonHint } -func (this *MigrationContext) GetThrottleQuery() string { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() +func (mctx *MigrationContext) GetThrottleQuery() string { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() - var query = this.throttleQuery + var query = mctx.throttleQuery return query } -func (this *MigrationContext) SetThrottleQuery(newQuery string) { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() +func (mctx *MigrationContext) SetThrottleQuery(newQuery string) { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() - this.throttleQuery = newQuery + mctx.throttleQuery = newQuery } -func (this 
*MigrationContext) GetThrottleHTTP() string { - this.throttleHTTPMutex.Lock() - defer this.throttleHTTPMutex.Unlock() +func (mctx *MigrationContext) GetThrottleHTTP() string { + mctx.throttleHTTPMutex.Lock() + defer mctx.throttleHTTPMutex.Unlock() - var throttleHTTP = this.throttleHTTP + var throttleHTTP = mctx.throttleHTTP return throttleHTTP } -func (this *MigrationContext) SetThrottleHTTP(throttleHTTP string) { - this.throttleHTTPMutex.Lock() - defer this.throttleHTTPMutex.Unlock() +func (mctx *MigrationContext) SetThrottleHTTP(throttleHTTP string) { + mctx.throttleHTTPMutex.Lock() + defer mctx.throttleHTTPMutex.Unlock() - this.throttleHTTP = throttleHTTP + mctx.throttleHTTP = throttleHTTP } -func (this *MigrationContext) SetIgnoreHTTPErrors(ignoreHTTPErrors bool) { - this.throttleHTTPMutex.Lock() - defer this.throttleHTTPMutex.Unlock() +func (mctx *MigrationContext) SetIgnoreHTTPErrors(ignoreHTTPErrors bool) { + mctx.throttleHTTPMutex.Lock() + defer mctx.throttleHTTPMutex.Unlock() - this.IgnoreHTTPErrors = ignoreHTTPErrors + mctx.IgnoreHTTPErrors = ignoreHTTPErrors } -func (this *MigrationContext) GetMaxLoad() LoadMap { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() +func (mctx *MigrationContext) GetMaxLoad() LoadMap { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() - return this.maxLoad.Duplicate() + return mctx.maxLoad.Duplicate() } -func (this *MigrationContext) GetCriticalLoad() LoadMap { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() +func (mctx *MigrationContext) GetCriticalLoad() LoadMap { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() - return this.criticalLoad.Duplicate() + return mctx.criticalLoad.Duplicate() } -func (this *MigrationContext) GetNiceRatio() float64 { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() +func (mctx *MigrationContext) GetNiceRatio() float64 { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() - return this.niceRatio + return 
mctx.niceRatio } -func (this *MigrationContext) SetNiceRatio(newRatio float64) { +func (mctx *MigrationContext) SetNiceRatio(newRatio float64) { if newRatio < 0.0 { newRatio = 0.0 } @@ -803,180 +803,180 @@ func (this *MigrationContext) SetNiceRatio(newRatio float64) { newRatio = 100.0 } - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() - this.niceRatio = newRatio + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() + mctx.niceRatio = newRatio } -func (this *MigrationContext) GetRecentBinlogCoordinates() mysql.BinlogCoordinates { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() +func (mctx *MigrationContext) GetRecentBinlogCoordinates() mysql.BinlogCoordinates { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() - return this.recentBinlogCoordinates + return mctx.recentBinlogCoordinates } -func (this *MigrationContext) SetRecentBinlogCoordinates(coordinates mysql.BinlogCoordinates) { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() - this.recentBinlogCoordinates = coordinates +func (mctx *MigrationContext) SetRecentBinlogCoordinates(coordinates mysql.BinlogCoordinates) { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() + mctx.recentBinlogCoordinates = coordinates } // ReadMaxLoad parses the `--max-load` flag, which is in multiple key-value format, // such as: 'Threads_running=100,Threads_connected=500' // It only applies changes in case there's no parsing error. 
-func (this *MigrationContext) ReadMaxLoad(maxLoadList string) error { +func (mctx *MigrationContext) ReadMaxLoad(maxLoadList string) error { loadMap, err := ParseLoadMap(maxLoadList) if err != nil { return err } - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() - this.maxLoad = loadMap + mctx.maxLoad = loadMap return nil } // ReadCriticalLoad parses the `--max-load` flag, which is in multiple key-value format, // such as: 'Threads_running=100,Threads_connected=500' // It only applies changes in case there's no parsing error. -func (this *MigrationContext) ReadCriticalLoad(criticalLoadList string) error { +func (mctx *MigrationContext) ReadCriticalLoad(criticalLoadList string) error { loadMap, err := ParseLoadMap(criticalLoadList) if err != nil { return err } - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() - this.criticalLoad = loadMap + mctx.criticalLoad = loadMap return nil } -func (this *MigrationContext) GetControlReplicasLagResult() mysql.ReplicationLagResult { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() +func (mctx *MigrationContext) GetControlReplicasLagResult() mysql.ReplicationLagResult { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() - lagResult := this.controlReplicasLagResult + lagResult := mctx.controlReplicasLagResult return lagResult } -func (this *MigrationContext) SetControlReplicasLagResult(lagResult *mysql.ReplicationLagResult) { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() +func (mctx *MigrationContext) SetControlReplicasLagResult(lagResult *mysql.ReplicationLagResult) { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() if lagResult == nil { - this.controlReplicasLagResult = *mysql.NewNoReplicationLagResult() + mctx.controlReplicasLagResult = *mysql.NewNoReplicationLagResult() } else { - this.controlReplicasLagResult = 
*lagResult + mctx.controlReplicasLagResult = *lagResult } } -func (this *MigrationContext) GetThrottleControlReplicaKeys() *mysql.InstanceKeyMap { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() +func (mctx *MigrationContext) GetThrottleControlReplicaKeys() *mysql.InstanceKeyMap { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() keys := mysql.NewInstanceKeyMap() - keys.AddKeys(this.throttleControlReplicaKeys.GetInstanceKeys()) + keys.AddKeys(mctx.throttleControlReplicaKeys.GetInstanceKeys()) return keys } -func (this *MigrationContext) ReadThrottleControlReplicaKeys(throttleControlReplicas string) error { +func (mctx *MigrationContext) ReadThrottleControlReplicaKeys(throttleControlReplicas string) error { keys := mysql.NewInstanceKeyMap() if err := keys.ReadCommaDelimitedList(throttleControlReplicas); err != nil { return err } - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() - this.throttleControlReplicaKeys = keys + mctx.throttleControlReplicaKeys = keys return nil } -func (this *MigrationContext) AddThrottleControlReplicaKey(key mysql.InstanceKey) error { - this.throttleMutex.Lock() - defer this.throttleMutex.Unlock() +func (mctx *MigrationContext) AddThrottleControlReplicaKey(key mysql.InstanceKey) error { + mctx.throttleMutex.Lock() + defer mctx.throttleMutex.Unlock() - this.throttleControlReplicaKeys.AddKey(key) + mctx.throttleControlReplicaKeys.AddKey(key) return nil } // ApplyCredentials sorts out the credentials between the config file and the CLI flags -func (this *MigrationContext) ApplyCredentials() { - this.configMutex.Lock() - defer this.configMutex.Unlock() +func (mctx *MigrationContext) ApplyCredentials() { + mctx.configMutex.Lock() + defer mctx.configMutex.Unlock() - if this.config.Client.User != "" { - this.InspectorConnectionConfig.User = this.config.Client.User + if mctx.config.Client.User != "" { + mctx.InspectorConnectionConfig.User = 
mctx.config.Client.User } - if this.CliUser != "" { + if mctx.CliUser != "" { // Override - this.InspectorConnectionConfig.User = this.CliUser + mctx.InspectorConnectionConfig.User = mctx.CliUser } - if this.config.Client.Password != "" { - this.InspectorConnectionConfig.Password = this.config.Client.Password + if mctx.config.Client.Password != "" { + mctx.InspectorConnectionConfig.Password = mctx.config.Client.Password } - if this.CliPassword != "" { + if mctx.CliPassword != "" { // Override - this.InspectorConnectionConfig.Password = this.CliPassword + mctx.InspectorConnectionConfig.Password = mctx.CliPassword } } -func (this *MigrationContext) SetupTLS() error { - if this.UseTLS { - return this.InspectorConnectionConfig.UseTLS(this.TLSCACertificate, this.TLSCertificate, this.TLSKey, this.TLSAllowInsecure) +func (mctx *MigrationContext) SetupTLS() error { + if mctx.UseTLS { + return mctx.InspectorConnectionConfig.UseTLS(mctx.TLSCACertificate, mctx.TLSCertificate, mctx.TLSKey, mctx.TLSAllowInsecure) } return nil } // ReadConfigFile attempts to read the config file, if it exists -func (this *MigrationContext) ReadConfigFile() error { - this.configMutex.Lock() - defer this.configMutex.Unlock() +func (mctx *MigrationContext) ReadConfigFile() error { + mctx.configMutex.Lock() + defer mctx.configMutex.Unlock() - if this.ConfigFile == "" { + if mctx.ConfigFile == "" { return nil } - cfg, err := ini.Load(this.ConfigFile) + cfg, err := ini.Load(mctx.ConfigFile) if err != nil { return err } if cfg.Section("client").HasKey("user") { - this.config.Client.User = cfg.Section("client").Key("user").String() + mctx.config.Client.User = cfg.Section("client").Key("user").String() } if cfg.Section("client").HasKey("password") { - this.config.Client.Password = cfg.Section("client").Key("password").String() + mctx.config.Client.Password = cfg.Section("client").Key("password").String() } if cfg.Section("osc").HasKey("chunk_size") { - this.config.Osc.Chunk_Size, err = 
cfg.Section("osc").Key("chunk_size").Int64() + mctx.config.Osc.Chunk_Size, err = cfg.Section("osc").Key("chunk_size").Int64() if err != nil { - return fmt.Errorf("Unable to read osc chunk size: %w", err) + return fmt.Errorf("unable to read osc chunk size: %w", err) } } if cfg.Section("osc").HasKey("max_load") { - this.config.Osc.Max_Load = cfg.Section("osc").Key("max_load").String() + mctx.config.Osc.Max_Load = cfg.Section("osc").Key("max_load").String() } if cfg.Section("osc").HasKey("replication_lag_query") { - this.config.Osc.Replication_Lag_Query = cfg.Section("osc").Key("replication_lag_query").String() + mctx.config.Osc.Replication_Lag_Query = cfg.Section("osc").Key("replication_lag_query").String() } if cfg.Section("osc").HasKey("max_lag_millis") { - this.config.Osc.Max_Lag_Millis, err = cfg.Section("osc").Key("max_lag_millis").Int64() + mctx.config.Osc.Max_Lag_Millis, err = cfg.Section("osc").Key("max_lag_millis").Int64() if err != nil { - return fmt.Errorf("Unable to read max lag millis: %w", err) + return fmt.Errorf("unable to read max lag millis: %w", err) } } // We accept user & password in the form "${SOME_ENV_VARIABLE}" in which case we pull // the given variable from os env - if submatch := envVariableRegexp.FindStringSubmatch(this.config.Client.User); len(submatch) > 1 { - this.config.Client.User = os.Getenv(submatch[1]) + if submatch := envVariableRegexp.FindStringSubmatch(mctx.config.Client.User); len(submatch) > 1 { + mctx.config.Client.User = os.Getenv(submatch[1]) } - if submatch := envVariableRegexp.FindStringSubmatch(this.config.Client.Password); len(submatch) > 1 { - this.config.Client.Password = os.Getenv(submatch[1]) + if submatch := envVariableRegexp.FindStringSubmatch(mctx.config.Client.Password); len(submatch) > 1 { + mctx.config.Client.Password = os.Getenv(submatch[1]) } return nil @@ -984,47 +984,47 @@ func (this *MigrationContext) ReadConfigFile() error { // getGhostTriggerName generates the name of a ghost trigger, based on original 
trigger name // or a given trigger name -func (this *MigrationContext) GetGhostTriggerName(triggerName string) string { - if this.RemoveTriggerSuffix && strings.HasSuffix(triggerName, this.TriggerSuffix) { - return strings.TrimSuffix(triggerName, this.TriggerSuffix) +func (mctx *MigrationContext) GetGhostTriggerName(triggerName string) string { + if mctx.RemoveTriggerSuffix && strings.HasSuffix(triggerName, mctx.TriggerSuffix) { + return strings.TrimSuffix(triggerName, mctx.TriggerSuffix) } // else - return triggerName + this.TriggerSuffix + return triggerName + mctx.TriggerSuffix } // ValidateGhostTriggerLengthBelowMaxLength checks if the given trigger name (already transformed // by GetGhostTriggerName) does not exceed the maximum allowed length. -func (this *MigrationContext) ValidateGhostTriggerLengthBelowMaxLength(triggerName string) bool { +func (mctx *MigrationContext) ValidateGhostTriggerLengthBelowMaxLength(triggerName string) bool { return utf8.RuneCountInString(triggerName) <= mysql.MaxTableNameLength } // GetContext returns the migration context for cancellation checking -func (this *MigrationContext) GetContext() context.Context { - return this.ctx +func (mctx *MigrationContext) GetContext() context.Context { + return mctx.ctx } // SetAbortError stores the fatal error that triggered abort // Only the first error is stored (subsequent errors are ignored) -func (this *MigrationContext) SetAbortError(err error) { - this.abortMutex.Lock() - defer this.abortMutex.Unlock() - if this.AbortError == nil { - this.AbortError = err +func (mctx *MigrationContext) SetAbortError(err error) { + mctx.abortMutex.Lock() + defer mctx.abortMutex.Unlock() + if mctx.AbortError == nil { + mctx.AbortError = err } } // GetAbortError retrieves the stored abort error -func (this *MigrationContext) GetAbortError() error { - this.abortMutex.Lock() - defer this.abortMutex.Unlock() - return this.AbortError +func (mctx *MigrationContext) GetAbortError() error { + 
mctx.abortMutex.Lock() + defer mctx.abortMutex.Unlock() + return mctx.AbortError } // CancelContext cancels the migration context to signal all goroutines to stop // The cancel function is safe to call multiple times and from multiple goroutines. -func (this *MigrationContext) CancelContext() { - if this.cancelFunc != nil { - this.cancelFunc() +func (mctx *MigrationContext) CancelContext() { + if mctx.cancelFunc != nil { + mctx.cancelFunc() } } diff --git a/go/base/load_map.go b/go/base/load_map.go index cfe92154b..ea51a485f 100644 --- a/go/base/load_map.go +++ b/go/base/load_map.go @@ -51,18 +51,18 @@ func ParseLoadMap(loadList string) (LoadMap, error) { } // Duplicate creates a clone of this map -func (this *LoadMap) Duplicate() LoadMap { +func (lm *LoadMap) Duplicate() LoadMap { dup := make(map[string]int64) - for k, v := range *this { + for k, v := range *lm { dup[k] = v } return dup } // String() returns a string representation of this map -func (this *LoadMap) String() string { +func (lm *LoadMap) String() string { tokens := []string{} - for key, val := range *this { + for key, val := range *lm { token := fmt.Sprintf("%s=%d", key, val) tokens = append(tokens, token) } diff --git a/go/base/utils.go b/go/base/utils.go index 89f6d315f..f1781438e 100644 --- a/go/base/utils.go +++ b/go/base/utils.go @@ -97,8 +97,8 @@ func ValidateConnection(db *gosql.DB, connectionConfig *mysql.ConnectionConfig, migrationContext.Log.Infof("%s connection validated on %+v", name, connectionConfig.Key) return version, nil } else if extraPort == 0 { - return "", fmt.Errorf("Unexpected database port reported: %+v", port) + return "", fmt.Errorf("unexpected database port reported: %+v", port) } else { - return "", fmt.Errorf("Unexpected database port reported: %+v / extra_port: %+v", port, extraPort) + return "", fmt.Errorf("unexpected database port reported: %+v / extra_port: %+v", port, extraPort) } } diff --git a/go/binlog/binlog_dml_event.go b/go/binlog/binlog_dml_event.go index 
2c7aa365d..626e5759e 100644 --- a/go/binlog/binlog_dml_event.go +++ b/go/binlog/binlog_dml_event.go @@ -62,6 +62,6 @@ func NewBinlogDMLEvent(databaseName, tableName string, dml EventDML) *BinlogDMLE return event } -func (this *BinlogDMLEvent) String() string { - return fmt.Sprintf("[%+v on %s:%s]", this.DML, this.DatabaseName, this.TableName) +func (bde *BinlogDMLEvent) String() string { + return fmt.Sprintf("[%+v on %s:%s]", bde.DML, bde.DatabaseName, bde.TableName) } diff --git a/go/binlog/binlog_entry.go b/go/binlog/binlog_entry.go index 69a2fc31d..7620281d2 100644 --- a/go/binlog/binlog_entry.go +++ b/go/binlog/binlog_entry.go @@ -26,6 +26,6 @@ func NewBinlogEntryAt(coordinates mysql.BinlogCoordinates) *BinlogEntry { } // String() returns a string representation of this binlog entry -func (this *BinlogEntry) String() string { - return fmt.Sprintf("[BinlogEntry at %+v; dml:%+v]", this.Coordinates, this.DmlEvent) +func (ble *BinlogEntry) String() string { + return fmt.Sprintf("[BinlogEntry at %+v; dml:%+v]", ble.Coordinates, ble.DmlEvent) } diff --git a/go/binlog/gomysql_reader.go b/go/binlog/gomysql_reader.go index d690a9f65..f548cef11 100644 --- a/go/binlog/gomysql_reader.go +++ b/go/binlog/gomysql_reader.go @@ -55,23 +55,23 @@ func NewGoMySQLReader(migrationContext *base.MigrationContext) *GoMySQLReader { } // ConnectBinlogStreamer -func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordinates) (err error) { +func (gmr *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordinates) (err error) { if coordinates.IsEmpty() { - return this.migrationContext.Log.Errorf("Empty coordinates at ConnectBinlogStreamer()") + return gmr.migrationContext.Log.Errorf("empty coordinates at ConnectBinlogStreamer()") } - this.currentCoordinatesMutex.Lock() - defer this.currentCoordinatesMutex.Unlock() - this.currentCoordinates = coordinates - this.migrationContext.Log.Infof("Connecting binlog streamer at %+v", coordinates) + 
gmr.currentCoordinatesMutex.Lock() + defer gmr.currentCoordinatesMutex.Unlock() + gmr.currentCoordinates = coordinates + gmr.migrationContext.Log.Infof("Connecting binlog streamer at %+v", coordinates) // Start sync with specified GTID set or binlog file and position - if this.migrationContext.UseGTIDs { + if gmr.migrationContext.UseGTIDs { coords := coordinates.(*mysql.GTIDBinlogCoordinates) - this.binlogStreamer, err = this.binlogSyncer.StartSyncGTID(coords.GTIDSet) + gmr.binlogStreamer, err = gmr.binlogSyncer.StartSyncGTID(coords.GTIDSet) } else { - coords := this.currentCoordinates.(*mysql.FileBinlogCoordinates) - this.binlogStreamer, err = this.binlogSyncer.StartSync(gomysql.Position{ + coords := gmr.currentCoordinates.(*mysql.FileBinlogCoordinates) + gmr.binlogStreamer, err = gmr.binlogSyncer.StartSync(gomysql.Position{ Name: coords.LogFile, Pos: uint32(coords.LogPos)}, ) @@ -79,17 +79,17 @@ func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordin return err } -func (this *GoMySQLReader) GetCurrentBinlogCoordinates() mysql.BinlogCoordinates { - this.currentCoordinatesMutex.Lock() - defer this.currentCoordinatesMutex.Unlock() - return this.currentCoordinates.Clone() +func (gmr *GoMySQLReader) GetCurrentBinlogCoordinates() mysql.BinlogCoordinates { + gmr.currentCoordinatesMutex.Lock() + defer gmr.currentCoordinatesMutex.Unlock() + return gmr.currentCoordinates.Clone() } -func (this *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEvent *replication.RowsEvent, entriesChannel chan<- *BinlogEntry) error { - currentCoords := this.GetCurrentBinlogCoordinates() +func (gmr *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEvent *replication.RowsEvent, entriesChannel chan<- *BinlogEntry) error { + currentCoords := gmr.GetCurrentBinlogCoordinates() dml := ToEventDML(ev.Header.EventType.String()) if dml == NotDML { - return fmt.Errorf("Unknown DML type: %s", ev.Header.EventType.String()) + return fmt.Errorf("unknown 
DML type: %s", ev.Header.EventType.String()) } for i, row := range rowsEvent.Rows { if dml == UpdateDML && i%2 == 1 { @@ -129,78 +129,72 @@ func (this *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEven } // StreamEvents -func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesChannel chan<- *BinlogEntry) error { - if canStopStreaming() { - return nil - } - for { - if canStopStreaming() { - break - } - ev, err := this.binlogStreamer.GetEvent(context.Background()) +func (gmr *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesChannel chan<- *BinlogEntry) error { + for !canStopStreaming() { + ev, err := gmr.binlogStreamer.GetEvent(context.Background()) if err != nil { return err } // Update binlog coords if using file-based coords. // GTID coordinates are updated on receiving GTID events. - if !this.migrationContext.UseGTIDs { - this.currentCoordinatesMutex.Lock() - coords := this.currentCoordinates.(*mysql.FileBinlogCoordinates) + if !gmr.migrationContext.UseGTIDs { + gmr.currentCoordinatesMutex.Lock() + coords := gmr.currentCoordinates.(*mysql.FileBinlogCoordinates) prevCoords := coords.Clone().(*mysql.FileBinlogCoordinates) coords.LogPos = int64(ev.Header.LogPos) coords.EventSize = int64(ev.Header.EventSize) if coords.IsLogPosOverflowBeyond4Bytes(prevCoords) { - this.currentCoordinatesMutex.Unlock() - return fmt.Errorf("Unexpected rows event at %+v, the binlog end_log_pos is overflow 4 bytes", coords) + gmr.currentCoordinatesMutex.Unlock() + return fmt.Errorf("unexpected rows event at %+v, the binlog end_log_pos is overflow 4 bytes", coords) } - this.currentCoordinatesMutex.Unlock() + gmr.currentCoordinatesMutex.Unlock() } switch event := ev.Event.(type) { case *replication.GTIDEvent: - if !this.migrationContext.UseGTIDs { + if !gmr.migrationContext.UseGTIDs { continue } sid, err := uuid.FromBytes(event.SID) if err != nil { return err } - this.currentCoordinatesMutex.Lock() - if this.LastTrxCoords != nil { - 
this.currentCoordinates = this.LastTrxCoords.Clone() + gmr.currentCoordinatesMutex.Lock() + if gmr.LastTrxCoords != nil { + gmr.currentCoordinates = gmr.LastTrxCoords.Clone() } - coords := this.currentCoordinates.(*mysql.GTIDBinlogCoordinates) + coords := gmr.currentCoordinates.(*mysql.GTIDBinlogCoordinates) trxGset := gomysql.NewUUIDSet(sid, gomysql.Interval{Start: event.GNO, Stop: event.GNO + 1}) coords.GTIDSet.AddSet(trxGset) - this.currentCoordinatesMutex.Unlock() + gmr.currentCoordinatesMutex.Unlock() case *replication.RotateEvent: - if this.migrationContext.UseGTIDs { + if gmr.migrationContext.UseGTIDs { continue } - this.currentCoordinatesMutex.Lock() - coords := this.currentCoordinates.(*mysql.FileBinlogCoordinates) + gmr.currentCoordinatesMutex.Lock() + coords := gmr.currentCoordinates.(*mysql.FileBinlogCoordinates) coords.LogFile = string(event.NextLogName) - this.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", coords.LogFile, int64(ev.Header.LogPos), event.NextLogName) - this.currentCoordinatesMutex.Unlock() + gmr.migrationContext.Log.Infof("rotate to next log from %s:%d to %s", coords.LogFile, int64(ev.Header.LogPos), event.NextLogName) + gmr.currentCoordinatesMutex.Unlock() case *replication.XIDEvent: - if this.migrationContext.UseGTIDs { - this.LastTrxCoords = &mysql.GTIDBinlogCoordinates{GTIDSet: event.GSet.(*gomysql.MysqlGTIDSet)} + if gmr.migrationContext.UseGTIDs { + gmr.LastTrxCoords = &mysql.GTIDBinlogCoordinates{GTIDSet: event.GSet.(*gomysql.MysqlGTIDSet)} } else { - this.LastTrxCoords = this.currentCoordinates.Clone() + gmr.LastTrxCoords = gmr.currentCoordinates.Clone() } case *replication.RowsEvent: - if err := this.handleRowsEvent(ev, event, entriesChannel); err != nil { + if err := gmr.handleRowsEvent(ev, event, entriesChannel); err != nil { return err } } } - this.migrationContext.Log.Debugf("done streaming events") + gmr.migrationContext.Log.Debugf("done streaming events") return nil } -func (this *GoMySQLReader) Close() 
error { - this.binlogSyncer.Close() +func (gmr *GoMySQLReader) Close() error { + gmr.binlogSyncer.Close() return nil } diff --git a/go/logic/applier.go b/go/logic/applier.go index 9e336e2f5..b49e131b8 100644 --- a/go/logic/applier.go +++ b/go/logic/applier.go @@ -98,9 +98,9 @@ func NewApplier(migrationContext *base.MigrationContext) *Applier { // for the migration's unique key. Duplicate warnings are formatted differently across MySQL versions, // hence the optional table name prefix. Metacharacters in table/index names are escaped to avoid // regex syntax errors. -func (this *Applier) compileMigrationKeyWarningRegex() (*regexp.Regexp, error) { - escapedTable := regexp.QuoteMeta(this.migrationContext.GetGhostTableName()) - escapedKey := regexp.QuoteMeta(this.migrationContext.UniqueKey.NameInGhostTable) +func (apl *Applier) compileMigrationKeyWarningRegex() (*regexp.Regexp, error) { + escapedTable := regexp.QuoteMeta(apl.migrationContext.GetGhostTableName()) + escapedKey := regexp.QuoteMeta(apl.migrationContext.UniqueKey.NameInGhostTable) migrationUniqueKeyPattern := fmt.Sprintf(`for key '(%s\.)?%s'`, escapedTable, escapedKey) migrationKeyRegex, err := regexp.Compile(migrationUniqueKeyPattern) if err != nil { @@ -109,75 +109,75 @@ func (this *Applier) compileMigrationKeyWarningRegex() (*regexp.Regexp, error) { return migrationKeyRegex, nil } -func (this *Applier) InitDBConnections() (err error) { - applierUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName) +func (apl *Applier) InitDBConnections() (err error) { + applierUri := apl.connectionConfig.GetDBUri(apl.migrationContext.DatabaseName) uriWithMulti := fmt.Sprintf("%s&multiStatements=true", applierUri) - if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, uriWithMulti); err != nil { + if apl.db, _, err = mysql.GetDB(apl.migrationContext.Uuid, uriWithMulti); err != nil { return err } singletonApplierUri := fmt.Sprintf("%s&timeout=0", applierUri) - if this.singletonDB, _, err = 
mysql.GetDB(this.migrationContext.Uuid, singletonApplierUri); err != nil { + if apl.singletonDB, _, err = mysql.GetDB(apl.migrationContext.Uuid, singletonApplierUri); err != nil { return err } - this.singletonDB.SetMaxOpenConns(1) - version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name) + apl.singletonDB.SetMaxOpenConns(1) + version, err := base.ValidateConnection(apl.db, apl.connectionConfig, apl.migrationContext, apl.name) if err != nil { return err } - if _, err := base.ValidateConnection(this.singletonDB, this.connectionConfig, this.migrationContext, this.name); err != nil { + if _, err := base.ValidateConnection(apl.singletonDB, apl.connectionConfig, apl.migrationContext, apl.name); err != nil { return err } - this.migrationContext.ApplierMySQLVersion = version - if err := this.validateAndReadGlobalVariables(); err != nil { + apl.migrationContext.ApplierMySQLVersion = version + if err := apl.validateAndReadGlobalVariables(); err != nil { return err } - if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform && !this.migrationContext.AzureMySQL { - if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil { + if !apl.migrationContext.AliyunRDS && !apl.migrationContext.GoogleCloudPlatform && !apl.migrationContext.AzureMySQL { + if impliedKey, err := mysql.GetInstanceKey(apl.db); err != nil { return err } else { - this.connectionConfig.ImpliedKey = impliedKey + apl.connectionConfig.ImpliedKey = impliedKey } } - if err := this.readTableColumns(); err != nil { + if err := apl.readTableColumns(); err != nil { return err } - this.migrationContext.Log.Infof("Applier initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.ApplierMySQLVersion) + apl.migrationContext.Log.Infof("Applier initiated on %+v, version %+v", apl.connectionConfig.ImpliedKey, apl.migrationContext.ApplierMySQLVersion) return nil } -func (this *Applier) prepareQueries() (err error) { - 
if this.dmlDeleteQueryBuilder, err = sql.NewDMLDeleteQueryBuilder( - this.migrationContext.DatabaseName, - this.migrationContext.GetGhostTableName(), - this.migrationContext.OriginalTableColumns, - &this.migrationContext.UniqueKey.Columns, +func (apl *Applier) prepareQueries() (err error) { + if apl.dmlDeleteQueryBuilder, err = sql.NewDMLDeleteQueryBuilder( + apl.migrationContext.DatabaseName, + apl.migrationContext.GetGhostTableName(), + apl.migrationContext.OriginalTableColumns, + &apl.migrationContext.UniqueKey.Columns, ); err != nil { return err } - if this.dmlInsertQueryBuilder, err = sql.NewDMLInsertQueryBuilder( - this.migrationContext.DatabaseName, - this.migrationContext.GetGhostTableName(), - this.migrationContext.OriginalTableColumns, - this.migrationContext.SharedColumns, - this.migrationContext.MappedSharedColumns, + if apl.dmlInsertQueryBuilder, err = sql.NewDMLInsertQueryBuilder( + apl.migrationContext.DatabaseName, + apl.migrationContext.GetGhostTableName(), + apl.migrationContext.OriginalTableColumns, + apl.migrationContext.SharedColumns, + apl.migrationContext.MappedSharedColumns, ); err != nil { return err } - if this.dmlUpdateQueryBuilder, err = sql.NewDMLUpdateQueryBuilder( - this.migrationContext.DatabaseName, - this.migrationContext.GetGhostTableName(), - this.migrationContext.OriginalTableColumns, - this.migrationContext.SharedColumns, - this.migrationContext.MappedSharedColumns, - &this.migrationContext.UniqueKey.Columns, + if apl.dmlUpdateQueryBuilder, err = sql.NewDMLUpdateQueryBuilder( + apl.migrationContext.DatabaseName, + apl.migrationContext.GetGhostTableName(), + apl.migrationContext.OriginalTableColumns, + apl.migrationContext.SharedColumns, + apl.migrationContext.MappedSharedColumns, + &apl.migrationContext.UniqueKey.Columns, ); err != nil { return err } - if this.migrationContext.Checkpoint { - if this.checkpointInsertQueryBuilder, err = sql.NewCheckpointQueryBuilder( - this.migrationContext.DatabaseName, - 
this.migrationContext.GetCheckpointTableName(), - &this.migrationContext.UniqueKey.Columns, + if apl.migrationContext.Checkpoint { + if apl.checkpointInsertQueryBuilder, err = sql.NewCheckpointQueryBuilder( + apl.migrationContext.DatabaseName, + apl.migrationContext.GetCheckpointTableName(), + &apl.migrationContext.UniqueKey.Columns, ); err != nil { return err } @@ -186,16 +186,16 @@ func (this *Applier) prepareQueries() (err error) { } // validateAndReadGlobalVariables potentially reads server global variables, such as the time_zone and wait_timeout. -func (this *Applier) validateAndReadGlobalVariables() error { +func (apl *Applier) validateAndReadGlobalVariables() error { query := `select /* gh-ost */ @@global.time_zone, @@global.wait_timeout` - if err := this.db.QueryRow(query).Scan( - &this.migrationContext.ApplierTimeZone, - &this.migrationContext.ApplierWaitTimeout, + if err := apl.db.QueryRow(query).Scan( + &apl.migrationContext.ApplierTimeZone, + &apl.migrationContext.ApplierWaitTimeout, ); err != nil { return err } - this.migrationContext.Log.Infof("will use time_zone='%s' on applier", this.migrationContext.ApplierTimeZone) + apl.migrationContext.Log.Infof("will use time_zone='%s' on applier", apl.migrationContext.ApplierTimeZone) return nil } @@ -203,13 +203,13 @@ func (this *Applier) validateAndReadGlobalVariables() error { // based on gh-ost configuration: // - User may skip strict mode -// - User may allow zero dats or zero in dates +// - User may allow zero dates or zero in dates -func (this *Applier) generateSqlModeQuery() string { +func (apl *Applier) generateSqlModeQuery() string { sqlModeAddendum := []string{`NO_AUTO_VALUE_ON_ZERO`} - if !this.migrationContext.SkipStrictMode { + if !apl.migrationContext.SkipStrictMode { sqlModeAddendum = append(sqlModeAddendum, `STRICT_ALL_TABLES`) } sqlModeQuery := fmt.Sprintf("CONCAT(@@session.sql_mode, ',%s')", strings.Join(sqlModeAddendum, ",")) - if this.migrationContext.AllowZeroInDate { + if apl.migrationContext.AllowZeroInDate { sqlModeQuery =
fmt.Sprintf("REPLACE(REPLACE(%s, 'NO_ZERO_IN_DATE', ''), 'NO_ZERO_DATE', '')", sqlModeQuery) } @@ -218,18 +218,18 @@ func (this *Applier) generateSqlModeQuery() string { // generateInstantDDLQuery returns the SQL for this ALTER operation // with an INSTANT assertion (requires MySQL 8.0+) -func (this *Applier) generateInstantDDLQuery() string { +func (apl *Applier) generateInstantDDLQuery() string { return fmt.Sprintf(`ALTER /* gh-ost */ TABLE %s.%s %s, ALGORITHM=INSTANT`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.OriginalTableName), - this.migrationContext.AlterStatementOptions, + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.OriginalTableName), + apl.migrationContext.AlterStatementOptions, ) } // readTableColumns reads table columns on applier -func (this *Applier) readTableColumns() (err error) { - this.migrationContext.Log.Infof("Examining table structure on applier") - this.migrationContext.OriginalTableColumnsOnApplier, _, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName) +func (apl *Applier) readTableColumns() (err error) { + apl.migrationContext.Log.Infof("Examining table structure on applier") + apl.migrationContext.OriginalTableColumnsOnApplier, _, err = mysql.GetTableColumns(apl.db, apl.migrationContext.DatabaseName, apl.migrationContext.OriginalTableName) if err != nil { return err } @@ -237,9 +237,9 @@ func (this *Applier) readTableColumns() (err error) { } // showTableStatus returns the output of `show table status like '...'` command -func (this *Applier) showTableStatus(tableName string) (rowMap sqlutils.RowMap) { - query := fmt.Sprintf(`show /* gh-ost */ table status from %s like '%s'`, sql.EscapeName(this.migrationContext.DatabaseName), tableName) - sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error { +func (apl *Applier) showTableStatus(tableName string) (rowMap 
sqlutils.RowMap) { + query := fmt.Sprintf(`show /* gh-ost */ table status from %s like '%s'`, sql.EscapeName(apl.migrationContext.DatabaseName), tableName) + sqlutils.QueryRowsMap(apl.db, query, func(m sqlutils.RowMap) error { rowMap = m return nil }) @@ -247,33 +247,33 @@ func (this *Applier) showTableStatus(tableName string) (rowMap sqlutils.RowMap) } // tableExists checks if a given table exists in database -func (this *Applier) tableExists(tableName string) (tableFound bool) { - m := this.showTableStatus(tableName) +func (apl *Applier) tableExists(tableName string) (tableFound bool) { + m := apl.showTableStatus(tableName) return (m != nil) } // ValidateOrDropExistingTables verifies ghost and changelog tables do not exist, // or attempts to drop them if instructed to. -func (this *Applier) ValidateOrDropExistingTables() error { - if this.migrationContext.InitiallyDropGhostTable { - if err := this.DropGhostTable(); err != nil { +func (apl *Applier) ValidateOrDropExistingTables() error { + if apl.migrationContext.InitiallyDropGhostTable { + if err := apl.DropGhostTable(); err != nil { return err } } - if this.tableExists(this.migrationContext.GetGhostTableName()) { - return fmt.Errorf("Table %s already exists. Panicking. Use --initially-drop-ghost-table to force dropping it, though I really prefer that you drop it or rename it away", sql.EscapeName(this.migrationContext.GetGhostTableName())) + if apl.tableExists(apl.migrationContext.GetGhostTableName()) { + return fmt.Errorf("table %s already exists. Panicking. 
Use --initially-drop-ghost-table to force dropping it, though I really prefer that you drop it or rename it away", sql.EscapeName(apl.migrationContext.GetGhostTableName())) } - if this.migrationContext.InitiallyDropOldTable { - if err := this.DropOldTable(); err != nil { + if apl.migrationContext.InitiallyDropOldTable { + if err := apl.DropOldTable(); err != nil { return err } } - if len(this.migrationContext.GetOldTableName()) > mysql.MaxTableNameLength { - this.migrationContext.Log.Fatalf("--timestamp-old-table defined, but resulting table name (%s) is too long (only %d characters allowed)", this.migrationContext.GetOldTableName(), mysql.MaxTableNameLength) + if len(apl.migrationContext.GetOldTableName()) > mysql.MaxTableNameLength { + apl.migrationContext.Log.Fatalf("--timestamp-old-table defined, but resulting table name (%s) is too long (only %d characters allowed)", apl.migrationContext.GetOldTableName(), mysql.MaxTableNameLength) } - if this.tableExists(this.migrationContext.GetOldTableName()) { - return fmt.Errorf("Table %s already exists. Panicking. Use --initially-drop-old-table to force dropping it, though I really prefer that you drop it or rename it away", sql.EscapeName(this.migrationContext.GetOldTableName())) + if apl.tableExists(apl.migrationContext.GetOldTableName()) { + return fmt.Errorf("table %s already exists. Panicking. Use --initially-drop-old-table to force dropping it, though I really prefer that you drop it or rename it away", sql.EscapeName(apl.migrationContext.GetOldTableName())) } return nil @@ -292,23 +292,23 @@ func (this *Applier) ValidateOrDropExistingTables() error { // It is not reliable to parse the `alter` statement to determine if it is instant or not. // This is because the table might be in an older row format, or have some other incompatibility // that is difficult to identify. 
-func (this *Applier) AttemptInstantDDL() error { - query := this.generateInstantDDLQuery() - this.migrationContext.Log.Infof("INSTANT DDL query is: %s", query) +func (apl *Applier) AttemptInstantDDL() error { + query := apl.generateInstantDDLQuery() + apl.migrationContext.Log.Infof("INSTANT DDL query is: %s", query) // Reuse cut-over-lock-timeout from regular migration process to reduce risk // in situations where there may be long-running transactions. - tableLockTimeoutSeconds := this.migrationContext.CutOverLockTimeoutSeconds * 2 - this.migrationContext.Log.Infof("Setting LOCK timeout as %d seconds", tableLockTimeoutSeconds) + tableLockTimeoutSeconds := apl.migrationContext.CutOverLockTimeoutSeconds * 2 + apl.migrationContext.Log.Infof("Setting LOCK timeout as %d seconds", tableLockTimeoutSeconds) lockTimeoutQuery := fmt.Sprintf(`set /* gh-ost */ session lock_wait_timeout:=%d`, tableLockTimeoutSeconds) - if _, err := this.db.Exec(lockTimeoutQuery); err != nil { + if _, err := apl.db.Exec(lockTimeoutQuery); err != nil { return err } // We don't need a trx, because for instant DDL the SQL mode doesn't matter. 
return retryOnLockWaitTimeout(func() error { - _, err := this.db.Exec(query) + _, err := apl.db.Exec(query) return err - }, this.migrationContext.MaxRetries(), this.migrationContext.Log) + }, apl.migrationContext.MaxRetries(), apl.migrationContext.Log) } // retryOnLockWaitTimeout retries the given operation on MySQL lock wait timeout @@ -334,27 +334,27 @@ func retryOnLockWaitTimeout(operation func() error, maxRetries int64, logger bas } // CreateGhostTable creates the ghost table on the applier host -func (this *Applier) CreateGhostTable() error { +func (apl *Applier) CreateGhostTable() error { query := fmt.Sprintf(`create /* gh-ost */ table %s.%s like %s.%s`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetGhostTableName()), - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.OriginalTableName), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetGhostTableName()), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.OriginalTableName), ) - this.migrationContext.Log.Infof("Creating ghost table %s.%s", - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetGhostTableName()), + apl.migrationContext.Log.Infof("Creating ghost table %s.%s", + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetGhostTableName()), ) err := func() error { - tx, err := this.db.Begin() + tx, err := apl.db.Begin() if err != nil { return err } defer tx.Rollback() - sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone) - sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery()) + sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, apl.migrationContext.ApplierTimeZone) + sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, apl.generateSqlModeQuery()) if _, err := 
tx.Exec(sessionQuery); err != nil { return err @@ -362,7 +362,7 @@ func (this *Applier) CreateGhostTable() error { if _, err := tx.Exec(query); err != nil { return err } - this.migrationContext.Log.Infof("Ghost table created") + apl.migrationContext.Log.Infof("Ghost table created") if err := tx.Commit(); err != nil { // Neither SET SESSION nor ALTER are really transactional, so strictly speaking // there's no need to commit; but let's do this the legit way anyway. @@ -375,27 +375,27 @@ func (this *Applier) CreateGhostTable() error { } // AlterGhost applies `alter` statement on ghost table -func (this *Applier) AlterGhost() error { +func (apl *Applier) AlterGhost() error { query := fmt.Sprintf(`alter /* gh-ost */ table %s.%s %s`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetGhostTableName()), - this.migrationContext.AlterStatementOptions, + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetGhostTableName()), + apl.migrationContext.AlterStatementOptions, ) - this.migrationContext.Log.Infof("Altering ghost table %s.%s", - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetGhostTableName()), + apl.migrationContext.Log.Infof("Altering ghost table %s.%s", + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetGhostTableName()), ) - this.migrationContext.Log.Debugf("ALTER statement: %s", query) + apl.migrationContext.Log.Debugf("ALTER statement: %s", query) err := func() error { - tx, err := this.db.Begin() + tx, err := apl.db.Begin() if err != nil { return err } defer tx.Rollback() - sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, this.migrationContext.ApplierTimeZone) - sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery()) + sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, apl.migrationContext.ApplierTimeZone) + sessionQuery = fmt.Sprintf("%s, %s", 
sessionQuery, apl.generateSqlModeQuery()) if _, err := tx.Exec(sessionQuery); err != nil { return err @@ -403,7 +403,7 @@ func (this *Applier) AlterGhost() error { if _, err := tx.Exec(query); err != nil { return err } - this.migrationContext.Log.Infof("Ghost table altered") + apl.migrationContext.Log.Infof("Ghost table altered") if err := tx.Commit(); err != nil { // Neither SET SESSION nor ALTER are really transactional, so strictly speaking // there's no need to commit; but let's do this the legit way anyway. @@ -416,27 +416,27 @@ func (this *Applier) AlterGhost() error { } // AlterGhost applies `alter` statement on ghost table -func (this *Applier) AlterGhostAutoIncrement() error { +func (apl *Applier) AlterGhostAutoIncrement() error { query := fmt.Sprintf(`alter /* gh-ost */ table %s.%s AUTO_INCREMENT=%d`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetGhostTableName()), - this.migrationContext.OriginalTableAutoIncrement, + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetGhostTableName()), + apl.migrationContext.OriginalTableAutoIncrement, ) - this.migrationContext.Log.Infof("Altering ghost table AUTO_INCREMENT value %s.%s", - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetGhostTableName()), + apl.migrationContext.Log.Infof("Altering ghost table AUTO_INCREMENT value %s.%s", + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetGhostTableName()), ) - this.migrationContext.Log.Debugf("AUTO_INCREMENT ALTER statement: %s", query) - if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil { + apl.migrationContext.Log.Debugf("AUTO_INCREMENT ALTER statement: %s", query) + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err != nil { return err } - this.migrationContext.Log.Infof("Ghost table AUTO_INCREMENT altered") + apl.migrationContext.Log.Infof("Ghost table AUTO_INCREMENT 
altered") return nil } // CreateChangelogTable creates the changelog table on the applier host -func (this *Applier) CreateChangelogTable() error { - if err := this.DropChangelogTable(); err != nil { +func (apl *Applier) CreateChangelogTable() error { + if err := apl.DropChangelogTable(); err != nil { return err } query := fmt.Sprintf(`create /* gh-ost */ table %s.%s ( @@ -447,26 +447,26 @@ func (this *Applier) CreateChangelogTable() error { primary key(id), unique key hint_uidx(hint) ) auto_increment=256 comment='%s'`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetChangelogTableName()), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetChangelogTableName()), GhostChangelogTableComment, ) - this.migrationContext.Log.Infof("Creating changelog table %s.%s", - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetChangelogTableName()), + apl.migrationContext.Log.Infof("Creating changelog table %s.%s", + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetChangelogTableName()), ) - if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil { + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err != nil { return err } - this.migrationContext.Log.Infof("Changelog table created") + apl.migrationContext.Log.Infof("Changelog table created") return nil } // Create the checkpoint table to store the chunk copy and applier state. // There are two sets of columns with the same types as the shared unique key, // one for IterationMinValues and one for IterationMaxValues. 
-func (this *Applier) CreateCheckpointTable() error { - if err := this.DropCheckpointTable(); err != nil { +func (apl *Applier) CreateCheckpointTable() error { + if err := apl.DropCheckpointTable(); err != nil { return err } colDefs := []string{ @@ -478,151 +478,151 @@ func (this *Applier) CreateCheckpointTable() error { "`gh_ost_dml_applied` bigint", "`gh_ost_is_cutover` tinyint(1) DEFAULT '0'", } - for _, col := range this.migrationContext.UniqueKey.Columns.Columns() { + for _, col := range apl.migrationContext.UniqueKey.Columns.Columns() { if col.MySQLType == "" { - return fmt.Errorf("CreateCheckpoinTable: column %s has no type information. applyColumnTypes must be called", sql.EscapeName(col.Name)) + return fmt.Errorf("column %s has no type information. applyColumnTypes must be called", sql.EscapeName(col.Name)) } minColName := sql.TruncateColumnName(col.Name, sql.MaxColumnNameLength-4) + "_min" colDef := fmt.Sprintf("%s %s", sql.EscapeName(minColName), col.MySQLType) colDefs = append(colDefs, colDef) } - for _, col := range this.migrationContext.UniqueKey.Columns.Columns() { + for _, col := range apl.migrationContext.UniqueKey.Columns.Columns() { maxColName := sql.TruncateColumnName(col.Name, sql.MaxColumnNameLength-4) + "_max" colDef := fmt.Sprintf("%s %s", sql.EscapeName(maxColName), col.MySQLType) colDefs = append(colDefs, colDef) } query := fmt.Sprintf("create /* gh-ost */ table %s.%s (\n %s\n)", - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetCheckpointTableName()), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetCheckpointTableName()), strings.Join(colDefs, ",\n "), ) - this.migrationContext.Log.Infof("Created checkpoint table") - if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil { + apl.migrationContext.Log.Infof("Created checkpoint table") + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err != nil { return err } return nil } // dropTable 
drops a given table on the applied host -func (this *Applier) dropTable(tableName string) error { +func (apl *Applier) dropTable(tableName string) error { query := fmt.Sprintf(`drop /* gh-ost */ table if exists %s.%s`, - sql.EscapeName(this.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.DatabaseName), sql.EscapeName(tableName), ) - this.migrationContext.Log.Infof("Dropping table %s.%s", - sql.EscapeName(this.migrationContext.DatabaseName), + apl.migrationContext.Log.Infof("Dropping table %s.%s", + sql.EscapeName(apl.migrationContext.DatabaseName), sql.EscapeName(tableName), ) - if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil { + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err != nil { return err } - this.migrationContext.Log.Infof("Table dropped") + apl.migrationContext.Log.Infof("Table dropped") return nil } // StateMetadataLockInstrument checks if metadata_locks is enabled in performance_schema. // If not it attempts to enable metadata_locks if this is allowed. -func (this *Applier) StateMetadataLockInstrument() error { +func (apl *Applier) StateMetadataLockInstrument() error { query := `select /*+ MAX_EXECUTION_TIME(300) */ ENABLED, TIMED from performance_schema.setup_instruments WHERE NAME = 'wait/lock/metadata/sql/mdl'` var enabled, timed string - if err := this.db.QueryRow(query).Scan(&enabled, &timed); err != nil { + if err := apl.db.QueryRow(query).Scan(&enabled, &timed); err != nil { if errors.Is(err, gosql.ErrNoRows) { // performance_schema may be disabled. 
return nil } - return this.migrationContext.Log.Errorf("query performance_schema.setup_instruments with name wait/lock/metadata/sql/mdl error: %s", err) + return apl.migrationContext.Log.Errorf("query performance_schema.setup_instruments with name wait/lock/metadata/sql/mdl error: %s", err) } if strings.EqualFold(enabled, "YES") && strings.EqualFold(timed, "YES") { - this.migrationContext.IsOpenMetadataLockInstruments = true + apl.migrationContext.IsOpenMetadataLockInstruments = true return nil } - if !this.migrationContext.AllowSetupMetadataLockInstruments { + if !apl.migrationContext.AllowSetupMetadataLockInstruments { return nil } - this.migrationContext.Log.Infof("instrument wait/lock/metadata/sql/mdl state: enabled %s, timed %s", enabled, timed) - if _, err := this.db.Exec(`UPDATE performance_schema.setup_instruments SET ENABLED = 'YES', TIMED = 'YES' WHERE NAME = 'wait/lock/metadata/sql/mdl'`); err != nil { - return this.migrationContext.Log.Errorf("enable instrument wait/lock/metadata/sql/mdl error: %s", err) + apl.migrationContext.Log.Infof("instrument wait/lock/metadata/sql/mdl state: enabled %s, timed %s", enabled, timed) + if _, err := apl.db.Exec(`UPDATE performance_schema.setup_instruments SET ENABLED = 'YES', TIMED = 'YES' WHERE NAME = 'wait/lock/metadata/sql/mdl'`); err != nil { + return apl.migrationContext.Log.Errorf("enable instrument wait/lock/metadata/sql/mdl error: %s", err) } - this.migrationContext.IsOpenMetadataLockInstruments = true - this.migrationContext.Log.Infof("instrument wait/lock/metadata/sql/mdl enabled") + apl.migrationContext.IsOpenMetadataLockInstruments = true + apl.migrationContext.Log.Infof("instrument wait/lock/metadata/sql/mdl enabled") return nil } // dropTriggers drop the triggers on the applied host -func (this *Applier) DropTriggersFromGhost() error { - if len(this.migrationContext.Triggers) > 0 { - for _, trigger := range this.migrationContext.Triggers { - triggerName := 
this.migrationContext.GetGhostTriggerName(trigger.Name) +func (apl *Applier) DropTriggersFromGhost() error { + if len(apl.migrationContext.Triggers) > 0 { + for _, trigger := range apl.migrationContext.Triggers { + triggerName := apl.migrationContext.GetGhostTriggerName(trigger.Name) query := fmt.Sprintf("drop trigger if exists %s", sql.EscapeName(triggerName)) - _, err := sqlutils.ExecNoPrepare(this.db, query) + _, err := sqlutils.ExecNoPrepare(apl.db, query) if err != nil { return err } - this.migrationContext.Log.Infof("Trigger '%s' dropped", triggerName) + apl.migrationContext.Log.Infof("Trigger '%s' dropped", triggerName) } } return nil } // createTriggers creates the triggers on the applied host -func (this *Applier) createTriggers(tableName string) error { - if len(this.migrationContext.Triggers) > 0 { - for _, trigger := range this.migrationContext.Triggers { - triggerName := this.migrationContext.GetGhostTriggerName(trigger.Name) +func (apl *Applier) createTriggers(tableName string) error { + if len(apl.migrationContext.Triggers) > 0 { + for _, trigger := range apl.migrationContext.Triggers { + triggerName := apl.migrationContext.GetGhostTriggerName(trigger.Name) query := fmt.Sprintf(`create /* gh-ost */ trigger %s %s %s on %s.%s for each row %s`, sql.EscapeName(triggerName), trigger.Timing, trigger.Event, - sql.EscapeName(this.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.DatabaseName), sql.EscapeName(tableName), trigger.Statement, ) - this.migrationContext.Log.Infof("Createing trigger %s on %s.%s", + apl.migrationContext.Log.Infof("Creating trigger %s on %s.%s", sql.EscapeName(triggerName), - sql.EscapeName(this.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.DatabaseName), sql.EscapeName(tableName), ) - if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil { + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err != nil { return err } } - this.migrationContext.Log.Infof("Triggers created on %s", 
tableName) + apl.migrationContext.Log.Infof("Triggers created on %s", tableName) } return nil } // CreateTriggers creates the original triggers on applier host -func (this *Applier) CreateTriggersOnGhost() error { - err := this.createTriggers(this.migrationContext.GetGhostTableName()) +func (apl *Applier) CreateTriggersOnGhost() error { + err := apl.createTriggers(apl.migrationContext.GetGhostTableName()) return err } // DropChangelogTable drops the changelog table on the applier host -func (this *Applier) DropChangelogTable() error { - return this.dropTable(this.migrationContext.GetChangelogTableName()) +func (apl *Applier) DropChangelogTable() error { + return apl.dropTable(apl.migrationContext.GetChangelogTableName()) } // DropCheckpointTable drops the checkpoint table on applier host -func (this *Applier) DropCheckpointTable() error { - return this.dropTable(this.migrationContext.GetCheckpointTableName()) +func (apl *Applier) DropCheckpointTable() error { + return apl.dropTable(apl.migrationContext.GetCheckpointTableName()) } // DropOldTable drops the _Old table on the applier host -func (this *Applier) DropOldTable() error { - return this.dropTable(this.migrationContext.GetOldTableName()) +func (apl *Applier) DropOldTable() error { + return apl.dropTable(apl.migrationContext.GetOldTableName()) } // DropGhostTable drops the ghost table on the applier host -func (this *Applier) DropGhostTable() error { - return this.dropTable(this.migrationContext.GetGhostTableName()) +func (apl *Applier) DropGhostTable() error { + return apl.dropTable(apl.migrationContext.GetGhostTableName()) } // WriteChangelog writes a value to the changelog table. 
// It returns the hint as given, for convenience -func (this *Applier) WriteChangelog(hint, value string) (string, error) { +func (apl *Applier) WriteChangelog(hint, value string) (string, error) { explicitId := 0 switch hint { case "heartbeat": @@ -642,45 +642,45 @@ func (this *Applier) WriteChangelog(hint, value string) (string, error) { on duplicate key update last_update=NOW(), value=VALUES(value)`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetChangelogTableName()), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetChangelogTableName()), ) - _, err := sqlutils.ExecNoPrepare(this.db, query, explicitId, hint, value) + _, err := sqlutils.ExecNoPrepare(apl.db, query, explicitId, hint, value) return hint, err } -func (this *Applier) WriteAndLogChangelog(hint, value string) (string, error) { - this.WriteChangelog(hint, value) - return this.WriteChangelog(fmt.Sprintf("%s at %d", hint, time.Now().UnixNano()), value) +func (apl *Applier) WriteAndLogChangelog(hint, value string) (string, error) { + apl.WriteChangelog(hint, value) + return apl.WriteChangelog(fmt.Sprintf("%s at %d", hint, time.Now().UnixNano()), value) } -func (this *Applier) WriteChangelogState(value string) (string, error) { - return this.WriteAndLogChangelog("state", value) +func (apl *Applier) WriteChangelogState(value string) (string, error) { + return apl.WriteAndLogChangelog("state", value) } // WriteCheckpoints writes a checkpoint to the _ghk table. -func (this *Applier) WriteCheckpoint(chk *Checkpoint) (int64, error) { +func (apl *Applier) WriteCheckpoint(chk *Checkpoint) (int64, error) { var insertId int64 uniqueKeyArgs := sqlutils.Args(chk.IterationRangeMin.AbstractValues()...) uniqueKeyArgs = append(uniqueKeyArgs, chk.IterationRangeMax.AbstractValues()...) 
- query, uniqueKeyArgs, err := this.checkpointInsertQueryBuilder.BuildQuery(uniqueKeyArgs) + query, uniqueKeyArgs, err := apl.checkpointInsertQueryBuilder.BuildQuery(uniqueKeyArgs) if err != nil { return insertId, err } args := sqlutils.Args(chk.LastTrxCoords.String(), chk.Iteration, chk.RowsCopied, chk.DMLApplied, chk.IsCutover) args = append(args, uniqueKeyArgs...) - res, err := this.db.Exec(query, args...) + res, err := apl.db.Exec(query, args...) if err != nil { return insertId, err } return res.LastInsertId() } -func (this *Applier) ReadLastCheckpoint() (*Checkpoint, error) { - row := this.db.QueryRow(fmt.Sprintf(`select /* gh-ost */ * from %s.%s order by gh_ost_chk_id desc limit 1`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.GetCheckpointTableName()))) +func (apl *Applier) ReadLastCheckpoint() (*Checkpoint, error) { + row := apl.db.QueryRow(fmt.Sprintf(`select /* gh-ost */ * from %s.%s order by gh_ost_chk_id desc limit 1`, sql.EscapeName(apl.migrationContext.DatabaseName), sql.EscapeName(apl.migrationContext.GetCheckpointTableName()))) chk := &Checkpoint{ - IterationRangeMin: sql.NewColumnValues(this.migrationContext.UniqueKey.Columns.Len()), - IterationRangeMax: sql.NewColumnValues(this.migrationContext.UniqueKey.Columns.Len()), + IterationRangeMin: sql.NewColumnValues(apl.migrationContext.UniqueKey.Columns.Len()), + IterationRangeMax: sql.NewColumnValues(apl.migrationContext.UniqueKey.Columns.Len()), } var coordStr string @@ -696,7 +696,7 @@ func (this *Applier) ReadLastCheckpoint() (*Checkpoint, error) { return nil, err } chk.Timestamp = time.Unix(timestamp, 0) - if this.migrationContext.UseGTIDs { + if apl.migrationContext.UseGTIDs { gtidCoords, err := mysql.NewGTIDBinlogCoordinates(coordStr) if err != nil { return nil, err @@ -713,17 +713,17 @@ func (this *Applier) ReadLastCheckpoint() (*Checkpoint, error) { } // InitiateHeartbeat creates a heartbeat cycle, writing to the changelog table. 
-// This is done asynchronously -func (this *Applier) InitiateHeartbeat() { +// This is done asynchronously +func (apl *Applier) InitiateHeartbeat() { var numSuccessiveFailures int64 injectHeartbeat := func() error { - if atomic.LoadInt64(&this.migrationContext.HibernateUntil) > 0 { + if atomic.LoadInt64(&apl.migrationContext.HibernateUntil) > 0 { return nil } - if _, err := this.WriteChangelog("heartbeat", time.Now().Format(time.RFC3339Nano)); err != nil { + if _, err := apl.WriteChangelog("heartbeat", time.Now().Format(time.RFC3339Nano)); err != nil { numSuccessiveFailures++ - if numSuccessiveFailures > this.migrationContext.MaxRetries() { - return this.migrationContext.Log.Errore(err) + if numSuccessiveFailures > apl.migrationContext.MaxRetries() { + return apl.migrationContext.Log.Errore(err) } } else { numSuccessiveFailures = 0 @@ -732,59 +732,59 @@ func (this *Applier) InitiateHeartbeat() { } injectHeartbeat() - ticker := time.NewTicker(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond) + ticker := time.NewTicker(time.Duration(apl.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond) defer ticker.Stop() for { // Check for context cancellation each iteration - ctx := this.migrationContext.GetContext() + ctx := apl.migrationContext.GetContext() select { case <-ctx.Done(): - this.migrationContext.Log.Debugf("Heartbeat injection cancelled") + apl.migrationContext.Log.Debugf("Heartbeat injection cancelled") return case <-ticker.C: // Process heartbeat } - if atomic.LoadInt64(&this.finishedMigrating) > 0 { + if atomic.LoadInt64(&apl.finishedMigrating) > 0 { return } - if atomic.LoadInt64(&this.migrationContext.CleanupImminentFlag) > 0 { + if atomic.LoadInt64(&apl.migrationContext.CleanupImminentFlag) > 0 { return } // Generally speaking, we would issue a goroutine, but I'd actually rather // have this block the loop rather than spam the master in the event something // goes wrong - if throttle, _, reasonHint := 
this.migrationContext.IsThrottled(); throttle && (reasonHint == base.UserCommandThrottleReasonHint) { + if throttle, _, reasonHint := apl.migrationContext.IsThrottled(); throttle && (reasonHint == base.UserCommandThrottleReasonHint) { continue } if err := injectHeartbeat(); err != nil { // Use helper to prevent deadlock if listenOnPanicAbort already exited - _ = base.SendWithContext(this.migrationContext.GetContext(), this.migrationContext.PanicAbort, fmt.Errorf("injectHeartbeat writing failed %d times, last error: %w", numSuccessiveFailures, err)) + _ = base.SendWithContext(apl.migrationContext.GetContext(), apl.migrationContext.PanicAbort, fmt.Errorf("injectHeartbeat writing failed %d times, last error: %w", numSuccessiveFailures, err)) return } } } // ExecuteThrottleQuery executes the `--throttle-query` and returns its results. -func (this *Applier) ExecuteThrottleQuery() (int64, error) { - throttleQuery := this.migrationContext.GetThrottleQuery() +func (apl *Applier) ExecuteThrottleQuery() (int64, error) { + throttleQuery := apl.migrationContext.GetThrottleQuery() if throttleQuery == "" { return 0, nil } var result int64 - if err := this.db.QueryRow(throttleQuery).Scan(&result); err != nil { - return 0, this.migrationContext.Log.Errore(err) + if err := apl.db.QueryRow(throttleQuery).Scan(&result); err != nil { + return 0, apl.migrationContext.Log.Errore(err) } return result, nil } // readMigrationMinValues returns the minimum values to be iterated on rowcopy -func (this *Applier) readMigrationMinValues(tx *gosql.Tx, uniqueKey *sql.UniqueKey) error { - this.migrationContext.Log.Debugf("Reading migration range according to key: %s", uniqueKey.Name) - query, err := sql.BuildUniqueKeyMinValuesPreparedQuery(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, uniqueKey) +func (apl *Applier) readMigrationMinValues(tx *gosql.Tx, uniqueKey *sql.UniqueKey) error { + apl.migrationContext.Log.Debugf("Reading migration range according to key: %s", 
uniqueKey.Name) + query, err := sql.BuildUniqueKeyMinValuesPreparedQuery(apl.migrationContext.DatabaseName, apl.migrationContext.OriginalTableName, uniqueKey) if err != nil { return err } @@ -796,20 +796,20 @@ func (this *Applier) readMigrationMinValues(tx *gosql.Tx, uniqueKey *sql.UniqueK defer rows.Close() for rows.Next() { - this.migrationContext.MigrationRangeMinValues = sql.NewColumnValues(uniqueKey.Len()) - if err = rows.Scan(this.migrationContext.MigrationRangeMinValues.ValuesPointers...); err != nil { + apl.migrationContext.MigrationRangeMinValues = sql.NewColumnValues(uniqueKey.Len()) + if err = rows.Scan(apl.migrationContext.MigrationRangeMinValues.ValuesPointers...); err != nil { return err } } - this.migrationContext.Log.Infof("Migration min values: [%s]", this.migrationContext.MigrationRangeMinValues) + apl.migrationContext.Log.Infof("Migration min values: [%s]", apl.migrationContext.MigrationRangeMinValues) return rows.Err() } // readMigrationMaxValues returns the maximum values to be iterated on rowcopy -func (this *Applier) readMigrationMaxValues(tx *gosql.Tx, uniqueKey *sql.UniqueKey) error { - this.migrationContext.Log.Debugf("Reading migration range according to key: %s", uniqueKey.Name) - query, err := sql.BuildUniqueKeyMaxValuesPreparedQuery(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, uniqueKey) +func (apl *Applier) readMigrationMaxValues(tx *gosql.Tx, uniqueKey *sql.UniqueKey) error { + apl.migrationContext.Log.Debugf("Reading migration range according to key: %s", uniqueKey.Name) + query, err := sql.BuildUniqueKeyMaxValuesPreparedQuery(apl.migrationContext.DatabaseName, apl.migrationContext.OriginalTableName, uniqueKey) if err != nil { return err } @@ -821,12 +821,12 @@ func (this *Applier) readMigrationMaxValues(tx *gosql.Tx, uniqueKey *sql.UniqueK defer rows.Close() for rows.Next() { - this.migrationContext.MigrationRangeMaxValues = sql.NewColumnValues(uniqueKey.Len()) - if err = 
rows.Scan(this.migrationContext.MigrationRangeMaxValues.ValuesPointers...); err != nil { + apl.migrationContext.MigrationRangeMaxValues = sql.NewColumnValues(uniqueKey.Len()) + if err = rows.Scan(apl.migrationContext.MigrationRangeMaxValues.ValuesPointers...); err != nil { return err } } - this.migrationContext.Log.Infof("Migration max values: [%s]", this.migrationContext.MigrationRangeMaxValues) + apl.migrationContext.Log.Infof("Migration max values: [%s]", apl.migrationContext.MigrationRangeMaxValues) return rows.Err() } @@ -848,21 +848,21 @@ Detail description of the lost data in mysql two-phase commit issue by @Fanduzi: will not be run. When the changelog writes successfully, the ReadMigrationRangeValues will read the newly inserted data, thus Avoiding data loss due to the above problem. */ -func (this *Applier) ReadMigrationRangeValues() error { - if _, err := this.WriteChangelogState(string(ReadMigrationRangeValues)); err != nil { +func (apl *Applier) ReadMigrationRangeValues() error { + if _, err := apl.WriteChangelogState(string(ReadMigrationRangeValues)); err != nil { return err } - tx, err := this.db.Begin() + tx, err := apl.db.Begin() if err != nil { return err } defer tx.Rollback() - if err := this.readMigrationMinValues(tx, this.migrationContext.UniqueKey); err != nil { + if err := apl.readMigrationMinValues(tx, apl.migrationContext.UniqueKey); err != nil { return err } - if err := this.readMigrationMaxValues(tx, this.migrationContext.UniqueKey); err != nil { + if err := apl.readMigrationMaxValues(tx, apl.migrationContext.UniqueKey); err != nil { return err } @@ -873,33 +873,33 @@ func (this *Applier) ReadMigrationRangeValues() error { // which will be used for copying the next chunk of rows. Ir returns "false" if there is // no further chunk to work through, i.e. 
we're past the last chunk and are done with // iterating the range (and thus done with copying row chunks) -func (this *Applier) CalculateNextIterationRangeEndValues() (hasFurtherRange bool, err error) { +func (apl *Applier) CalculateNextIterationRangeEndValues() (hasFurtherRange bool, err error) { for i := 0; i < 2; i++ { buildFunc := sql.BuildUniqueKeyRangeEndPreparedQueryViaOffset if i == 1 { buildFunc = sql.BuildUniqueKeyRangeEndPreparedQueryViaTemptable } query, explodedArgs, err := buildFunc( - this.migrationContext.DatabaseName, - this.migrationContext.OriginalTableName, - &this.migrationContext.UniqueKey.Columns, - this.migrationContext.MigrationIterationRangeMinValues.AbstractValues(), - this.migrationContext.MigrationRangeMaxValues.AbstractValues(), - atomic.LoadInt64(&this.migrationContext.ChunkSize), - this.migrationContext.GetIteration() == 0, - fmt.Sprintf("iteration:%d", this.migrationContext.GetIteration()), + apl.migrationContext.DatabaseName, + apl.migrationContext.OriginalTableName, + &apl.migrationContext.UniqueKey.Columns, + apl.migrationContext.MigrationIterationRangeMinValues.AbstractValues(), + apl.migrationContext.MigrationRangeMaxValues.AbstractValues(), + atomic.LoadInt64(&apl.migrationContext.ChunkSize), + apl.migrationContext.GetIteration() == 0, + fmt.Sprintf("iteration:%d", apl.migrationContext.GetIteration()), ) if err != nil { return hasFurtherRange, err } - rows, err := this.db.Query(query, explodedArgs...) + rows, err := apl.db.Query(query, explodedArgs...) 
if err != nil { return hasFurtherRange, err } defer rows.Close() - iterationRangeMaxValues := sql.NewColumnValues(this.migrationContext.UniqueKey.Len()) + iterationRangeMaxValues := sql.NewColumnValues(apl.migrationContext.UniqueKey.Len()) for rows.Next() { if err = rows.Scan(iterationRangeMaxValues.ValuesPointers...); err != nil { return hasFurtherRange, err @@ -910,48 +910,48 @@ func (this *Applier) CalculateNextIterationRangeEndValues() (hasFurtherRange boo return hasFurtherRange, err } if hasFurtherRange { - this.migrationContext.MigrationIterationRangeMaxValues = iterationRangeMaxValues + apl.migrationContext.MigrationIterationRangeMaxValues = iterationRangeMaxValues return hasFurtherRange, nil } } - this.migrationContext.Log.Debugf("Iteration complete: no further range to iterate") + apl.migrationContext.Log.Debugf("Iteration complete: no further range to iterate") return hasFurtherRange, nil } // ApplyIterationInsertQuery issues a chunk-INSERT query on the ghost table. It is where // data actually gets copied from original table. 
-func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected int64, duration time.Duration, err error) { +func (apl *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected int64, duration time.Duration, err error) { startTime := time.Now() - chunkSize = atomic.LoadInt64(&this.migrationContext.ChunkSize) + chunkSize = atomic.LoadInt64(&apl.migrationContext.ChunkSize) query, explodedArgs, err := sql.BuildRangeInsertPreparedQuery( - this.migrationContext.DatabaseName, - this.migrationContext.OriginalTableName, - this.migrationContext.GetGhostTableName(), - this.migrationContext.SharedColumns.Names(), - this.migrationContext.MappedSharedColumns.Names(), - this.migrationContext.UniqueKey.Name, - &this.migrationContext.UniqueKey.Columns, - this.migrationContext.MigrationIterationRangeMinValues.AbstractValues(), - this.migrationContext.MigrationIterationRangeMaxValues.AbstractValues(), - this.migrationContext.GetIteration() == 0, - this.migrationContext.IsTransactionalTable(), + apl.migrationContext.DatabaseName, + apl.migrationContext.OriginalTableName, + apl.migrationContext.GetGhostTableName(), + apl.migrationContext.SharedColumns.Names(), + apl.migrationContext.MappedSharedColumns.Names(), + apl.migrationContext.UniqueKey.Name, + &apl.migrationContext.UniqueKey.Columns, + apl.migrationContext.MigrationIterationRangeMinValues.AbstractValues(), + apl.migrationContext.MigrationIterationRangeMaxValues.AbstractValues(), + apl.migrationContext.GetIteration() == 0, + apl.migrationContext.IsTransactionalTable(), // TODO: Don't hardcode this - strings.HasPrefix(this.migrationContext.ApplierMySQLVersion, "8."), + strings.HasPrefix(apl.migrationContext.ApplierMySQLVersion, "8."), ) if err != nil { return chunkSize, rowsAffected, duration, err } sqlResult, err := func() (gosql.Result, error) { - tx, err := this.db.Begin() + tx, err := apl.db.Begin() if err != nil { return nil, err } defer tx.Rollback() - sessionQuery := fmt.Sprintf(`SET SESSION 
time_zone = '%s'`, this.migrationContext.ApplierTimeZone) - sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery()) + sessionQuery := fmt.Sprintf(`SET SESSION time_zone = '%s'`, apl.migrationContext.ApplierTimeZone) + sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, apl.generateSqlModeQuery()) if _, err := tx.Exec(sessionQuery); err != nil { return nil, err @@ -961,8 +961,7 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected return nil, err } - if this.migrationContext.PanicOnWarnings { - //nolint:execinquery + if apl.migrationContext.PanicOnWarnings { rows, err := tx.Query("SHOW WARNINGS") if err != nil { return nil, err @@ -973,7 +972,7 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected } // Compile regex once before loop to avoid performance penalty and handle errors properly - migrationKeyRegex, err := this.compileMigrationKeyWarningRegex() + migrationKeyRegex, err := apl.compileMigrationKeyWarningRegex() if err != nil { return nil, err } @@ -983,7 +982,7 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected var level, message string var code int if err := rows.Scan(&level, &code, &message); err != nil { - this.migrationContext.Log.Warningf("Failed to read SHOW WARNINGS row") + apl.migrationContext.Log.Warningf("Failed to read SHOW WARNINGS row") continue } if strings.Contains(message, "Duplicate entry") && migrationKeyRegex.MatchString(message) { @@ -991,7 +990,7 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected } sqlWarnings = append(sqlWarnings, fmt.Sprintf("%s: %s (%d)", level, message, code)) } - this.migrationContext.MigrationLastInsertSQLWarnings = sqlWarnings + apl.migrationContext.MigrationLastInsertSQLWarnings = sqlWarnings } if err := tx.Commit(); err != nil { @@ -1005,41 +1004,41 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected } rowsAffected, _ = 
sqlResult.RowsAffected() duration = time.Since(startTime) - this.migrationContext.Log.Debugf( + apl.migrationContext.Log.Debugf( "Issued INSERT on range: [%s]..[%s]; iteration: %d; chunk-size: %d", - this.migrationContext.MigrationIterationRangeMinValues, - this.migrationContext.MigrationIterationRangeMaxValues, - this.migrationContext.GetIteration(), + apl.migrationContext.MigrationIterationRangeMinValues, + apl.migrationContext.MigrationIterationRangeMaxValues, + apl.migrationContext.GetIteration(), chunkSize) return chunkSize, rowsAffected, duration, nil } // LockOriginalTable places a write lock on the original table -func (this *Applier) LockOriginalTable() error { +func (apl *Applier) LockOriginalTable() error { query := fmt.Sprintf(`lock /* gh-ost */ tables %s.%s write`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.OriginalTableName), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.OriginalTableName), ) - this.migrationContext.Log.Infof("Locking %s.%s", - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.OriginalTableName), + apl.migrationContext.Log.Infof("Locking %s.%s", + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.OriginalTableName), ) - this.migrationContext.LockTablesStartTime = time.Now() - if _, err := sqlutils.ExecNoPrepare(this.singletonDB, query); err != nil { + apl.migrationContext.LockTablesStartTime = time.Now() + if _, err := sqlutils.ExecNoPrepare(apl.singletonDB, query); err != nil { return err } - this.migrationContext.Log.Infof("Table locked") + apl.migrationContext.Log.Infof("Table locked") return nil } // UnlockTables makes tea. No wait, it unlocks tables. 
-func (this *Applier) UnlockTables() error { +func (apl *Applier) UnlockTables() error { query := `unlock /* gh-ost */ tables` - this.migrationContext.Log.Infof("Unlocking tables") - if _, err := sqlutils.ExecNoPrepare(this.singletonDB, query); err != nil { + apl.migrationContext.Log.Infof("Unlocking tables") + if _, err := sqlutils.ExecNoPrepare(apl.singletonDB, query); err != nil { return err } - this.migrationContext.Log.Infof("Tables unlocked") + apl.migrationContext.Log.Infof("Tables unlocked") return nil } @@ -1047,173 +1046,173 @@ func (this *Applier) UnlockTables() error { // - rename original table to _old // - rename ghost table to original // There is a point in time in between where the table does not exist. -func (this *Applier) SwapTablesQuickAndBumpy() error { +func (apl *Applier) SwapTablesQuickAndBumpy() error { query := fmt.Sprintf(`alter /* gh-ost */ table %s.%s rename %s`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.OriginalTableName), - sql.EscapeName(this.migrationContext.GetOldTableName()), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.OriginalTableName), + sql.EscapeName(apl.migrationContext.GetOldTableName()), ) - this.migrationContext.Log.Infof("Renaming original table") - this.migrationContext.RenameTablesStartTime = time.Now() - if _, err := sqlutils.ExecNoPrepare(this.singletonDB, query); err != nil { + apl.migrationContext.Log.Infof("Renaming original table") + apl.migrationContext.RenameTablesStartTime = time.Now() + if _, err := sqlutils.ExecNoPrepare(apl.singletonDB, query); err != nil { return err } query = fmt.Sprintf(`alter /* gh-ost */ table %s.%s rename %s`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetGhostTableName()), - sql.EscapeName(this.migrationContext.OriginalTableName), + sql.EscapeName(apl.migrationContext.DatabaseName), + 
sql.EscapeName(apl.migrationContext.GetGhostTableName()), + sql.EscapeName(apl.migrationContext.OriginalTableName), ) - this.migrationContext.Log.Infof("Renaming ghost table") - if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil { + apl.migrationContext.Log.Infof("Renaming ghost table") + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err != nil { return err } - this.migrationContext.RenameTablesEndTime = time.Now() + apl.migrationContext.RenameTablesEndTime = time.Now() - this.migrationContext.Log.Infof("Tables renamed") + apl.migrationContext.Log.Infof("Tables renamed") return nil } // RenameTablesRollback renames back both table: original back to ghost, // _old back to original. This is used by `--test-on-replica` -func (this *Applier) RenameTablesRollback() (renameError error) { +func (apl *Applier) RenameTablesRollback() (renameError error) { // Restoring tables to original names. // We prefer the single, atomic operation: query := fmt.Sprintf(`rename /* gh-ost */ table %s.%s to %s.%s, %s.%s to %s.%s`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.OriginalTableName), - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetGhostTableName()), - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetOldTableName()), - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.OriginalTableName), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.OriginalTableName), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetGhostTableName()), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetOldTableName()), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.OriginalTableName), ) - this.migrationContext.Log.Infof("Renaming back both 
tables") - if _, err := sqlutils.ExecNoPrepare(this.db, query); err == nil { + apl.migrationContext.Log.Infof("Renaming back both tables") + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err == nil { return nil } // But, if for some reason the above was impossible to do, we rename one by one. query = fmt.Sprintf(`rename /* gh-ost */ table %s.%s to %s.%s`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.OriginalTableName), - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetGhostTableName()), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.OriginalTableName), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetGhostTableName()), ) - this.migrationContext.Log.Infof("Renaming back to ghost table") - if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil { + apl.migrationContext.Log.Infof("Renaming back to ghost table") + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err != nil { renameError = err } query = fmt.Sprintf(`rename /* gh-ost */ table %s.%s to %s.%s`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetOldTableName()), - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.OriginalTableName), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetOldTableName()), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.OriginalTableName), ) - this.migrationContext.Log.Infof("Renaming back to original table") - if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil { + apl.migrationContext.Log.Infof("Renaming back to original table") + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err != nil { renameError = err } - return this.migrationContext.Log.Errore(renameError) + return 
apl.migrationContext.Log.Errore(renameError) } // StopSlaveIOThread is applicable with --test-on-replica; it stops the IO thread, duh. // We need to keep the SQL thread active so as to complete processing received events, // and have them written to the binary log, so that we can then read them via streamer. -func (this *Applier) StopSlaveIOThread() error { - replicaTerm := mysql.ReplicaTermFor(this.migrationContext.ApplierMySQLVersion, `slave`) +func (apl *Applier) StopSlaveIOThread() error { + replicaTerm := mysql.ReplicaTermFor(apl.migrationContext.ApplierMySQLVersion, `slave`) query := fmt.Sprintf("stop /* gh-ost */ %s io_thread", replicaTerm) - this.migrationContext.Log.Infof("Stopping replication IO thread") - if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil { + apl.migrationContext.Log.Infof("Stopping replication IO thread") + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err != nil { return err } - this.migrationContext.Log.Infof("Replication IO thread stopped") + apl.migrationContext.Log.Infof("Replication IO thread stopped") return nil } // StartSlaveIOThread is applicable with --test-on-replica -func (this *Applier) StartSlaveIOThread() error { - replicaTerm := mysql.ReplicaTermFor(this.migrationContext.ApplierMySQLVersion, `slave`) +func (apl *Applier) StartSlaveIOThread() error { + replicaTerm := mysql.ReplicaTermFor(apl.migrationContext.ApplierMySQLVersion, `slave`) query := fmt.Sprintf("start /* gh-ost */ %s io_thread", replicaTerm) - this.migrationContext.Log.Infof("Starting replication IO thread") - if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil { + apl.migrationContext.Log.Infof("Starting replication IO thread") + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err != nil { return err } - this.migrationContext.Log.Infof("Replication IO thread started") + apl.migrationContext.Log.Infof("Replication IO thread started") return nil } // StopSlaveSQLThread is applicable with --test-on-replica -func (this 
*Applier) StopSlaveSQLThread() error { - replicaTerm := mysql.ReplicaTermFor(this.migrationContext.ApplierMySQLVersion, `slave`) +func (apl *Applier) StopSlaveSQLThread() error { + replicaTerm := mysql.ReplicaTermFor(apl.migrationContext.ApplierMySQLVersion, `slave`) query := fmt.Sprintf("stop /* gh-ost */ %s sql_thread", replicaTerm) - this.migrationContext.Log.Infof("Verifying SQL thread is stopped") - if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil { + apl.migrationContext.Log.Infof("Verifying SQL thread is stopped") + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err != nil { return err } - this.migrationContext.Log.Infof("SQL thread stopped") + apl.migrationContext.Log.Infof("SQL thread stopped") return nil } // StartSlaveSQLThread is applicable with --test-on-replica -func (this *Applier) StartSlaveSQLThread() error { - replicaTerm := mysql.ReplicaTermFor(this.migrationContext.ApplierMySQLVersion, `slave`) +func (apl *Applier) StartSlaveSQLThread() error { + replicaTerm := mysql.ReplicaTermFor(apl.migrationContext.ApplierMySQLVersion, `slave`) query := fmt.Sprintf("start /* gh-ost */ %s sql_thread", replicaTerm) - this.migrationContext.Log.Infof("Verifying SQL thread is running") - if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil { + apl.migrationContext.Log.Infof("Verifying SQL thread is running") + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err != nil { return err } - this.migrationContext.Log.Infof("SQL thread started") + apl.migrationContext.Log.Infof("SQL thread started") return nil } // StopReplication is used by `--test-on-replica` and stops replication. 
-func (this *Applier) StopReplication() error { - if err := this.StopSlaveIOThread(); err != nil { +func (apl *Applier) StopReplication() error { + if err := apl.StopSlaveIOThread(); err != nil { return err } - if err := this.StopSlaveSQLThread(); err != nil { + if err := apl.StopSlaveSQLThread(); err != nil { return err } - readBinlogCoordinates, executeBinlogCoordinates, err := mysql.GetReplicationBinlogCoordinates(this.migrationContext.ApplierMySQLVersion, this.db, this.migrationContext.UseGTIDs) + readBinlogCoordinates, executeBinlogCoordinates, err := mysql.GetReplicationBinlogCoordinates(apl.migrationContext.ApplierMySQLVersion, apl.db, apl.migrationContext.UseGTIDs) if err != nil { return err } - this.migrationContext.Log.Infof("Replication IO thread at %+v. SQL thread is at %+v", readBinlogCoordinates, executeBinlogCoordinates) + apl.migrationContext.Log.Infof("Replication IO thread at %+v. SQL thread is at %+v", readBinlogCoordinates, executeBinlogCoordinates) return nil } // StartReplication is used by `--test-on-replica` on cut-over failure -func (this *Applier) StartReplication() error { - if err := this.StartSlaveIOThread(); err != nil { +func (apl *Applier) StartReplication() error { + if err := apl.StartSlaveIOThread(); err != nil { return err } - if err := this.StartSlaveSQLThread(); err != nil { + if err := apl.StartSlaveSQLThread(); err != nil { return err } - this.migrationContext.Log.Infof("Replication started") + apl.migrationContext.Log.Infof("Replication started") return nil } // GetSessionLockName returns a name for the special hint session voluntary lock -func (this *Applier) GetSessionLockName(sessionId int64) string { +func (apl *Applier) GetSessionLockName(sessionId int64) string { return fmt.Sprintf("gh-ost.%d.lock", sessionId) } // ExpectUsedLock expects the special hint voluntary lock to exist on given session -func (this *Applier) ExpectUsedLock(sessionId int64) error { +func (apl *Applier) ExpectUsedLock(sessionId int64) error { var 
result int64 query := `select /* gh-ost */ is_used_lock(?)` - lockName := this.GetSessionLockName(sessionId) - this.migrationContext.Log.Infof("Checking session lock: %s", lockName) - if err := this.db.QueryRow(query, lockName).Scan(&result); err != nil || result != sessionId { - return fmt.Errorf("Session lock %s expected to be found but wasn't", lockName) + lockName := apl.GetSessionLockName(sessionId) + apl.migrationContext.Log.Infof("Checking session lock: %s", lockName) + if err := apl.db.QueryRow(query, lockName).Scan(&result); err != nil || result != sessionId { + return fmt.Errorf("session lock %s expected to be found but wasn't", lockName) } return nil } // ExpectProcess expects a process to show up in `SHOW PROCESSLIST` that has given characteristics -func (this *Applier) ExpectProcess(sessionId int64, stateHint, infoHint string) error { +func (apl *Applier) ExpectProcess(sessionId int64, stateHint, infoHint string) error { found := false query := ` select /* gh-ost */ id @@ -1224,7 +1223,7 @@ func (this *Applier) ExpectProcess(sessionId int64, stateHint, infoHint string) and ? in (0, id) and state like concat('%', ?, '%') and info like concat('%', ?, '%')` - err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error { + err := sqlutils.QueryRowsMap(apl.db, query, func(m sqlutils.RowMap) error { found = true return nil }, sessionId, stateHint, infoHint) @@ -1232,90 +1231,90 @@ func (this *Applier) ExpectProcess(sessionId int64, stateHint, infoHint string) return err } if !found { - return fmt.Errorf("Cannot find process. Hints: %s, %s", stateHint, infoHint) + return fmt.Errorf("cannot find process. Hints: %s, %s", stateHint, infoHint) } return nil } // DropAtomicCutOverSentryTableIfExists checks if the "old" table name // happens to be a cut-over magic table; if so, it drops it. 
-func (this *Applier) DropAtomicCutOverSentryTableIfExists() error { - this.migrationContext.Log.Infof("Looking for magic cut-over table") - tableName := this.migrationContext.GetOldTableName() - rowMap := this.showTableStatus(tableName) +func (apl *Applier) DropAtomicCutOverSentryTableIfExists() error { + apl.migrationContext.Log.Infof("Looking for magic cut-over table") + tableName := apl.migrationContext.GetOldTableName() + rowMap := apl.showTableStatus(tableName) if rowMap == nil { // Table does not exist return nil } if rowMap["Comment"].String != atomicCutOverMagicHint { - return fmt.Errorf("Expected magic comment on %s, did not find it", tableName) + return fmt.Errorf("expected magic comment on %s, did not find it", tableName) } - this.migrationContext.Log.Infof("Dropping magic cut-over table") - return this.dropTable(tableName) + apl.migrationContext.Log.Infof("Dropping magic cut-over table") + return apl.dropTable(tableName) } // CreateAtomicCutOverSentryTable -func (this *Applier) CreateAtomicCutOverSentryTable() error { - if err := this.DropAtomicCutOverSentryTableIfExists(); err != nil { +func (apl *Applier) CreateAtomicCutOverSentryTable() error { + if err := apl.DropAtomicCutOverSentryTableIfExists(); err != nil { return err } - tableName := this.migrationContext.GetOldTableName() + tableName := apl.migrationContext.GetOldTableName() query := fmt.Sprintf(` create /* gh-ost */ table %s.%s ( id int auto_increment primary key ) engine=%s comment='%s'`, - sql.EscapeName(this.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.DatabaseName), sql.EscapeName(tableName), - this.migrationContext.TableEngine, + apl.migrationContext.TableEngine, atomicCutOverMagicHint, ) - this.migrationContext.Log.Infof("Creating magic cut-over table %s.%s", - sql.EscapeName(this.migrationContext.DatabaseName), + apl.migrationContext.Log.Infof("Creating magic cut-over table %s.%s", + sql.EscapeName(apl.migrationContext.DatabaseName), sql.EscapeName(tableName), 
) - if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil { + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err != nil { return err } - this.migrationContext.Log.Infof("Magic cut-over table created") + apl.migrationContext.Log.Infof("Magic cut-over table created") return nil } // InitAtomicCutOverWaitTimeout sets the cut-over session wait_timeout in order to reduce the // time an unresponsive (but still connected) gh-ost process can hold the cut-over lock. -func (this *Applier) InitAtomicCutOverWaitTimeout(tx *gosql.Tx) error { - cutOverWaitTimeoutSeconds := this.migrationContext.CutOverLockTimeoutSeconds * 3 - this.migrationContext.Log.Infof("Setting cut-over idle timeout as %d seconds", cutOverWaitTimeoutSeconds) +func (apl *Applier) InitAtomicCutOverWaitTimeout(tx *gosql.Tx) error { + cutOverWaitTimeoutSeconds := apl.migrationContext.CutOverLockTimeoutSeconds * 3 + apl.migrationContext.Log.Infof("Setting cut-over idle timeout as %d seconds", cutOverWaitTimeoutSeconds) query := fmt.Sprintf(`set /* gh-ost */ session wait_timeout:=%d`, cutOverWaitTimeoutSeconds) _, err := tx.Exec(query) return err } // RevertAtomicCutOverWaitTimeout restores the original wait_timeout for the applier session post-cut-over. 
-func (this *Applier) RevertAtomicCutOverWaitTimeout() { - this.migrationContext.Log.Infof("Reverting cut-over idle timeout to %d seconds", this.migrationContext.ApplierWaitTimeout) - query := fmt.Sprintf(`set /* gh-ost */ session wait_timeout:=%d`, this.migrationContext.ApplierWaitTimeout) - if _, err := sqlutils.ExecNoPrepare(this.db, query); err != nil { - this.migrationContext.Log.Errorf("Failed to restore applier wait_timeout to %d seconds: %v", - this.migrationContext.ApplierWaitTimeout, err, +func (apl *Applier) RevertAtomicCutOverWaitTimeout() { + apl.migrationContext.Log.Infof("Reverting cut-over idle timeout to %d seconds", apl.migrationContext.ApplierWaitTimeout) + query := fmt.Sprintf(`set /* gh-ost */ session wait_timeout:=%d`, apl.migrationContext.ApplierWaitTimeout) + if _, err := sqlutils.ExecNoPrepare(apl.db, query); err != nil { + apl.migrationContext.Log.Errorf("Failed to restore applier wait_timeout to %d seconds: %v", + apl.migrationContext.ApplierWaitTimeout, err, ) } } // AtomicCutOverMagicLock -func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocked chan<- error, okToUnlockTable <-chan bool, tableUnlocked chan<- error, renameLockSessionId *int64) error { - tx, err := this.db.Begin() +func (apl *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocked chan<- error, okToUnlockTable <-chan bool, tableUnlocked chan<- error, renameLockSessionId *int64) error { + tx, err := apl.db.Begin() if err != nil { tableLocked <- err return err } defer func() { sessionIdChan <- -1 - tableLocked <- fmt.Errorf("Unexpected error in AtomicCutOverMagicLock(), injected to release blocking channel reads") - tableUnlocked <- fmt.Errorf("Unexpected error in AtomicCutOverMagicLock(), injected to release blocking channel reads") + tableLocked <- fmt.Errorf("unexpected error in AtomicCutOverMagicLock(), injected to release blocking channel reads") + tableUnlocked <- fmt.Errorf("unexpected error in AtomicCutOverMagicLock(), injected
to release blocking channel reads") tx.Rollback() - this.DropAtomicCutOverSentryTableIfExists() + apl.DropAtomicCutOverSentryTableIfExists() }() var sessionId int64 @@ -1327,51 +1326,51 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke lockResult := 0 query := `select /* gh-ost */ get_lock(?, 0)` - lockName := this.GetSessionLockName(sessionId) - this.migrationContext.Log.Infof("Grabbing voluntary lock: %s", lockName) + lockName := apl.GetSessionLockName(sessionId) + apl.migrationContext.Log.Infof("Grabbing voluntary lock: %s", lockName) if err := tx.QueryRow(query, lockName).Scan(&lockResult); err != nil || lockResult != 1 { - err := fmt.Errorf("Unable to acquire lock %s", lockName) + err := fmt.Errorf("unable to acquire lock %s", lockName) tableLocked <- err return err } - tableLockTimeoutSeconds := this.migrationContext.CutOverLockTimeoutSeconds * 2 - this.migrationContext.Log.Infof("Setting LOCK timeout as %d seconds", tableLockTimeoutSeconds) + tableLockTimeoutSeconds := apl.migrationContext.CutOverLockTimeoutSeconds * 2 + apl.migrationContext.Log.Infof("Setting LOCK timeout as %d seconds", tableLockTimeoutSeconds) query = fmt.Sprintf(`set /* gh-ost */ session lock_wait_timeout:=%d`, tableLockTimeoutSeconds) if _, err := tx.Exec(query); err != nil { tableLocked <- err return err } - if err := this.CreateAtomicCutOverSentryTable(); err != nil { + if err := apl.CreateAtomicCutOverSentryTable(); err != nil { tableLocked <- err return err } - if err := this.InitAtomicCutOverWaitTimeout(tx); err != nil { + if err := apl.InitAtomicCutOverWaitTimeout(tx); err != nil { tableLocked <- err return err } - defer this.RevertAtomicCutOverWaitTimeout() + defer apl.RevertAtomicCutOverWaitTimeout() query = fmt.Sprintf(`lock /* gh-ost */ tables %s.%s write, %s.%s write`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.OriginalTableName), - sql.EscapeName(this.migrationContext.DatabaseName), - 
sql.EscapeName(this.migrationContext.GetOldTableName()), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.OriginalTableName), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetOldTableName()), ) - this.migrationContext.Log.Infof("Locking %s.%s, %s.%s", - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.OriginalTableName), - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetOldTableName()), + apl.migrationContext.Log.Infof("Locking %s.%s, %s.%s", + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.OriginalTableName), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetOldTableName()), ) - this.migrationContext.LockTablesStartTime = time.Now() + apl.migrationContext.LockTablesStartTime = time.Now() if _, err := tx.Exec(query); err != nil { tableLocked <- err return err } - this.migrationContext.Log.Infof("Tables locked") + apl.migrationContext.Log.Infof("Tables locked") tableLocked <- nil // No error. // From this point on, we are committed to UNLOCK TABLES. No matter what happens, @@ -1380,29 +1379,29 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke // The cut-over phase will proceed to apply remaining backlog onto ghost table, // and issue RENAME. We wait here until told to proceed. <-okToUnlockTable - this.migrationContext.Log.Infof("Will now proceed to drop magic table and unlock tables") + apl.migrationContext.Log.Infof("Will now proceed to drop magic table and unlock tables") // The magic table is here because we locked it. And we are the only ones allowed to drop it. 
// And in fact, we will: - this.migrationContext.Log.Infof("Dropping magic cut-over table") + apl.migrationContext.Log.Infof("Dropping magic cut-over table") query = fmt.Sprintf(`drop /* gh-ost */ table if exists %s.%s`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetOldTableName()), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetOldTableName()), ) if _, err := tx.Exec(query); err != nil { - this.migrationContext.Log.Errore(err) + apl.migrationContext.Log.Errore(err) // We DO NOT return here because we must `UNLOCK TABLES`! } - this.migrationContext.Log.Infof("Session renameLockSessionId is %+v", *renameLockSessionId) + apl.migrationContext.Log.Infof("Session renameLockSessionId is %+v", *renameLockSessionId) // Checking the lock is held by rename session - if *renameLockSessionId > 0 && this.migrationContext.IsOpenMetadataLockInstruments && !this.migrationContext.SkipMetadataLockCheck { - sleepDuration := time.Duration(10*this.migrationContext.CutOverLockTimeoutSeconds) * time.Millisecond + if *renameLockSessionId > 0 && apl.migrationContext.IsOpenMetadataLockInstruments && !apl.migrationContext.SkipMetadataLockCheck { + sleepDuration := time.Duration(10*apl.migrationContext.CutOverLockTimeoutSeconds) * time.Millisecond for i := 1; i <= 100; i++ { - err := this.ExpectMetadataLock(*renameLockSessionId) + err := apl.ExpectMetadataLock(*renameLockSessionId) if err == nil { - this.migrationContext.Log.Infof("Rename session is pending lock on the origin table !") + apl.migrationContext.Log.Infof("Rename session is pending lock on the origin table !") break } else { time.Sleep(sleepDuration) @@ -1410,32 +1409,32 @@ func (this *Applier) AtomicCutOverMagicLock(sessionIdChan chan int64, tableLocke } } // Tables still locked - this.migrationContext.Log.Infof("Releasing lock from %s.%s, %s.%s", - sql.EscapeName(this.migrationContext.DatabaseName), - 
sql.EscapeName(this.migrationContext.OriginalTableName), - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetOldTableName()), + apl.migrationContext.Log.Infof("Releasing lock from %s.%s, %s.%s", + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.OriginalTableName), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetOldTableName()), ) query = `unlock /* gh-ost */ tables` if _, err := tx.Exec(query); err != nil { tableUnlocked <- err - return this.migrationContext.Log.Errore(err) + return apl.migrationContext.Log.Errore(err) } - this.migrationContext.Log.Infof("Tables unlocked") + apl.migrationContext.Log.Infof("Tables unlocked") tableUnlocked <- nil return nil } // AtomicCutoverRename -func (this *Applier) AtomicCutoverRename(sessionIdChan chan int64, tablesRenamed chan<- error) error { - tx, err := this.db.Begin() +func (apl *Applier) AtomicCutoverRename(sessionIdChan chan int64, tablesRenamed chan<- error) error { + tx, err := apl.db.Begin() if err != nil { return err } defer func() { tx.Rollback() sessionIdChan <- -1 - tablesRenamed <- fmt.Errorf("Unexpected error in AtomicCutoverRename(), injected to release blocking channel reads") + tablesRenamed <- fmt.Errorf("unexpected error in AtomicCutoverRename(), injected to release blocking channel reads") }() var sessionId int64 if err := tx.QueryRow(`select /* gh-ost */ connection_id()`).Scan(&sessionId); err != nil { @@ -1443,35 +1442,35 @@ func (this *Applier) AtomicCutoverRename(sessionIdChan chan int64, tablesRenamed } sessionIdChan <- sessionId - this.migrationContext.Log.Infof("Setting RENAME timeout as %d seconds", this.migrationContext.CutOverLockTimeoutSeconds) - query := fmt.Sprintf(`set /* gh-ost */ session lock_wait_timeout:=%d`, this.migrationContext.CutOverLockTimeoutSeconds) + apl.migrationContext.Log.Infof("Setting RENAME timeout as %d seconds", 
apl.migrationContext.CutOverLockTimeoutSeconds) + query := fmt.Sprintf(`set /* gh-ost */ session lock_wait_timeout:=%d`, apl.migrationContext.CutOverLockTimeoutSeconds) if _, err := tx.Exec(query); err != nil { return err } query = fmt.Sprintf(`rename /* gh-ost */ table %s.%s to %s.%s, %s.%s to %s.%s`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.OriginalTableName), - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetOldTableName()), - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetGhostTableName()), - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.OriginalTableName), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.OriginalTableName), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetOldTableName()), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.GetGhostTableName()), + sql.EscapeName(apl.migrationContext.DatabaseName), + sql.EscapeName(apl.migrationContext.OriginalTableName), ) - this.migrationContext.Log.Infof("Issuing and expecting this to block: %s", query) + apl.migrationContext.Log.Infof("Issuing and expecting this to block: %s", query) if _, err := tx.Exec(query); err != nil { tablesRenamed <- err - return this.migrationContext.Log.Errore(err) + return apl.migrationContext.Log.Errore(err) } tablesRenamed <- nil - this.migrationContext.Log.Infof("Tables renamed") + apl.migrationContext.Log.Infof("Tables renamed") return nil } -func (this *Applier) ShowStatusVariable(variableName string) (result int64, err error) { +func (apl *Applier) ShowStatusVariable(variableName string) (result int64, err error) { query := fmt.Sprintf(`show /* gh-ost */ global status like '%s'`, variableName) - if err := this.db.QueryRow(query).Scan(&variableName, &result); err != 
nil { + if err := apl.db.QueryRow(query).Scan(&variableName, &result); err != nil { return 0, err } return result, nil @@ -1480,9 +1479,9 @@ func (this *Applier) ShowStatusVariable(variableName string) (result int64, err // updateModifiesUniqueKeyColumns checks whether a UPDATE DML event actually // modifies values of the migration's unique key (the iterated key). This will call // for special handling. -func (this *Applier) updateModifiesUniqueKeyColumns(dmlEvent *binlog.BinlogDMLEvent) (modifiedColumn string, isModified bool) { - for _, column := range this.migrationContext.UniqueKey.Columns.Columns() { - tableOrdinal := this.migrationContext.OriginalTableColumns.Ordinals[column.Name] +func (apl *Applier) updateModifiesUniqueKeyColumns(dmlEvent *binlog.BinlogDMLEvent) (modifiedColumn string, isModified bool) { + for _, column := range apl.migrationContext.UniqueKey.Columns.Columns() { + tableOrdinal := apl.migrationContext.OriginalTableColumns.Ordinals[column.Name] whereColumnValue := dmlEvent.WhereColumnValues.AbstractValues()[tableOrdinal] newColumnValue := dmlEvent.NewColumnValues.AbstractValues()[tableOrdinal] @@ -1495,42 +1494,42 @@ func (this *Applier) updateModifiesUniqueKeyColumns(dmlEvent *binlog.BinlogDMLEv // buildDMLEventQuery creates a query to operate on the ghost table, based on an intercepted binlog // event entry on the original table. 
-func (this *Applier) buildDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) []*dmlBuildResult { +func (apl *Applier) buildDMLEventQuery(dmlEvent *binlog.BinlogDMLEvent) []*dmlBuildResult { switch dmlEvent.DML { case binlog.DeleteDML: { - query, uniqueKeyArgs, err := this.dmlDeleteQueryBuilder.BuildQuery(dmlEvent.WhereColumnValues.AbstractValues()) + query, uniqueKeyArgs, err := apl.dmlDeleteQueryBuilder.BuildQuery(dmlEvent.WhereColumnValues.AbstractValues()) return []*dmlBuildResult{newDmlBuildResult(query, uniqueKeyArgs, -1, err)} } case binlog.InsertDML: { - query, sharedArgs, err := this.dmlInsertQueryBuilder.BuildQuery(dmlEvent.NewColumnValues.AbstractValues()) + query, sharedArgs, err := apl.dmlInsertQueryBuilder.BuildQuery(dmlEvent.NewColumnValues.AbstractValues()) return []*dmlBuildResult{newDmlBuildResult(query, sharedArgs, 1, err)} } case binlog.UpdateDML: { - if _, isModified := this.updateModifiesUniqueKeyColumns(dmlEvent); isModified { + if _, isModified := apl.updateModifiesUniqueKeyColumns(dmlEvent); isModified { results := make([]*dmlBuildResult, 0, 2) dmlEvent.DML = binlog.DeleteDML - results = append(results, this.buildDMLEventQuery(dmlEvent)...) + results = append(results, apl.buildDMLEventQuery(dmlEvent)...) dmlEvent.DML = binlog.InsertDML - results = append(results, this.buildDMLEventQuery(dmlEvent)...) + results = append(results, apl.buildDMLEventQuery(dmlEvent)...) return results } - query, updateArgs, err := this.dmlUpdateQueryBuilder.BuildQuery(dmlEvent.NewColumnValues.AbstractValues(), dmlEvent.WhereColumnValues.AbstractValues()) + query, updateArgs, err := apl.dmlUpdateQueryBuilder.BuildQuery(dmlEvent.NewColumnValues.AbstractValues(), dmlEvent.WhereColumnValues.AbstractValues()) args := sqlutils.Args() args = append(args, updateArgs...) 
return []*dmlBuildResult{newDmlBuildResult(query, args, 0, err)} } } - return []*dmlBuildResult{newDmlBuildResultError(fmt.Errorf("Unknown dml event type: %+v", dmlEvent.DML))} + return []*dmlBuildResult{newDmlBuildResultError(fmt.Errorf("unknown dml event type: %+v", dmlEvent.DML))} } // executeBatchWithWarningChecking executes a batch of DML statements with SHOW WARNINGS // interleaved after each statement to detect warnings from any statement in the batch. // This is used when PanicOnWarnings is enabled to ensure warnings from middle statements // are not lost (SHOW WARNINGS only shows warnings from the last statement in a multi-statement batch). -func (this *Applier) executeBatchWithWarningChecking(ctx context.Context, tx *gosql.Tx, buildResults []*dmlBuildResult) (int64, error) { +func (apl *Applier) executeBatchWithWarningChecking(ctx context.Context, tx *gosql.Tx, buildResults []*dmlBuildResult) (int64, error) { // Build query with interleaved SHOW WARNINGS: stmt1; SHOW WARNINGS; stmt2; SHOW WARNINGS; ... 
var queryBuilder strings.Builder args := make([]interface{}, 0) @@ -1568,7 +1567,7 @@ func (this *Applier) executeBatchWithWarningChecking(ctx context.Context, tx *go } // Compile regex once before loop to avoid performance penalty and handle errors properly - migrationKeyRegex, err := this.compileMigrationKeyWarningRegex() + migrationKeyRegex, err := apl.compileMigrationKeyWarningRegex() if err != nil { return 0, err } @@ -1628,19 +1627,19 @@ func (this *Applier) executeBatchWithWarningChecking(ctx context.Context, tx *go } // ApplyDMLEventQueries applies multiple DML queries onto the _ghost_ table -func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) error { +func (apl *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) error { var totalDelta int64 ctx := context.Background() err := func() error { - conn, err := this.db.Conn(ctx) + conn, err := apl.db.Conn(ctx) if err != nil { return err } defer conn.Close() sessionQuery := "SET /* gh-ost */ SESSION time_zone = '+00:00'" - sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, this.generateSqlModeQuery()) + sessionQuery = fmt.Sprintf("%s, %s", sessionQuery, apl.generateSqlModeQuery()) if _, err := conn.ExecContext(ctx, sessionQuery); err != nil { return err } @@ -1657,7 +1656,7 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) buildResults := make([]*dmlBuildResult, 0, len(dmlEvents)) nArgs := 0 for _, dmlEvent := range dmlEvents { - for _, buildResult := range this.buildDMLEventQuery(dmlEvent) { + for _, buildResult := range apl.buildDMLEventQuery(dmlEvent) { if buildResult.err != nil { return rollback(buildResult.err) } @@ -1669,8 +1668,8 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) // When PanicOnWarnings is enabled, we need to check warnings after each statement // in the batch. 
SHOW WARNINGS only shows warnings from the last statement in a // multi-statement query, so we interleave SHOW WARNINGS after each DML statement. - if this.migrationContext.PanicOnWarnings { - totalDelta, err = this.executeBatchWithWarningChecking(ctx, tx, buildResults) + if apl.migrationContext.PanicOnWarnings { + totalDelta, err = apl.executeBatchWithWarningChecking(ctx, tx, buildResults) if err != nil { return rollback(err) } @@ -1722,25 +1721,25 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) }() if err != nil { - return this.migrationContext.Log.Errore(err) + return apl.migrationContext.Log.Errore(err) } // no error - atomic.AddInt64(&this.migrationContext.TotalDMLEventsApplied, int64(len(dmlEvents))) - if this.migrationContext.CountTableRows { - atomic.AddInt64(&this.migrationContext.RowsDeltaEstimate, totalDelta) + atomic.AddInt64(&apl.migrationContext.TotalDMLEventsApplied, int64(len(dmlEvents))) + if apl.migrationContext.CountTableRows { + atomic.AddInt64(&apl.migrationContext.RowsDeltaEstimate, totalDelta) } - this.migrationContext.Log.Debugf("ApplyDMLEventQueries() applied %d events in one transaction", len(dmlEvents)) + apl.migrationContext.Log.Debugf("ApplyDMLEventQueries() applied %d events in one transaction", len(dmlEvents)) return nil } -func (this *Applier) Teardown() { - this.migrationContext.Log.Debugf("Tearing down...") - this.db.Close() - this.singletonDB.Close() - atomic.StoreInt64(&this.finishedMigrating, 1) +func (apl *Applier) Teardown() { + apl.migrationContext.Log.Debugf("Tearing down...") + apl.db.Close() + apl.singletonDB.Close() + atomic.StoreInt64(&apl.finishedMigrating, 1) } -func (this *Applier) ExpectMetadataLock(sessionId int64) error { +func (apl *Applier) ExpectMetadataLock(sessionId int64) error { found := false query := ` select /* gh-ost */ m.owner_thread_id @@ -1750,16 +1749,16 @@ func (this *Applier) ExpectMetadataLock(sessionId int64) error { and m.lock_type = 'EXCLUSIVE' and 
m.lock_status = 'PENDING' and t.processlist_id = ? ` - err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error { + err := sqlutils.QueryRowsMap(apl.db, query, func(m sqlutils.RowMap) error { found = true return nil - }, this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, sessionId) + }, apl.migrationContext.DatabaseName, apl.migrationContext.OriginalTableName, sessionId) if err != nil { return err } if !found { - err = fmt.Errorf("cannot find PENDING metadata lock on original table: `%s`.`%s`", this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName) - return this.migrationContext.Log.Errore(err) + err = fmt.Errorf("cannot find PENDING metadata lock on original table: `%s`.`%s`", apl.migrationContext.DatabaseName, apl.migrationContext.OriginalTableName) + return apl.migrationContext.Log.Errore(err) } return nil } diff --git a/go/logic/applier_test.go b/go/logic/applier_test.go index fd055a9fc..5104a07c1 100644 --- a/go/logic/applier_test.go +++ b/go/logic/applier_test.go @@ -468,7 +468,7 @@ func (suite *ApplierTestSuite) TestValidateOrDropExistingTablesWithGhostTableExi err = applier.ValidateOrDropExistingTables() suite.Require().Error(err) - suite.Require().EqualError(err, "Table `_testing_gho` already exists. Panicking. Use --initially-drop-ghost-table to force dropping it, though I really prefer that you drop it or rename it away") + suite.Require().EqualError(err, "table `_testing_gho` already exists. Panicking. 
Use --initially-drop-ghost-table to force dropping it, though I really prefer that you drop it or rename it away") } func (suite *ApplierTestSuite) TestValidateOrDropExistingTablesWithGhostTableExistingAndInitiallyDropGhostTableSet() { @@ -502,7 +502,6 @@ func (suite *ApplierTestSuite) TestValidateOrDropExistingTablesWithGhostTableExi // Check that the ghost table was dropped var tableName string - //nolint:execinquery err = suite.db.QueryRow(fmt.Sprintf("SHOW TABLES IN test LIKE '_%s_gho'", testMysqlTableName)).Scan(&tableName) suite.Require().Error(err) suite.Require().Equal(gosql.ErrNoRows, err) @@ -540,14 +539,12 @@ func (suite *ApplierTestSuite) TestCreateGhostTable() { // Check that the ghost table was created var tableName string - //nolint:execinquery err = suite.db.QueryRow("SHOW TABLES IN test LIKE '_testing_gho'").Scan(&tableName) suite.Require().NoError(err) suite.Require().Equal("_testing_gho", tableName) // Check that the ghost table has the same columns as the original table var createDDL string - //nolint:execinquery err = suite.db.QueryRow(fmt.Sprintf("SHOW CREATE TABLE %s", getTestGhostTableName())).Scan(&tableName, &createDDL) suite.Require().NoError(err) suite.Require().Equal("CREATE TABLE `_testing_gho` (\n `id` int DEFAULT NULL,\n `item_id` int DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", createDDL) diff --git a/go/logic/hooks.go b/go/logic/hooks.go index a1853e7ed..dfb18567d 100644 --- a/go/logic/hooks.go +++ b/go/logic/hooks.go @@ -46,31 +46,31 @@ func NewHooksExecutor(migrationContext *base.MigrationContext) *HooksExecutor { } } -func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) []string { +func (he *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) []string { env := os.Environ() - env = append(env, fmt.Sprintf("GH_OST_DATABASE_NAME=%s", this.migrationContext.DatabaseName)) - env = append(env, fmt.Sprintf("GH_OST_TABLE_NAME=%s", 
this.migrationContext.OriginalTableName)) - env = append(env, fmt.Sprintf("GH_OST_GHOST_TABLE_NAME=%s", this.migrationContext.GetGhostTableName())) - env = append(env, fmt.Sprintf("GH_OST_OLD_TABLE_NAME=%s", this.migrationContext.GetOldTableName())) - env = append(env, fmt.Sprintf("GH_OST_DDL=%s", this.migrationContext.AlterStatement)) - env = append(env, fmt.Sprintf("GH_OST_ELAPSED_SECONDS=%f", this.migrationContext.ElapsedTime().Seconds())) - env = append(env, fmt.Sprintf("GH_OST_ELAPSED_COPY_SECONDS=%f", this.migrationContext.ElapsedRowCopyTime().Seconds())) - estimatedRows := atomic.LoadInt64(&this.migrationContext.RowsEstimate) + atomic.LoadInt64(&this.migrationContext.RowsDeltaEstimate) + env = append(env, fmt.Sprintf("GH_OST_DATABASE_NAME=%s", he.migrationContext.DatabaseName)) + env = append(env, fmt.Sprintf("GH_OST_TABLE_NAME=%s", he.migrationContext.OriginalTableName)) + env = append(env, fmt.Sprintf("GH_OST_GHOST_TABLE_NAME=%s", he.migrationContext.GetGhostTableName())) + env = append(env, fmt.Sprintf("GH_OST_OLD_TABLE_NAME=%s", he.migrationContext.GetOldTableName())) + env = append(env, fmt.Sprintf("GH_OST_DDL=%s", he.migrationContext.AlterStatement)) + env = append(env, fmt.Sprintf("GH_OST_ELAPSED_SECONDS=%f", he.migrationContext.ElapsedTime().Seconds())) + env = append(env, fmt.Sprintf("GH_OST_ELAPSED_COPY_SECONDS=%f", he.migrationContext.ElapsedRowCopyTime().Seconds())) + estimatedRows := atomic.LoadInt64(&he.migrationContext.RowsEstimate) + atomic.LoadInt64(&he.migrationContext.RowsDeltaEstimate) env = append(env, fmt.Sprintf("GH_OST_ESTIMATED_ROWS=%d", estimatedRows)) - totalRowsCopied := this.migrationContext.GetTotalRowsCopied() + totalRowsCopied := he.migrationContext.GetTotalRowsCopied() env = append(env, fmt.Sprintf("GH_OST_COPIED_ROWS=%d", totalRowsCopied)) - env = append(env, fmt.Sprintf("GH_OST_MIGRATED_HOST=%s", this.migrationContext.GetApplierHostname())) - env = append(env, fmt.Sprintf("GH_OST_INSPECTED_HOST=%s", 
this.migrationContext.GetInspectorHostname())) - env = append(env, fmt.Sprintf("GH_OST_EXECUTING_HOST=%s", this.migrationContext.Hostname)) - env = append(env, fmt.Sprintf("GH_OST_INSPECTED_LAG=%f", this.migrationContext.GetCurrentLagDuration().Seconds())) - env = append(env, fmt.Sprintf("GH_OST_HEARTBEAT_LAG=%f", this.migrationContext.TimeSinceLastHeartbeatOnChangelog().Seconds())) - env = append(env, fmt.Sprintf("GH_OST_PROGRESS=%f", this.migrationContext.GetProgressPct())) - env = append(env, fmt.Sprintf("GH_OST_ETA_SECONDS=%d", this.migrationContext.GetETASeconds())) - env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT=%s", this.migrationContext.HooksHintMessage)) - env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_OWNER=%s", this.migrationContext.HooksHintOwner)) - env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_TOKEN=%s", this.migrationContext.HooksHintToken)) - env = append(env, fmt.Sprintf("GH_OST_DRY_RUN=%t", this.migrationContext.Noop)) - env = append(env, fmt.Sprintf("GH_OST_REVERT=%t", this.migrationContext.Revert)) + env = append(env, fmt.Sprintf("GH_OST_MIGRATED_HOST=%s", he.migrationContext.GetApplierHostname())) + env = append(env, fmt.Sprintf("GH_OST_INSPECTED_HOST=%s", he.migrationContext.GetInspectorHostname())) + env = append(env, fmt.Sprintf("GH_OST_EXECUTING_HOST=%s", he.migrationContext.Hostname)) + env = append(env, fmt.Sprintf("GH_OST_INSPECTED_LAG=%f", he.migrationContext.GetCurrentLagDuration().Seconds())) + env = append(env, fmt.Sprintf("GH_OST_HEARTBEAT_LAG=%f", he.migrationContext.TimeSinceLastHeartbeatOnChangelog().Seconds())) + env = append(env, fmt.Sprintf("GH_OST_PROGRESS=%f", he.migrationContext.GetProgressPct())) + env = append(env, fmt.Sprintf("GH_OST_ETA_SECONDS=%d", he.migrationContext.GetETASeconds())) + env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT=%s", he.migrationContext.HooksHintMessage)) + env = append(env, fmt.Sprintf("GH_OST_HOOKS_HINT_OWNER=%s", he.migrationContext.HooksHintOwner)) + env = append(env, 
fmt.Sprintf("GH_OST_HOOKS_HINT_TOKEN=%s", he.migrationContext.HooksHintToken)) + env = append(env, fmt.Sprintf("GH_OST_DRY_RUN=%t", he.migrationContext.Noop)) + env = append(env, fmt.Sprintf("GH_OST_REVERT=%t", he.migrationContext.Revert)) env = append(env, extraVariables...) return env @@ -78,94 +78,94 @@ func (this *HooksExecutor) applyEnvironmentVariables(extraVariables ...string) [ // executeHook executes a command, and sets relevant environment variables // combined output & error are printed to the configured writer. -func (this *HooksExecutor) executeHook(hook string, extraVariables ...string) error { - this.migrationContext.Log.Infof("executing hook: %+v", hook) +func (he *HooksExecutor) executeHook(hook string, extraVariables ...string) error { + he.migrationContext.Log.Infof("executing hook: %+v", hook) cmd := exec.Command(hook) - cmd.Env = this.applyEnvironmentVariables(extraVariables...) + cmd.Env = he.applyEnvironmentVariables(extraVariables...) combinedOutput, err := cmd.CombinedOutput() - fmt.Fprintln(this.writer, string(combinedOutput)) + fmt.Fprintln(he.writer, string(combinedOutput)) return log.Errore(err) } -func (this *HooksExecutor) detectHooks(baseName string) (hooks []string, err error) { - if this.migrationContext.HooksPath == "" { +func (he *HooksExecutor) detectHooks(baseName string) (hooks []string, err error) { + if he.migrationContext.HooksPath == "" { return hooks, err } - pattern := fmt.Sprintf("%s/%s*", this.migrationContext.HooksPath, baseName) + pattern := fmt.Sprintf("%s/%s*", he.migrationContext.HooksPath, baseName) hooks, err = filepath.Glob(pattern) return hooks, err } -func (this *HooksExecutor) executeHooks(baseName string, extraVariables ...string) error { - hooks, err := this.detectHooks(baseName) +func (he *HooksExecutor) executeHooks(baseName string, extraVariables ...string) error { + hooks, err := he.detectHooks(baseName) if err != nil { return err } for _, hook := range hooks { log.Infof("executing %+v hook: %+v", 
baseName, hook) - if err := this.executeHook(hook, extraVariables...); err != nil { + if err := he.executeHook(hook, extraVariables...); err != nil { return err } } return nil } -func (this *HooksExecutor) onStartup() error { - return this.executeHooks(onStartup) +func (he *HooksExecutor) onStartup() error { + return he.executeHooks(onStartup) } -func (this *HooksExecutor) onValidated() error { - return this.executeHooks(onValidated) +func (he *HooksExecutor) onValidated() error { + return he.executeHooks(onValidated) } -func (this *HooksExecutor) onRowCountComplete() error { - return this.executeHooks(onRowCountComplete) +func (he *HooksExecutor) onRowCountComplete() error { + return he.executeHooks(onRowCountComplete) } -func (this *HooksExecutor) onBeforeRowCopy() error { - return this.executeHooks(onBeforeRowCopy) +func (he *HooksExecutor) onBeforeRowCopy() error { + return he.executeHooks(onBeforeRowCopy) } -func (this *HooksExecutor) onBatchCopyRetry(errorMessage string) error { +func (he *HooksExecutor) onBatchCopyRetry(errorMessage string) error { v := fmt.Sprintf("GH_OST_LAST_BATCH_COPY_ERROR=%s", errorMessage) - return this.executeHooks(onBatchCopyRetry, v) + return he.executeHooks(onBatchCopyRetry, v) } -func (this *HooksExecutor) onRowCopyComplete() error { - return this.executeHooks(onRowCopyComplete) +func (he *HooksExecutor) onRowCopyComplete() error { + return he.executeHooks(onRowCopyComplete) } -func (this *HooksExecutor) onBeginPostponed() error { - return this.executeHooks(onBeginPostponed) +func (he *HooksExecutor) onBeginPostponed() error { + return he.executeHooks(onBeginPostponed) } -func (this *HooksExecutor) onBeforeCutOver() error { - return this.executeHooks(onBeforeCutOver) +func (he *HooksExecutor) onBeforeCutOver() error { + return he.executeHooks(onBeforeCutOver) } -func (this *HooksExecutor) onInteractiveCommand(command string) error { +func (he *HooksExecutor) onInteractiveCommand(command string) error { v := 
fmt.Sprintf("GH_OST_COMMAND='%s'", command) - return this.executeHooks(onInteractiveCommand, v) + return he.executeHooks(onInteractiveCommand, v) } -func (this *HooksExecutor) onSuccess(instantDDL bool) error { +func (he *HooksExecutor) onSuccess(instantDDL bool) error { v := fmt.Sprintf("GH_OST_INSTANT_DDL=%t", instantDDL) - return this.executeHooks(onSuccess, v) + return he.executeHooks(onSuccess, v) } -func (this *HooksExecutor) onFailure() error { - return this.executeHooks(onFailure) +func (he *HooksExecutor) onFailure() error { + return he.executeHooks(onFailure) } -func (this *HooksExecutor) onStatus(statusMessage string) error { +func (he *HooksExecutor) onStatus(statusMessage string) error { v := fmt.Sprintf("GH_OST_STATUS='%s'", statusMessage) - return this.executeHooks(onStatus, v) + return he.executeHooks(onStatus, v) } -func (this *HooksExecutor) onStopReplication() error { - return this.executeHooks(onStopReplication) +func (he *HooksExecutor) onStopReplication() error { + return he.executeHooks(onStopReplication) } -func (this *HooksExecutor) onStartReplication() error { - return this.executeHooks(onStartReplication) +func (he *HooksExecutor) onStartReplication() error { + return he.executeHooks(onStartReplication) } diff --git a/go/logic/inspect.go b/go/logic/inspect.go index 97895890d..96aadd672 100644 --- a/go/logic/inspect.go +++ b/go/logic/inspect.go @@ -44,72 +44,72 @@ func NewInspector(migrationContext *base.MigrationContext) *Inspector { } } -func (this *Inspector) InitDBConnections() (err error) { - inspectorUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName) - if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, inspectorUri); err != nil { +func (isp *Inspector) InitDBConnections() (err error) { + inspectorUri := isp.connectionConfig.GetDBUri(isp.migrationContext.DatabaseName) + if isp.db, _, err = mysql.GetDB(isp.migrationContext.Uuid, inspectorUri); err != nil { return err } - informationSchemaUri := 
this.connectionConfig.GetDBUri("information_schema") - if this.informationSchemaDb, _, err = mysql.GetDB(this.migrationContext.Uuid, informationSchemaUri); err != nil { + informationSchemaUri := isp.connectionConfig.GetDBUri("information_schema") + if isp.informationSchemaDb, _, err = mysql.GetDB(isp.migrationContext.Uuid, informationSchemaUri); err != nil { return err } - if err := this.validateConnection(); err != nil { + if err := isp.validateConnection(); err != nil { return err } - this.dbVersion = this.migrationContext.InspectorMySQLVersion + isp.dbVersion = isp.migrationContext.InspectorMySQLVersion - if !this.migrationContext.AliyunRDS && !this.migrationContext.GoogleCloudPlatform && !this.migrationContext.AzureMySQL { - if impliedKey, err := mysql.GetInstanceKey(this.db); err != nil { + if !isp.migrationContext.AliyunRDS && !isp.migrationContext.GoogleCloudPlatform && !isp.migrationContext.AzureMySQL { + if impliedKey, err := mysql.GetInstanceKey(isp.db); err != nil { return err } else { - this.connectionConfig.ImpliedKey = impliedKey + isp.connectionConfig.ImpliedKey = impliedKey } } - if err := this.validateGrants(); err != nil { + if err := isp.validateGrants(); err != nil { return err } - if err := this.validateBinlogs(); err != nil { + if err := isp.validateBinlogs(); err != nil { return err } - if this.migrationContext.UseGTIDs { - if err := this.validateGTIDConfig(); err != nil { + if isp.migrationContext.UseGTIDs { + if err := isp.validateGTIDConfig(); err != nil { return err } } - if err := this.applyBinlogFormat(); err != nil { + if err := isp.applyBinlogFormat(); err != nil { return err } - this.migrationContext.Log.Infof("Inspector initiated on %+v, version %+v", this.connectionConfig.ImpliedKey, this.migrationContext.InspectorMySQLVersion) + isp.migrationContext.Log.Infof("Inspector initiated on %+v, version %+v", isp.connectionConfig.ImpliedKey, isp.migrationContext.InspectorMySQLVersion) return nil } -func (this *Inspector) 
ValidateOriginalTable() (err error) { - if err := this.validateTable(); err != nil { +func (isp *Inspector) ValidateOriginalTable() (err error) { + if err := isp.validateTable(); err != nil { return err } - if err := this.validateTableForeignKeys(this.migrationContext.DiscardForeignKeys); err != nil { + if err := isp.validateTableForeignKeys(isp.migrationContext.DiscardForeignKeys); err != nil { return err } - if err := this.validateTableTriggers(); err != nil { + if err := isp.validateTableTriggers(); err != nil { return err } - if err := this.estimateTableRowsViaExplain(); err != nil { + if err := isp.estimateTableRowsViaExplain(); err != nil { return err } return nil } -func (this *Inspector) InspectTableColumnsAndUniqueKeys(tableName string) (columns *sql.ColumnList, virtualColumns *sql.ColumnList, uniqueKeys [](*sql.UniqueKey), err error) { - uniqueKeys, err = this.getCandidateUniqueKeys(tableName) +func (isp *Inspector) InspectTableColumnsAndUniqueKeys(tableName string) (columns *sql.ColumnList, virtualColumns *sql.ColumnList, uniqueKeys [](*sql.UniqueKey), err error) { + uniqueKeys, err = isp.getCandidateUniqueKeys(tableName) if err != nil { return columns, virtualColumns, uniqueKeys, err } if len(uniqueKeys) == 0 { - return columns, virtualColumns, uniqueKeys, fmt.Errorf("No PRIMARY nor UNIQUE key found in table! Bailing out") + return columns, virtualColumns, uniqueKeys, fmt.Errorf("no PRIMARY nor UNIQUE key found in table! 
Bailing out") } - columns, virtualColumns, err = mysql.GetTableColumns(this.db, this.migrationContext.DatabaseName, tableName) + columns, virtualColumns, err = mysql.GetTableColumns(isp.db, isp.migrationContext.DatabaseName, tableName) if err != nil { return columns, virtualColumns, uniqueKeys, err } @@ -117,12 +117,12 @@ func (this *Inspector) InspectTableColumnsAndUniqueKeys(tableName string) (colum return columns, virtualColumns, uniqueKeys, nil } -func (this *Inspector) InspectOriginalTable() (err error) { - this.migrationContext.OriginalTableColumns, this.migrationContext.OriginalTableVirtualColumns, this.migrationContext.OriginalTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.OriginalTableName) +func (isp *Inspector) InspectOriginalTable() (err error) { + isp.migrationContext.OriginalTableColumns, isp.migrationContext.OriginalTableVirtualColumns, isp.migrationContext.OriginalTableUniqueKeys, err = isp.InspectTableColumnsAndUniqueKeys(isp.migrationContext.OriginalTableName) if err != nil { return err } - this.migrationContext.OriginalTableAutoIncrement, err = this.getAutoIncrementValue(this.migrationContext.OriginalTableName) + isp.migrationContext.OriginalTableAutoIncrement, err = isp.getAutoIncrementValue(isp.migrationContext.OriginalTableName) if err != nil { return err } @@ -131,86 +131,86 @@ func (this *Inspector) InspectOriginalTable() (err error) { // inspectOriginalAndGhostTables compares original and ghost tables to see whether the migration // makes sense and is valid. 
It extracts the list of shared columns and the chosen migration unique key -func (this *Inspector) inspectOriginalAndGhostTables() (err error) { - originalNamesOnApplier := this.migrationContext.OriginalTableColumnsOnApplier.Names() - originalNames := this.migrationContext.OriginalTableColumns.Names() +func (isp *Inspector) inspectOriginalAndGhostTables() (err error) { + originalNamesOnApplier := isp.migrationContext.OriginalTableColumnsOnApplier.Names() + originalNames := isp.migrationContext.OriginalTableColumns.Names() if !reflect.DeepEqual(originalNames, originalNamesOnApplier) { - return fmt.Errorf("It seems like table structure is not identical between master and replica. This scenario is not supported.") + return fmt.Errorf("it seems like table structure is not identical between master and replica. This scenario is not supported") } - this.migrationContext.GhostTableColumns, this.migrationContext.GhostTableVirtualColumns, this.migrationContext.GhostTableUniqueKeys, err = this.InspectTableColumnsAndUniqueKeys(this.migrationContext.GetGhostTableName()) + isp.migrationContext.GhostTableColumns, isp.migrationContext.GhostTableVirtualColumns, isp.migrationContext.GhostTableUniqueKeys, err = isp.InspectTableColumnsAndUniqueKeys(isp.migrationContext.GetGhostTableName()) if err != nil { return err } - sharedUniqueKeys := this.getSharedUniqueKeys(this.migrationContext.OriginalTableUniqueKeys, this.migrationContext.GhostTableUniqueKeys) + sharedUniqueKeys := isp.getSharedUniqueKeys(isp.migrationContext.OriginalTableUniqueKeys, isp.migrationContext.GhostTableUniqueKeys) for i, sharedUniqueKey := range sharedUniqueKeys { - this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, &sharedUniqueKey.Columns) + isp.applyColumnTypes(isp.migrationContext.DatabaseName, isp.migrationContext.OriginalTableName, &sharedUniqueKey.Columns) uniqueKeyIsValid := true for _, column := range sharedUniqueKey.Columns.Columns() { switch column.Type { 
case sql.FloatColumnType: { - this.migrationContext.Log.Warningf("Will not use %+v as shared key due to FLOAT data type", sharedUniqueKey.Name) + isp.migrationContext.Log.Warningf("Will not use %+v as shared key due to FLOAT data type", sharedUniqueKey.Name) uniqueKeyIsValid = false } case sql.JSONColumnType: { // Noteworthy that at this time MySQL does not allow JSON indexing anyhow, but this code // will remain in place to potentially handle the future case where JSON is supported in indexes. - this.migrationContext.Log.Warningf("Will not use %+v as shared key due to JSON data type", sharedUniqueKey.Name) + isp.migrationContext.Log.Warningf("Will not use %+v as shared key due to JSON data type", sharedUniqueKey.Name) uniqueKeyIsValid = false } } } if uniqueKeyIsValid { - this.migrationContext.UniqueKey = sharedUniqueKeys[i] + isp.migrationContext.UniqueKey = sharedUniqueKeys[i] break } } - if this.migrationContext.UniqueKey == nil { - return fmt.Errorf("No shared unique key can be found after ALTER! Bailing out") + if isp.migrationContext.UniqueKey == nil { + return fmt.Errorf("no shared unique key can be found after ALTER! Bailing out") } - this.migrationContext.Log.Infof("Chosen shared unique key is %s", this.migrationContext.UniqueKey.Name) - if this.migrationContext.UniqueKey.HasNullable { - if this.migrationContext.NullableUniqueKeyAllowed { - this.migrationContext.Log.Warningf("Chosen key (%s) has nullable columns. You have supplied with --allow-nullable-unique-key and so this migration proceeds. As long as there aren't NULL values in this key's column, migration should be fine. NULL values will corrupt migration's data", this.migrationContext.UniqueKey) + isp.migrationContext.Log.Infof("Chosen shared unique key is %s", isp.migrationContext.UniqueKey.Name) + if isp.migrationContext.UniqueKey.HasNullable { + if isp.migrationContext.NullableUniqueKeyAllowed { + isp.migrationContext.Log.Warningf("Chosen key (%s) has nullable columns. 
You have supplied with --allow-nullable-unique-key and so this migration proceeds. As long as there aren't NULL values in this key's column, migration should be fine. NULL values will corrupt migration's data", isp.migrationContext.UniqueKey) } else { - return fmt.Errorf("Chosen key (%s) has nullable columns. Bailing out. To force this operation to continue, supply --allow-nullable-unique-key flag. Only do so if you are certain there are no actual NULL values in this key. As long as there aren't, migration should be fine. NULL values in columns of this key will corrupt migration's data", this.migrationContext.UniqueKey) + return fmt.Errorf("chosen key (%s) has nullable columns. Bailing out. To force this operation to continue, supply --allow-nullable-unique-key flag. Only do so if you are certain there are no actual NULL values in this key. As long as there aren't, migration should be fine. NULL values in columns of this key will corrupt migration's data", isp.migrationContext.UniqueKey) } } - this.migrationContext.SharedColumns, this.migrationContext.MappedSharedColumns = this.getSharedColumns(this.migrationContext.OriginalTableColumns, this.migrationContext.GhostTableColumns, this.migrationContext.OriginalTableVirtualColumns, this.migrationContext.GhostTableVirtualColumns, this.migrationContext.ColumnRenameMap) - this.migrationContext.Log.Infof("Shared columns are %s", this.migrationContext.SharedColumns) + isp.migrationContext.SharedColumns, isp.migrationContext.MappedSharedColumns = isp.getSharedColumns(isp.migrationContext.OriginalTableColumns, isp.migrationContext.GhostTableColumns, isp.migrationContext.OriginalTableVirtualColumns, isp.migrationContext.GhostTableVirtualColumns, isp.migrationContext.ColumnRenameMap) + isp.migrationContext.Log.Infof("Shared columns are %s", isp.migrationContext.SharedColumns) // By fact that a non-empty unique key exists we also know the shared columns are non-empty // This additional step looks at which columns are unsigned. 
We could have merged this within // the `getTableColumns()` function, but it's a later patch and introduces some complexity; I feel // comfortable in doing this as a separate step. - this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName, this.migrationContext.OriginalTableColumns, this.migrationContext.SharedColumns, &this.migrationContext.UniqueKey.Columns) - this.applyColumnTypes(this.migrationContext.DatabaseName, this.migrationContext.GetGhostTableName(), this.migrationContext.GhostTableColumns, this.migrationContext.MappedSharedColumns) + isp.applyColumnTypes(isp.migrationContext.DatabaseName, isp.migrationContext.OriginalTableName, isp.migrationContext.OriginalTableColumns, isp.migrationContext.SharedColumns, &isp.migrationContext.UniqueKey.Columns) + isp.applyColumnTypes(isp.migrationContext.DatabaseName, isp.migrationContext.GetGhostTableName(), isp.migrationContext.GhostTableColumns, isp.migrationContext.MappedSharedColumns) - for i := range this.migrationContext.SharedColumns.Columns() { - column := this.migrationContext.SharedColumns.Columns()[i] - mappedColumn := this.migrationContext.MappedSharedColumns.Columns()[i] + for i := range isp.migrationContext.SharedColumns.Columns() { + column := isp.migrationContext.SharedColumns.Columns()[i] + mappedColumn := isp.migrationContext.MappedSharedColumns.Columns()[i] if column.Name == mappedColumn.Name && column.Type == sql.DateTimeColumnType && mappedColumn.Type == sql.TimestampColumnType { - this.migrationContext.MappedSharedColumns.SetConvertDatetimeToTimestamp(column.Name, this.migrationContext.ApplierTimeZone) + isp.migrationContext.MappedSharedColumns.SetConvertDatetimeToTimestamp(column.Name, isp.migrationContext.ApplierTimeZone) } if column.Name == mappedColumn.Name && column.Type == sql.EnumColumnType && mappedColumn.Charset != "" { - this.migrationContext.MappedSharedColumns.SetEnumToTextConversion(column.Name) - 
this.migrationContext.MappedSharedColumns.SetEnumValues(column.Name, column.EnumValues) + isp.migrationContext.MappedSharedColumns.SetEnumToTextConversion(column.Name) + isp.migrationContext.MappedSharedColumns.SetEnumValues(column.Name, column.EnumValues) } if column.Name == mappedColumn.Name && column.Charset != mappedColumn.Charset { - this.migrationContext.SharedColumns.SetCharsetConversion(column.Name, column.Charset, mappedColumn.Charset) + isp.migrationContext.SharedColumns.SetCharsetConversion(column.Name, column.Charset, mappedColumn.Charset) } } - for _, column := range this.migrationContext.UniqueKey.Columns.Columns() { - if this.migrationContext.GhostTableVirtualColumns.GetColumn(column.Name) != nil { + for _, column := range isp.migrationContext.UniqueKey.Columns.Columns() { + if isp.migrationContext.GhostTableVirtualColumns.GetColumn(column.Name) != nil { // this is a virtual column continue } - if this.migrationContext.MappedSharedColumns.HasTimezoneConversion(column.Name) { - return fmt.Errorf("No support at this time for converting a column from DATETIME to TIMESTAMP that is also part of the chosen unique key. Column: %s, key: %s", column.Name, this.migrationContext.UniqueKey.Name) + if isp.migrationContext.MappedSharedColumns.HasTimezoneConversion(column.Name) { + return fmt.Errorf("no support at this time for converting a column from DATETIME to TIMESTAMP that is also part of the chosen unique key. 
Column: %s, key: %s", column.Name, isp.migrationContext.UniqueKey.Name) } } @@ -218,15 +218,15 @@ func (this *Inspector) inspectOriginalAndGhostTables() (err error) { } // validateConnection issues a simple can-connect to MySQL -func (this *Inspector) validateConnection() error { - version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name) - this.migrationContext.InspectorMySQLVersion = version +func (isp *Inspector) validateConnection() error { + version, err := base.ValidateConnection(isp.db, isp.connectionConfig, isp.migrationContext, isp.name) + isp.migrationContext.InspectorMySQLVersion = version return err } // validateGrants verifies the user by which we're executing has necessary grants // to do its thing. -func (this *Inspector) validateGrants() error { +func (isp *Inspector) validateGrants() error { query := `show /* gh-ost */ grants for current_user()` foundAll := false foundSuper := false @@ -234,7 +234,7 @@ func (this *Inspector) validateGrants() error { foundReplicationSlave := false foundDBAll := false - err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error { + err := sqlutils.QueryRowsMap(isp.db, query, func(rowMap sqlutils.RowMap) error { for _, grantData := range rowMap { grant := grantData.String if strings.Contains(grant, `GRANT ALL PRIVILEGES ON *.*`) { @@ -249,16 +249,16 @@ func (this *Inspector) validateGrants() error { if strings.Contains(grant, `REPLICATION SLAVE`) && strings.Contains(grant, ` ON *.*`) { foundReplicationSlave = true } - if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", this.migrationContext.DatabaseName)) { + if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", isp.migrationContext.DatabaseName)) { foundDBAll = true } - if strings.Contains(grant, fmt.Sprintf("GRANT ALL PRIVILEGES ON `%s`.*", strings.Replace(this.migrationContext.DatabaseName, "_", "\\_", -1))) { + if strings.Contains(grant, fmt.Sprintf("GRANT 
ALL PRIVILEGES ON `%s`.*", strings.ReplaceAll(isp.migrationContext.DatabaseName, "_", "\\_"))) { foundDBAll = true } if base.StringContainsAll(grant, `ALTER`, `CREATE`, `DELETE`, `DROP`, `INDEX`, `INSERT`, `LOCK TABLES`, `SELECT`, `TRIGGER`, `UPDATE`, ` ON *.*`) { foundDBAll = true } - if base.StringContainsAll(grant, `ALTER`, `CREATE`, `DELETE`, `DROP`, `INDEX`, `INSERT`, `LOCK TABLES`, `SELECT`, `TRIGGER`, `UPDATE`, fmt.Sprintf(" ON `%s`.*", this.migrationContext.DatabaseName)) { + if base.StringContainsAll(grant, `ALTER`, `CREATE`, `DELETE`, `DROP`, `INDEX`, `INSERT`, `LOCK TABLES`, `SELECT`, `TRIGGER`, `UPDATE`, fmt.Sprintf(" ON `%s`.*", isp.migrationContext.DatabaseName)) { foundDBAll = true } } @@ -267,41 +267,41 @@ func (this *Inspector) validateGrants() error { if err != nil { return err } - this.migrationContext.HasSuperPrivilege = foundSuper + isp.migrationContext.HasSuperPrivilege = foundSuper if foundAll { - this.migrationContext.Log.Infof("User has ALL privileges") + isp.migrationContext.Log.Infof("User has ALL privileges") return nil } if foundSuper && foundReplicationSlave && foundDBAll { - this.migrationContext.Log.Infof("User has SUPER, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName)) + isp.migrationContext.Log.Infof("User has SUPER, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(isp.migrationContext.DatabaseName)) return nil } if foundReplicationClient && foundReplicationSlave && foundDBAll { - this.migrationContext.Log.Infof("User has REPLICATION CLIENT, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(this.migrationContext.DatabaseName)) + isp.migrationContext.Log.Infof("User has REPLICATION CLIENT, REPLICATION SLAVE privileges, and has ALL privileges on %s.*", sql.EscapeName(isp.migrationContext.DatabaseName)) return nil } - this.migrationContext.Log.Debugf("Privileges: Super: %t, REPLICATION CLIENT: %t, REPLICATION 
SLAVE: %t, ALL on *.*: %t, ALL on %s.*: %t", foundSuper, foundReplicationClient, foundReplicationSlave, foundAll, sql.EscapeName(this.migrationContext.DatabaseName), foundDBAll) - return this.migrationContext.Log.Errorf("User has insufficient privileges for migration. Needed: SUPER|REPLICATION CLIENT, REPLICATION SLAVE and ALL on %s.*", sql.EscapeName(this.migrationContext.DatabaseName)) + isp.migrationContext.Log.Debugf("Privileges: Super: %t, REPLICATION CLIENT: %t, REPLICATION SLAVE: %t, ALL on *.*: %t, ALL on %s.*: %t", foundSuper, foundReplicationClient, foundReplicationSlave, foundAll, sql.EscapeName(isp.migrationContext.DatabaseName), foundDBAll) + return isp.migrationContext.Log.Errorf("user has insufficient privileges for migration. Needed: SUPER|REPLICATION CLIENT, REPLICATION SLAVE and ALL on %s.*", sql.EscapeName(isp.migrationContext.DatabaseName)) } // restartReplication is required so that we are _certain_ the binlog format and // row image settings have actually been applied to the replication thread. 
// It is entirely possible, for example, that the replication is using 'STATEMENT' // binlog format even as the variable says 'ROW' -func (this *Inspector) restartReplication() error { - this.migrationContext.Log.Infof("Restarting replication on %s to make sure binlog settings apply to replication thread", this.connectionConfig.Key.String()) +func (isp *Inspector) restartReplication() error { + isp.migrationContext.Log.Infof("Restarting replication on %s to make sure binlog settings apply to replication thread", isp.connectionConfig.Key.String()) - masterKey, _ := mysql.GetMasterKeyFromSlaveStatus(this.dbVersion, this.connectionConfig) + masterKey, _ := mysql.GetMasterKeyFromSlaveStatus(isp.dbVersion, isp.connectionConfig) if masterKey == nil { // This is not a replica return nil } var stopError, startError error - replicaTerm := mysql.ReplicaTermFor(this.dbVersion, `slave`) - _, stopError = sqlutils.ExecNoPrepare(this.db, fmt.Sprintf("stop %s", replicaTerm)) - _, startError = sqlutils.ExecNoPrepare(this.db, fmt.Sprintf("start %s", replicaTerm)) + replicaTerm := mysql.ReplicaTermFor(isp.dbVersion, `slave`) + _, stopError = sqlutils.ExecNoPrepare(isp.db, fmt.Sprintf("stop %s", replicaTerm)) + _, startError = sqlutils.ExecNoPrepare(isp.db, fmt.Sprintf("start %s", replicaTerm)) if stopError != nil { return stopError } @@ -312,32 +312,32 @@ func (this *Inspector) restartReplication() error { // loop until replication is running unless we hit a max timeout. 
startTime := time.Now() for { - replicationRunning, err := this.validateReplicationRestarted() + replicationRunning, err := isp.validateReplicationRestarted() if err != nil { - return fmt.Errorf("Failed to validate if replication had been restarted: %w", err) + return fmt.Errorf("failed to validate if replication had been restarted: %w", err) } if replicationRunning { break } if time.Since(startTime) > startReplicationMaxWait { - return fmt.Errorf("Replication did not restart within the maximum wait time of %s", startReplicationMaxWait) + return fmt.Errorf("replication did not restart within the maximum wait time of %s", startReplicationMaxWait) } - this.migrationContext.Log.Debugf("Replication not yet restarted, waiting...") + isp.migrationContext.Log.Debugf("Replication not yet restarted, waiting...") time.Sleep(startReplicationPostWait) } - this.migrationContext.Log.Debugf("Replication restarted") + isp.migrationContext.Log.Debugf("Replication restarted") return nil } // validateReplicationRestarted checks that the Slave_IO_Running and Slave_SQL_Running are both 'Yes' // returns true if both are 'Yes', false otherwise -func (this *Inspector) validateReplicationRestarted() (bool, error) { - errNotRunning := fmt.Errorf("Replication not running on %s", this.connectionConfig.Key.String()) - query := fmt.Sprintf("show /* gh-ost */ %s", mysql.ReplicaTermFor(this.dbVersion, "slave status")) - err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error { - ioRunningTerm := mysql.ReplicaTermFor(this.dbVersion, "Slave_IO_Running") - sqlRunningTerm := mysql.ReplicaTermFor(this.dbVersion, "Slave_SQL_Running") +func (isp *Inspector) validateReplicationRestarted() (bool, error) { + errNotRunning := fmt.Errorf("replication not running on %s", isp.connectionConfig.Key.String()) + query := fmt.Sprintf("show /* gh-ost */ %s", mysql.ReplicaTermFor(isp.dbVersion, "slave status")) + err := sqlutils.QueryRowsMap(isp.db, query, func(rowMap sqlutils.RowMap) error { 
+ ioRunningTerm := mysql.ReplicaTermFor(isp.dbVersion, "Slave_IO_Running") + sqlRunningTerm := mysql.ReplicaTermFor(isp.dbVersion, "Slave_SQL_Running") if rowMap.GetString(ioRunningTerm) != "Yes" || rowMap.GetString(sqlRunningTerm) != "Yes" { return errNotRunning } @@ -356,26 +356,26 @@ func (this *Inspector) validateReplicationRestarted() (bool, error) { // applyBinlogFormat sets ROW binlog format and restarts replication to make // the replication thread apply it. -func (this *Inspector) applyBinlogFormat() error { - if this.migrationContext.RequiresBinlogFormatChange() { - if !this.migrationContext.SwitchToRowBinlogFormat { - return fmt.Errorf("Existing binlog_format is %s. Am not switching it to ROW unless you specify --switch-to-rbr", this.migrationContext.OriginalBinlogFormat) +func (isp *Inspector) applyBinlogFormat() error { + if isp.migrationContext.RequiresBinlogFormatChange() { + if !isp.migrationContext.SwitchToRowBinlogFormat { + return fmt.Errorf("existing binlog_format is %s. 
Am not switching it to ROW unless you specify --switch-to-rbr", isp.migrationContext.OriginalBinlogFormat) } - if _, err := sqlutils.ExecNoPrepare(this.db, `set global binlog_format='ROW'`); err != nil { + if _, err := sqlutils.ExecNoPrepare(isp.db, `set global binlog_format='ROW'`); err != nil { return err } - if _, err := sqlutils.ExecNoPrepare(this.db, `set session binlog_format='ROW'`); err != nil { + if _, err := sqlutils.ExecNoPrepare(isp.db, `set session binlog_format='ROW'`); err != nil { return err } - if err := this.restartReplication(); err != nil { + if err := isp.restartReplication(); err != nil { return err } - this.migrationContext.Log.Debugf("'ROW' binlog format applied") + isp.migrationContext.Log.Debugf("'ROW' binlog format applied") return nil } // We already have RBR, no explicit switch - if !this.migrationContext.AssumeRBR { - if err := this.restartReplication(); err != nil { + if !isp.migrationContext.AssumeRBR { + if err := isp.restartReplication(); err != nil { return err } } @@ -383,22 +383,22 @@ func (this *Inspector) applyBinlogFormat() error { } // validateBinlogs checks that binary log configuration is good to go -func (this *Inspector) validateBinlogs() error { +func (isp *Inspector) validateBinlogs() error { query := `select /* gh-ost */@@global.log_bin, @@global.binlog_format` var hasBinaryLogs bool - if err := this.db.QueryRow(query).Scan(&hasBinaryLogs, &this.migrationContext.OriginalBinlogFormat); err != nil { + if err := isp.db.QueryRow(query).Scan(&hasBinaryLogs, &isp.migrationContext.OriginalBinlogFormat); err != nil { return err } if !hasBinaryLogs { - return fmt.Errorf("%s must have binary logs enabled", this.connectionConfig.Key.String()) + return fmt.Errorf("%s must have binary logs enabled", isp.connectionConfig.Key.String()) } - if this.migrationContext.RequiresBinlogFormatChange() { - if !this.migrationContext.SwitchToRowBinlogFormat { - return fmt.Errorf("You must be using ROW binlog format. 
I can switch it for you, provided --switch-to-rbr and that %s doesn't have replicas", this.connectionConfig.Key.String()) + if isp.migrationContext.RequiresBinlogFormatChange() { + if !isp.migrationContext.SwitchToRowBinlogFormat { + return fmt.Errorf("you must be using ROW binlog format. I can switch it for you, provided --switch-to-rbr and that %s doesn't have replicas", isp.connectionConfig.Key.String()) } - query := fmt.Sprintf("show /* gh-ost */ %s", mysql.ReplicaTermFor(this.dbVersion, `slave hosts`)) + query := fmt.Sprintf("show /* gh-ost */ %s", mysql.ReplicaTermFor(isp.dbVersion, `slave hosts`)) countReplicas := 0 - err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error { + err := sqlutils.QueryRowsMap(isp.db, query, func(rowMap sqlutils.RowMap) error { countReplicas++ return nil }) @@ -406,80 +406,80 @@ func (this *Inspector) validateBinlogs() error { return err } if countReplicas > 0 { - return fmt.Errorf("%s has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat) + return fmt.Errorf("%s has %s binlog_format, but I'm too scared to change it to ROW because it has replicas. Bailing out", isp.connectionConfig.Key.String(), isp.migrationContext.OriginalBinlogFormat) } - this.migrationContext.Log.Infof("%s has %s binlog_format. I will change it to ROW, and will NOT change it back, even in the event of failure.", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogFormat) + isp.migrationContext.Log.Infof("%s has %s binlog_format. 
I will change it to ROW, and will NOT change it back, even in the event of failure.", isp.connectionConfig.Key.String(), isp.migrationContext.OriginalBinlogFormat) } query = `select /* gh-ost */ @@global.binlog_row_image` - if err := this.db.QueryRow(query).Scan(&this.migrationContext.OriginalBinlogRowImage); err != nil { + if err := isp.db.QueryRow(query).Scan(&isp.migrationContext.OriginalBinlogRowImage); err != nil { return err } - this.migrationContext.OriginalBinlogRowImage = strings.ToUpper(this.migrationContext.OriginalBinlogRowImage) - if this.migrationContext.OriginalBinlogRowImage != "FULL" { - return fmt.Errorf("%s has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. You may `set global binlog_row_image='full'` and try again", this.connectionConfig.Key.String(), this.migrationContext.OriginalBinlogRowImage) + isp.migrationContext.OriginalBinlogRowImage = strings.ToUpper(isp.migrationContext.OriginalBinlogRowImage) + if isp.migrationContext.OriginalBinlogRowImage != "FULL" { + return fmt.Errorf("%s has '%s' binlog_row_image, and only 'FULL' is supported. This operation cannot proceed. 
You may `set global binlog_row_image='full'` and try again", isp.connectionConfig.Key.String(), isp.migrationContext.OriginalBinlogRowImage) } - this.migrationContext.Log.Infof("binary logs validated on %s", this.connectionConfig.Key.String()) + isp.migrationContext.Log.Infof("binary logs validated on %s", isp.connectionConfig.Key.String()) return nil } // validateGTIDConfig checks that the GTID configuration is good to go -func (this *Inspector) validateGTIDConfig() error { +func (isp *Inspector) validateGTIDConfig() error { var gtidMode, enforceGtidConsistency string query := `select @@global.gtid_mode, @@global.enforce_gtid_consistency` - if err := this.db.QueryRow(query).Scan(>idMode, &enforceGtidConsistency); err != nil { + if err := isp.db.QueryRow(query).Scan(>idMode, &enforceGtidConsistency); err != nil { return err } enforceGtidConsistency = strings.ToUpper(enforceGtidConsistency) if strings.ToUpper(gtidMode) != "ON" || (enforceGtidConsistency != "ON" && enforceGtidConsistency != "1") { - return fmt.Errorf("%s must have gtid_mode=ON and enforce_gtid_consistency=ON to use GTID support", this.connectionConfig.Key.String()) + return fmt.Errorf("%s must have gtid_mode=ON and enforce_gtid_consistency=ON to use GTID support", isp.connectionConfig.Key.String()) } - this.migrationContext.Log.Infof("gtid config validated on %s", this.connectionConfig.Key.String()) + isp.migrationContext.Log.Infof("gtid config validated on %s", isp.connectionConfig.Key.String()) return nil } // validateLogSlaveUpdates checks that binary log log_slave_updates is set. 
This test is not required when migrating on replica or when migrating directly on master -func (this *Inspector) validateLogSlaveUpdates() error { +func (isp *Inspector) validateLogSlaveUpdates() error { query := `select /* gh-ost */ @@global.log_slave_updates` var logSlaveUpdates bool - if err := this.db.QueryRow(query).Scan(&logSlaveUpdates); err != nil { + if err := isp.db.QueryRow(query).Scan(&logSlaveUpdates); err != nil { return err } if logSlaveUpdates { - this.migrationContext.Log.Infof("log_slave_updates validated on %s", this.connectionConfig.Key.String()) + isp.migrationContext.Log.Infof("log_slave_updates validated on %s", isp.connectionConfig.Key.String()) return nil } - if this.migrationContext.IsTungsten { - this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but --tungsten provided, so I'm proceeding", this.connectionConfig.Key.String()) + if isp.migrationContext.IsTungsten { + isp.migrationContext.Log.Warningf("log_slave_updates not found on %s, but --tungsten provided, so I'm proceeding", isp.connectionConfig.Key.String()) return nil } - if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica { - return fmt.Errorf("%s must have log_slave_updates enabled for testing/migrating on replica", this.connectionConfig.Key.String()) + if isp.migrationContext.TestOnReplica || isp.migrationContext.MigrateOnReplica { + return fmt.Errorf("%s must have log_slave_updates enabled for testing/migrating on replica", isp.connectionConfig.Key.String()) } - if this.migrationContext.InspectorIsAlsoApplier() { - this.migrationContext.Log.Warningf("log_slave_updates not found on %s, but executing directly on master, so I'm proceeding", this.connectionConfig.Key.String()) + if isp.migrationContext.InspectorIsAlsoApplier() { + isp.migrationContext.Log.Warningf("log_slave_updates not found on %s, but executing directly on master, so I'm proceeding", isp.connectionConfig.Key.String()) return nil } - return fmt.Errorf("%s must have 
log_slave_updates enabled for executing migration", this.connectionConfig.Key.String()) + return fmt.Errorf("%s must have log_slave_updates enabled for executing migration", isp.connectionConfig.Key.String()) } // validateTable makes sure the table we need to operate on actually exists -func (this *Inspector) validateTable() error { - query := fmt.Sprintf(`show /* gh-ost */ table status from %s like '%s'`, sql.EscapeName(this.migrationContext.DatabaseName), this.migrationContext.OriginalTableName) +func (isp *Inspector) validateTable() error { + query := fmt.Sprintf(`show /* gh-ost */ table status from %s like '%s'`, sql.EscapeName(isp.migrationContext.DatabaseName), isp.migrationContext.OriginalTableName) tableFound := false - err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error { - this.migrationContext.TableEngine = rowMap.GetString("Engine") - this.migrationContext.RowsEstimate = rowMap.GetInt64("Rows") - this.migrationContext.UsedRowsEstimateMethod = base.TableStatusRowsEstimate + err := sqlutils.QueryRowsMap(isp.db, query, func(rowMap sqlutils.RowMap) error { + isp.migrationContext.TableEngine = rowMap.GetString("Engine") + isp.migrationContext.RowsEstimate = rowMap.GetInt64("Rows") + isp.migrationContext.UsedRowsEstimateMethod = base.TableStatusRowsEstimate if rowMap.GetString("Comment") == "VIEW" { - return fmt.Errorf("%s.%s is a VIEW, not a real table. Bailing out", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) + return fmt.Errorf("%s.%s is a VIEW, not a real table. 
Bailing out", sql.EscapeName(isp.migrationContext.DatabaseName), sql.EscapeName(isp.migrationContext.OriginalTableName)) } tableFound = true @@ -489,17 +489,17 @@ func (this *Inspector) validateTable() error { return err } if !tableFound { - return this.migrationContext.Log.Errorf("Cannot find table %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) + return isp.migrationContext.Log.Errorf("cannot find table %s.%s!", sql.EscapeName(isp.migrationContext.DatabaseName), sql.EscapeName(isp.migrationContext.OriginalTableName)) } - this.migrationContext.Log.Infof("Table found. Engine=%s", this.migrationContext.TableEngine) - this.migrationContext.Log.Debugf("Estimated number of rows via STATUS: %d", this.migrationContext.RowsEstimate) + isp.migrationContext.Log.Infof("Table found. Engine=%s", isp.migrationContext.TableEngine) + isp.migrationContext.Log.Debugf("Estimated number of rows via STATUS: %d", isp.migrationContext.RowsEstimate) return nil } // validateTableForeignKeys makes sure no foreign keys exist on the migrated table -func (this *Inspector) validateTableForeignKeys(allowChildForeignKeys bool) error { - if this.migrationContext.SkipForeignKeyChecks { - this.migrationContext.Log.Warning("--skip-foreign-key-checks provided: will not check for foreign keys") +func (isp *Inspector) validateTableForeignKeys(allowChildForeignKeys bool) error { + if isp.migrationContext.SkipForeignKeyChecks { + isp.migrationContext.Log.Warning("--skip-foreign-key-checks provided: will not check for foreign keys") return nil } query := ` @@ -517,39 +517,39 @@ func (this *Inspector) validateTableForeignKeys(allowChildForeignKeys bool) erro )` numParentForeignKeys := 0 numChildForeignKeys := 0 - err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error { + err := sqlutils.QueryRowsMap(isp.db, query, func(m sqlutils.RowMap) error { numChildForeignKeys = m.GetInt("num_child_side_fk") numParentForeignKeys 
= m.GetInt("num_parent_side_fk") return nil }, - this.migrationContext.DatabaseName, - this.migrationContext.OriginalTableName, - this.migrationContext.DatabaseName, - this.migrationContext.OriginalTableName, - this.migrationContext.DatabaseName, - this.migrationContext.OriginalTableName, - this.migrationContext.DatabaseName, - this.migrationContext.OriginalTableName, + isp.migrationContext.DatabaseName, + isp.migrationContext.OriginalTableName, + isp.migrationContext.DatabaseName, + isp.migrationContext.OriginalTableName, + isp.migrationContext.DatabaseName, + isp.migrationContext.OriginalTableName, + isp.migrationContext.DatabaseName, + isp.migrationContext.OriginalTableName, ) if err != nil { return err } if numParentForeignKeys > 0 { - return this.migrationContext.Log.Errorf("Found %d parent-side foreign keys on %s.%s. Parent-side foreign keys are not supported. Bailing out", numParentForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) + return isp.migrationContext.Log.Errorf("found %d parent-side foreign keys on %s.%s. Parent-side foreign keys are not supported. Bailing out", numParentForeignKeys, sql.EscapeName(isp.migrationContext.DatabaseName), sql.EscapeName(isp.migrationContext.OriginalTableName)) } if numChildForeignKeys > 0 { if allowChildForeignKeys { - this.migrationContext.Log.Debugf("Foreign keys found and will be dropped, as per given --discard-foreign-keys flag") + isp.migrationContext.Log.Debugf("Foreign keys found and will be dropped, as per given --discard-foreign-keys flag") return nil } - return this.migrationContext.Log.Errorf("Found %d child-side foreign keys on %s.%s. Child-side foreign keys are not supported. Bailing out", numChildForeignKeys, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) + return isp.migrationContext.Log.Errorf("found %d child-side foreign keys on %s.%s. 
Child-side foreign keys are not supported. Bailing out", numChildForeignKeys, sql.EscapeName(isp.migrationContext.DatabaseName), sql.EscapeName(isp.migrationContext.OriginalTableName)) } - this.migrationContext.Log.Debugf("Validated no foreign keys exist on table") + isp.migrationContext.Log.Debugf("Validated no foreign keys exist on table") return nil } // validateTableTriggers makes sure no triggers exist on the migrated table. if --include_triggers is used then it fetches the triggers -func (this *Inspector) validateTableTriggers() error { +func (isp *Inspector) validateTableTriggers() error { query := ` SELECT /* gh-ost */ COUNT(*) AS num_triggers FROM @@ -558,46 +558,46 @@ func (this *Inspector) validateTableTriggers() error { TRIGGER_SCHEMA=? AND EVENT_OBJECT_TABLE=?` numTriggers := 0 - err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error { + err := sqlutils.QueryRowsMap(isp.db, query, func(rowMap sqlutils.RowMap) error { numTriggers = rowMap.GetInt("num_triggers") return nil }, - this.migrationContext.DatabaseName, - this.migrationContext.OriginalTableName, + isp.migrationContext.DatabaseName, + isp.migrationContext.OriginalTableName, ) if err != nil { return err } if numTriggers > 0 { - if this.migrationContext.IncludeTriggers { - this.migrationContext.Log.Infof("Found %d triggers on %s.%s.", numTriggers, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) - this.migrationContext.Triggers, err = mysql.GetTriggers(this.db, this.migrationContext.DatabaseName, this.migrationContext.OriginalTableName) + if isp.migrationContext.IncludeTriggers { + isp.migrationContext.Log.Infof("Found %d triggers on %s.%s.", numTriggers, sql.EscapeName(isp.migrationContext.DatabaseName), sql.EscapeName(isp.migrationContext.OriginalTableName)) + isp.migrationContext.Triggers, err = mysql.GetTriggers(isp.db, isp.migrationContext.DatabaseName, isp.migrationContext.OriginalTableName) if err != nil { 
return err } - if err := this.validateGhostTriggersDontExist(); err != nil { + if err := isp.validateGhostTriggersDontExist(); err != nil { return err } - if err := this.validateGhostTriggersLength(); err != nil { + if err := isp.validateGhostTriggersLength(); err != nil { return err } return nil } - return this.migrationContext.Log.Errorf("Found triggers on %s.%s. Tables with triggers are supported only when using \"include-triggers\" flag. Bailing out", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) + return isp.migrationContext.Log.Errorf("found triggers on %s.%s. Tables with triggers are supported only when using \"include-triggers\" flag. Bailing out", sql.EscapeName(isp.migrationContext.DatabaseName), sql.EscapeName(isp.migrationContext.OriginalTableName)) } - this.migrationContext.Log.Debugf("Validated no triggers exist on table") + isp.migrationContext.Log.Debugf("Validated no triggers exist on table") return nil } // verifyTriggersDontExist verifies before createing new triggers we want to make sure these triggers dont exist already in the DB -func (this *Inspector) validateGhostTriggersDontExist() error { - if len(this.migrationContext.Triggers) > 0 { +func (isp *Inspector) validateGhostTriggersDontExist() error { + if len(isp.migrationContext.Triggers) > 0 { var foundTriggers []string - for _, trigger := range this.migrationContext.Triggers { - triggerName := this.migrationContext.GetGhostTriggerName(trigger.Name) + for _, trigger := range isp.migrationContext.Triggers { + triggerName := isp.migrationContext.GetGhostTriggerName(trigger.Name) query := "select 1 from information_schema.triggers where trigger_name = ? and trigger_schema = ?" 
- err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error { + err := sqlutils.QueryRowsMap(isp.db, query, func(rowMap sqlutils.RowMap) error { triggerExists := rowMap.GetInt("1") if triggerExists == 1 { foundTriggers = append(foundTriggers, triggerName) @@ -605,44 +605,44 @@ func (this *Inspector) validateGhostTriggersDontExist() error { return nil }, triggerName, - this.migrationContext.DatabaseName, + isp.migrationContext.DatabaseName, ) if err != nil { return err } } if len(foundTriggers) > 0 { - return this.migrationContext.Log.Errorf("Found gh-ost triggers (%s). Please use a different suffix or drop them. Bailing out", strings.Join(foundTriggers, ",")) + return isp.migrationContext.Log.Errorf("found gh-ost triggers (%s). Please use a different suffix or drop them. Bailing out", strings.Join(foundTriggers, ",")) } } return nil } -func (this *Inspector) validateGhostTriggersLength() error { - if len(this.migrationContext.Triggers) > 0 { +func (isp *Inspector) validateGhostTriggersLength() error { + if len(isp.migrationContext.Triggers) > 0 { var foundTriggers []string - for _, trigger := range this.migrationContext.Triggers { - triggerName := this.migrationContext.GetGhostTriggerName(trigger.Name) - if ok := this.migrationContext.ValidateGhostTriggerLengthBelowMaxLength(triggerName); !ok { + for _, trigger := range isp.migrationContext.Triggers { + triggerName := isp.migrationContext.GetGhostTriggerName(trigger.Name) + if ok := isp.migrationContext.ValidateGhostTriggerLengthBelowMaxLength(triggerName); !ok { foundTriggers = append(foundTriggers, triggerName) } } if len(foundTriggers) > 0 { - return this.migrationContext.Log.Errorf("Gh-ost triggers (%s) length > %d characters. Bailing out", strings.Join(foundTriggers, ","), mysql.MaxTableNameLength) + return isp.migrationContext.Log.Errorf("gh-ost triggers (%s) length > %d characters. 
Bailing out", strings.Join(foundTriggers, ","), mysql.MaxTableNameLength) } } return nil } // estimateTableRowsViaExplain estimates number of rows on original table -func (this *Inspector) estimateTableRowsViaExplain() error { - query := fmt.Sprintf(`explain select /* gh-ost */ * from %s.%s where 1=1`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) +func (isp *Inspector) estimateTableRowsViaExplain() error { + query := fmt.Sprintf(`explain select /* gh-ost */ * from %s.%s where 1=1`, sql.EscapeName(isp.migrationContext.DatabaseName), sql.EscapeName(isp.migrationContext.OriginalTableName)) outputFound := false - err := sqlutils.QueryRowsMap(this.db, query, func(rowMap sqlutils.RowMap) error { - this.migrationContext.RowsEstimate = rowMap.GetInt64("rows") - this.migrationContext.UsedRowsEstimateMethod = base.ExplainRowsEstimate + err := sqlutils.QueryRowsMap(isp.db, query, func(rowMap sqlutils.RowMap) error { + isp.migrationContext.RowsEstimate = rowMap.GetInt64("rows") + isp.migrationContext.UsedRowsEstimateMethod = base.ExplainRowsEstimate outputFound = true return nil @@ -651,20 +651,20 @@ func (this *Inspector) estimateTableRowsViaExplain() error { return err } if !outputFound { - return this.migrationContext.Log.Errorf("Cannot run EXPLAIN on %s.%s!", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) + return isp.migrationContext.Log.Errorf("cannot run EXPLAIN on %s.%s!", sql.EscapeName(isp.migrationContext.DatabaseName), sql.EscapeName(isp.migrationContext.OriginalTableName)) } - this.migrationContext.Log.Infof("Estimated number of rows via EXPLAIN: %d", this.migrationContext.RowsEstimate) + isp.migrationContext.Log.Infof("Estimated number of rows via EXPLAIN: %d", isp.migrationContext.RowsEstimate) return nil } // CountTableRows counts exact number of rows on the original table -func (this *Inspector) CountTableRows(ctx context.Context) error 
{ - atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 1) - defer atomic.StoreInt64(&this.migrationContext.CountingRowsFlag, 0) +func (isp *Inspector) CountTableRows(ctx context.Context) error { + atomic.StoreInt64(&isp.migrationContext.CountingRowsFlag, 1) + defer atomic.StoreInt64(&isp.migrationContext.CountingRowsFlag, 0) - this.migrationContext.Log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while") + isp.migrationContext.Log.Infof("As instructed, I'm issuing a SELECT COUNT(*) on the table. This may take a while") - conn, err := this.db.Conn(ctx) + conn, err := isp.db.Conn(ctx) if err != nil { return err } @@ -675,30 +675,30 @@ func (this *Inspector) CountTableRows(ctx context.Context) error { return err } - query := fmt.Sprintf(`select /* gh-ost */ count(*) as count_rows from %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) + query := fmt.Sprintf(`select /* gh-ost */ count(*) as count_rows from %s.%s`, sql.EscapeName(isp.migrationContext.DatabaseName), sql.EscapeName(isp.migrationContext.OriginalTableName)) var rowsEstimate int64 if err := conn.QueryRowContext(ctx, query).Scan(&rowsEstimate); err != nil { if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - this.migrationContext.Log.Infof("exact row count cancelled (%s), likely because I'm about to cut over. I'm going to kill that query.", ctx.Err()) - return mysql.Kill(this.db, connectionID) + isp.migrationContext.Log.Infof("exact row count cancelled (%s), likely because I'm about to cut over. I'm going to kill that query.", ctx.Err()) + return mysql.Kill(isp.db, connectionID) } return err } // row count query finished. nil out the cancel func, so the main migration thread // doesn't bother calling it after row copy is done. 
- this.migrationContext.SetCountTableRowsCancelFunc(nil) + isp.migrationContext.SetCountTableRowsCancelFunc(nil) - atomic.StoreInt64(&this.migrationContext.RowsEstimate, rowsEstimate) - this.migrationContext.UsedRowsEstimateMethod = base.CountRowsEstimate + atomic.StoreInt64(&isp.migrationContext.RowsEstimate, rowsEstimate) + isp.migrationContext.UsedRowsEstimateMethod = base.CountRowsEstimate - this.migrationContext.Log.Infof("Exact number of rows via COUNT: %d", rowsEstimate) + isp.migrationContext.Log.Infof("Exact number of rows via COUNT: %d", rowsEstimate) return nil } // applyColumnTypes -func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsLists ...*sql.ColumnList) error { +func (isp *Inspector) applyColumnTypes(databaseName, tableName string, columnsLists ...*sql.ColumnList) error { query := ` select /* gh-ost */ * from @@ -706,7 +706,7 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL where table_schema=? and table_name=?` - err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error { + err := sqlutils.QueryRowsMap(isp.db, query, func(m sqlutils.RowMap) error { columnName := m.GetString("COLUMN_NAME") columnType := m.GetString("COLUMN_TYPE") columnOctetLength := m.GetUint("CHARACTER_OCTET_LENGTH") @@ -761,7 +761,7 @@ func (this *Inspector) applyColumnTypes(databaseName, tableName string, columnsL } // getAutoIncrementValue get's the original table's AUTO_INCREMENT value, if exists (0 value if not exists) -func (this *Inspector) getAutoIncrementValue(tableName string) (autoIncrement uint64, err error) { +func (isp *Inspector) getAutoIncrementValue(tableName string) (autoIncrement uint64, err error) { query := ` SELECT /* gh-ost */ AUTO_INCREMENT FROM @@ -770,16 +770,16 @@ func (this *Inspector) getAutoIncrementValue(tableName string) (autoIncrement ui TABLES.TABLE_SCHEMA = ? AND TABLES.TABLE_NAME = ? 
AND AUTO_INCREMENT IS NOT NULL` - err = sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error { + err = sqlutils.QueryRowsMap(isp.db, query, func(m sqlutils.RowMap) error { autoIncrement = m.GetUint64("AUTO_INCREMENT") return nil - }, this.migrationContext.DatabaseName, tableName) + }, isp.migrationContext.DatabaseName, tableName) return autoIncrement, err } // getCandidateUniqueKeys investigates a table and returns the list of unique keys // candidate for chunking -func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*sql.UniqueKey), err error) { +func (isp *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](*sql.UniqueKey), err error) { query := ` SELECT /* gh-ost */ COLUMNS.TABLE_SCHEMA, @@ -842,7 +842,7 @@ func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](* ELSE 100 END, COUNT_COLUMN_IN_INDEX` - err = sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error { + err = sqlutils.QueryRowsMap(isp.db, query, func(m sqlutils.RowMap) error { uniqueKey := &sql.UniqueKey{ Name: m.GetString("INDEX_NAME"), Columns: *sql.ParseColumnList(m.GetString("COLUMN_NAMES")), @@ -851,17 +851,17 @@ func (this *Inspector) getCandidateUniqueKeys(tableName string) (uniqueKeys [](* } uniqueKeys = append(uniqueKeys, uniqueKey) return nil - }, this.migrationContext.DatabaseName, tableName, this.migrationContext.DatabaseName, tableName) + }, isp.migrationContext.DatabaseName, tableName, isp.migrationContext.DatabaseName, tableName) if err != nil { return uniqueKeys, err } - this.migrationContext.Log.Debugf("Potential unique keys in %+v: %+v", tableName, uniqueKeys) + isp.migrationContext.Log.Debugf("Potential unique keys in %+v: %+v", tableName, uniqueKeys) return uniqueKeys, nil } // getSharedUniqueKeys returns the intersection of two given unique keys, // testing by list of columns -func (this *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys []*sql.UniqueKey) (uniqueKeys 
[]*sql.UniqueKey) { +func (isp *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys []*sql.UniqueKey) (uniqueKeys []*sql.UniqueKey) { // We actually do NOT rely on key name, just on the set of columns. This is because maybe // the ALTER is on the name itself... for _, originalUniqueKey := range originalUniqueKeys { @@ -879,7 +879,7 @@ func (this *Inspector) getSharedUniqueKeys(originalUniqueKeys, ghostUniqueKeys [ } // getSharedColumns returns the intersection of two lists of columns in same order as the first list -func (this *Inspector) getSharedColumns(originalColumns, ghostColumns *sql.ColumnList, originalVirtualColumns, ghostVirtualColumns *sql.ColumnList, columnRenameMap map[string]string) (*sql.ColumnList, *sql.ColumnList) { +func (isp *Inspector) getSharedColumns(originalColumns, ghostColumns *sql.ColumnList, originalVirtualColumns, ghostVirtualColumns *sql.ColumnList, columnRenameMap map[string]string) (*sql.ColumnList, *sql.ColumnList) { sharedColumnNames := []string{} for _, originalColumn := range originalColumns.Names() { isSharedColumn := false @@ -893,7 +893,7 @@ func (this *Inspector) getSharedColumns(originalColumns, ghostColumns *sql.Colum break } } - for droppedColumn := range this.migrationContext.DroppedColumnsMap { + for droppedColumn := range isp.migrationContext.DroppedColumnsMap { if strings.EqualFold(originalColumn, droppedColumn) { isSharedColumn = false break @@ -925,47 +925,47 @@ func (this *Inspector) getSharedColumns(originalColumns, ghostColumns *sql.Colum } // showCreateTable returns the `show create table` statement for given table -func (this *Inspector) showCreateTable(tableName string) (createTableStatement string, err error) { +func (isp *Inspector) showCreateTable(tableName string) (createTableStatement string, err error) { var dummy string - query := fmt.Sprintf(`show /* gh-ost */ create table %s.%s`, sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(tableName)) - err = 
this.db.QueryRow(query).Scan(&dummy, &createTableStatement) + query := fmt.Sprintf(`show /* gh-ost */ create table %s.%s`, sql.EscapeName(isp.migrationContext.DatabaseName), sql.EscapeName(tableName)) + err = isp.db.QueryRow(query).Scan(&dummy, &createTableStatement) return createTableStatement, err } // readChangelogState reads changelog hints -func (this *Inspector) readChangelogState(hint string) (string, error) { +func (isp *Inspector) readChangelogState(hint string) (string, error) { query := fmt.Sprintf(` select /* gh-ost */ hint, value from %s.%s where hint = ? and id <= 255`, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetChangelogTableName()), + sql.EscapeName(isp.migrationContext.DatabaseName), + sql.EscapeName(isp.migrationContext.GetChangelogTableName()), ) result := "" - err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error { + err := sqlutils.QueryRowsMap(isp.db, query, func(m sqlutils.RowMap) error { result = m.GetString("value") return nil }, hint) return result, err } -func (this *Inspector) getMasterConnectionConfig() (applierConfig *mysql.ConnectionConfig, err error) { - this.migrationContext.Log.Infof("Recursively searching for replication master") +func (isp *Inspector) getMasterConnectionConfig() (applierConfig *mysql.ConnectionConfig, err error) { + isp.migrationContext.Log.Infof("Recursively searching for replication master") visitedKeys := mysql.NewInstanceKeyMap() - return mysql.GetMasterConnectionConfigSafe(this.dbVersion, this.connectionConfig, visitedKeys, this.migrationContext.AllowedMasterMaster) + return mysql.GetMasterConnectionConfigSafe(isp.dbVersion, isp.connectionConfig, visitedKeys, isp.migrationContext.AllowedMasterMaster) } -func (this *Inspector) getReplicationLag() (replicationLag time.Duration, err error) { +func (isp *Inspector) getReplicationLag() (replicationLag time.Duration, err error) { replicationLag, err = mysql.GetReplicationLagFromSlaveStatus( 
- this.dbVersion, - this.informationSchemaDb, + isp.dbVersion, + isp.informationSchemaDb, ) return replicationLag, err } -func (this *Inspector) Teardown() { - this.db.Close() - this.informationSchemaDb.Close() +func (isp *Inspector) Teardown() { + isp.db.Close() + isp.informationSchemaDb.Close() } diff --git a/go/logic/migrator.go b/go/logic/migrator.go index 4e5f0cc42..9dc6041f5 100644 --- a/go/logic/migrator.go +++ b/go/logic/migrator.go @@ -23,8 +23,8 @@ import ( ) var ( - ErrMigratorUnsupportedRenameAlter = errors.New("ALTER statement seems to RENAME the table. This is not supported, and you should run your RENAME outside gh-ost.") - ErrMigrationNotAllowedOnMaster = errors.New("It seems like this migration attempt to run directly on master. Preferably it would be executed on a replica (this reduces load from the master). To proceed please provide --allow-on-master.") + ErrMigratorUnsupportedRenameAlter = errors.New("alter statement seems to RENAME the table. This is not supported, and you should run your RENAME outside gh-ost") + ErrMigrationNotAllowedOnMaster = errors.New("it seems like this migration attempt to run directly on master. Preferably it would be executed on a replica (this reduces load from the master). 
To proceed please provide --allow-on-master") RetrySleepFn = time.Sleep checkpointTimeout = 2 * time.Second ) @@ -125,10 +125,10 @@ func NewMigrator(context *base.MigrationContext, appVersion string) *Migrator { // sleepWhileTrue sleeps indefinitely until the given function returns 'false' // (or fails with error) -func (this *Migrator) sleepWhileTrue(operation func() (bool, error)) error { +func (mgtr *Migrator) sleepWhileTrue(operation func() (bool, error)) error { for { // Check for abort before continuing - if err := this.checkAbort(); err != nil { + if err := mgtr.checkAbort(); err != nil { return err } shouldSleep, err := operation() @@ -142,29 +142,29 @@ func (this *Migrator) sleepWhileTrue(operation func() (bool, error)) error { } } -func (this *Migrator) retryBatchCopyWithHooks(operation func() error, notFatalHint ...bool) (err error) { +func (mgtr *Migrator) retryBatchCopyWithHooks(operation func() error, notFatalHint ...bool) (err error) { wrappedOperation := func() error { if err := operation(); err != nil { - this.hooksExecutor.onBatchCopyRetry(err.Error()) + mgtr.hooksExecutor.onBatchCopyRetry(err.Error()) return err } return nil } - return this.retryOperation(wrappedOperation, notFatalHint...) + return mgtr.retryOperation(wrappedOperation, notFatalHint...) } // retryOperation attempts up to `count` attempts at running given function, // exiting as soon as it returns with non-error. 
-func (this *Migrator) retryOperation(operation func() error, notFatalHint ...bool) (err error) { - maxRetries := int(this.migrationContext.MaxRetries()) +func (mgtr *Migrator) retryOperation(operation func() error, notFatalHint ...bool) (err error) { + maxRetries := int(mgtr.migrationContext.MaxRetries()) for i := 0; i < maxRetries; i++ { if i != 0 { // sleep after previous iteration RetrySleepFn(1 * time.Second) } // Check for abort/context cancellation before each retry - if abortErr := this.checkAbort(); abortErr != nil { + if abortErr := mgtr.checkAbort(); abortErr != nil { return abortErr } err = operation() @@ -174,7 +174,7 @@ func (this *Migrator) retryOperation(operation func() error, notFatalHint ...boo // Check if this is an unrecoverable error (data consistency issues won't resolve on retry) if strings.Contains(err.Error(), "warnings detected") { if len(notFatalHint) == 0 { - _ = base.SendWithContext(this.migrationContext.GetContext(), this.migrationContext.PanicAbort, err) + _ = base.SendWithContext(mgtr.migrationContext.GetContext(), mgtr.migrationContext.PanicAbort, err) } return err } @@ -182,7 +182,7 @@ func (this *Migrator) retryOperation(operation func() error, notFatalHint ...boo } if len(notFatalHint) == 0 { // Use helper to prevent deadlock if listenOnPanicAbort already exited - _ = base.SendWithContext(this.migrationContext.GetContext(), this.migrationContext.PanicAbort, err) + _ = base.SendWithContext(mgtr.migrationContext.GetContext(), mgtr.migrationContext.PanicAbort, err) } return err } @@ -192,9 +192,9 @@ func (this *Migrator) retryOperation(operation func() error, notFatalHint ...boo // as soon as the function returns with non-error, or as soon as `MaxRetries` // attempts are reached. Wait intervals between attempts obey a maximum of // `ExponentialBackoffMaxInterval`. 
-func (this *Migrator) retryOperationWithExponentialBackoff(operation func() error, notFatalHint ...bool) (err error) { - maxRetries := int(this.migrationContext.MaxRetries()) - maxInterval := this.migrationContext.ExponentialBackoffMaxInterval +func (mgtr *Migrator) retryOperationWithExponentialBackoff(operation func() error, notFatalHint ...bool) (err error) { + maxRetries := int(mgtr.migrationContext.MaxRetries()) + maxInterval := mgtr.migrationContext.ExponentialBackoffMaxInterval for i := 0; i < maxRetries; i++ { interval := math.Min( float64(maxInterval), @@ -205,7 +205,7 @@ func (this *Migrator) retryOperationWithExponentialBackoff(operation func() erro RetrySleepFn(time.Duration(interval) * time.Second) } // Check for abort/context cancellation before each retry - if abortErr := this.checkAbort(); abortErr != nil { + if abortErr := mgtr.checkAbort(); abortErr != nil { return abortErr } err = operation() @@ -215,67 +215,67 @@ func (this *Migrator) retryOperationWithExponentialBackoff(operation func() erro // Check if this is an unrecoverable error (data consistency issues won't resolve on retry) if strings.Contains(err.Error(), "warnings detected") { if len(notFatalHint) == 0 { - _ = base.SendWithContext(this.migrationContext.GetContext(), this.migrationContext.PanicAbort, err) + _ = base.SendWithContext(mgtr.migrationContext.GetContext(), mgtr.migrationContext.PanicAbort, err) } return err } } if len(notFatalHint) == 0 { // Use helper to prevent deadlock if listenOnPanicAbort already exited - _ = base.SendWithContext(this.migrationContext.GetContext(), this.migrationContext.PanicAbort, err) + _ = base.SendWithContext(mgtr.migrationContext.GetContext(), mgtr.migrationContext.PanicAbort, err) } return err } // consumeRowCopyComplete blocks on the rowCopyComplete channel once, and then // consumes and drops any further incoming events that may be left hanging. 
-func (this *Migrator) consumeRowCopyComplete() { - if err := <-this.rowCopyComplete; err != nil { +func (mgtr *Migrator) consumeRowCopyComplete() { + if err := <-mgtr.rowCopyComplete; err != nil { // Abort synchronously to ensure checkAbort() sees the error immediately - this.abort(err) + mgtr.abort(err) // Don't mark row copy as complete if there was an error return } - atomic.StoreInt64(&this.rowCopyCompleteFlag, 1) - this.migrationContext.MarkRowCopyEndTime() + atomic.StoreInt64(&mgtr.rowCopyCompleteFlag, 1) + mgtr.migrationContext.MarkRowCopyEndTime() go func() { - for err := range this.rowCopyComplete { + for err := range mgtr.rowCopyComplete { if err != nil { // Abort synchronously to ensure the error is stored immediately - this.abort(err) + mgtr.abort(err) return } } }() } -func (this *Migrator) canStopStreaming() bool { - return atomic.LoadInt64(&this.migrationContext.CutOverCompleteFlag) != 0 +func (mgtr *Migrator) canStopStreaming() bool { + return atomic.LoadInt64(&mgtr.migrationContext.CutOverCompleteFlag) != 0 } // onChangelogEvent is called when a binlog event operation on the changelog table is intercepted. -func (this *Migrator) onChangelogEvent(dmlEntry *binlog.BinlogEntry) (err error) { +func (mgtr *Migrator) onChangelogEvent(dmlEntry *binlog.BinlogEntry) (err error) { // Hey, I created the changelog table, I know the type of columns it has! 
switch hint := dmlEntry.DmlEvent.NewColumnValues.StringColumn(2); hint { case "state": - return this.onChangelogStateEvent(dmlEntry) + return mgtr.onChangelogStateEvent(dmlEntry) case "heartbeat": - return this.onChangelogHeartbeatEvent(dmlEntry) + return mgtr.onChangelogHeartbeatEvent(dmlEntry) default: return nil } } -func (this *Migrator) onChangelogStateEvent(dmlEntry *binlog.BinlogEntry) (err error) { +func (mgtr *Migrator) onChangelogStateEvent(dmlEntry *binlog.BinlogEntry) (err error) { changelogStateString := dmlEntry.DmlEvent.NewColumnValues.StringColumn(3) changelogState := ReadChangelogState(changelogStateString) - this.migrationContext.Log.Infof("Intercepted changelog state %s", changelogState) + mgtr.migrationContext.Log.Infof("Intercepted changelog state %s", changelogState) switch changelogState { case Migrated, ReadMigrationRangeValues: // no-op event case GhostTableMigrated: // Use helper to prevent deadlock if migration aborts before receiver is ready - _ = base.SendWithContext(this.migrationContext.GetContext(), this.ghostTableMigrated, true) + _ = base.SendWithContext(mgtr.migrationContext.GetContext(), mgtr.ghostTableMigrated, true) case AllEventsUpToLockProcessed: lps := &lockProcessedStruct{ state: changelogStateString, @@ -288,15 +288,15 @@ func (this *Migrator) onChangelogStateEvent(dmlEntry *binlog.BinlogEntry) (err e // prevents both goroutine leaks (the original PR #1637 issue) and OOM // when MaxRetries() is very large. select { - case this.allEventsUpToLockProcessed <- lps: + case mgtr.allEventsUpToLockProcessed <- lps: default: // Buffer full — drain the stale value, then send the current one. select { - case <-this.allEventsUpToLockProcessed: + case <-mgtr.allEventsUpToLockProcessed: default: } select { - case this.allEventsUpToLockProcessed <- lps: + case mgtr.allEventsUpToLockProcessed <- lps: default: // Concurrent drain by another goroutine or receiver; the current // value is no longer needed since a newer sentinel will follow. 
@@ -311,26 +311,26 @@ func (this *Migrator) onChangelogStateEvent(dmlEntry *binlog.BinlogEntry) (err e // asynchronously, understanding it doesn't really matter. go func() { // Use helper to prevent deadlock if buffer fills and executeWriteFuncs exits - _ = base.SendWithContext(this.migrationContext.GetContext(), this.applyEventsQueue, newApplyEventStructByFunc(&applyEventFunc)) + _ = base.SendWithContext(mgtr.migrationContext.GetContext(), mgtr.applyEventsQueue, newApplyEventStructByFunc(&applyEventFunc)) }() default: - return fmt.Errorf("Unknown changelog state: %+v", changelogState) + return fmt.Errorf("unknown changelog state: %+v", changelogState) } - this.migrationContext.Log.Infof("Handled changelog state %s", changelogState) + mgtr.migrationContext.Log.Infof("Handled changelog state %s", changelogState) return nil } -func (this *Migrator) onChangelogHeartbeatEvent(dmlEntry *binlog.BinlogEntry) (err error) { +func (mgtr *Migrator) onChangelogHeartbeatEvent(dmlEntry *binlog.BinlogEntry) (err error) { changelogHeartbeatString := dmlEntry.DmlEvent.NewColumnValues.StringColumn(3) heartbeatTime, err := time.Parse(time.RFC3339Nano, changelogHeartbeatString) if err != nil { - return this.migrationContext.Log.Errore(err) + return mgtr.migrationContext.Log.Errore(err) } else { - this.migrationContext.SetLastHeartbeatOnChangelogTime(heartbeatTime) - this.applier.CurrentCoordinatesMutex.Lock() - this.applier.CurrentCoordinates = dmlEntry.Coordinates - this.applier.CurrentCoordinatesMutex.Unlock() + mgtr.migrationContext.SetLastHeartbeatOnChangelogTime(heartbeatTime) + mgtr.applier.CurrentCoordinatesMutex.Lock() + mgtr.applier.CurrentCoordinates = dmlEntry.Coordinates + mgtr.applier.CurrentCoordinatesMutex.Unlock() return nil } } @@ -338,68 +338,68 @@ func (this *Migrator) onChangelogHeartbeatEvent(dmlEntry *binlog.BinlogEntry) (e // abort stores the error, cancels the context, and logs the abort. 
// This is the common abort logic used by both listenOnPanicAbort and // consumeRowCopyComplete to ensure consistent error handling. -func (this *Migrator) abort(err error) { +func (mgtr *Migrator) abort(err error) { // Store the error for Migrate() to return - this.migrationContext.SetAbortError(err) + mgtr.migrationContext.SetAbortError(err) // Cancel the context to signal all goroutines to stop - this.migrationContext.CancelContext() + mgtr.migrationContext.CancelContext() // Log the error (but don't panic or exit) - this.migrationContext.Log.Errorf("Migration aborted: %v", err) + mgtr.migrationContext.Log.Errorf("migration aborted: %v", err) } // listenOnPanicAbort listens for fatal errors and initiates graceful shutdown -func (this *Migrator) listenOnPanicAbort() { - err := <-this.migrationContext.PanicAbort - this.abort(err) +func (mgtr *Migrator) listenOnPanicAbort() { + err := <-mgtr.migrationContext.PanicAbort + mgtr.abort(err) } // validateAlterStatement validates the `alter` statement meets criteria. // At this time this means: // - column renames are approved // - no table rename allowed -func (this *Migrator) validateAlterStatement() (err error) { - if this.parser.IsRenameTable() { +func (mgtr *Migrator) validateAlterStatement() (err error) { + if mgtr.parser.IsRenameTable() { return ErrMigratorUnsupportedRenameAlter } - if this.parser.HasNonTrivialRenames() && !this.migrationContext.SkipRenamedColumns { - this.migrationContext.ColumnRenameMap = this.parser.GetNonTrivialRenames() - if !this.migrationContext.ApproveRenamedColumns { - return fmt.Errorf("gh-ost believes the ALTER statement renames columns, as follows: %v; as precaution, you are asked to confirm gh-ost is correct, and provide with `--approve-renamed-columns`, and we're all happy. 
Or you can skip renamed columns via `--skip-renamed-columns`, in which case column data may be lost", this.parser.GetNonTrivialRenames()) + if mgtr.parser.HasNonTrivialRenames() && !mgtr.migrationContext.SkipRenamedColumns { + mgtr.migrationContext.ColumnRenameMap = mgtr.parser.GetNonTrivialRenames() + if !mgtr.migrationContext.ApproveRenamedColumns { + return fmt.Errorf("gh-ost believes the ALTER statement renames columns, as follows: %v; as precaution, you are asked to confirm gh-ost is correct, and provide with `--approve-renamed-columns`, and we're all happy. Or you can skip renamed columns via `--skip-renamed-columns`, in which case column data may be lost", mgtr.parser.GetNonTrivialRenames()) } - this.migrationContext.Log.Infof("Alter statement has column(s) renamed. gh-ost finds the following renames: %v; --approve-renamed-columns is given and so migration proceeds.", this.parser.GetNonTrivialRenames()) + mgtr.migrationContext.Log.Infof("alter statement has column(s) renamed. gh-ost finds the following renames: %v; --approve-renamed-columns is given and so migration proceeds.", mgtr.parser.GetNonTrivialRenames()) } - this.migrationContext.DroppedColumnsMap = this.parser.DroppedColumnsMap() + mgtr.migrationContext.DroppedColumnsMap = mgtr.parser.DroppedColumnsMap() return nil } -func (this *Migrator) countTableRows() (err error) { - if !this.migrationContext.CountTableRows { +func (mgtr *Migrator) countTableRows() (err error) { + if !mgtr.migrationContext.CountTableRows { // Not counting; we stay with an estimate return nil } - if this.migrationContext.Noop { - this.migrationContext.Log.Debugf("Noop operation; not really counting table rows") + if mgtr.migrationContext.Noop { + mgtr.migrationContext.Log.Debugf("Noop operation; not really counting table rows") return nil } countRowsFunc := func(ctx context.Context) error { - if err := this.inspector.CountTableRows(ctx); err != nil { + if err := mgtr.inspector.CountTableRows(ctx); err != nil { return err } - if 
err := this.hooksExecutor.onRowCountComplete(); err != nil { + if err := mgtr.hooksExecutor.onRowCountComplete(); err != nil { return err } return nil } - if this.migrationContext.ConcurrentCountTableRows { + if mgtr.migrationContext.ConcurrentCountTableRows { // store a cancel func so we can stop this query before a cut over rowCountContext, rowCountCancel := context.WithCancel(context.Background()) - this.migrationContext.SetCountTableRowsCancelFunc(rowCountCancel) + mgtr.migrationContext.SetCountTableRowsCancelFunc(rowCountCancel) - this.migrationContext.Log.Infof("As instructed, counting rows in the background; meanwhile I will use an estimated count, and will update it later on") + mgtr.migrationContext.Log.Infof("As instructed, counting rows in the background; meanwhile I will use an estimated count, and will update it later on") go countRowsFunc(rowCountContext) // and we ignore errors, because this turns to be a background job @@ -408,30 +408,30 @@ func (this *Migrator) countTableRows() (err error) { return countRowsFunc(context.Background()) } -func (this *Migrator) createFlagFiles() (err error) { - if this.migrationContext.PostponeCutOverFlagFile != "" { - if !base.FileExists(this.migrationContext.PostponeCutOverFlagFile) { - if err := base.TouchFile(this.migrationContext.PostponeCutOverFlagFile); err != nil { - return this.migrationContext.Log.Errorf("--postpone-cut-over-flag-file indicated by gh-ost is unable to create said file: %s", err.Error()) +func (mgtr *Migrator) createFlagFiles() (err error) { + if mgtr.migrationContext.PostponeCutOverFlagFile != "" { + if !base.FileExists(mgtr.migrationContext.PostponeCutOverFlagFile) { + if err := base.TouchFile(mgtr.migrationContext.PostponeCutOverFlagFile); err != nil { + return mgtr.migrationContext.Log.Errorf("--postpone-cut-over-flag-file indicated by gh-ost is unable to create said file: %s", err.Error()) } - this.migrationContext.Log.Infof("Created postpone-cut-over-flag-file: %s", 
this.migrationContext.PostponeCutOverFlagFile) + mgtr.migrationContext.Log.Infof("Created postpone-cut-over-flag-file: %s", mgtr.migrationContext.PostponeCutOverFlagFile) } } return nil } // checkAbort returns abort error if migration was aborted -func (this *Migrator) checkAbort() error { - if abortErr := this.migrationContext.GetAbortError(); abortErr != nil { +func (mgtr *Migrator) checkAbort() error { + if abortErr := mgtr.migrationContext.GetAbortError(); abortErr != nil { return abortErr } - ctx := this.migrationContext.GetContext() + ctx := mgtr.migrationContext.GetContext() if ctx != nil { select { case <-ctx.Done(): // Context cancelled but no abort error stored yet - if abortErr := this.migrationContext.GetAbortError(); abortErr != nil { + if abortErr := mgtr.migrationContext.GetAbortError(); abortErr != nil { return abortErr } return ctx.Err() @@ -443,213 +443,213 @@ func (this *Migrator) checkAbort() error { } // Migrate executes the complete migration logic. This is *the* major gh-ost function. 
-func (this *Migrator) Migrate() (err error) { - this.migrationContext.Log.Infof("Migrating %s.%s", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) - this.migrationContext.StartTime = time.Now() +func (mgtr *Migrator) Migrate() (err error) { + mgtr.migrationContext.Log.Infof("Migrating %s.%s", sql.EscapeName(mgtr.migrationContext.DatabaseName), sql.EscapeName(mgtr.migrationContext.OriginalTableName)) + mgtr.migrationContext.StartTime = time.Now() // Ensure context is cancelled on exit (cleanup) - defer this.migrationContext.CancelContext() + defer mgtr.migrationContext.CancelContext() - if this.migrationContext.Hostname, err = os.Hostname(); err != nil { + if mgtr.migrationContext.Hostname, err = os.Hostname(); err != nil { return err } - go this.listenOnPanicAbort() + go mgtr.listenOnPanicAbort() - if err := this.hooksExecutor.onStartup(); err != nil { + if err := mgtr.hooksExecutor.onStartup(); err != nil { return err } - if err := this.parser.ParseAlterStatement(this.migrationContext.AlterStatement); err != nil { + if err := mgtr.parser.ParseAlterStatement(mgtr.migrationContext.AlterStatement); err != nil { return err } - if err := this.validateAlterStatement(); err != nil { + if err := mgtr.validateAlterStatement(); err != nil { return err } // After this point, we'll need to teardown anything that's been started // so we don't leave things hanging around - defer this.teardown() + defer mgtr.teardown() - if err := this.initiateInspector(); err != nil { + if err := mgtr.initiateInspector(); err != nil { return err } - if err := this.checkAbort(); err != nil { + if err := mgtr.checkAbort(); err != nil { return err } // If we are resuming, we will initiateStreaming later when we know // the binlog coordinates to resume streaming from. // If not resuming, the streamer must be initiated before the applier, // so that the "GhostTableMigrated" event gets processed. 
- if !this.migrationContext.Resume { - if err := this.initiateStreaming(); err != nil { + if !mgtr.migrationContext.Resume { + if err := mgtr.initiateStreaming(); err != nil { return err } - if err := this.checkAbort(); err != nil { + if err := mgtr.checkAbort(); err != nil { return err } } - if err := this.initiateApplier(); err != nil { + if err := mgtr.initiateApplier(); err != nil { return err } - if err := this.checkAbort(); err != nil { + if err := mgtr.checkAbort(); err != nil { return err } - if err := this.createFlagFiles(); err != nil { + if err := mgtr.createFlagFiles(); err != nil { return err } // In MySQL 8.0 (and possibly earlier) some DDL statements can be applied instantly. // Attempt to do this if AttemptInstantDDL is set. - if this.migrationContext.AttemptInstantDDL { - if this.migrationContext.Noop { - this.migrationContext.Log.Debugf("Noop operation; not really attempting instant DDL") + if mgtr.migrationContext.AttemptInstantDDL { + if mgtr.migrationContext.Noop { + mgtr.migrationContext.Log.Debugf("Noop operation; not really attempting instant DDL") } else { - this.migrationContext.Log.Infof("Attempting to execute alter with ALGORITHM=INSTANT") - if err := this.applier.AttemptInstantDDL(); err == nil { - if err := this.finalCleanup(); err != nil { + mgtr.migrationContext.Log.Infof("Attempting to execute alter with ALGORITHM=INSTANT") + if err := mgtr.applier.AttemptInstantDDL(); err == nil { + if err := mgtr.finalCleanup(); err != nil { return nil } - if err := this.hooksExecutor.onSuccess(true); err != nil { + if err := mgtr.hooksExecutor.onSuccess(true); err != nil { return err } - this.migrationContext.Log.Infof("Success! table %s.%s migrated instantly", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) + mgtr.migrationContext.Log.Infof("Success! 
table %s.%s migrated instantly", sql.EscapeName(mgtr.migrationContext.DatabaseName), sql.EscapeName(mgtr.migrationContext.OriginalTableName)) return nil } else { - this.migrationContext.Log.Infof("ALGORITHM=INSTANT not supported for this operation, proceeding with original algorithm: %s", err) + mgtr.migrationContext.Log.Infof("ALGORITHM=INSTANT not supported for this operation, proceeding with original algorithm: %s", err) } } } - initialLag, _ := this.inspector.getReplicationLag() - if !this.migrationContext.Resume { - this.migrationContext.Log.Infof("Waiting for ghost table to be migrated. Current lag is %+v", initialLag) - <-this.ghostTableMigrated - this.migrationContext.Log.Debugf("ghost table migrated") + initialLag, _ := mgtr.inspector.getReplicationLag() + if !mgtr.migrationContext.Resume { + mgtr.migrationContext.Log.Infof("Waiting for ghost table to be migrated. Current lag is %+v", initialLag) + <-mgtr.ghostTableMigrated + mgtr.migrationContext.Log.Debugf("ghost table migrated") } // Yay! We now know the Ghost and Changelog tables are good to examine! // When running on replica, this means the replica has those tables. When running // on master this is always true, of course, and yet it also implies this knowledge // is in the binlogs. - if err := this.inspector.inspectOriginalAndGhostTables(); err != nil { + if err := mgtr.inspector.inspectOriginalAndGhostTables(); err != nil { return err } // We can prepare some of the queries on the applier - if err := this.applier.prepareQueries(); err != nil { + if err := mgtr.applier.prepareQueries(); err != nil { return err } // inspectOriginalAndGhostTables must be called before creating checkpoint table. 
- if this.migrationContext.Checkpoint && !this.migrationContext.Resume { - if err := this.applier.CreateCheckpointTable(); err != nil { - this.migrationContext.Log.Errorf("Unable to create checkpoint table, see further error details.") + if mgtr.migrationContext.Checkpoint && !mgtr.migrationContext.Resume { + if err := mgtr.applier.CreateCheckpointTable(); err != nil { + mgtr.migrationContext.Log.Errorf("unable to create checkpoint table, see further error details") } } - if this.migrationContext.Resume { - lastCheckpoint, err := this.applier.ReadLastCheckpoint() + if mgtr.migrationContext.Resume { + lastCheckpoint, err := mgtr.applier.ReadLastCheckpoint() if err != nil { - return this.migrationContext.Log.Errorf("No checkpoint found, unable to resume: %+v", err) + return mgtr.migrationContext.Log.Errorf("no checkpoint found, unable to resume: %+v", err) } - this.migrationContext.Log.Infof("Resuming from checkpoint coords=%+v range_min=%+v range_max=%+v iteration=%d", + mgtr.migrationContext.Log.Infof("Resuming from checkpoint coords=%+v range_min=%+v range_max=%+v iteration=%d", lastCheckpoint.LastTrxCoords, lastCheckpoint.IterationRangeMin.String(), lastCheckpoint.IterationRangeMax.String(), lastCheckpoint.Iteration) - this.migrationContext.MigrationIterationRangeMinValues = lastCheckpoint.IterationRangeMin - this.migrationContext.MigrationIterationRangeMaxValues = lastCheckpoint.IterationRangeMax - this.migrationContext.Iteration = lastCheckpoint.Iteration - this.migrationContext.TotalRowsCopied = lastCheckpoint.RowsCopied - this.migrationContext.TotalDMLEventsApplied = lastCheckpoint.DMLApplied - this.migrationContext.InitialStreamerCoords = lastCheckpoint.LastTrxCoords - if err := this.initiateStreaming(); err != nil { + mgtr.migrationContext.MigrationIterationRangeMinValues = lastCheckpoint.IterationRangeMin + mgtr.migrationContext.MigrationIterationRangeMaxValues = lastCheckpoint.IterationRangeMax + mgtr.migrationContext.Iteration = lastCheckpoint.Iteration 
+ mgtr.migrationContext.TotalRowsCopied = lastCheckpoint.RowsCopied + mgtr.migrationContext.TotalDMLEventsApplied = lastCheckpoint.DMLApplied + mgtr.migrationContext.InitialStreamerCoords = lastCheckpoint.LastTrxCoords + if err := mgtr.initiateStreaming(); err != nil { return err } } // Validation complete! We're good to execute this migration - if err := this.hooksExecutor.onValidated(); err != nil { + if err := mgtr.hooksExecutor.onValidated(); err != nil { return err } - if err := this.initiateServer(); err != nil { + if err := mgtr.initiateServer(); err != nil { return err } - defer this.server.RemoveSocketFile() + defer mgtr.server.RemoveSocketFile() - if err := this.countTableRows(); err != nil { + if err := mgtr.countTableRows(); err != nil { return err } - if err := this.addDMLEventsListener(); err != nil { + if err := mgtr.addDMLEventsListener(); err != nil { return err } - if err := this.applier.ReadMigrationRangeValues(); err != nil { + if err := mgtr.applier.ReadMigrationRangeValues(); err != nil { return err } - this.initiateThrottler() + mgtr.initiateThrottler() - if err := this.hooksExecutor.onBeforeRowCopy(); err != nil { + if err := mgtr.hooksExecutor.onBeforeRowCopy(); err != nil { return err } go func() { - if err := this.executeWriteFuncs(); err != nil { + if err := mgtr.executeWriteFuncs(); err != nil { // Send error to PanicAbort to trigger abort - _ = base.SendWithContext(this.migrationContext.GetContext(), this.migrationContext.PanicAbort, err) + _ = base.SendWithContext(mgtr.migrationContext.GetContext(), mgtr.migrationContext.PanicAbort, err) } }() - go this.iterateChunks() - this.migrationContext.MarkRowCopyStartTime() - go this.initiateStatus() - if this.migrationContext.Checkpoint { - go this.checkpointLoop() + go mgtr.iterateChunks() + mgtr.migrationContext.MarkRowCopyStartTime() + go mgtr.initiateStatus() + if mgtr.migrationContext.Checkpoint { + go mgtr.checkpointLoop() } - this.migrationContext.Log.Debugf("Operating until row copy 
is complete") - this.consumeRowCopyComplete() - this.migrationContext.Log.Infof("Row copy complete") + mgtr.migrationContext.Log.Debugf("Operating until row copy is complete") + mgtr.consumeRowCopyComplete() + mgtr.migrationContext.Log.Infof("Row copy complete") // Check if row copy was aborted due to error - if err := this.checkAbort(); err != nil { + if err := mgtr.checkAbort(); err != nil { return err } - if err := this.hooksExecutor.onRowCopyComplete(); err != nil { + if err := mgtr.hooksExecutor.onRowCopyComplete(); err != nil { return err } - this.printStatus(ForcePrintStatusRule) + mgtr.printStatus(ForcePrintStatusRule) - if this.migrationContext.IsCountingTableRows() { - this.migrationContext.Log.Info("stopping query for exact row count, because that can accidentally lock out the cut over") - this.migrationContext.CancelTableRowsCount() + if mgtr.migrationContext.IsCountingTableRows() { + mgtr.migrationContext.Log.Info("stopping query for exact row count, because that can accidentally lock out the cut over") + mgtr.migrationContext.CancelTableRowsCount() } - if err := this.hooksExecutor.onBeforeCutOver(); err != nil { + if err := mgtr.hooksExecutor.onBeforeCutOver(); err != nil { return err } var retrier func(func() error, ...bool) error - if this.migrationContext.CutOverExponentialBackoff { - retrier = this.retryOperationWithExponentialBackoff + if mgtr.migrationContext.CutOverExponentialBackoff { + retrier = mgtr.retryOperationWithExponentialBackoff } else { - retrier = this.retryOperation + retrier = mgtr.retryOperation } - if err := retrier(this.cutOver); err != nil { + if err := retrier(mgtr.cutOver); err != nil { return err } - atomic.StoreInt64(&this.migrationContext.CutOverCompleteFlag, 1) + atomic.StoreInt64(&mgtr.migrationContext.CutOverCompleteFlag, 1) - if this.migrationContext.Checkpoint && !this.migrationContext.Noop { - cutoverChk, err := this.CheckpointAfterCutOver() + if mgtr.migrationContext.Checkpoint && !mgtr.migrationContext.Noop { + 
cutoverChk, err := mgtr.CheckpointAfterCutOver() if err != nil { - this.migrationContext.Log.Warningf("failed to checkpoint after cutover: %+v", err) + mgtr.migrationContext.Log.Warningf("failed to checkpoint after cutover: %+v", err) } else { - this.migrationContext.Log.Infof("checkpoint success after cutover at coords=%+v", cutoverChk.LastTrxCoords.DisplayString()) + mgtr.migrationContext.Log.Infof("checkpoint success after cutover at coords=%+v", cutoverChk.LastTrxCoords.DisplayString()) } } - if err := this.finalCleanup(); err != nil { + if err := mgtr.finalCleanup(); err != nil { return nil } - if err := this.hooksExecutor.onSuccess(false); err != nil { + if err := mgtr.hooksExecutor.onSuccess(false); err != nil { return err } - this.migrationContext.Log.Infof("Done migrating %s.%s", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) + mgtr.migrationContext.Log.Infof("Done migrating %s.%s", sql.EscapeName(mgtr.migrationContext.DatabaseName), sql.EscapeName(mgtr.migrationContext.OriginalTableName)) // Final check for abort before declaring success - if err := this.checkAbort(); err != nil { + if err := mgtr.checkAbort(); err != nil { return err } return nil @@ -658,143 +658,143 @@ func (this *Migrator) Migrate() (err error) { // Revert reverts a migration that previously completed by applying all DML events that happened // after the original cutover, then doing another cutover to swap the tables back. // The steps are similar to Migrate(), but without row copying. 
-func (this *Migrator) Revert() error { - this.migrationContext.Log.Infof("Reverting %s.%s from %s.%s", - sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName), - sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OldTableName)) - this.migrationContext.StartTime = time.Now() +func (mgtr *Migrator) Revert() error { + mgtr.migrationContext.Log.Infof("Reverting %s.%s from %s.%s", + sql.EscapeName(mgtr.migrationContext.DatabaseName), sql.EscapeName(mgtr.migrationContext.OriginalTableName), + sql.EscapeName(mgtr.migrationContext.DatabaseName), sql.EscapeName(mgtr.migrationContext.OldTableName)) + mgtr.migrationContext.StartTime = time.Now() // Ensure context is cancelled on exit (cleanup) - defer this.migrationContext.CancelContext() + defer mgtr.migrationContext.CancelContext() var err error - if this.migrationContext.Hostname, err = os.Hostname(); err != nil { + if mgtr.migrationContext.Hostname, err = os.Hostname(); err != nil { return err } - go this.listenOnPanicAbort() + go mgtr.listenOnPanicAbort() - if err := this.hooksExecutor.onStartup(); err != nil { + if err := mgtr.hooksExecutor.onStartup(); err != nil { return err } - if err := this.validateAlterStatement(); err != nil { + if err := mgtr.validateAlterStatement(); err != nil { return err } - defer this.teardown() + defer mgtr.teardown() - if err := this.initiateInspector(); err != nil { + if err := mgtr.initiateInspector(); err != nil { return err } - if err := this.checkAbort(); err != nil { + if err := mgtr.checkAbort(); err != nil { return err } - if err := this.initiateApplier(); err != nil { + if err := mgtr.initiateApplier(); err != nil { return err } - if err := this.checkAbort(); err != nil { + if err := mgtr.checkAbort(); err != nil { return err } - if err := this.createFlagFiles(); err != nil { + if err := mgtr.createFlagFiles(); err != nil { return err } - if err := 
this.inspector.inspectOriginalAndGhostTables(); err != nil { + if err := mgtr.inspector.inspectOriginalAndGhostTables(); err != nil { return err } - if err := this.applier.prepareQueries(); err != nil { + if err := mgtr.applier.prepareQueries(); err != nil { return err } - lastCheckpoint, err := this.applier.ReadLastCheckpoint() + lastCheckpoint, err := mgtr.applier.ReadLastCheckpoint() if err != nil { - return this.migrationContext.Log.Errorf("No checkpoint found, unable to revert: %+v", err) + return mgtr.migrationContext.Log.Errorf("no checkpoint found, unable to revert: %+v", err) } if !lastCheckpoint.IsCutover { - return this.migrationContext.Log.Errorf("Last checkpoint is not after cutover, unable to revert: coords=%+v time=%+v", lastCheckpoint.LastTrxCoords, lastCheckpoint.Timestamp) + return mgtr.migrationContext.Log.Errorf("last checkpoint is not after cutover, unable to revert: coords=%+v time=%+v", lastCheckpoint.LastTrxCoords, lastCheckpoint.Timestamp) } - this.migrationContext.InitialStreamerCoords = lastCheckpoint.LastTrxCoords - this.migrationContext.TotalRowsCopied = lastCheckpoint.RowsCopied - this.migrationContext.MigrationIterationRangeMinValues = lastCheckpoint.IterationRangeMin - this.migrationContext.MigrationIterationRangeMaxValues = lastCheckpoint.IterationRangeMax - if err := this.initiateStreaming(); err != nil { + mgtr.migrationContext.InitialStreamerCoords = lastCheckpoint.LastTrxCoords + mgtr.migrationContext.TotalRowsCopied = lastCheckpoint.RowsCopied + mgtr.migrationContext.MigrationIterationRangeMinValues = lastCheckpoint.IterationRangeMin + mgtr.migrationContext.MigrationIterationRangeMaxValues = lastCheckpoint.IterationRangeMax + if err := mgtr.initiateStreaming(); err != nil { return err } - if err := this.checkAbort(); err != nil { + if err := mgtr.checkAbort(); err != nil { return err } - if err := this.hooksExecutor.onValidated(); err != nil { + if err := mgtr.hooksExecutor.onValidated(); err != nil { return err } - if err := 
this.initiateServer(); err != nil { + if err := mgtr.initiateServer(); err != nil { return err } - defer this.server.RemoveSocketFile() - if err := this.addDMLEventsListener(); err != nil { + defer mgtr.server.RemoveSocketFile() + if err := mgtr.addDMLEventsListener(); err != nil { return err } - this.initiateThrottler() - go this.initiateStatus() + mgtr.initiateThrottler() + go mgtr.initiateStatus() go func() { - if err := this.executeDMLWriteFuncs(); err != nil { + if err := mgtr.executeDMLWriteFuncs(); err != nil { // Send error to PanicAbort to trigger abort - _ = base.SendWithContext(this.migrationContext.GetContext(), this.migrationContext.PanicAbort, err) + _ = base.SendWithContext(mgtr.migrationContext.GetContext(), mgtr.migrationContext.PanicAbort, err) } }() - this.printStatus(ForcePrintStatusRule) + mgtr.printStatus(ForcePrintStatusRule) var retrier func(func() error, ...bool) error - if this.migrationContext.CutOverExponentialBackoff { - retrier = this.retryOperationWithExponentialBackoff + if mgtr.migrationContext.CutOverExponentialBackoff { + retrier = mgtr.retryOperationWithExponentialBackoff } else { - retrier = this.retryOperation + retrier = mgtr.retryOperation } - if err := this.hooksExecutor.onBeforeCutOver(); err != nil { + if err := mgtr.hooksExecutor.onBeforeCutOver(); err != nil { return err } - if err := retrier(this.cutOver); err != nil { + if err := retrier(mgtr.cutOver); err != nil { return err } - atomic.StoreInt64(&this.migrationContext.CutOverCompleteFlag, 1) - if err := this.finalCleanup(); err != nil { + atomic.StoreInt64(&mgtr.migrationContext.CutOverCompleteFlag, 1) + if err := mgtr.finalCleanup(); err != nil { return nil } - if err := this.hooksExecutor.onSuccess(false); err != nil { + if err := mgtr.hooksExecutor.onSuccess(false); err != nil { return err } - this.migrationContext.Log.Infof("Done reverting %s.%s", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.OriginalTableName)) + 
mgtr.migrationContext.Log.Infof("Done reverting %s.%s", sql.EscapeName(mgtr.migrationContext.DatabaseName), sql.EscapeName(mgtr.migrationContext.OriginalTableName)) return nil } // ExecOnFailureHook executes the onFailure hook, and this method is provided as the only external // hook access point -func (this *Migrator) ExecOnFailureHook() (err error) { - return this.hooksExecutor.onFailure() +func (mgtr *Migrator) ExecOnFailureHook() (err error) { + return mgtr.hooksExecutor.onFailure() } -func (this *Migrator) handleCutOverResult(cutOverError error) (err error) { - if this.migrationContext.TestOnReplica { +func (mgtr *Migrator) handleCutOverResult(cutOverError error) (err error) { + if mgtr.migrationContext.TestOnReplica { // We're merely testing, we don't want to keep this state. Rollback the renames as possible - this.applier.RenameTablesRollback() + mgtr.applier.RenameTablesRollback() } if cutOverError == nil { return nil } // Only on error: - if this.migrationContext.TestOnReplica { + if mgtr.migrationContext.TestOnReplica { // With `--test-on-replica` we stop replication thread, and then proceed to use // the same cut-over phase as the master would use. That means we take locks // and swap the tables. // The difference is that we will later swap the tables back. - if err := this.hooksExecutor.onStartReplication(); err != nil { - return this.migrationContext.Log.Errore(err) + if err := mgtr.hooksExecutor.onStartReplication(); err != nil { + return mgtr.migrationContext.Log.Errore(err) } - if this.migrationContext.TestOnReplicaSkipReplicaStop { - this.migrationContext.Log.Warningf("--test-on-replica-skip-replica-stop enabled, we are not starting replication.") + if mgtr.migrationContext.TestOnReplicaSkipReplicaStop { + mgtr.migrationContext.Log.Warningf("--test-on-replica-skip-replica-stop enabled, we are not starting replication.") } else { - this.migrationContext.Log.Debugf("testing on replica. 
Starting replication IO thread after cut-over failure") - if err := this.retryOperation(this.applier.StartReplication); err != nil { - return this.migrationContext.Log.Errore(err) + mgtr.migrationContext.Log.Debugf("testing on replica. Starting replication IO thread after cut-over failure") + if err := mgtr.retryOperation(mgtr.applier.StartReplication); err != nil { + return mgtr.migrationContext.Log.Errore(err) } } } @@ -803,42 +803,42 @@ func (this *Migrator) handleCutOverResult(cutOverError error) (err error) { // cutOver performs the final step of migration, based on migration // type (on replica? atomic? safe?) -func (this *Migrator) cutOver() (err error) { - if this.migrationContext.Noop { - this.migrationContext.Log.Debugf("Noop operation; not really swapping tables") +func (mgtr *Migrator) cutOver() (err error) { + if mgtr.migrationContext.Noop { + mgtr.migrationContext.Log.Debugf("Noop operation; not really swapping tables") return nil } - this.migrationContext.MarkPointOfInterest() - this.throttler.throttle(func() { - this.migrationContext.Log.Debugf("throttling before swapping tables") + mgtr.migrationContext.MarkPointOfInterest() + mgtr.throttler.throttle(func() { + mgtr.migrationContext.Log.Debugf("throttling before swapping tables") }) - this.migrationContext.MarkPointOfInterest() - this.migrationContext.Log.Debugf("checking for cut-over postpone") - if err := this.sleepWhileTrue( + mgtr.migrationContext.MarkPointOfInterest() + mgtr.migrationContext.Log.Debugf("checking for cut-over postpone") + if err := mgtr.sleepWhileTrue( func() (bool, error) { - heartbeatLag := this.migrationContext.TimeSinceLastHeartbeatOnChangelog() - maxLagMillisecondsThrottle := time.Duration(atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold)) * time.Millisecond - cutOverLockTimeout := time.Duration(this.migrationContext.CutOverLockTimeoutSeconds) * time.Second + heartbeatLag := mgtr.migrationContext.TimeSinceLastHeartbeatOnChangelog() + 
maxLagMillisecondsThrottle := time.Duration(atomic.LoadInt64(&mgtr.migrationContext.MaxLagMillisecondsThrottleThreshold)) * time.Millisecond + cutOverLockTimeout := time.Duration(mgtr.migrationContext.CutOverLockTimeoutSeconds) * time.Second if heartbeatLag > maxLagMillisecondsThrottle || heartbeatLag > cutOverLockTimeout { - this.migrationContext.Log.Debugf("current HeartbeatLag (%.2fs) is too high, it needs to be less than both --max-lag-millis (%.2fs) and --cut-over-lock-timeout-seconds (%.2fs) to continue", heartbeatLag.Seconds(), maxLagMillisecondsThrottle.Seconds(), cutOverLockTimeout.Seconds()) + mgtr.migrationContext.Log.Debugf("current HeartbeatLag (%.2fs) is too high, it needs to be less than both --max-lag-millis (%.2fs) and --cut-over-lock-timeout-seconds (%.2fs) to continue", heartbeatLag.Seconds(), maxLagMillisecondsThrottle.Seconds(), cutOverLockTimeout.Seconds()) return true, nil } - if this.migrationContext.PostponeCutOverFlagFile == "" { + if mgtr.migrationContext.PostponeCutOverFlagFile == "" { return false, nil } - if atomic.LoadInt64(&this.migrationContext.UserCommandedUnpostponeFlag) > 0 { - atomic.StoreInt64(&this.migrationContext.UserCommandedUnpostponeFlag, 0) + if atomic.LoadInt64(&mgtr.migrationContext.UserCommandedUnpostponeFlag) > 0 { + atomic.StoreInt64(&mgtr.migrationContext.UserCommandedUnpostponeFlag, 0) return false, nil } - if base.FileExists(this.migrationContext.PostponeCutOverFlagFile) { + if base.FileExists(mgtr.migrationContext.PostponeCutOverFlagFile) { // Postpone file defined and exists! 
- if atomic.LoadInt64(&this.migrationContext.IsPostponingCutOver) == 0 { - if err := this.hooksExecutor.onBeginPostponed(); err != nil { + if atomic.LoadInt64(&mgtr.migrationContext.IsPostponingCutOver) == 0 { + if err := mgtr.hooksExecutor.onBeginPostponed(); err != nil { return true, err } } - atomic.StoreInt64(&this.migrationContext.IsPostponingCutOver, 1) + atomic.StoreInt64(&mgtr.migrationContext.IsPostponingCutOver, 1) return true, nil } return false, nil @@ -846,80 +846,80 @@ func (this *Migrator) cutOver() (err error) { ); err != nil { return err } - atomic.StoreInt64(&this.migrationContext.IsPostponingCutOver, 0) - this.migrationContext.MarkPointOfInterest() - this.migrationContext.Log.Debugf("checking for cut-over postpone: complete") + atomic.StoreInt64(&mgtr.migrationContext.IsPostponingCutOver, 0) + mgtr.migrationContext.MarkPointOfInterest() + mgtr.migrationContext.Log.Debugf("checking for cut-over postpone: complete") - if this.migrationContext.TestOnReplica { + if mgtr.migrationContext.TestOnReplica { // With `--test-on-replica` we stop replication thread, and then proceed to use // the same cut-over phase as the master would use. That means we take locks // and swap the tables. // The difference is that we will later swap the tables back. - if err := this.hooksExecutor.onStopReplication(); err != nil { + if err := mgtr.hooksExecutor.onStopReplication(); err != nil { return err } - if this.migrationContext.TestOnReplicaSkipReplicaStop { - this.migrationContext.Log.Warningf("--test-on-replica-skip-replica-stop enabled, we are not stopping replication.") + if mgtr.migrationContext.TestOnReplicaSkipReplicaStop { + mgtr.migrationContext.Log.Warningf("--test-on-replica-skip-replica-stop enabled, we are not stopping replication.") } else { - this.migrationContext.Log.Debugf("testing on replica. 
Stopping replication IO thread") - if err := this.retryOperation(this.applier.StopReplication); err != nil { + mgtr.migrationContext.Log.Debugf("testing on replica. Stopping replication IO thread") + if err := mgtr.retryOperation(mgtr.applier.StopReplication); err != nil { return err } } } - switch this.migrationContext.CutOverType { + switch mgtr.migrationContext.CutOverType { case base.CutOverAtomic: // Atomic solution: we use low timeout and multiple attempts. But for // each failed attempt, we throttle until replication lag is back to normal - err = this.atomicCutOver() + err = mgtr.atomicCutOver() case base.CutOverTwoStep: - err = this.cutOverTwoStep() + err = mgtr.cutOverTwoStep() default: - return this.migrationContext.Log.Fatalf("Unknown cut-over type: %d; should never get here!", this.migrationContext.CutOverType) + return mgtr.migrationContext.Log.Fatalf("Unknown cut-over type: %d; should never get here!", mgtr.migrationContext.CutOverType) } - this.handleCutOverResult(err) + mgtr.handleCutOverResult(err) return err } // Inject the "AllEventsUpToLockProcessed" state hint, wait for it to appear in the binary logs, // make sure the queue is drained. 
-func (this *Migrator) waitForEventsUpToLock() error { - timeout := time.NewTimer(time.Second * time.Duration(this.migrationContext.CutOverLockTimeoutSeconds)) +func (mgtr *Migrator) waitForEventsUpToLock() error { + timeout := time.NewTimer(time.Second * time.Duration(mgtr.migrationContext.CutOverLockTimeoutSeconds)) - this.migrationContext.MarkPointOfInterest() + mgtr.migrationContext.MarkPointOfInterest() waitForEventsUpToLockStartTime := time.Now() allEventsUpToLockProcessedChallenge := fmt.Sprintf("%s:%d", string(AllEventsUpToLockProcessed), waitForEventsUpToLockStartTime.UnixNano()) - this.migrationContext.Log.Infof("Writing changelog state: %+v", allEventsUpToLockProcessedChallenge) - if _, err := this.applier.WriteChangelogState(allEventsUpToLockProcessedChallenge); err != nil { + mgtr.migrationContext.Log.Infof("Writing changelog state: %+v", allEventsUpToLockProcessedChallenge) + if _, err := mgtr.applier.WriteChangelogState(allEventsUpToLockProcessedChallenge); err != nil { return err } - this.migrationContext.Log.Infof("Waiting for events up to lock") - atomic.StoreInt64(&this.migrationContext.AllEventsUpToLockProcessedInjectedFlag, 1) + mgtr.migrationContext.Log.Infof("Waiting for events up to lock") + atomic.StoreInt64(&mgtr.migrationContext.AllEventsUpToLockProcessedInjectedFlag, 1) var lockProcessed *lockProcessedStruct for found := false; !found; { select { case <-timeout.C: { - return this.migrationContext.Log.Errorf("Timeout while waiting for events up to lock") + return mgtr.migrationContext.Log.Errorf("timeout while waiting for events up to lock") } - case lockProcessed = <-this.allEventsUpToLockProcessed: + case lockProcessed = <-mgtr.allEventsUpToLockProcessed: { if lockProcessed.state == allEventsUpToLockProcessedChallenge { - this.migrationContext.Log.Infof("Waiting for events up to lock: got %s", lockProcessed.state) + mgtr.migrationContext.Log.Infof("Waiting for events up to lock: got %s", lockProcessed.state) found = true - 
this.lastLockProcessed = lockProcessed + mgtr.lastLockProcessed = lockProcessed } else { - this.migrationContext.Log.Infof("Waiting for events up to lock: skipping %s", lockProcessed.state) + mgtr.migrationContext.Log.Infof("Waiting for events up to lock: skipping %s", lockProcessed.state) } } } } waitForEventsUpToLockDuration := time.Since(waitForEventsUpToLockStartTime) - this.migrationContext.Log.Infof("Done waiting for events up to lock; duration=%+v", waitForEventsUpToLockDuration) - this.printStatus(ForcePrintStatusAndHintRule) + mgtr.migrationContext.Log.Infof("Done waiting for events up to lock; duration=%+v", waitForEventsUpToLockDuration) + mgtr.printStatus(ForcePrintStatusAndHintRule) return nil } @@ -928,92 +928,92 @@ func (this *Migrator) waitForEventsUpToLock() error { // what's left of last DML entries, and **non-atomically** swap original->old, then new->original. // There is a point in time where the "original" table does not exist and queries are non-blocked // and failing. 
-func (this *Migrator) cutOverTwoStep() (err error) { - atomic.StoreInt64(&this.migrationContext.InCutOverCriticalSectionFlag, 1) - defer atomic.StoreInt64(&this.migrationContext.InCutOverCriticalSectionFlag, 0) - atomic.StoreInt64(&this.migrationContext.AllEventsUpToLockProcessedInjectedFlag, 0) +func (mgtr *Migrator) cutOverTwoStep() (err error) { + atomic.StoreInt64(&mgtr.migrationContext.InCutOverCriticalSectionFlag, 1) + defer atomic.StoreInt64(&mgtr.migrationContext.InCutOverCriticalSectionFlag, 0) + atomic.StoreInt64(&mgtr.migrationContext.AllEventsUpToLockProcessedInjectedFlag, 0) - if err := this.retryOperation(this.applier.LockOriginalTable); err != nil { + if err := mgtr.retryOperation(mgtr.applier.LockOriginalTable); err != nil { return err } - if err := this.retryOperation(this.waitForEventsUpToLock); err != nil { + if err := mgtr.retryOperation(mgtr.waitForEventsUpToLock); err != nil { return err } // If we need to create triggers we need to do it here (only create part) - if this.migrationContext.IncludeTriggers && len(this.migrationContext.Triggers) > 0 { - if err := this.retryOperation(this.applier.CreateTriggersOnGhost); err != nil { + if mgtr.migrationContext.IncludeTriggers && len(mgtr.migrationContext.Triggers) > 0 { + if err := mgtr.retryOperation(mgtr.applier.CreateTriggersOnGhost); err != nil { return err } } - if err := this.retryOperation(this.applier.SwapTablesQuickAndBumpy); err != nil { + if err := mgtr.retryOperation(mgtr.applier.SwapTablesQuickAndBumpy); err != nil { return err } - if err := this.retryOperation(this.applier.UnlockTables); err != nil { + if err := mgtr.retryOperation(mgtr.applier.UnlockTables); err != nil { return err } - lockAndRenameDuration := this.migrationContext.RenameTablesEndTime.Sub(this.migrationContext.LockTablesStartTime) - renameDuration := this.migrationContext.RenameTablesEndTime.Sub(this.migrationContext.RenameTablesStartTime) - this.migrationContext.Log.Debugf("Lock & rename duration: %s (rename only: 
%s). During this time, queries on %s were locked or failing", lockAndRenameDuration, renameDuration, sql.EscapeName(this.migrationContext.OriginalTableName))
+	lockAndRenameDuration := mgtr.migrationContext.RenameTablesEndTime.Sub(mgtr.migrationContext.LockTablesStartTime)
+	renameDuration := mgtr.migrationContext.RenameTablesEndTime.Sub(mgtr.migrationContext.RenameTablesStartTime)
+	mgtr.migrationContext.Log.Debugf("Lock & rename duration: %s (rename only: %s). During this time, queries on %s were locked or failing", lockAndRenameDuration, renameDuration, sql.EscapeName(mgtr.migrationContext.OriginalTableName))
 	return nil
 }
 
 // atomicCutOver
-func (this *Migrator) atomicCutOver() (err error) {
-	atomic.StoreInt64(&this.migrationContext.InCutOverCriticalSectionFlag, 1)
-	defer atomic.StoreInt64(&this.migrationContext.InCutOverCriticalSectionFlag, 0)
+func (mgtr *Migrator) atomicCutOver() (err error) {
+	atomic.StoreInt64(&mgtr.migrationContext.InCutOverCriticalSectionFlag, 1)
+	defer atomic.StoreInt64(&mgtr.migrationContext.InCutOverCriticalSectionFlag, 0)
 	okToUnlockTable := make(chan bool, 4)
 	defer func() {
 		okToUnlockTable <- true
 	}()
-	atomic.StoreInt64(&this.migrationContext.AllEventsUpToLockProcessedInjectedFlag, 0)
+	atomic.StoreInt64(&mgtr.migrationContext.AllEventsUpToLockProcessedInjectedFlag, 0)
 	lockOriginalSessionIdChan := make(chan int64, 2)
 	tableLocked := make(chan error, 2)
 	tableUnlocked := make(chan error, 2)
 	var renameLockSessionId int64
 	go func() {
-		if err := this.applier.AtomicCutOverMagicLock(lockOriginalSessionIdChan, tableLocked, okToUnlockTable, tableUnlocked, &renameLockSessionId); err != nil {
-			this.migrationContext.Log.Errore(err)
+		if err := mgtr.applier.AtomicCutOverMagicLock(lockOriginalSessionIdChan, tableLocked, okToUnlockTable, tableUnlocked, &renameLockSessionId); err != nil {
+			mgtr.migrationContext.Log.Errore(err)
 		}
 	}()
 	if err := <-tableLocked; err != nil {
-		return this.migrationContext.Log.Errore(err)
+		return 
mgtr.migrationContext.Log.Errore(err) } lockOriginalSessionId := <-lockOriginalSessionIdChan - this.migrationContext.Log.Infof("Session locking original & magic tables is %+v", lockOriginalSessionId) + mgtr.migrationContext.Log.Infof("Session locking original & magic tables is %+v", lockOriginalSessionId) // At this point we know the original table is locked. // We know any newly incoming DML on original table is blocked. - if err := this.waitForEventsUpToLock(); err != nil { - return this.migrationContext.Log.Errore(err) + if err := mgtr.waitForEventsUpToLock(); err != nil { + return mgtr.migrationContext.Log.Errore(err) } // If we need to create triggers we need to do it here (only create part) - if this.migrationContext.IncludeTriggers && len(this.migrationContext.Triggers) > 0 { - if err := this.applier.CreateTriggersOnGhost(); err != nil { - return this.migrationContext.Log.Errore(err) + if mgtr.migrationContext.IncludeTriggers && len(mgtr.migrationContext.Triggers) > 0 { + if err := mgtr.applier.CreateTriggersOnGhost(); err != nil { + return mgtr.migrationContext.Log.Errore(err) } } // Step 2 // We now attempt an atomic RENAME on original & ghost tables, and expect it to block. - this.migrationContext.RenameTablesStartTime = time.Now() + mgtr.migrationContext.RenameTablesStartTime = time.Now() var tableRenameKnownToHaveFailed int64 renameSessionIdChan := make(chan int64, 2) tablesRenamed := make(chan error, 2) go func() { - if err := this.applier.AtomicCutoverRename(renameSessionIdChan, tablesRenamed); err != nil { + if err := mgtr.applier.AtomicCutoverRename(renameSessionIdChan, tablesRenamed); err != nil { // Abort! 
Release the lock atomic.StoreInt64(&tableRenameKnownToHaveFailed, 1) okToUnlockTable <- true } }() renameSessionId := <-renameSessionIdChan - this.migrationContext.Log.Infof("Session renaming tables is %+v", renameSessionId) + mgtr.migrationContext.Log.Infof("Session renaming tables is %+v", renameSessionId) waitForRename := func() error { if atomic.LoadInt64(&tableRenameKnownToHaveFailed) == 1 { @@ -1021,22 +1021,22 @@ func (this *Migrator) atomicCutOver() (err error) { // it won't show up in PROCESSLIST, no point in waiting return nil } - return this.applier.ExpectProcess(renameSessionId, "metadata lock", "rename") + return mgtr.applier.ExpectProcess(renameSessionId, "metadata lock", "rename") } // Wait for the RENAME to appear in PROCESSLIST - if err := this.retryOperation(waitForRename, true); err != nil { + if err := mgtr.retryOperation(waitForRename, true); err != nil { // Abort! Release the lock okToUnlockTable <- true return err } if atomic.LoadInt64(&tableRenameKnownToHaveFailed) == 0 { - this.migrationContext.Log.Infof("Found atomic RENAME to be blocking, as expected. Double checking the lock is still in place (though I don't strictly have to)") + mgtr.migrationContext.Log.Infof("Found atomic RENAME to be blocking, as expected. Double checking the lock is still in place (though I don't strictly have to)") } - if err := this.applier.ExpectUsedLock(lockOriginalSessionId); err != nil { + if err := mgtr.applier.ExpectUsedLock(lockOriginalSessionId); err != nil { // Abort operation. Just make sure to drop the magic table. 
- return this.migrationContext.Log.Errore(err) + return mgtr.migrationContext.Log.Errore(err) } - this.migrationContext.Log.Infof("Connection holding lock on original table still exists") + mgtr.migrationContext.Log.Infof("Connection holding lock on original table still exists") // Now that we've found the RENAME blocking, AND the locking connection still alive, // we know it is safe to proceed to release the lock @@ -1046,33 +1046,33 @@ func (this *Migrator) atomicCutOver() (err error) { // BAM! magic table dropped, original table lock is released // -> RENAME released -> queries on original are unblocked. if err := <-tableUnlocked; err != nil { - return this.migrationContext.Log.Errore(err) + return mgtr.migrationContext.Log.Errore(err) } if err := <-tablesRenamed; err != nil { - return this.migrationContext.Log.Errore(err) + return mgtr.migrationContext.Log.Errore(err) } - this.migrationContext.RenameTablesEndTime = time.Now() + mgtr.migrationContext.RenameTablesEndTime = time.Now() // ooh nice! We're actually truly and thankfully done - lockAndRenameDuration := this.migrationContext.RenameTablesEndTime.Sub(this.migrationContext.LockTablesStartTime) - this.migrationContext.Log.Infof("Lock & rename duration: %s. During this time, queries on %s were blocked", lockAndRenameDuration, sql.EscapeName(this.migrationContext.OriginalTableName)) + lockAndRenameDuration := mgtr.migrationContext.RenameTablesEndTime.Sub(mgtr.migrationContext.LockTablesStartTime) + mgtr.migrationContext.Log.Infof("Lock & rename duration: %s. 
During this time, queries on %s were blocked", lockAndRenameDuration, sql.EscapeName(mgtr.migrationContext.OriginalTableName))
 	return nil
 }
 
 // initiateServer begins listening on unix socket/tcp for incoming interactive commands
-func (this *Migrator) initiateServer() (err error) {
+func (mgtr *Migrator) initiateServer() (err error) {
 	var f printStatusFunc = func(rule PrintStatusRule, writer io.Writer) {
-		this.printStatus(rule, writer)
+		mgtr.printStatus(rule, writer)
 	}
-	this.server = NewServer(this.migrationContext, this.hooksExecutor, f)
-	if err := this.server.BindSocketFile(); err != nil {
+	mgtr.server = NewServer(mgtr.migrationContext, mgtr.hooksExecutor, f)
+	if err := mgtr.server.BindSocketFile(); err != nil {
 		return err
 	}
-	if err := this.server.BindTCPPort(); err != nil {
+	if err := mgtr.server.BindTCPPort(); err != nil {
 		return err
 	}
-	go this.server.Serve()
+	go mgtr.server.Serve()
 	return nil
 }
 
@@ -1083,59 +1083,59 @@ func (this *Migrator) initiateServer() (err error) {
 // - schema validation
 // - heartbeat
 // When `--allow-on-master` is supplied, the inspector is actually the master.
-func (this *Migrator) initiateInspector() (err error) {
-	this.inspector = NewInspector(this.migrationContext)
-	if err := this.inspector.InitDBConnections(); err != nil {
+func (mgtr *Migrator) initiateInspector() (err error) {
+	mgtr.inspector = NewInspector(mgtr.migrationContext)
+	if err := mgtr.inspector.InitDBConnections(); err != nil {
 		return err
 	}
-	if err := this.inspector.ValidateOriginalTable(); err != nil {
+	if err := mgtr.inspector.ValidateOriginalTable(); err != nil {
 		return err
 	}
-	if err := this.inspector.InspectOriginalTable(); err != nil {
+	if err := mgtr.inspector.InspectOriginalTable(); err != nil {
 		return err
 	}
 	// So far so good, table is accessible and valid. 
// Let's get master connection config - if this.migrationContext.AssumeMasterHostname == "" { + if mgtr.migrationContext.AssumeMasterHostname == "" { // No forced master host; detect master - if this.migrationContext.ApplierConnectionConfig, err = this.inspector.getMasterConnectionConfig(); err != nil { + if mgtr.migrationContext.ApplierConnectionConfig, err = mgtr.inspector.getMasterConnectionConfig(); err != nil { return err } - this.migrationContext.Log.Infof("Master found to be %+v", *this.migrationContext.ApplierConnectionConfig.ImpliedKey) + mgtr.migrationContext.Log.Infof("Master found to be %+v", *mgtr.migrationContext.ApplierConnectionConfig.ImpliedKey) } else { // Forced master host. - key, err := mysql.ParseInstanceKey(this.migrationContext.AssumeMasterHostname) + key, err := mysql.ParseInstanceKey(mgtr.migrationContext.AssumeMasterHostname) if err != nil { return err } - this.migrationContext.ApplierConnectionConfig = this.migrationContext.InspectorConnectionConfig.DuplicateCredentials(*key) - if this.migrationContext.CliMasterUser != "" { - this.migrationContext.ApplierConnectionConfig.User = this.migrationContext.CliMasterUser + mgtr.migrationContext.ApplierConnectionConfig = mgtr.migrationContext.InspectorConnectionConfig.DuplicateCredentials(*key) + if mgtr.migrationContext.CliMasterUser != "" { + mgtr.migrationContext.ApplierConnectionConfig.User = mgtr.migrationContext.CliMasterUser } - if this.migrationContext.CliMasterPassword != "" { - this.migrationContext.ApplierConnectionConfig.Password = this.migrationContext.CliMasterPassword + if mgtr.migrationContext.CliMasterPassword != "" { + mgtr.migrationContext.ApplierConnectionConfig.Password = mgtr.migrationContext.CliMasterPassword } - if err := this.migrationContext.ApplierConnectionConfig.RegisterTLSConfig(); err != nil { + if err := mgtr.migrationContext.ApplierConnectionConfig.RegisterTLSConfig(); err != nil { return err } - this.migrationContext.Log.Infof("Master forced to be %+v", 
*this.migrationContext.ApplierConnectionConfig.ImpliedKey) + mgtr.migrationContext.Log.Infof("Master forced to be %+v", *mgtr.migrationContext.ApplierConnectionConfig.ImpliedKey) } // validate configs - if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica { - if this.migrationContext.InspectorIsAlsoApplier() { - return fmt.Errorf("Instructed to --test-on-replica or --migrate-on-replica, but the server we connect to doesn't seem to be a replica") + if mgtr.migrationContext.TestOnReplica || mgtr.migrationContext.MigrateOnReplica { + if mgtr.migrationContext.InspectorIsAlsoApplier() { + return fmt.Errorf("instructed to --test-on-replica or --migrate-on-replica, but the server we connect to doesn't seem to be a replica") } - this.migrationContext.Log.Infof("--test-on-replica or --migrate-on-replica given. Will not execute on master %+v but rather on replica %+v itself", - *this.migrationContext.ApplierConnectionConfig.ImpliedKey, *this.migrationContext.InspectorConnectionConfig.ImpliedKey, + mgtr.migrationContext.Log.Infof("--test-on-replica or --migrate-on-replica given. 
Will not execute on master %+v but rather on replica %+v itself", + *mgtr.migrationContext.ApplierConnectionConfig.ImpliedKey, *mgtr.migrationContext.InspectorConnectionConfig.ImpliedKey, ) - this.migrationContext.ApplierConnectionConfig = this.migrationContext.InspectorConnectionConfig.Duplicate() - if this.migrationContext.GetThrottleControlReplicaKeys().Len() == 0 { - this.migrationContext.AddThrottleControlReplicaKey(this.migrationContext.InspectorConnectionConfig.Key) + mgtr.migrationContext.ApplierConnectionConfig = mgtr.migrationContext.InspectorConnectionConfig.Duplicate() + if mgtr.migrationContext.GetThrottleControlReplicaKeys().Len() == 0 { + mgtr.migrationContext.AddThrottleControlReplicaKey(mgtr.migrationContext.InspectorConnectionConfig.Key) } - } else if this.migrationContext.InspectorIsAlsoApplier() && !this.migrationContext.AllowedRunningOnMaster { + } else if mgtr.migrationContext.InspectorIsAlsoApplier() && !mgtr.migrationContext.AllowedRunningOnMaster { return ErrMigrationNotAllowedOnMaster } - if err := this.inspector.validateLogSlaveUpdates(); err != nil { + if err := mgtr.inspector.validateLogSlaveUpdates(); err != nil { return err } @@ -1143,20 +1143,20 @@ func (this *Migrator) initiateInspector() (err error) { } // initiateStatus sets and activates the printStatus() ticker -func (this *Migrator) initiateStatus() { - this.printStatus(ForcePrintStatusAndHintRule) +func (mgtr *Migrator) initiateStatus() { + mgtr.printStatus(ForcePrintStatusAndHintRule) ticker := time.NewTicker(time.Second) defer ticker.Stop() var previousCount int64 for range ticker.C { - if atomic.LoadInt64(&this.finishedMigrating) > 0 { + if atomic.LoadInt64(&mgtr.finishedMigrating) > 0 { return } - go this.printStatus(HeuristicPrintStatusRule) - totalCopied := atomic.LoadInt64(&this.migrationContext.TotalRowsCopied) + go mgtr.printStatus(HeuristicPrintStatusRule) + totalCopied := atomic.LoadInt64(&mgtr.migrationContext.TotalRowsCopied) if previousCount > 0 { copiedThisLoop 
:= totalCopied - previousCount - atomic.StoreInt64(&this.migrationContext.EtaRowsPerSecond, copiedThisLoop) + atomic.StoreInt64(&mgtr.migrationContext.EtaRowsPerSecond, copiedThisLoop) } previousCount = totalCopied } @@ -1166,101 +1166,101 @@ func (this *Migrator) initiateStatus() { // to keep in mind; such as the name of migrated table, throttle params etc. // This gets printed at beginning and end of migration, every 10 minutes throughout // migration, and as response to the "status" interactive command. -func (this *Migrator) printMigrationStatusHint(writers ...io.Writer) { +func (mgtr *Migrator) printMigrationStatusHint(writers ...io.Writer) { w := io.MultiWriter(writers...) fmt.Fprintf(w, "# Migrating %s.%s; Ghost table is %s.%s\n", - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.OriginalTableName), - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetGhostTableName()), + sql.EscapeName(mgtr.migrationContext.DatabaseName), + sql.EscapeName(mgtr.migrationContext.OriginalTableName), + sql.EscapeName(mgtr.migrationContext.DatabaseName), + sql.EscapeName(mgtr.migrationContext.GetGhostTableName()), ) fmt.Fprintf(w, "# Migrating %+v; inspecting %+v; executing on %+v\n", - *this.applier.connectionConfig.ImpliedKey, - *this.inspector.connectionConfig.ImpliedKey, - this.migrationContext.Hostname, + *mgtr.applier.connectionConfig.ImpliedKey, + *mgtr.inspector.connectionConfig.ImpliedKey, + mgtr.migrationContext.Hostname, ) fmt.Fprintf(w, "# Migration started at %+v\n", - this.migrationContext.StartTime.Format(time.RubyDate), + mgtr.migrationContext.StartTime.Format(time.RubyDate), ) - maxLoad := this.migrationContext.GetMaxLoad() - criticalLoad := this.migrationContext.GetCriticalLoad() + maxLoad := mgtr.migrationContext.GetMaxLoad() + criticalLoad := mgtr.migrationContext.GetCriticalLoad() fmt.Fprintf(w, "# chunk-size: %+v; max-lag-millis: %+vms; dml-batch-size: %+v; max-load: %s; 
critical-load: %s; nice-ratio: %f\n", - atomic.LoadInt64(&this.migrationContext.ChunkSize), - atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold), - atomic.LoadInt64(&this.migrationContext.DMLBatchSize), + atomic.LoadInt64(&mgtr.migrationContext.ChunkSize), + atomic.LoadInt64(&mgtr.migrationContext.MaxLagMillisecondsThrottleThreshold), + atomic.LoadInt64(&mgtr.migrationContext.DMLBatchSize), maxLoad.String(), criticalLoad.String(), - this.migrationContext.GetNiceRatio(), + mgtr.migrationContext.GetNiceRatio(), ) - if this.migrationContext.ThrottleFlagFile != "" { + if mgtr.migrationContext.ThrottleFlagFile != "" { setIndicator := "" - if base.FileExists(this.migrationContext.ThrottleFlagFile) { + if base.FileExists(mgtr.migrationContext.ThrottleFlagFile) { setIndicator = "[set]" } fmt.Fprintf(w, "# throttle-flag-file: %+v %+v\n", - this.migrationContext.ThrottleFlagFile, setIndicator, + mgtr.migrationContext.ThrottleFlagFile, setIndicator, ) } - if this.migrationContext.ThrottleAdditionalFlagFile != "" { + if mgtr.migrationContext.ThrottleAdditionalFlagFile != "" { setIndicator := "" - if base.FileExists(this.migrationContext.ThrottleAdditionalFlagFile) { + if base.FileExists(mgtr.migrationContext.ThrottleAdditionalFlagFile) { setIndicator = "[set]" } fmt.Fprintf(w, "# throttle-additional-flag-file: %+v %+v\n", - this.migrationContext.ThrottleAdditionalFlagFile, setIndicator, + mgtr.migrationContext.ThrottleAdditionalFlagFile, setIndicator, ) } - if throttleQuery := this.migrationContext.GetThrottleQuery(); throttleQuery != "" { + if throttleQuery := mgtr.migrationContext.GetThrottleQuery(); throttleQuery != "" { fmt.Fprintf(w, "# throttle-query: %+v\n", throttleQuery, ) } - if throttleControlReplicaKeys := this.migrationContext.GetThrottleControlReplicaKeys(); throttleControlReplicaKeys.Len() > 0 { + if throttleControlReplicaKeys := mgtr.migrationContext.GetThrottleControlReplicaKeys(); throttleControlReplicaKeys.Len() > 0 { fmt.Fprintf(w, 
"# throttle-control-replicas count: %+v\n", throttleControlReplicaKeys.Len(), ) } - if this.migrationContext.PostponeCutOverFlagFile != "" { + if mgtr.migrationContext.PostponeCutOverFlagFile != "" { setIndicator := "" - if base.FileExists(this.migrationContext.PostponeCutOverFlagFile) { + if base.FileExists(mgtr.migrationContext.PostponeCutOverFlagFile) { setIndicator = "[set]" } fmt.Fprintf(w, "# postpone-cut-over-flag-file: %+v %+v\n", - this.migrationContext.PostponeCutOverFlagFile, setIndicator, + mgtr.migrationContext.PostponeCutOverFlagFile, setIndicator, ) } - if this.migrationContext.PanicFlagFile != "" { + if mgtr.migrationContext.PanicFlagFile != "" { fmt.Fprintf(w, "# panic-flag-file: %+v\n", - this.migrationContext.PanicFlagFile, + mgtr.migrationContext.PanicFlagFile, ) } fmt.Fprintf(w, "# Serving on unix socket: %+v\n", - this.migrationContext.ServeSocketFile, + mgtr.migrationContext.ServeSocketFile, ) - if this.migrationContext.ServeTCPPort != 0 { - fmt.Fprintf(w, "# Serving on TCP port: %+v\n", this.migrationContext.ServeTCPPort) + if mgtr.migrationContext.ServeTCPPort != 0 { + fmt.Fprintf(w, "# Serving on TCP port: %+v\n", mgtr.migrationContext.ServeTCPPort) } } // getProgressPercent returns an estimate of migration progess as a percent. 
-func (this *Migrator) getProgressPercent(rowsEstimate int64) (progressPct float64) { +func (mgtr *Migrator) getProgressPercent(rowsEstimate int64) (progressPct float64) { progressPct = 100.0 if rowsEstimate > 0 { - progressPct *= float64(this.migrationContext.GetTotalRowsCopied()) / float64(rowsEstimate) + progressPct *= float64(mgtr.migrationContext.GetTotalRowsCopied()) / float64(rowsEstimate) } return progressPct } // getMigrationETA returns the estimated duration of the migration -func (this *Migrator) getMigrationETA(rowsEstimate int64) (eta string, duration time.Duration) { +func (mgtr *Migrator) getMigrationETA(rowsEstimate int64) (eta string, duration time.Duration) { duration = time.Duration(base.ETAUnknown) - progressPct := this.getProgressPercent(rowsEstimate) + progressPct := mgtr.getProgressPercent(rowsEstimate) if progressPct >= 100.0 { duration = 0 } else if progressPct >= 0.1 { - totalRowsCopied := this.migrationContext.GetTotalRowsCopied() - etaRowsPerSecond := atomic.LoadInt64(&this.migrationContext.EtaRowsPerSecond) + totalRowsCopied := mgtr.migrationContext.GetTotalRowsCopied() + etaRowsPerSecond := atomic.LoadInt64(&mgtr.migrationContext.EtaRowsPerSecond) var etaSeconds float64 // If there is data available on our current row-copies-per-second rate, use it. // Otherwise we can fallback to the total elapsed time and extrapolate. 
@@ -1270,7 +1270,7 @@ func (this *Migrator) getMigrationETA(rowsEstimate int64) (eta string, duration remainingRows := float64(rowsEstimate) - float64(totalRowsCopied) etaSeconds = remainingRows / float64(etaRowsPerSecond) } else { - elapsedRowCopySeconds := this.migrationContext.ElapsedRowCopyTime().Seconds() + elapsedRowCopySeconds := mgtr.migrationContext.ElapsedRowCopyTime().Seconds() totalExpectedSeconds := elapsedRowCopySeconds * float64(rowsEstimate) / float64(totalRowsCopied) etaSeconds = totalExpectedSeconds - elapsedRowCopySeconds } @@ -1294,22 +1294,22 @@ func (this *Migrator) getMigrationETA(rowsEstimate int64) (eta string, duration } // getMigrationStateAndETA returns the state and eta of the migration. -func (this *Migrator) getMigrationStateAndETA(rowsEstimate int64) (state, eta string, etaDuration time.Duration) { - eta, etaDuration = this.getMigrationETA(rowsEstimate) +func (mgtr *Migrator) getMigrationStateAndETA(rowsEstimate int64) (state, eta string, etaDuration time.Duration) { + eta, etaDuration = mgtr.getMigrationETA(rowsEstimate) state = "migrating" - if atomic.LoadInt64(&this.migrationContext.CountingRowsFlag) > 0 && !this.migrationContext.ConcurrentCountTableRows { + if atomic.LoadInt64(&mgtr.migrationContext.CountingRowsFlag) > 0 && !mgtr.migrationContext.ConcurrentCountTableRows { state = "counting rows" - } else if atomic.LoadInt64(&this.migrationContext.IsPostponingCutOver) > 0 { + } else if atomic.LoadInt64(&mgtr.migrationContext.IsPostponingCutOver) > 0 { eta = "due" state = "postponing cut-over" - } else if isThrottled, throttleReason, _ := this.migrationContext.IsThrottled(); isThrottled { + } else if isThrottled, throttleReason, _ := mgtr.migrationContext.IsThrottled(); isThrottled { state = fmt.Sprintf("throttled, %s", throttleReason) } return state, eta, etaDuration } // shouldPrintStatus returns true when the migrator is due to print status info. 
-func (this *Migrator) shouldPrintStatus(rule PrintStatusRule, elapsedSeconds int64, etaDuration time.Duration) (shouldPrint bool) { +func (mgtr *Migrator) shouldPrintStatus(rule PrintStatusRule, elapsedSeconds int64, etaDuration time.Duration) (shouldPrint bool) { if rule != HeuristicPrintStatusRule { return true } @@ -1323,7 +1323,7 @@ func (this *Migrator) shouldPrintStatus(rule PrintStatusRule, elapsedSeconds int shouldPrint = (elapsedSeconds%5 == 0) } else if elapsedSeconds <= 180 { shouldPrint = (elapsedSeconds%5 == 0) - } else if this.migrationContext.TimeSincePointOfInterest().Seconds() <= 60 { + } else if mgtr.migrationContext.TimeSincePointOfInterest().Seconds() <= 60 { shouldPrint = (elapsedSeconds%5 == 0) } else { shouldPrint = (elapsedSeconds%30 == 0) @@ -1333,7 +1333,7 @@ func (this *Migrator) shouldPrintStatus(rule PrintStatusRule, elapsedSeconds int } // shouldPrintMigrationStatus returns true when the migrator is due to print the migration status hint -func (this *Migrator) shouldPrintMigrationStatusHint(rule PrintStatusRule, elapsedSeconds int64) (shouldPrint bool) { +func (mgtr *Migrator) shouldPrintMigrationStatusHint(rule PrintStatusRule, elapsedSeconds int64) (shouldPrint bool) { if elapsedSeconds%600 == 0 { shouldPrint = true } else if rule == ForcePrintStatusAndHintRule { @@ -1347,54 +1347,54 @@ func (this *Migrator) shouldPrintMigrationStatusHint(rule PrintStatusRule, elaps // `rule` indicates the type of output expected. // By default the status is written to standard output, but other writers can // be used as well. 
-func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) { +func (mgtr *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) { if rule == NoPrintStatusRule { return } writers = append(writers, os.Stdout) - elapsedTime := this.migrationContext.ElapsedTime() + elapsedTime := mgtr.migrationContext.ElapsedTime() elapsedSeconds := int64(elapsedTime.Seconds()) - totalRowsCopied := this.migrationContext.GetTotalRowsCopied() - rowsEstimate := atomic.LoadInt64(&this.migrationContext.RowsEstimate) + atomic.LoadInt64(&this.migrationContext.RowsDeltaEstimate) - if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 { + totalRowsCopied := mgtr.migrationContext.GetTotalRowsCopied() + rowsEstimate := atomic.LoadInt64(&mgtr.migrationContext.RowsEstimate) + atomic.LoadInt64(&mgtr.migrationContext.RowsDeltaEstimate) + if atomic.LoadInt64(&mgtr.rowCopyCompleteFlag) == 1 { // Done copying rows. The totalRowsCopied value is the de-facto number of rows, // and there is no further need to keep updating the value. rowsEstimate = totalRowsCopied } // we take the opportunity to update migration context with progressPct - progressPct := this.getProgressPercent(rowsEstimate) - this.migrationContext.SetProgressPct(progressPct) + progressPct := mgtr.getProgressPercent(rowsEstimate) + mgtr.migrationContext.SetProgressPct(progressPct) // Before status, let's see if we should print a nice reminder for what exactly we're doing here. - if this.shouldPrintMigrationStatusHint(rule, elapsedSeconds) { - this.printMigrationStatusHint(writers...) + if mgtr.shouldPrintMigrationStatusHint(rule, elapsedSeconds) { + mgtr.printMigrationStatusHint(writers...) 
} // Get state + ETA - state, eta, etaDuration := this.getMigrationStateAndETA(rowsEstimate) - this.migrationContext.SetETADuration(etaDuration) + state, eta, etaDuration := mgtr.getMigrationStateAndETA(rowsEstimate) + mgtr.migrationContext.SetETADuration(etaDuration) - if !this.shouldPrintStatus(rule, elapsedSeconds, etaDuration) { + if !mgtr.shouldPrintStatus(rule, elapsedSeconds, etaDuration) { return } - currentBinlogCoordinates := this.eventsStreamer.GetCurrentBinlogCoordinates() + currentBinlogCoordinates := mgtr.eventsStreamer.GetCurrentBinlogCoordinates() status := fmt.Sprintf("Copy: %d/%d %.1f%%; Applied: %d; Backlog: %d/%d; Time: %+v(total), %+v(copy); streamer: %+v; Lag: %.2fs, HeartbeatLag: %.2fs, State: %s; ETA: %s", totalRowsCopied, rowsEstimate, progressPct, - atomic.LoadInt64(&this.migrationContext.TotalDMLEventsApplied), - len(this.applyEventsQueue), cap(this.applyEventsQueue), - base.PrettifyDurationOutput(elapsedTime), base.PrettifyDurationOutput(this.migrationContext.ElapsedRowCopyTime()), + atomic.LoadInt64(&mgtr.migrationContext.TotalDMLEventsApplied), + len(mgtr.applyEventsQueue), cap(mgtr.applyEventsQueue), + base.PrettifyDurationOutput(elapsedTime), base.PrettifyDurationOutput(mgtr.migrationContext.ElapsedRowCopyTime()), currentBinlogCoordinates.DisplayString(), - this.migrationContext.GetCurrentLagDuration().Seconds(), - this.migrationContext.TimeSinceLastHeartbeatOnChangelog().Seconds(), + mgtr.migrationContext.GetCurrentLagDuration().Seconds(), + mgtr.migrationContext.TimeSinceLastHeartbeatOnChangelog().Seconds(), state, eta, ) - this.applier.WriteChangelog( - fmt.Sprintf("copy iteration %d at %d", this.migrationContext.GetIteration(), time.Now().Unix()), + mgtr.applier.WriteChangelog( + fmt.Sprintf("copy iteration %d at %d", mgtr.migrationContext.GetIteration(), time.Now().Unix()), state, ) w := io.MultiWriter(writers...) 
@@ -1406,47 +1406,47 @@ func (this *Migrator) printStatus(rule PrintStatusRule, writers ...io.Writer) { // fmt.Sprintf. So, the argument of every function called on the DefaultLogger object // migrationContext.Log will eventually pass through fmt.Sprintf, and thus the '%' character // needs to be escaped. - this.migrationContext.Log.Info(strings.Replace(status, "%", "%%", 1)) + mgtr.migrationContext.Log.Info(strings.Replace(status, "%", "%%", 1)) - hooksStatusIntervalSec := this.migrationContext.HooksStatusIntervalSec + hooksStatusIntervalSec := mgtr.migrationContext.HooksStatusIntervalSec if hooksStatusIntervalSec > 0 && elapsedSeconds%hooksStatusIntervalSec == 0 { - this.hooksExecutor.onStatus(status) + mgtr.hooksExecutor.onStatus(status) } } // initiateStreaming begins streaming of binary log events and registers listeners for such events -func (this *Migrator) initiateStreaming() error { - this.eventsStreamer = NewEventsStreamer(this.migrationContext) - if err := this.eventsStreamer.InitDBConnections(); err != nil { +func (mgtr *Migrator) initiateStreaming() error { + mgtr.eventsStreamer = NewEventsStreamer(mgtr.migrationContext) + if err := mgtr.eventsStreamer.InitDBConnections(); err != nil { return err } - this.eventsStreamer.AddListener( + mgtr.eventsStreamer.AddListener( false, - this.migrationContext.DatabaseName, - this.migrationContext.GetChangelogTableName(), + mgtr.migrationContext.DatabaseName, + mgtr.migrationContext.GetChangelogTableName(), func(dmlEntry *binlog.BinlogEntry) error { - return this.onChangelogEvent(dmlEntry) + return mgtr.onChangelogEvent(dmlEntry) }, ) go func() { - this.migrationContext.Log.Debugf("Beginning streaming") - err := this.eventsStreamer.StreamEvents(this.canStopStreaming) + mgtr.migrationContext.Log.Debugf("Beginning streaming") + err := mgtr.eventsStreamer.StreamEvents(mgtr.canStopStreaming) if err != nil { // Use helper to prevent deadlock if listenOnPanicAbort already exited - _ = 
base.SendWithContext(this.migrationContext.GetContext(), this.migrationContext.PanicAbort, err) + _ = base.SendWithContext(mgtr.migrationContext.GetContext(), mgtr.migrationContext.PanicAbort, err) } - this.migrationContext.Log.Debugf("Done streaming") + mgtr.migrationContext.Log.Debugf("Done streaming") }() go func() { ticker := time.NewTicker(time.Second) defer ticker.Stop() for range ticker.C { - if atomic.LoadInt64(&this.finishedMigrating) > 0 { + if atomic.LoadInt64(&mgtr.finishedMigrating) > 0 { return } - this.migrationContext.SetRecentBinlogCoordinates(this.eventsStreamer.GetCurrentBinlogCoordinates()) + mgtr.migrationContext.SetRecentBinlogCoordinates(mgtr.eventsStreamer.GetCurrentBinlogCoordinates()) } }() return nil @@ -1454,127 +1454,127 @@ func (this *Migrator) initiateStreaming() error { // addDMLEventsListener begins listening for binlog events on the original table, // and creates & enqueues a write task per such event. -func (this *Migrator) addDMLEventsListener() error { - err := this.eventsStreamer.AddListener( +func (mgtr *Migrator) addDMLEventsListener() error { + err := mgtr.eventsStreamer.AddListener( false, - this.migrationContext.DatabaseName, - this.migrationContext.OriginalTableName, + mgtr.migrationContext.DatabaseName, + mgtr.migrationContext.OriginalTableName, func(dmlEntry *binlog.BinlogEntry) error { // Use helper to prevent deadlock if buffer fills and executeWriteFuncs exits // This is critical because this callback blocks the event streamer - return base.SendWithContext(this.migrationContext.GetContext(), this.applyEventsQueue, newApplyEventStructByDML(dmlEntry)) + return base.SendWithContext(mgtr.migrationContext.GetContext(), mgtr.applyEventsQueue, newApplyEventStructByDML(dmlEntry)) }, ) return err } // initiateThrottler kicks in the throttling collection and the throttling checks. 
-func (this *Migrator) initiateThrottler() { - this.throttler = NewThrottler(this.migrationContext, this.applier, this.inspector, this.appVersion) - - go this.throttler.initiateThrottlerCollection(this.firstThrottlingCollected) - this.migrationContext.Log.Infof("Waiting for first throttle metrics to be collected") - <-this.firstThrottlingCollected // replication lag - <-this.firstThrottlingCollected // HTTP status - <-this.firstThrottlingCollected // other, general metrics - this.migrationContext.Log.Infof("First throttle metrics collected") - go this.throttler.initiateThrottlerChecks() +func (mgtr *Migrator) initiateThrottler() { + mgtr.throttler = NewThrottler(mgtr.migrationContext, mgtr.applier, mgtr.inspector, mgtr.appVersion) + + go mgtr.throttler.initiateThrottlerCollection(mgtr.firstThrottlingCollected) + mgtr.migrationContext.Log.Infof("Waiting for first throttle metrics to be collected") + <-mgtr.firstThrottlingCollected // replication lag + <-mgtr.firstThrottlingCollected // HTTP status + <-mgtr.firstThrottlingCollected // other, general metrics + mgtr.migrationContext.Log.Infof("First throttle metrics collected") + go mgtr.throttler.initiateThrottlerChecks() } -func (this *Migrator) initiateApplier() error { - this.applier = NewApplier(this.migrationContext) - if err := this.applier.InitDBConnections(); err != nil { +func (mgtr *Migrator) initiateApplier() error { + mgtr.applier = NewApplier(mgtr.migrationContext) + if err := mgtr.applier.InitDBConnections(); err != nil { return err } - if this.migrationContext.Revert { - if err := this.applier.CreateChangelogTable(); err != nil { - this.migrationContext.Log.Errorf("Unable to create changelog table, see further error details. Perhaps a previous migration failed without dropping the table? OR is there a running migration? 
Bailing out") + if mgtr.migrationContext.Revert { + if err := mgtr.applier.CreateChangelogTable(); err != nil { + mgtr.migrationContext.Log.Errorf("unable to create changelog table, see further error details. Perhaps a previous migration failed without dropping the table? OR is there a running migration? Bailing out") return err } - } else if !this.migrationContext.Resume { - if err := this.applier.ValidateOrDropExistingTables(); err != nil { + } else if !mgtr.migrationContext.Resume { + if err := mgtr.applier.ValidateOrDropExistingTables(); err != nil { return err } - if err := this.applier.CreateChangelogTable(); err != nil { - this.migrationContext.Log.Errorf("Unable to create changelog table, see further error details. Perhaps a previous migration failed without dropping the table? OR is there a running migration? Bailing out") + if err := mgtr.applier.CreateChangelogTable(); err != nil { + mgtr.migrationContext.Log.Errorf("unable to create changelog table, see further error details. Perhaps a previous migration failed without dropping the table? OR is there a running migration? Bailing out") return err } - if err := this.applier.CreateGhostTable(); err != nil { - this.migrationContext.Log.Errorf("Unable to create ghost table, see further error details. Perhaps a previous migration failed without dropping the table? Bailing out") + if err := mgtr.applier.CreateGhostTable(); err != nil { + mgtr.migrationContext.Log.Errorf("unable to create ghost table, see further error details. Perhaps a previous migration failed without dropping the table? Bailing out") return err } - if err := this.applier.AlterGhost(); err != nil { - this.migrationContext.Log.Errorf("Unable to ALTER ghost table, see further error details. Bailing out") + if err := mgtr.applier.AlterGhost(); err != nil { + mgtr.migrationContext.Log.Errorf("unable to ALTER ghost table, see further error details. 
Bailing out") return err } - if this.migrationContext.OriginalTableAutoIncrement > 0 && !this.parser.IsAutoIncrementDefined() { + if mgtr.migrationContext.OriginalTableAutoIncrement > 0 && !mgtr.parser.IsAutoIncrementDefined() { // Original table has AUTO_INCREMENT value and the -alter statement does not indicate any override, // so we should copy AUTO_INCREMENT value onto our ghost table. - if err := this.applier.AlterGhostAutoIncrement(); err != nil { - this.migrationContext.Log.Errorf("Unable to ALTER ghost table AUTO_INCREMENT value, see further error details. Bailing out") + if err := mgtr.applier.AlterGhostAutoIncrement(); err != nil { + mgtr.migrationContext.Log.Errorf("unable to ALTER ghost table AUTO_INCREMENT value, see further error details. Bailing out") return err } } - if _, err := this.applier.WriteChangelogState(string(GhostTableMigrated)); err != nil { + if _, err := mgtr.applier.WriteChangelogState(string(GhostTableMigrated)); err != nil { return err } } // ensure performance_schema.metadata_locks is available. - if err := this.applier.StateMetadataLockInstrument(); err != nil { - this.migrationContext.Log.Warning("Unable to enable metadata lock instrument, see further error details.") + if err := mgtr.applier.StateMetadataLockInstrument(); err != nil { + mgtr.migrationContext.Log.Warning("unable to enable metadata lock instrument, see further error details") } - if !this.migrationContext.IsOpenMetadataLockInstruments { - if !this.migrationContext.SkipMetadataLockCheck { - return this.migrationContext.Log.Errorf("Bailing out because metadata lock instrument not enabled. Use --skip-metadata-lock-check if you wish to proceed without. See https://github.com/github/gh-ost/pull/1536 for details.") + if !mgtr.migrationContext.IsOpenMetadataLockInstruments { + if !mgtr.migrationContext.SkipMetadataLockCheck { + return mgtr.migrationContext.Log.Errorf("bailing out because metadata lock instrument not enabled. 
Use --skip-metadata-lock-check if you wish to proceed without. See https://github.com/github/gh-ost/pull/1536 for details") } - this.migrationContext.Log.Warning("Proceeding without metadata lock check. There is a small chance of data loss if another session accesses the ghost table during cut-over. See https://github.com/github/gh-ost/pull/1536 for details.") + mgtr.migrationContext.Log.Warning("proceeding without metadata lock check. There is a small chance of data loss if another session accesses the ghost table during cut-over. See https://github.com/github/gh-ost/pull/1536 for details") } - go this.applier.InitiateHeartbeat() + go mgtr.applier.InitiateHeartbeat() return nil } // iterateChunks iterates the existing table rows, and generates a copy task of // a chunk of rows onto the ghost table. -func (this *Migrator) iterateChunks() error { +func (mgtr *Migrator) iterateChunks() error { terminateRowIteration := func(err error) error { - _ = base.SendWithContext(this.migrationContext.GetContext(), this.rowCopyComplete, err) - return this.migrationContext.Log.Errore(err) + _ = base.SendWithContext(mgtr.migrationContext.GetContext(), mgtr.rowCopyComplete, err) + return mgtr.migrationContext.Log.Errore(err) } - if this.migrationContext.Noop { - this.migrationContext.Log.Debugf("Noop operation; not really copying data") + if mgtr.migrationContext.Noop { + mgtr.migrationContext.Log.Debugf("Noop operation; not really copying data") return terminateRowIteration(nil) } - if this.migrationContext.MigrationRangeMinValues == nil { - this.migrationContext.Log.Debugf("No rows found in table. Rowcopy will be implicitly empty") + if mgtr.migrationContext.MigrationRangeMinValues == nil { + mgtr.migrationContext.Log.Debugf("No rows found in table. 
Rowcopy will be implicitly empty") return terminateRowIteration(nil) } var hasNoFurtherRangeFlag int64 // Iterate per chunk: for { - if err := this.checkAbort(); err != nil { + if err := mgtr.checkAbort(); err != nil { return terminateRowIteration(err) } - if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 || atomic.LoadInt64(&hasNoFurtherRangeFlag) == 1 { + if atomic.LoadInt64(&mgtr.rowCopyCompleteFlag) == 1 || atomic.LoadInt64(&hasNoFurtherRangeFlag) == 1 { // Done // There's another such check down the line return nil } copyRowsFunc := func() error { - this.migrationContext.SetNextIterationRangeMinValues() + mgtr.migrationContext.SetNextIterationRangeMinValues() // Copy task: applyCopyRowsFunc := func() error { - if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 || atomic.LoadInt64(&hasNoFurtherRangeFlag) == 1 { + if atomic.LoadInt64(&mgtr.rowCopyCompleteFlag) == 1 || atomic.LoadInt64(&hasNoFurtherRangeFlag) == 1 { // Done. // There's another such check down the line return nil } // When hasFurtherRange is false, original table might be write locked and CalculateNextIterationRangeEndValues would hangs forever - hasFurtherRange, err := this.applier.CalculateNextIterationRangeEndValues() + hasFurtherRange, err := mgtr.applier.CalculateNextIterationRangeEndValues() if err != nil { return err // wrapping call will retry } @@ -1582,7 +1582,7 @@ func (this *Migrator) iterateChunks() error { atomic.StoreInt64(&hasNoFurtherRangeFlag, 1) return terminateRowIteration(nil) } - if atomic.LoadInt64(&this.rowCopyCompleteFlag) == 1 { + if atomic.LoadInt64(&mgtr.rowCopyCompleteFlag) == 1 { // No need for more writes. // This is the de-facto place where we avoid writing in the event of completed cut-over. // There could _still_ be a race condition, but that's as close as we can get. @@ -1593,44 +1593,44 @@ func (this *Migrator) iterateChunks() error { // _ghost_ table, which no longer exists. So, bothering error messages and all, but no damage. 
return nil } - _, rowsAffected, _, err := this.applier.ApplyIterationInsertQuery() + _, rowsAffected, _, err := mgtr.applier.ApplyIterationInsertQuery() if err != nil { return err // wrapping call will retry } - if this.migrationContext.PanicOnWarnings { - if len(this.migrationContext.MigrationLastInsertSQLWarnings) > 0 { - for _, warning := range this.migrationContext.MigrationLastInsertSQLWarnings { - this.migrationContext.Log.Infof("ApplyIterationInsertQuery has SQL warnings! %s", warning) + if mgtr.migrationContext.PanicOnWarnings { + if len(mgtr.migrationContext.MigrationLastInsertSQLWarnings) > 0 { + for _, warning := range mgtr.migrationContext.MigrationLastInsertSQLWarnings { + mgtr.migrationContext.Log.Infof("ApplyIterationInsertQuery has SQL warnings! %s", warning) } - joinedWarnings := strings.Join(this.migrationContext.MigrationLastInsertSQLWarnings, "; ") + joinedWarnings := strings.Join(mgtr.migrationContext.MigrationLastInsertSQLWarnings, "; ") return terminateRowIteration(fmt.Errorf("ApplyIterationInsertQuery failed because of SQL warnings: [%s]", joinedWarnings)) } } - atomic.AddInt64(&this.migrationContext.TotalRowsCopied, rowsAffected) - atomic.AddInt64(&this.migrationContext.Iteration, 1) + atomic.AddInt64(&mgtr.migrationContext.TotalRowsCopied, rowsAffected) + atomic.AddInt64(&mgtr.migrationContext.Iteration, 1) return nil } - if err := this.retryBatchCopyWithHooks(applyCopyRowsFunc); err != nil { + if err := mgtr.retryBatchCopyWithHooks(applyCopyRowsFunc); err != nil { return terminateRowIteration(err) } // record last successfully copied range - this.applier.LastIterationRangeMutex.Lock() - if this.migrationContext.MigrationIterationRangeMinValues != nil && this.migrationContext.MigrationIterationRangeMaxValues != nil { - this.applier.LastIterationRangeMinValues = this.migrationContext.MigrationIterationRangeMinValues.Clone() - this.applier.LastIterationRangeMaxValues = this.migrationContext.MigrationIterationRangeMaxValues.Clone() + 
mgtr.applier.LastIterationRangeMutex.Lock() + if mgtr.migrationContext.MigrationIterationRangeMinValues != nil && mgtr.migrationContext.MigrationIterationRangeMaxValues != nil { + mgtr.applier.LastIterationRangeMinValues = mgtr.migrationContext.MigrationIterationRangeMinValues.Clone() + mgtr.applier.LastIterationRangeMaxValues = mgtr.migrationContext.MigrationIterationRangeMaxValues.Clone() } - this.applier.LastIterationRangeMutex.Unlock() + mgtr.applier.LastIterationRangeMutex.Unlock() return nil } // Enqueue copy operation; to be executed by executeWriteFuncs() // Use helper to prevent deadlock if executeWriteFuncs exits - if err := base.SendWithContext(this.migrationContext.GetContext(), this.copyRowsQueue, copyRowsFunc); err != nil { + if err := base.SendWithContext(mgtr.migrationContext.GetContext(), mgtr.copyRowsQueue, copyRowsFunc); err != nil { // Context cancelled, check for abort and exit - if abortErr := this.checkAbort(); abortErr != nil { + if abortErr := mgtr.checkAbort(); abortErr != nil { return terminateRowIteration(abortErr) } return terminateRowIteration(err) @@ -1638,11 +1638,11 @@ func (this *Migrator) iterateChunks() error { } } -func (this *Migrator) onApplyEventStruct(eventStruct *applyEventStruct) error { +func (mgtr *Migrator) onApplyEventStruct(eventStruct *applyEventStruct) error { handleNonDMLEventStruct := func(eventStruct *applyEventStruct) error { if eventStruct.writeFunc != nil { - if err := this.retryOperation(*eventStruct.writeFunc); err != nil { - return this.migrationContext.Log.Errore(err) + if err := mgtr.retryOperation(*eventStruct.writeFunc); err != nil { + return mgtr.migrationContext.Log.Errore(err) } } return nil @@ -1655,15 +1655,15 @@ func (this *Migrator) onApplyEventStruct(eventStruct *applyEventStruct) error { dmlEvents = append(dmlEvents, eventStruct.dmlEvent) var nonDmlStructToApply *applyEventStruct - availableEvents := len(this.applyEventsQueue) - batchSize := 
int(atomic.LoadInt64(&this.migrationContext.DMLBatchSize)) + availableEvents := len(mgtr.applyEventsQueue) + batchSize := int(atomic.LoadInt64(&mgtr.migrationContext.DMLBatchSize)) if availableEvents > batchSize-1 { // The "- 1" is because we already consumed one event: the original event that led to this function getting called. // So, if DMLBatchSize==1 we wish to not process any further events availableEvents = batchSize - 1 } for i := 0; i < availableEvents; i++ { - additionalStruct := <-this.applyEventsQueue + additionalStruct := <-mgtr.applyEventsQueue if additionalStruct.dmlEvent == nil { // Not a DML. We don't group this, and we don't batch any further nonDmlStructToApply = additionalStruct @@ -1673,21 +1673,21 @@ func (this *Migrator) onApplyEventStruct(eventStruct *applyEventStruct) error { } // Create a task to apply the DML event; this will be execute by executeWriteFuncs() var applyEventFunc tableWriteFunc = func() error { - return this.applier.ApplyDMLEventQueries(dmlEvents) + return mgtr.applier.ApplyDMLEventQueries(dmlEvents) } - if err := this.retryOperation(applyEventFunc); err != nil { - return this.migrationContext.Log.Errore(err) + if err := mgtr.retryOperation(applyEventFunc); err != nil { + return mgtr.migrationContext.Log.Errore(err) } // update applier coordinates - this.applier.CurrentCoordinatesMutex.Lock() - this.applier.CurrentCoordinates = eventStruct.coords - this.applier.CurrentCoordinatesMutex.Unlock() + mgtr.applier.CurrentCoordinatesMutex.Lock() + mgtr.applier.CurrentCoordinates = eventStruct.coords + mgtr.applier.CurrentCoordinatesMutex.Unlock() if nonDmlStructToApply != nil { // We pulled DML events from the queue, and then we hit a non-DML event. Wait! // We need to handle it! 
if err := handleNonDMLEventStruct(nonDmlStructToApply); err != nil { - return this.migrationContext.Log.Errore(err) + return mgtr.migrationContext.Log.Errore(err) } } } @@ -1697,93 +1697,93 @@ func (this *Migrator) onApplyEventStruct(eventStruct *applyEventStruct) error { // Checkpoint attempts to write a checkpoint of the Migrator's current state. // It gets the binlog coordinates of the last received trx and waits until the // applier reaches that trx. At that point it's safe to resume from these coordinates. -func (this *Migrator) Checkpoint(ctx context.Context) (*Checkpoint, error) { - coords := this.eventsStreamer.GetCurrentBinlogCoordinates() - this.applier.LastIterationRangeMutex.Lock() - if this.applier.LastIterationRangeMaxValues == nil || this.applier.LastIterationRangeMinValues == nil { - this.applier.LastIterationRangeMutex.Unlock() - return nil, errors.New("iteration range is empty, not checkpointing...") +func (mgtr *Migrator) Checkpoint(ctx context.Context) (*Checkpoint, error) { + coords := mgtr.eventsStreamer.GetCurrentBinlogCoordinates() + mgtr.applier.LastIterationRangeMutex.Lock() + if mgtr.applier.LastIterationRangeMaxValues == nil || mgtr.applier.LastIterationRangeMinValues == nil { + mgtr.applier.LastIterationRangeMutex.Unlock() + return nil, errors.New("iteration range is empty, not checkpointing") } chk := &Checkpoint{ - Iteration: this.migrationContext.GetIteration(), - IterationRangeMin: this.applier.LastIterationRangeMinValues.Clone(), - IterationRangeMax: this.applier.LastIterationRangeMaxValues.Clone(), + Iteration: mgtr.migrationContext.GetIteration(), + IterationRangeMin: mgtr.applier.LastIterationRangeMinValues.Clone(), + IterationRangeMax: mgtr.applier.LastIterationRangeMaxValues.Clone(), LastTrxCoords: coords, - RowsCopied: atomic.LoadInt64(&this.migrationContext.TotalRowsCopied), - DMLApplied: atomic.LoadInt64(&this.migrationContext.TotalDMLEventsApplied), + RowsCopied: atomic.LoadInt64(&mgtr.migrationContext.TotalRowsCopied), + 
DMLApplied: atomic.LoadInt64(&mgtr.migrationContext.TotalDMLEventsApplied), } - this.applier.LastIterationRangeMutex.Unlock() + mgtr.applier.LastIterationRangeMutex.Unlock() for { if err := ctx.Err(); err != nil { return nil, err } - this.applier.CurrentCoordinatesMutex.Lock() - if coords.SmallerThanOrEquals(this.applier.CurrentCoordinates) { - id, err := this.applier.WriteCheckpoint(chk) + mgtr.applier.CurrentCoordinatesMutex.Lock() + if coords.SmallerThanOrEquals(mgtr.applier.CurrentCoordinates) { + id, err := mgtr.applier.WriteCheckpoint(chk) chk.Id = id - this.applier.CurrentCoordinatesMutex.Unlock() + mgtr.applier.CurrentCoordinatesMutex.Unlock() return chk, err } - this.applier.CurrentCoordinatesMutex.Unlock() + mgtr.applier.CurrentCoordinatesMutex.Unlock() time.Sleep(500 * time.Millisecond) } } // CheckpointAfterCutOver writes a final checkpoint after the cutover completes successfully. -func (this *Migrator) CheckpointAfterCutOver() (*Checkpoint, error) { - if this.lastLockProcessed == nil || this.lastLockProcessed.coords.IsEmpty() { - return nil, this.migrationContext.Log.Errorf("lastLockProcessed coords are empty") +func (mgtr *Migrator) CheckpointAfterCutOver() (*Checkpoint, error) { + if mgtr.lastLockProcessed == nil || mgtr.lastLockProcessed.coords.IsEmpty() { + return nil, mgtr.migrationContext.Log.Errorf("lastLockProcessed coords are empty") } chk := &Checkpoint{ IsCutover: true, - LastTrxCoords: this.lastLockProcessed.coords, - IterationRangeMin: sql.NewColumnValues(this.migrationContext.UniqueKey.Len()), - IterationRangeMax: sql.NewColumnValues(this.migrationContext.UniqueKey.Len()), - Iteration: this.migrationContext.GetIteration(), - RowsCopied: atomic.LoadInt64(&this.migrationContext.TotalRowsCopied), - DMLApplied: atomic.LoadInt64(&this.migrationContext.TotalDMLEventsApplied), + LastTrxCoords: mgtr.lastLockProcessed.coords, + IterationRangeMin: sql.NewColumnValues(mgtr.migrationContext.UniqueKey.Len()), + IterationRangeMax: 
sql.NewColumnValues(mgtr.migrationContext.UniqueKey.Len()), + Iteration: mgtr.migrationContext.GetIteration(), + RowsCopied: atomic.LoadInt64(&mgtr.migrationContext.TotalRowsCopied), + DMLApplied: atomic.LoadInt64(&mgtr.migrationContext.TotalDMLEventsApplied), } - this.applier.LastIterationRangeMutex.Lock() - if this.applier.LastIterationRangeMinValues != nil { - chk.IterationRangeMin = this.applier.LastIterationRangeMinValues.Clone() + mgtr.applier.LastIterationRangeMutex.Lock() + if mgtr.applier.LastIterationRangeMinValues != nil { + chk.IterationRangeMin = mgtr.applier.LastIterationRangeMinValues.Clone() } - if this.applier.LastIterationRangeMaxValues != nil { - chk.IterationRangeMax = this.applier.LastIterationRangeMaxValues.Clone() + if mgtr.applier.LastIterationRangeMaxValues != nil { + chk.IterationRangeMax = mgtr.applier.LastIterationRangeMaxValues.Clone() } - this.applier.LastIterationRangeMutex.Unlock() + mgtr.applier.LastIterationRangeMutex.Unlock() - id, err := this.applier.WriteCheckpoint(chk) + id, err := mgtr.applier.WriteCheckpoint(chk) chk.Id = id return chk, err } -func (this *Migrator) checkpointLoop() { - if this.migrationContext.Noop { - this.migrationContext.Log.Debugf("Noop operation; not really checkpointing") +func (mgtr *Migrator) checkpointLoop() { + if mgtr.migrationContext.Noop { + mgtr.migrationContext.Log.Debugf("Noop operation; not really checkpointing") return } - checkpointInterval := time.Duration(this.migrationContext.CheckpointIntervalSeconds) * time.Second + checkpointInterval := time.Duration(mgtr.migrationContext.CheckpointIntervalSeconds) * time.Second ticker := time.NewTicker(checkpointInterval) for t := range ticker.C { - if atomic.LoadInt64(&this.finishedMigrating) > 0 || atomic.LoadInt64(&this.migrationContext.CutOverCompleteFlag) > 0 { + if atomic.LoadInt64(&mgtr.finishedMigrating) > 0 || atomic.LoadInt64(&mgtr.migrationContext.CutOverCompleteFlag) > 0 { return } - if 
atomic.LoadInt64(&this.migrationContext.InCutOverCriticalSectionFlag) > 0 { + if atomic.LoadInt64(&mgtr.migrationContext.InCutOverCriticalSectionFlag) > 0 { continue } - this.migrationContext.Log.Infof("starting checkpoint at %+v", t) + mgtr.migrationContext.Log.Infof("starting checkpoint at %+v", t) ctx, cancel := context.WithTimeout(context.Background(), checkpointTimeout) - chk, err := this.Checkpoint(ctx) + chk, err := mgtr.Checkpoint(ctx) if err != nil { if errors.Is(err, context.DeadlineExceeded) { - this.migrationContext.Log.Errorf("checkpoint attempt timed out after %+v", checkpointTimeout) + mgtr.migrationContext.Log.Errorf("checkpoint attempt timed out after %+v", checkpointTimeout) } else { - this.migrationContext.Log.Errorf("error attempting checkpoint: %+v", err) + mgtr.migrationContext.Log.Errorf("error attempting checkpoint: %+v", err) } } else { - this.migrationContext.Log.Infof("checkpoint success at coords=%+v range_min=%+v range_max=%+v iteration=%d", + mgtr.migrationContext.Log.Infof("checkpoint success at coords=%+v range_min=%+v range_max=%+v iteration=%d", chk.LastTrxCoords.DisplayString(), chk.IterationRangeMin.String(), chk.IterationRangeMax.String(), chk.Iteration) } cancel() @@ -1793,41 +1793,41 @@ func (this *Migrator) checkpointLoop() { // executeWriteFuncs writes data via applier: both the rowcopy and the events backlog. // This is where the ghost table gets the data. The function fills the data single-threaded. // Both event backlog and rowcopy events are polled; the backlog events have precedence. 
-func (this *Migrator) executeWriteFuncs() error { - if this.migrationContext.Noop { - this.migrationContext.Log.Debugf("Noop operation; not really executing write funcs") +func (mgtr *Migrator) executeWriteFuncs() error { + if mgtr.migrationContext.Noop { + mgtr.migrationContext.Log.Debugf("Noop operation; not really executing write funcs") return nil } for { - if err := this.checkAbort(); err != nil { + if err := mgtr.checkAbort(); err != nil { return err } - if atomic.LoadInt64(&this.finishedMigrating) > 0 { + if atomic.LoadInt64(&mgtr.finishedMigrating) > 0 { return nil } - this.throttler.throttle(nil) + mgtr.throttler.throttle(nil) // We give higher priority to event processing, then secondary priority to // rowcopy select { - case eventStruct := <-this.applyEventsQueue: + case eventStruct := <-mgtr.applyEventsQueue: { - if err := this.onApplyEventStruct(eventStruct); err != nil { + if err := mgtr.onApplyEventStruct(eventStruct); err != nil { return err } } default: { select { - case copyRowsFunc := <-this.copyRowsQueue: + case copyRowsFunc := <-mgtr.copyRowsQueue: { copyRowsStartTime := time.Now() // Retries are handled within the copyRowsFunc if err := copyRowsFunc(); err != nil { - return this.migrationContext.Log.Errore(err) + return mgtr.migrationContext.Log.Errore(err) } - if niceRatio := this.migrationContext.GetNiceRatio(); niceRatio > 0 { + if niceRatio := mgtr.migrationContext.GetNiceRatio(); niceRatio > 0 { copyRowsDuration := time.Since(copyRowsStartTime) sleepTimeNanosecondFloat64 := niceRatio * float64(copyRowsDuration.Nanoseconds()) sleepTime := time.Duration(int64(sleepTimeNanosecondFloat64)) * time.Nanosecond @@ -1838,7 +1838,7 @@ func (this *Migrator) executeWriteFuncs() error { { // Hmmmmm... nothing in the queue; no events, but also no row copy. // This is possible upon load. Let's just sleep it over. - this.migrationContext.Log.Debugf("Getting nothing in the write queue. 
Sleeping...") + mgtr.migrationContext.Log.Debugf("Getting nothing in the write queue. Sleeping...") time.Sleep(time.Second) } } @@ -1847,22 +1847,22 @@ func (this *Migrator) executeWriteFuncs() error { } } -func (this *Migrator) executeDMLWriteFuncs() error { - if this.migrationContext.Noop { - this.migrationContext.Log.Debugf("Noop operation; not really executing DML write funcs") +func (mgtr *Migrator) executeDMLWriteFuncs() error { + if mgtr.migrationContext.Noop { + mgtr.migrationContext.Log.Debugf("Noop operation; not really executing DML write funcs") return nil } for { - if atomic.LoadInt64(&this.finishedMigrating) > 0 { + if atomic.LoadInt64(&mgtr.finishedMigrating) > 0 { return nil } - this.throttler.throttle(nil) + mgtr.throttler.throttle(nil) select { - case eventStruct := <-this.applyEventsQueue: + case eventStruct := <-mgtr.applyEventsQueue: { - if err := this.onApplyEventStruct(eventStruct); err != nil { + if err := mgtr.onApplyEventStruct(eventStruct); err != nil { return err } } @@ -1873,46 +1873,46 @@ func (this *Migrator) executeDMLWriteFuncs() error { } // finalCleanup takes actions at very end of migration, dropping tables etc. 
-func (this *Migrator) finalCleanup() error { - atomic.StoreInt64(&this.migrationContext.CleanupImminentFlag, 1) +func (mgtr *Migrator) finalCleanup() error { + atomic.StoreInt64(&mgtr.migrationContext.CleanupImminentFlag, 1) - this.migrationContext.Log.Infof("Writing changelog state: %+v", Migrated) - if _, err := this.applier.WriteChangelogState(string(Migrated)); err != nil { + mgtr.migrationContext.Log.Infof("Writing changelog state: %+v", Migrated) + if _, err := mgtr.applier.WriteChangelogState(string(Migrated)); err != nil { return err } - if this.migrationContext.Noop { - if createTableStatement, err := this.inspector.showCreateTable(this.migrationContext.GetGhostTableName()); err == nil { - this.migrationContext.Log.Infof("New table structure follows") + if mgtr.migrationContext.Noop { + if createTableStatement, err := mgtr.inspector.showCreateTable(mgtr.migrationContext.GetGhostTableName()); err == nil { + mgtr.migrationContext.Log.Infof("New table structure follows") fmt.Println(createTableStatement) } else { - this.migrationContext.Log.Errore(err) + mgtr.migrationContext.Log.Errore(err) } } - if err := this.eventsStreamer.Close(); err != nil { - this.migrationContext.Log.Errore(err) + if err := mgtr.eventsStreamer.Close(); err != nil { + mgtr.migrationContext.Log.Errore(err) } - if err := this.retryOperation(this.applier.DropChangelogTable); err != nil { + if err := mgtr.retryOperation(mgtr.applier.DropChangelogTable); err != nil { return err } - if this.migrationContext.OkToDropTable && !this.migrationContext.TestOnReplica { - if err := this.retryOperation(this.applier.DropOldTable); err != nil { + if mgtr.migrationContext.OkToDropTable && !mgtr.migrationContext.TestOnReplica { + if err := mgtr.retryOperation(mgtr.applier.DropOldTable); err != nil { return err } - if err := this.retryOperation(this.applier.DropCheckpointTable); err != nil { + if err := mgtr.retryOperation(mgtr.applier.DropCheckpointTable); err != nil { return err } - } else if 
!this.migrationContext.Noop { - this.migrationContext.Log.Infof("Am not dropping old table because I want this operation to be as live as possible. If you insist I should do it, please add `--ok-to-drop-table` next time. But I prefer you do not. To drop the old table, issue:") - this.migrationContext.Log.Infof("-- drop table %s.%s", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.GetOldTableName())) - if this.migrationContext.Checkpoint { - this.migrationContext.Log.Infof("Am not dropping checkpoint table without `--ok-to-drop-table`. To drop the checkpoint table, issue:") - this.migrationContext.Log.Infof("-- drop table %s.%s", sql.EscapeName(this.migrationContext.DatabaseName), sql.EscapeName(this.migrationContext.GetCheckpointTableName())) + } else if !mgtr.migrationContext.Noop { + mgtr.migrationContext.Log.Infof("Am not dropping old table because I want this operation to be as live as possible. If you insist I should do it, please add `--ok-to-drop-table` next time. But I prefer you do not. To drop the old table, issue:") + mgtr.migrationContext.Log.Infof("-- drop table %s.%s", sql.EscapeName(mgtr.migrationContext.DatabaseName), sql.EscapeName(mgtr.migrationContext.GetOldTableName())) + if mgtr.migrationContext.Checkpoint { + mgtr.migrationContext.Log.Infof("Am not dropping checkpoint table without `--ok-to-drop-table`. 
To drop the checkpoint table, issue:") + mgtr.migrationContext.Log.Infof("-- drop table %s.%s", sql.EscapeName(mgtr.migrationContext.DatabaseName), sql.EscapeName(mgtr.migrationContext.GetCheckpointTableName())) } } - if this.migrationContext.Noop { - if err := this.retryOperation(this.applier.DropGhostTable); err != nil { + if mgtr.migrationContext.Noop { + if err := mgtr.retryOperation(mgtr.applier.DropGhostTable); err != nil { return err } } @@ -1920,26 +1920,26 @@ func (this *Migrator) finalCleanup() error { return nil } -func (this *Migrator) teardown() { - atomic.StoreInt64(&this.finishedMigrating, 1) +func (mgtr *Migrator) teardown() { + atomic.StoreInt64(&mgtr.finishedMigrating, 1) - if this.inspector != nil { - this.migrationContext.Log.Infof("Tearing down inspector") - this.inspector.Teardown() + if mgtr.inspector != nil { + mgtr.migrationContext.Log.Infof("Tearing down inspector") + mgtr.inspector.Teardown() } - if this.applier != nil { - this.migrationContext.Log.Infof("Tearing down applier") - this.applier.Teardown() + if mgtr.applier != nil { + mgtr.migrationContext.Log.Infof("Tearing down applier") + mgtr.applier.Teardown() } - if this.eventsStreamer != nil { - this.migrationContext.Log.Infof("Tearing down streamer") - this.eventsStreamer.Teardown() + if mgtr.eventsStreamer != nil { + mgtr.migrationContext.Log.Infof("Tearing down streamer") + mgtr.eventsStreamer.Teardown() } - if this.throttler != nil { - this.migrationContext.Log.Infof("Tearing down throttler") - this.throttler.Teardown() + if mgtr.throttler != nil { + mgtr.migrationContext.Log.Infof("Tearing down throttler") + mgtr.throttler.Teardown() } } diff --git a/go/logic/migrator_test.go b/go/logic/migrator_test.go index df0d21733..0b05d58a3 100644 --- a/go/logic/migrator_test.go +++ b/go/logic/migrator_test.go @@ -426,7 +426,6 @@ func (suite *MigratorTestSuite) TestMigrateEmpty() { // Verify the new column was added var tableName, createTableSQL string - //nolint:execinquery err = 
suite.db.QueryRow("SHOW CREATE TABLE "+getTestTableName()).Scan(&tableName, &createTableSQL) suite.Require().NoError(err) @@ -434,13 +433,11 @@ func (suite *MigratorTestSuite) TestMigrateEmpty() { suite.Require().Equal("CREATE TABLE `testing` (\n `id` int NOT NULL,\n `name` varchar(64) DEFAULT NULL,\n `foobar` varchar(255) DEFAULT NULL,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", createTableSQL) // Verify the changelog table was claned up - //nolint:execinquery err = suite.db.QueryRow("SHOW TABLES IN test LIKE '_testing_ghc'").Scan(&tableName) suite.Require().Error(err) suite.Require().Equal(gosql.ErrNoRows, err) // Verify the old table was renamed - //nolint:execinquery err = suite.db.QueryRow("SHOW TABLES IN test LIKE '_testing_del'").Scan(&tableName) suite.Require().NoError(err) suite.Require().Equal("_testing_del", tableName) @@ -958,7 +955,6 @@ func (suite *MigratorTestSuite) TestCutOverLossDataCaseLockGhostBeforeRename() { suite.Require().LessOrEqual(delValue, OriginalValue) var tableName, createTableSQL string - //nolint:execinquery err = suite.db.QueryRow("SHOW CREATE TABLE "+getTestTableName()).Scan(&tableName, &createTableSQL) suite.Require().NoError(err) diff --git a/go/logic/server.go b/go/logic/server.go index 74097acb7..7fe171f6e 100644 --- a/go/logic/server.go +++ b/go/logic/server.go @@ -51,7 +51,7 @@ func NewServer(migrationContext *base.MigrationContext, hooksExecutor *HooksExec } } -func (this *Server) runCPUProfile(args string) (io.Reader, error) { +func (srv *Server) runCPUProfile(args string) (io.Reader, error) { duration := defaultCPUProfileDuration var err error @@ -74,11 +74,11 @@ func (this *Server) runCPUProfile(args string) (io.Reader, error) { } } - if atomic.LoadInt64(&this.isCPUProfiling) > 0 { + if atomic.LoadInt64(&srv.isCPUProfiling) > 0 { return nil, ErrCPUProfilingInProgress } - atomic.StoreInt64(&this.isCPUProfiling, 1) - defer atomic.StoreInt64(&this.isCPUProfiling, 0) + 
atomic.StoreInt64(&srv.isCPUProfiling, 1) + defer atomic.StoreInt64(&srv.isCPUProfiling, 0) var buf bytes.Buffer var writer io.Writer = &buf @@ -95,80 +95,80 @@ func (this *Server) runCPUProfile(args string) (io.Reader, error) { time.Sleep(duration) pprof.StopCPUProfile() - this.migrationContext.Log.Infof("Captured %d byte runtime/pprof CPU profile (gzip=%v)", buf.Len(), useGzip) + srv.migrationContext.Log.Infof("Captured %d byte runtime/pprof CPU profile (gzip=%v)", buf.Len(), useGzip) return &buf, nil } -func (this *Server) createPostponeCutOverFlagFile(filePath string) (err error) { +func (srv *Server) createPostponeCutOverFlagFile(filePath string) (err error) { if !base.FileExists(filePath) { if err := base.TouchFile(filePath); err != nil { - return fmt.Errorf("Failed to create postpone cut-over flag file %s: %w", filePath, err) + return fmt.Errorf("failed to create postpone cut-over flag file %s: %w", filePath, err) } - this.migrationContext.Log.Infof("Created postpone-cut-over-flag-file: %s", filePath) + srv.migrationContext.Log.Infof("Created postpone-cut-over-flag-file: %s", filePath) } return nil } -func (this *Server) BindSocketFile() (err error) { - if this.migrationContext.ServeSocketFile == "" { +func (srv *Server) BindSocketFile() (err error) { + if srv.migrationContext.ServeSocketFile == "" { return nil } - if this.migrationContext.DropServeSocket && base.FileExists(this.migrationContext.ServeSocketFile) { - os.Remove(this.migrationContext.ServeSocketFile) + if srv.migrationContext.DropServeSocket && base.FileExists(srv.migrationContext.ServeSocketFile) { + os.Remove(srv.migrationContext.ServeSocketFile) } - this.unixListener, err = net.Listen("unix", this.migrationContext.ServeSocketFile) + srv.unixListener, err = net.Listen("unix", srv.migrationContext.ServeSocketFile) if err != nil { return err } - this.migrationContext.Log.Infof("Listening on unix socket file: %s", this.migrationContext.ServeSocketFile) + srv.migrationContext.Log.Infof("Listening 
on unix socket file: %s", srv.migrationContext.ServeSocketFile) return nil } -func (this *Server) RemoveSocketFile() (err error) { - this.migrationContext.Log.Infof("Removing socket file: %s", this.migrationContext.ServeSocketFile) - return os.Remove(this.migrationContext.ServeSocketFile) +func (srv *Server) RemoveSocketFile() (err error) { + srv.migrationContext.Log.Infof("Removing socket file: %s", srv.migrationContext.ServeSocketFile) + return os.Remove(srv.migrationContext.ServeSocketFile) } -func (this *Server) BindTCPPort() (err error) { - if this.migrationContext.ServeTCPPort == 0 { +func (srv *Server) BindTCPPort() (err error) { + if srv.migrationContext.ServeTCPPort == 0 { return nil } - this.tcpListener, err = net.Listen("tcp", fmt.Sprintf(":%d", this.migrationContext.ServeTCPPort)) + srv.tcpListener, err = net.Listen("tcp", fmt.Sprintf(":%d", srv.migrationContext.ServeTCPPort)) if err != nil { return err } - this.migrationContext.Log.Infof("Listening on tcp port: %d", this.migrationContext.ServeTCPPort) + srv.migrationContext.Log.Infof("Listening on tcp port: %d", srv.migrationContext.ServeTCPPort) return nil } // Serve begins listening & serving on whichever device was configured -func (this *Server) Serve() (err error) { +func (srv *Server) Serve() (err error) { go func() { for { - conn, err := this.unixListener.Accept() + conn, err := srv.unixListener.Accept() if err != nil { - this.migrationContext.Log.Errore(err) + srv.migrationContext.Log.Errore(err) } - go this.handleConnection(conn) + go srv.handleConnection(conn) } }() go func() { - if this.tcpListener == nil { + if srv.tcpListener == nil { return } for { - conn, err := this.tcpListener.Accept() + conn, err := srv.tcpListener.Accept() if err != nil { - this.migrationContext.Log.Errore(err) + srv.migrationContext.Log.Errore(err) } - go this.handleConnection(conn) + go srv.handleConnection(conn) } }() return nil } -func (this *Server) handleConnection(conn net.Conn) (err error) { +func (srv 
*Server) handleConnection(conn net.Conn) (err error) { if conn != nil { defer conn.Close() } @@ -176,24 +176,24 @@ func (this *Server) handleConnection(conn net.Conn) (err error) { if err != nil { return err } - return this.onServerCommand(string(command), bufio.NewWriter(conn)) + return srv.onServerCommand(string(command), bufio.NewWriter(conn)) } // onServerCommand responds to a user's interactive command -func (this *Server) onServerCommand(command string, writer *bufio.Writer) (err error) { +func (srv *Server) onServerCommand(command string, writer *bufio.Writer) (err error) { defer writer.Flush() - printStatusRule, err := this.applyServerCommand(command, writer) + printStatusRule, err := srv.applyServerCommand(command, writer) if err == nil { - this.printStatus(printStatusRule, writer) + srv.printStatus(printStatusRule, writer) } else { fmt.Fprintf(writer, "%s\n", err.Error()) } - return this.migrationContext.Log.Errore(err) + return srv.migrationContext.Log.Errore(err) } // applyServerCommand parses and executes commands by user -func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (printStatusRule PrintStatusRule, err error) { +func (srv *Server) applyServerCommand(command string, writer *bufio.Writer) (printStatusRule PrintStatusRule, err error) { tokens := strings.SplitN(command, "=", 2) command = strings.TrimSpace(tokens[0]) arg := "" @@ -206,7 +206,7 @@ func (this *Server) applyServerCommand(command string, writer *bufio.Writer) (pr argIsQuestion := (arg == "?") throttleHint := "# Note: you may only throttle for as long as your binary logs are not purged" - if err := this.hooksExecutor.onInteractiveCommand(command); err != nil { + if err := srv.hooksExecutor.onInteractiveCommand(command); err != nil { return NoPrintStatusRule, err } @@ -244,7 +244,7 @@ help # This message case "info", "status": return ForcePrintStatusAndHintRule, nil case "cpu-profile": - cpuProfile, err := this.runCPUProfile(arg) + cpuProfile, err := 
srv.runCPUProfile(arg) if err == nil { fmt.Fprint(base64.NewEncoder(base64.StdEncoding, writer), cpuProfile) } @@ -252,63 +252,63 @@ help # This message case "coordinates": { if argIsQuestion || arg == "" { - fmt.Fprintf(writer, "%+v\n", this.migrationContext.GetRecentBinlogCoordinates()) + fmt.Fprintf(writer, "%+v\n", srv.migrationContext.GetRecentBinlogCoordinates()) return NoPrintStatusRule, nil } return NoPrintStatusRule, fmt.Errorf("coordinates are read-only") } case "applier": - if this.migrationContext.ApplierConnectionConfig != nil && this.migrationContext.ApplierConnectionConfig.ImpliedKey != nil { + if srv.migrationContext.ApplierConnectionConfig != nil && srv.migrationContext.ApplierConnectionConfig.ImpliedKey != nil { fmt.Fprintf(writer, "Host: %s, Version: %s\n", - this.migrationContext.ApplierConnectionConfig.ImpliedKey.String(), - this.migrationContext.ApplierMySQLVersion, + srv.migrationContext.ApplierConnectionConfig.ImpliedKey.String(), + srv.migrationContext.ApplierMySQLVersion, ) } return NoPrintStatusRule, nil case "inspector": - if this.migrationContext.InspectorConnectionConfig != nil && this.migrationContext.InspectorConnectionConfig.ImpliedKey != nil { + if srv.migrationContext.InspectorConnectionConfig != nil && srv.migrationContext.InspectorConnectionConfig.ImpliedKey != nil { fmt.Fprintf(writer, "Host: %s, Version: %s\n", - this.migrationContext.InspectorConnectionConfig.ImpliedKey.String(), - this.migrationContext.InspectorMySQLVersion, + srv.migrationContext.InspectorConnectionConfig.ImpliedKey.String(), + srv.migrationContext.InspectorMySQLVersion, ) } return NoPrintStatusRule, nil case "chunk-size": { if argIsQuestion { - fmt.Fprintf(writer, "%+v\n", atomic.LoadInt64(&this.migrationContext.ChunkSize)) + fmt.Fprintf(writer, "%+v\n", atomic.LoadInt64(&srv.migrationContext.ChunkSize)) return NoPrintStatusRule, nil } if chunkSize, err := strconv.Atoi(arg); err != nil { return NoPrintStatusRule, err } else { - 
this.migrationContext.SetChunkSize(int64(chunkSize)) + srv.migrationContext.SetChunkSize(int64(chunkSize)) return ForcePrintStatusAndHintRule, nil } } case "dml-batch-size": { if argIsQuestion { - fmt.Fprintf(writer, "%+v\n", atomic.LoadInt64(&this.migrationContext.DMLBatchSize)) + fmt.Fprintf(writer, "%+v\n", atomic.LoadInt64(&srv.migrationContext.DMLBatchSize)) return NoPrintStatusRule, nil } if dmlBatchSize, err := strconv.Atoi(arg); err != nil { return NoPrintStatusRule, err } else { - this.migrationContext.SetDMLBatchSize(int64(dmlBatchSize)) + srv.migrationContext.SetDMLBatchSize(int64(dmlBatchSize)) return ForcePrintStatusAndHintRule, nil } } case "max-lag-millis": { if argIsQuestion { - fmt.Fprintf(writer, "%+v\n", atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold)) + fmt.Fprintf(writer, "%+v\n", atomic.LoadInt64(&srv.migrationContext.MaxLagMillisecondsThrottleThreshold)) return NoPrintStatusRule, nil } if maxLagMillis, err := strconv.Atoi(arg); err != nil { return NoPrintStatusRule, err } else { - this.migrationContext.SetMaxLagMillisecondsThrottleThreshold(int64(maxLagMillis)) + srv.migrationContext.SetMaxLagMillisecondsThrottleThreshold(int64(maxLagMillis)) return ForcePrintStatusAndHintRule, nil } } @@ -319,24 +319,24 @@ help # This message case "nice-ratio": { if argIsQuestion { - fmt.Fprintf(writer, "%+v\n", this.migrationContext.GetNiceRatio()) + fmt.Fprintf(writer, "%+v\n", srv.migrationContext.GetNiceRatio()) return NoPrintStatusRule, nil } if niceRatio, err := strconv.ParseFloat(arg, 64); err != nil { return NoPrintStatusRule, err } else { - this.migrationContext.SetNiceRatio(niceRatio) + srv.migrationContext.SetNiceRatio(niceRatio) return ForcePrintStatusAndHintRule, nil } } case "max-load": { if argIsQuestion { - maxLoad := this.migrationContext.GetMaxLoad() + maxLoad := srv.migrationContext.GetMaxLoad() fmt.Fprintf(writer, "%s\n", maxLoad.String()) return NoPrintStatusRule, nil } - if err := 
this.migrationContext.ReadMaxLoad(arg); err != nil { + if err := srv.migrationContext.ReadMaxLoad(arg); err != nil { return NoPrintStatusRule, err } return ForcePrintStatusAndHintRule, nil @@ -344,11 +344,11 @@ help # This message case "critical-load": { if argIsQuestion { - criticalLoad := this.migrationContext.GetCriticalLoad() + criticalLoad := srv.migrationContext.GetCriticalLoad() fmt.Fprintf(writer, "%s\n", criticalLoad.String()) return NoPrintStatusRule, nil } - if err := this.migrationContext.ReadCriticalLoad(arg); err != nil { + if err := srv.migrationContext.ReadCriticalLoad(arg); err != nil { return NoPrintStatusRule, err } return ForcePrintStatusAndHintRule, nil @@ -356,106 +356,106 @@ help # This message case "throttle-query": { if argIsQuestion { - fmt.Fprintf(writer, "%+v\n", this.migrationContext.GetThrottleQuery()) + fmt.Fprintf(writer, "%+v\n", srv.migrationContext.GetThrottleQuery()) return NoPrintStatusRule, nil } - this.migrationContext.SetThrottleQuery(arg) + srv.migrationContext.SetThrottleQuery(arg) fmt.Fprintln(writer, throttleHint) return ForcePrintStatusAndHintRule, nil } case "throttle-http": { if argIsQuestion { - fmt.Fprintf(writer, "%+v\n", this.migrationContext.GetThrottleHTTP()) + fmt.Fprintf(writer, "%+v\n", srv.migrationContext.GetThrottleHTTP()) return NoPrintStatusRule, nil } - this.migrationContext.SetThrottleHTTP(arg) + srv.migrationContext.SetThrottleHTTP(arg) fmt.Fprintln(writer, throttleHint) return ForcePrintStatusAndHintRule, nil } case "throttle-control-replicas": { if argIsQuestion { - fmt.Fprintf(writer, "%s\n", this.migrationContext.GetThrottleControlReplicaKeys().ToCommaDelimitedList()) + fmt.Fprintf(writer, "%s\n", srv.migrationContext.GetThrottleControlReplicaKeys().ToCommaDelimitedList()) return NoPrintStatusRule, nil } - if err := this.migrationContext.ReadThrottleControlReplicaKeys(arg); err != nil { + if err := srv.migrationContext.ReadThrottleControlReplicaKeys(arg); err != nil { return NoPrintStatusRule, err 
} - fmt.Fprintf(writer, "%s\n", this.migrationContext.GetThrottleControlReplicaKeys().ToCommaDelimitedList()) + fmt.Fprintf(writer, "%s\n", srv.migrationContext.GetThrottleControlReplicaKeys().ToCommaDelimitedList()) return ForcePrintStatusAndHintRule, nil } case "throttle", "pause", "suspend": { - if arg != "" && arg != this.migrationContext.OriginalTableName { + if arg != "" && arg != srv.migrationContext.OriginalTableName { // User explicitly provided table name. This is a courtesy protection mechanism - err := fmt.Errorf("User commanded 'throttle' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName) + err := fmt.Errorf("user commanded 'throttle' on %s, but migrated table is %s; ignoring request", arg, srv.migrationContext.OriginalTableName) return NoPrintStatusRule, err } - atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 1) + atomic.StoreInt64(&srv.migrationContext.ThrottleCommandedByUser, 1) fmt.Fprintln(writer, throttleHint) return ForcePrintStatusAndHintRule, nil } case "no-throttle", "unthrottle", "resume", "continue": { - if arg != "" && arg != this.migrationContext.OriginalTableName { + if arg != "" && arg != srv.migrationContext.OriginalTableName { // User explicitly provided table name. 
This is a courtesy protection mechanism - err := fmt.Errorf("User commanded 'no-throttle' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName) + err := fmt.Errorf("user commanded 'no-throttle' on %s, but migrated table is %s; ignoring request", arg, srv.migrationContext.OriginalTableName) return NoPrintStatusRule, err } - atomic.StoreInt64(&this.migrationContext.ThrottleCommandedByUser, 0) + atomic.StoreInt64(&srv.migrationContext.ThrottleCommandedByUser, 0) return ForcePrintStatusAndHintRule, nil } case "postpone-cut-over-flag-file": { if arg == "" { - err := fmt.Errorf("User commanded 'postpone-cut-over-flag-file' without specifying file path") + err := fmt.Errorf("user commanded 'postpone-cut-over-flag-file' without specifying file path") return NoPrintStatusRule, err } - if err := this.createPostponeCutOverFlagFile(arg); err != nil { + if err := srv.createPostponeCutOverFlagFile(arg); err != nil { return NoPrintStatusRule, err } - this.migrationContext.PostponeCutOverFlagFile = arg + srv.migrationContext.PostponeCutOverFlagFile = arg fmt.Fprintf(writer, "Postponed\n") return ForcePrintStatusAndHintRule, nil } case "unpostpone", "no-postpone", "cut-over": { - if arg == "" && this.migrationContext.ForceNamedCutOverCommand { - err := fmt.Errorf("User commanded 'unpostpone' without specifying table name, but --force-named-cut-over is set") + if arg == "" && srv.migrationContext.ForceNamedCutOverCommand { + err := fmt.Errorf("user commanded 'unpostpone' without specifying table name, but --force-named-cut-over is set") return NoPrintStatusRule, err } - if arg != "" && arg != this.migrationContext.OriginalTableName { + if arg != "" && arg != srv.migrationContext.OriginalTableName { // User explicitly provided table name. 
This is a courtesy protection mechanism - err := fmt.Errorf("User commanded 'unpostpone' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName) + err := fmt.Errorf("user commanded 'unpostpone' on %s, but migrated table is %s; ignoring request", arg, srv.migrationContext.OriginalTableName) return NoPrintStatusRule, err } - if atomic.LoadInt64(&this.migrationContext.IsPostponingCutOver) > 0 { - atomic.StoreInt64(&this.migrationContext.UserCommandedUnpostponeFlag, 1) + if atomic.LoadInt64(&srv.migrationContext.IsPostponingCutOver) > 0 { + atomic.StoreInt64(&srv.migrationContext.UserCommandedUnpostponeFlag, 1) fmt.Fprintf(writer, "Unpostponed\n") return ForcePrintStatusAndHintRule, nil } - fmt.Fprintf(writer, "You may only invoke this when gh-ost is actively postponing migration. At this time it is not.\n") + fmt.Fprintf(writer, "You may only invoke this when gh-ost is actively postponing migration. At this time it is not\n") return NoPrintStatusRule, nil } case "panic": { - if arg == "" && this.migrationContext.ForceNamedPanicCommand { - err := fmt.Errorf("User commanded 'panic' without specifying table name, but --force-named-panic is set") + if arg == "" && srv.migrationContext.ForceNamedPanicCommand { + err := fmt.Errorf("user commanded 'panic' without specifying table name, but --force-named-panic is set") return NoPrintStatusRule, err } - if arg != "" && arg != this.migrationContext.OriginalTableName { + if arg != "" && arg != srv.migrationContext.OriginalTableName { // User explicitly provided table name. This is a courtesy protection mechanism - err := fmt.Errorf("User commanded 'panic' on %s, but migrated table is %s; ignoring request.", arg, this.migrationContext.OriginalTableName) + err := fmt.Errorf("user commanded 'panic' on %s, but migrated table is %s; ignoring request", arg, srv.migrationContext.OriginalTableName) return NoPrintStatusRule, err } - err := fmt.Errorf("User commanded 'panic'. 
The migration will be aborted without cleanup. Please drop the gh-ost tables before trying again.") + err := fmt.Errorf("user commanded 'panic'. The migration will be aborted without cleanup. Please drop the gh-ost tables before trying again") // Use helper to prevent deadlock if listenOnPanicAbort already exited - _ = base.SendWithContext(this.migrationContext.GetContext(), this.migrationContext.PanicAbort, err) + _ = base.SendWithContext(srv.migrationContext.GetContext(), srv.migrationContext.PanicAbort, err) return NoPrintStatusRule, err } default: - err = fmt.Errorf("Unknown command: %s", command) + err = fmt.Errorf("unknown command: %s", command) return NoPrintStatusRule, err } return NoPrintStatusRule, nil diff --git a/go/logic/streamer.go b/go/logic/streamer.go index 1c2635138..ecb936069 100644 --- a/go/logic/streamer.go +++ b/go/logic/streamer.go @@ -60,16 +60,16 @@ func NewEventsStreamer(migrationContext *base.MigrationContext) *EventsStreamer } // AddListener registers a new listener for binlog events, on a per-table basis -func (this *EventsStreamer) AddListener( +func (es *EventsStreamer) AddListener( async bool, databaseName string, tableName string, onDmlEvent func(event *binlog.BinlogEntry) error) (err error) { - this.listenersMutex.Lock() - defer this.listenersMutex.Unlock() + es.listenersMutex.Lock() + defer es.listenersMutex.Unlock() if databaseName == "" { - return fmt.Errorf("Empty database name in AddListener") + return fmt.Errorf("empty database name in AddListener") } if tableName == "" { - return fmt.Errorf("Empty table name in AddListener") + return fmt.Errorf("empty table name in AddListener") } listener := &BinlogEventListener{ async: async, @@ -77,17 +77,17 @@ func (this *EventsStreamer) AddListener( tableName: tableName, onDmlEvent: onDmlEvent, } - this.listeners = append(this.listeners, listener) + es.listeners = append(es.listeners, listener) return nil } // notifyListeners will notify relevant listeners with given DML event. 
Only // listeners registered for changes on the table on which the DML operates are notified. -func (this *EventsStreamer) notifyListeners(binlogEntry *binlog.BinlogEntry) { - this.listenersMutex.Lock() - defer this.listenersMutex.Unlock() +func (es *EventsStreamer) notifyListeners(binlogEntry *binlog.BinlogEntry) { + es.listenersMutex.Lock() + defer es.listenersMutex.Unlock() - for _, listener := range this.listeners { + for _, listener := range es.listeners { listener := listener if !strings.EqualFold(listener.databaseName, binlogEntry.DmlEvent.DatabaseName) { continue @@ -105,22 +105,22 @@ func (this *EventsStreamer) notifyListeners(binlogEntry *binlog.BinlogEntry) { } } -func (this *EventsStreamer) InitDBConnections() (err error) { - EventsStreamerUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName) - if this.db, _, err = mysql.GetDB(this.migrationContext.Uuid, EventsStreamerUri); err != nil { +func (es *EventsStreamer) InitDBConnections() (err error) { + EventsStreamerUri := es.connectionConfig.GetDBUri(es.migrationContext.DatabaseName) + if es.db, _, err = mysql.GetDB(es.migrationContext.Uuid, EventsStreamerUri); err != nil { return err } - version, err := base.ValidateConnection(this.db, this.connectionConfig, this.migrationContext, this.name) + version, err := base.ValidateConnection(es.db, es.connectionConfig, es.migrationContext, es.name) if err != nil { return err } - this.dbVersion = version - if this.initialBinlogCoordinates == nil || this.initialBinlogCoordinates.IsEmpty() { - if err := this.readCurrentBinlogCoordinates(); err != nil { + es.dbVersion = version + if es.initialBinlogCoordinates == nil || es.initialBinlogCoordinates.IsEmpty() { + if err := es.readCurrentBinlogCoordinates(); err != nil { return err } } - if err := this.initBinlogReader(this.initialBinlogCoordinates); err != nil { + if err := es.initBinlogReader(es.initialBinlogCoordinates); err != nil { return err } @@ -128,34 +128,34 @@ func (this *EventsStreamer) 
InitDBConnections() (err error) { } // initBinlogReader creates and connects the reader: we hook up to a MySQL server as a replica -func (this *EventsStreamer) initBinlogReader(binlogCoordinates mysql.BinlogCoordinates) error { - goMySQLReader := binlog.NewGoMySQLReader(this.migrationContext) +func (es *EventsStreamer) initBinlogReader(binlogCoordinates mysql.BinlogCoordinates) error { + goMySQLReader := binlog.NewGoMySQLReader(es.migrationContext) if err := goMySQLReader.ConnectBinlogStreamer(binlogCoordinates); err != nil { return err } - this.binlogReader = goMySQLReader + es.binlogReader = goMySQLReader return nil } -func (this *EventsStreamer) GetCurrentBinlogCoordinates() mysql.BinlogCoordinates { - return this.binlogReader.GetCurrentBinlogCoordinates() +func (es *EventsStreamer) GetCurrentBinlogCoordinates() mysql.BinlogCoordinates { + return es.binlogReader.GetCurrentBinlogCoordinates() } // readCurrentBinlogCoordinates reads master status from hooked server -func (this *EventsStreamer) readCurrentBinlogCoordinates() error { - binaryLogStatusTerm := mysql.ReplicaTermFor(this.dbVersion, "master status") +func (es *EventsStreamer) readCurrentBinlogCoordinates() error { + binaryLogStatusTerm := mysql.ReplicaTermFor(es.dbVersion, "master status") query := fmt.Sprintf("show /* gh-ost readCurrentBinlogCoordinates */ %s", binaryLogStatusTerm) foundMasterStatus := false - err := sqlutils.QueryRowsMap(this.db, query, func(m sqlutils.RowMap) error { - if this.migrationContext.UseGTIDs { + err := sqlutils.QueryRowsMap(es.db, query, func(m sqlutils.RowMap) error { + if es.migrationContext.UseGTIDs { execGtidSet := m.GetString("Executed_Gtid_Set") gtidSet, err := gomysql.ParseMysqlGTIDSet(execGtidSet) if err != nil { return err } - this.initialBinlogCoordinates = &mysql.GTIDBinlogCoordinates{GTIDSet: gtidSet.(*gomysql.MysqlGTIDSet)} + es.initialBinlogCoordinates = &mysql.GTIDBinlogCoordinates{GTIDSet: gtidSet.(*gomysql.MysqlGTIDSet)} } else { - 
this.initialBinlogCoordinates = &mysql.FileBinlogCoordinates{ + es.initialBinlogCoordinates = &mysql.FileBinlogCoordinates{ LogFile: m.GetString("File"), LogPos: m.GetInt64("Position"), } @@ -167,26 +167,26 @@ func (this *EventsStreamer) readCurrentBinlogCoordinates() error { return err } if !foundMasterStatus { - return fmt.Errorf("Got no results from SHOW %s. Bailing out", strings.ToUpper(binaryLogStatusTerm)) + return fmt.Errorf("got no results from SHOW %s. Bailing out", strings.ToUpper(binaryLogStatusTerm)) } - this.migrationContext.Log.Debugf("Streamer binlog coordinates: %+v", this.initialBinlogCoordinates) + es.migrationContext.Log.Debugf("Streamer binlog coordinates: %+v", es.initialBinlogCoordinates) return nil } // StreamEvents will begin streaming events. It will be blocking, so should be // executed by a goroutine -func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error { +func (es *EventsStreamer) StreamEvents(canStopStreaming func() bool) error { go func() { - for binlogEntry := range this.eventsChannel { + for binlogEntry := range es.eventsChannel { if binlogEntry.DmlEvent != nil { - this.notifyListeners(binlogEntry) + es.notifyListeners(binlogEntry) } } }() // The next should block and execute forever, unless there's a serious error. var successiveFailures int var reconnectCoords mysql.BinlogCoordinates - ctx := this.migrationContext.GetContext() + ctx := es.migrationContext.GetContext() for { // Check for context cancellation each iteration if err := ctx.Err(); err != nil { @@ -198,47 +198,47 @@ func (this *EventsStreamer) StreamEvents(canStopStreaming func() bool) error { // We will reconnect the binlog streamer at the coordinates // of the last trx that was read completely from the streamer. // Since row event application is idempotent, it's OK if we reapply some events. 
- if err := this.binlogReader.StreamEvents(canStopStreaming, this.eventsChannel); err != nil { + if err := es.binlogReader.StreamEvents(canStopStreaming, es.eventsChannel); err != nil { if canStopStreaming() { return nil } - this.migrationContext.Log.Infof("StreamEvents encountered unexpected error: %+v", err) - this.migrationContext.MarkPointOfInterest() + es.migrationContext.Log.Infof("StreamEvents encountered unexpected error: %+v", err) + es.migrationContext.MarkPointOfInterest() time.Sleep(ReconnectStreamerSleepSeconds * time.Second) // See if there's retry overflow - if this.migrationContext.BinlogSyncerMaxReconnectAttempts > 0 && successiveFailures >= this.migrationContext.BinlogSyncerMaxReconnectAttempts { + if es.migrationContext.BinlogSyncerMaxReconnectAttempts > 0 && successiveFailures >= es.migrationContext.BinlogSyncerMaxReconnectAttempts { return fmt.Errorf("%d successive failures in streamer reconnect at coordinates %+v", successiveFailures, reconnectCoords) } // Reposition at same coordinates - if this.binlogReader.LastTrxCoords != nil { - reconnectCoords = this.binlogReader.LastTrxCoords.Clone() + if es.binlogReader.LastTrxCoords != nil { + reconnectCoords = es.binlogReader.LastTrxCoords.Clone() } else { - reconnectCoords = this.initialBinlogCoordinates.Clone() + reconnectCoords = es.initialBinlogCoordinates.Clone() } - if !reconnectCoords.SmallerThan(this.GetCurrentBinlogCoordinates()) { + if !reconnectCoords.SmallerThan(es.GetCurrentBinlogCoordinates()) { successiveFailures += 1 } else { successiveFailures = 0 } - this.migrationContext.Log.Infof("Reconnecting EventsStreamer... Will resume at %+v", reconnectCoords) - _ = this.binlogReader.Close() - if err := this.initBinlogReader(reconnectCoords); err != nil { + es.migrationContext.Log.Infof("Reconnecting EventsStreamer... 
Will resume at %+v", reconnectCoords) + _ = es.binlogReader.Close() + if err := es.initBinlogReader(reconnectCoords); err != nil { return err } } } } -func (this *EventsStreamer) Close() (err error) { - err = this.binlogReader.Close() - this.migrationContext.Log.Infof("Closed streamer connection. err=%+v", err) +func (es *EventsStreamer) Close() (err error) { + err = es.binlogReader.Close() + es.migrationContext.Log.Infof("Closed streamer connection. err=%+v", err) return err } -func (this *EventsStreamer) Teardown() { - this.db.Close() +func (es *EventsStreamer) Teardown() { + es.db.Close() } diff --git a/go/logic/streamer_test.go b/go/logic/streamer_test.go index 8e0b57f80..c1b5c5a9b 100644 --- a/go/logic/streamer_test.go +++ b/go/logic/streamer_test.go @@ -2,7 +2,6 @@ package logic import ( "context" - "database/sql" gosql "database/sql" "fmt" "testing" @@ -201,7 +200,6 @@ func (suite *EventsStreamerTestSuite) TestStreamEventsAutomaticallyReconnects() return err } - //nolint:execinquery rows, err := suite.db.Query("SHOW FULL PROCESSLIST") if err != nil { return err @@ -211,7 +209,7 @@ func (suite *EventsStreamerTestSuite) TestStreamEventsAutomaticallyReconnects() connectionIdsToKill := make([]int, 0) var id, stateTime int - var user, host, dbName, command, state, info sql.NullString + var user, host, dbName, command, state, info gosql.NullString for rows.Next() { err = rows.Scan(&id, &user, &host, &dbName, &command, &stateTime, &state, &info) if err != nil { diff --git a/go/logic/throttler.go b/go/logic/throttler.go index 1ca40f957..a5c7bc453 100644 --- a/go/logic/throttler.go +++ b/go/logic/throttler.go @@ -64,9 +64,9 @@ func NewThrottler(migrationContext *base.MigrationContext, applier *Applier, ins } } -func (this *Throttler) throttleHttpMessage(statusCode int) string { +func (thlr *Throttler) throttleHttpMessage(statusCode int) string { statusCodesMap := httpStatusMessages - if throttleHttp := this.migrationContext.GetThrottleHTTP(); 
strings.Contains(throttleHttp, frenoMagicHint) { + if throttleHttp := thlr.migrationContext.GetThrottleHTTP(); strings.Contains(throttleHttp, frenoMagicHint) { statusCodesMap = httpStatusFrenoMessages } if message, ok := statusCodesMap[statusCode]; ok { @@ -78,33 +78,31 @@ func (this *Throttler) throttleHttpMessage(statusCode int) string { // shouldThrottle performs checks to see whether we should currently be throttling. // It merely observes the metrics collected by other components, it does not issue // its own metric collection. -func (this *Throttler) shouldThrottle() (result bool, reason string, reasonHint base.ThrottleReasonHint) { - if hibernateUntil := atomic.LoadInt64(&this.migrationContext.HibernateUntil); hibernateUntil > 0 { +func (thlr *Throttler) shouldThrottle() (result bool, reason string, reasonHint base.ThrottleReasonHint) { + if hibernateUntil := atomic.LoadInt64(&thlr.migrationContext.HibernateUntil); hibernateUntil > 0 { hibernateUntilTime := time.Unix(0, hibernateUntil) return true, fmt.Sprintf("critical-load-hibernate until %+v", hibernateUntilTime), base.NoThrottleReasonHint } - generalCheckResult := this.migrationContext.GetThrottleGeneralCheckResult() + generalCheckResult := thlr.migrationContext.GetThrottleGeneralCheckResult() if generalCheckResult.ShouldThrottle { return generalCheckResult.ShouldThrottle, generalCheckResult.Reason, generalCheckResult.ReasonHint } // HTTP throttle - statusCode := atomic.LoadInt64(&this.migrationContext.ThrottleHTTPStatusCode) + statusCode := atomic.LoadInt64(&thlr.migrationContext.ThrottleHTTPStatusCode) if statusCode != 0 && statusCode != http.StatusOK { - return true, this.throttleHttpMessage(int(statusCode)), base.NoThrottleReasonHint + return true, thlr.throttleHttpMessage(int(statusCode)), base.NoThrottleReasonHint } // Replication lag throttle - maxLagMillisecondsThrottleThreshold := atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold) - lag := 
atomic.LoadInt64(&this.migrationContext.CurrentLag) + maxLagMillisecondsThrottleThreshold := atomic.LoadInt64(&thlr.migrationContext.MaxLagMillisecondsThrottleThreshold) + lag := atomic.LoadInt64(&thlr.migrationContext.CurrentLag) if time.Duration(lag) > time.Duration(maxLagMillisecondsThrottleThreshold)*time.Millisecond { return true, fmt.Sprintf("lag=%fs", time.Duration(lag).Seconds()), base.NoThrottleReasonHint } - checkThrottleControlReplicas := true - if (this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica) && (atomic.LoadInt64(&this.migrationContext.AllEventsUpToLockProcessedInjectedFlag) > 0) { - checkThrottleControlReplicas = false - } + lockInjected := atomic.LoadInt64(&thlr.migrationContext.AllEventsUpToLockProcessedInjectedFlag) > 0 + checkThrottleControlReplicas := !lockInjected || (!thlr.migrationContext.TestOnReplica && !thlr.migrationContext.MigrateOnReplica) if checkThrottleControlReplicas { - lagResult := this.migrationContext.GetControlReplicasLagResult() + lagResult := thlr.migrationContext.GetControlReplicasLagResult() if lagResult.Err != nil { return true, fmt.Sprintf("%+v %+v", lagResult.Key, lagResult.Err), base.NoThrottleReasonHint } @@ -127,39 +125,39 @@ func parseChangelogHeartbeat(heartbeatValue string) (lag time.Duration, err erro } // parseChangelogHeartbeat parses a string timestamp and deduces replication lag -func (this *Throttler) parseChangelogHeartbeat(heartbeatValue string) (err error) { +func (thlr *Throttler) parseChangelogHeartbeat(heartbeatValue string) (err error) { if lag, err := parseChangelogHeartbeat(heartbeatValue); err != nil { - return this.migrationContext.Log.Errore(err) + return thlr.migrationContext.Log.Errore(err) } else { - atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag)) + atomic.StoreInt64(&thlr.migrationContext.CurrentLag, int64(lag)) return nil } } // collectReplicationLag reads the latest changelog heartbeat value -func (this *Throttler) 
collectReplicationLag(firstThrottlingCollected chan<- bool) { +func (thlr *Throttler) collectReplicationLag(firstThrottlingCollected chan<- bool) { collectFunc := func() error { - if atomic.LoadInt64(&this.migrationContext.CleanupImminentFlag) > 0 { + if atomic.LoadInt64(&thlr.migrationContext.CleanupImminentFlag) > 0 { return nil } - if atomic.LoadInt64(&this.migrationContext.HibernateUntil) > 0 { + if atomic.LoadInt64(&thlr.migrationContext.HibernateUntil) > 0 { return nil } - if this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica { + if thlr.migrationContext.TestOnReplica || thlr.migrationContext.MigrateOnReplica { // when running on replica, the heartbeat injection is also done on the replica. // This means we will always get a good heartbeat value. // When running on replica, we should instead check the `SHOW SLAVE STATUS` output. - if lag, err := mysql.GetReplicationLagFromSlaveStatus(this.inspector.dbVersion, this.inspector.informationSchemaDb); err != nil { - return this.migrationContext.Log.Errore(err) + if lag, err := mysql.GetReplicationLagFromSlaveStatus(thlr.inspector.dbVersion, thlr.inspector.informationSchemaDb); err != nil { + return thlr.migrationContext.Log.Errore(err) } else { - atomic.StoreInt64(&this.migrationContext.CurrentLag, int64(lag)) + atomic.StoreInt64(&thlr.migrationContext.CurrentLag, int64(lag)) } } else { - if heartbeatValue, err := this.inspector.readChangelogState("heartbeat"); err != nil { - return this.migrationContext.Log.Errore(err) + if heartbeatValue, err := thlr.inspector.readChangelogState("heartbeat"); err != nil { + return thlr.migrationContext.Log.Errore(err) } else { - this.parseChangelogHeartbeat(heartbeatValue) + thlr.parseChangelogHeartbeat(heartbeatValue) } } return nil @@ -168,10 +166,10 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo collectFunc() firstThrottlingCollected <- true - ticker := 
time.NewTicker(time.Duration(this.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond) + ticker := time.NewTicker(time.Duration(thlr.migrationContext.HeartbeatIntervalMilliseconds) * time.Millisecond) defer ticker.Stop() for range ticker.C { - if atomic.LoadInt64(&this.finishedMigrating) > 0 { + if atomic.LoadInt64(&thlr.finishedMigrating) > 0 { return } go collectFunc() @@ -179,23 +177,23 @@ func (this *Throttler) collectReplicationLag(firstThrottlingCollected chan<- boo } // collectControlReplicasLag polls all the control replicas to get maximum lag value -func (this *Throttler) collectControlReplicasLag() { - if atomic.LoadInt64(&this.migrationContext.HibernateUntil) > 0 { +func (thlr *Throttler) collectControlReplicasLag() { + if atomic.LoadInt64(&thlr.migrationContext.HibernateUntil) > 0 { return } replicationLagQuery := fmt.Sprintf(` select value from %s.%s where hint = 'heartbeat' and id <= 255 `, - sql.EscapeName(this.migrationContext.DatabaseName), - sql.EscapeName(this.migrationContext.GetChangelogTableName()), + sql.EscapeName(thlr.migrationContext.DatabaseName), + sql.EscapeName(thlr.migrationContext.GetChangelogTableName()), ) readReplicaLag := func(connectionConfig *mysql.ConnectionConfig) (lag time.Duration, err error) { dbUri := connectionConfig.GetDBUri("information_schema") var heartbeatValue string - db, _, err := mysql.GetDB(this.migrationContext.Uuid, dbUri) + db, _, err := mysql.GetDB(thlr.migrationContext.Uuid, dbUri) if err != nil { return lag, err } @@ -209,13 +207,13 @@ func (this *Throttler) collectControlReplicasLag() { } readControlReplicasLag := func() (result *mysql.ReplicationLagResult) { - instanceKeyMap := this.migrationContext.GetThrottleControlReplicaKeys() + instanceKeyMap := thlr.migrationContext.GetThrottleControlReplicaKeys() if instanceKeyMap.Len() == 0 { return result } lagResults := make(chan *mysql.ReplicationLagResult, instanceKeyMap.Len()) for replicaKey := range *instanceKeyMap { - connectionConfig := 
this.migrationContext.InspectorConnectionConfig.DuplicateCredentials(replicaKey) + connectionConfig := thlr.migrationContext.InspectorConnectionConfig.DuplicateCredentials(replicaKey) if err := connectionConfig.RegisterTLSConfig(); err != nil { return &mysql.ReplicationLagResult{Err: err} } @@ -240,11 +238,11 @@ func (this *Throttler) collectControlReplicasLag() { } checkControlReplicasLag := func() { - if (this.migrationContext.TestOnReplica || this.migrationContext.MigrateOnReplica) && (atomic.LoadInt64(&this.migrationContext.AllEventsUpToLockProcessedInjectedFlag) > 0) { + if (thlr.migrationContext.TestOnReplica || thlr.migrationContext.MigrateOnReplica) && (atomic.LoadInt64(&thlr.migrationContext.AllEventsUpToLockProcessedInjectedFlag) > 0) { // No need to read lag return } - this.migrationContext.SetControlReplicasLagResult(readControlReplicasLag()) + thlr.migrationContext.SetControlReplicasLagResult(readControlReplicasLag()) } relaxedFactor := 10 @@ -254,14 +252,14 @@ func (this *Throttler) collectControlReplicasLag() { ticker := time.NewTicker(100 * time.Millisecond) defer ticker.Stop() for range ticker.C { - if atomic.LoadInt64(&this.finishedMigrating) > 0 { + if atomic.LoadInt64(&thlr.finishedMigrating) > 0 { return } if counter%relaxedFactor == 0 { // we only check if we wish to be aggressive once per second. The parameters for being aggressive // do not typically change at all throughout the migration, but nonetheless we check them. 
counter = 0 - maxLagMillisecondsThrottleThreshold := atomic.LoadInt64(&this.migrationContext.MaxLagMillisecondsThrottleThreshold) + maxLagMillisecondsThrottleThreshold := atomic.LoadInt64(&thlr.migrationContext.MaxLagMillisecondsThrottleThreshold) shouldReadLagAggressively = (maxLagMillisecondsThrottleThreshold < 1000) } if counter == 0 || shouldReadLagAggressively { @@ -272,10 +270,10 @@ func (this *Throttler) collectControlReplicasLag() { } } -func (this *Throttler) criticalLoadIsMet() (met bool, variableName string, value int64, threshold int64, err error) { - criticalLoad := this.migrationContext.GetCriticalLoad() +func (thlr *Throttler) criticalLoadIsMet() (met bool, variableName string, value int64, threshold int64, err error) { + criticalLoad := thlr.migrationContext.GetCriticalLoad() for variableName, threshold = range criticalLoad { - value, err = this.applier.ShowStatusVariable(variableName) + value, err = thlr.applier.ShowStatusVariable(variableName) if err != nil { return false, variableName, value, threshold, err } @@ -287,58 +285,58 @@ func (this *Throttler) criticalLoadIsMet() (met bool, variableName string, value } // collectThrottleHTTPStatus reads the latest changelog heartbeat value -func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<- bool) { +func (thlr *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<- bool) { collectFunc := func() (sleep bool, err error) { - if atomic.LoadInt64(&this.migrationContext.HibernateUntil) > 0 { + if atomic.LoadInt64(&thlr.migrationContext.HibernateUntil) > 0 { return true, nil } - url := this.migrationContext.GetThrottleHTTP() + url := thlr.migrationContext.GetThrottleHTTP() if url == "" { return true, nil } - ctx, cancel := context.WithTimeout(context.Background(), this.httpClientTimeout) + ctx, cancel := context.WithTimeout(context.Background(), thlr.httpClientTimeout) defer cancel() req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil) if err != nil 
{ return false, err } - req.Header.Set("User-Agent", fmt.Sprintf("gh-ost/%s", this.appVersion)) + req.Header.Set("User-Agent", fmt.Sprintf("gh-ost/%s", thlr.appVersion)) - resp, err := this.httpClient.Do(req) + resp, err := thlr.httpClient.Do(req) if err != nil { return false, err } defer resp.Body.Close() - atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(resp.StatusCode)) + atomic.StoreInt64(&thlr.migrationContext.ThrottleHTTPStatusCode, int64(resp.StatusCode)) return false, nil } _, err := collectFunc() if err != nil { // If not told to ignore errors, we'll throttle on HTTP connection issues - if !this.migrationContext.IgnoreHTTPErrors { - atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(-1)) + if !thlr.migrationContext.IgnoreHTTPErrors { + atomic.StoreInt64(&thlr.migrationContext.ThrottleHTTPStatusCode, int64(-1)) } } firstThrottlingCollected <- true - collectInterval := time.Duration(this.migrationContext.ThrottleHTTPIntervalMillis) * time.Millisecond + collectInterval := time.Duration(thlr.migrationContext.ThrottleHTTPIntervalMillis) * time.Millisecond ticker := time.NewTicker(collectInterval) defer ticker.Stop() for range ticker.C { - if atomic.LoadInt64(&this.finishedMigrating) > 0 { + if atomic.LoadInt64(&thlr.finishedMigrating) > 0 { return } sleep, err := collectFunc() if err != nil { // If not told to ignore errors, we'll throttle on HTTP connection issues - if !this.migrationContext.IgnoreHTTPErrors { - atomic.StoreInt64(&this.migrationContext.ThrottleHTTPStatusCode, int64(-1)) + if !thlr.migrationContext.IgnoreHTTPErrors { + atomic.StoreInt64(&thlr.migrationContext.ThrottleHTTPStatusCode, int64(-1)) } } @@ -348,57 +346,57 @@ func (this *Throttler) collectThrottleHTTPStatus(firstThrottlingCollected chan<- } } -// collectGeneralThrottleMetrics reads the once-per-sec metrics, and stores them onto this.migrationContext -func (this *Throttler) collectGeneralThrottleMetrics() error { - if 
atomic.LoadInt64(&this.migrationContext.HibernateUntil) > 0 { +// collectGeneralThrottleMetrics reads the once-per-sec metrics, and stores them onto migrationContext +func (thlr *Throttler) collectGeneralThrottleMetrics() error { + if atomic.LoadInt64(&thlr.migrationContext.HibernateUntil) > 0 { return nil } setThrottle := func(throttle bool, reason string, reasonHint base.ThrottleReasonHint) error { - this.migrationContext.SetThrottleGeneralCheckResult(base.NewThrottleCheckResult(throttle, reason, reasonHint)) + thlr.migrationContext.SetThrottleGeneralCheckResult(base.NewThrottleCheckResult(throttle, reason, reasonHint)) return nil } // Regardless of throttle, we take opportunity to check for panic-abort - if this.migrationContext.PanicFlagFile != "" { - if base.FileExists(this.migrationContext.PanicFlagFile) { + if thlr.migrationContext.PanicFlagFile != "" { + if base.FileExists(thlr.migrationContext.PanicFlagFile) { // Use helper to prevent deadlock if listenOnPanicAbort already exited - _ = base.SendWithContext(this.migrationContext.GetContext(), this.migrationContext.PanicAbort, fmt.Errorf("Found panic-file %s. Aborting without cleanup", this.migrationContext.PanicFlagFile)) + _ = base.SendWithContext(thlr.migrationContext.GetContext(), thlr.migrationContext.PanicAbort, fmt.Errorf("found panic-file %s. 
Aborting without cleanup", thlr.migrationContext.PanicFlagFile)) return nil } } - criticalLoadMet, variableName, value, threshold, err := this.criticalLoadIsMet() + criticalLoadMet, variableName, value, threshold, err := thlr.criticalLoadIsMet() if err != nil { return setThrottle(true, fmt.Sprintf("%s %s", variableName, err), base.NoThrottleReasonHint) } - if criticalLoadMet && this.migrationContext.CriticalLoadHibernateSeconds > 0 { - hibernateDuration := time.Duration(this.migrationContext.CriticalLoadHibernateSeconds) * time.Second + if criticalLoadMet && thlr.migrationContext.CriticalLoadHibernateSeconds > 0 { + hibernateDuration := time.Duration(thlr.migrationContext.CriticalLoadHibernateSeconds) * time.Second hibernateUntilTime := time.Now().Add(hibernateDuration) - atomic.StoreInt64(&this.migrationContext.HibernateUntil, hibernateUntilTime.UnixNano()) - this.migrationContext.Log.Errorf("critical-load met: %s=%d, >=%d. Will hibernate for the duration of %+v, until %+v", variableName, value, threshold, hibernateDuration, hibernateUntilTime) + atomic.StoreInt64(&thlr.migrationContext.HibernateUntil, hibernateUntilTime.UnixNano()) + thlr.migrationContext.Log.Errorf("critical-load met: %s=%d, >=%d. 
Will hibernate for the duration of %+v, until %+v", variableName, value, threshold, hibernateDuration, hibernateUntilTime) go func() { time.Sleep(hibernateDuration) - this.migrationContext.SetThrottleGeneralCheckResult(base.NewThrottleCheckResult(true, "leaving hibernation", base.LeavingHibernationThrottleReasonHint)) - atomic.StoreInt64(&this.migrationContext.HibernateUntil, 0) + thlr.migrationContext.SetThrottleGeneralCheckResult(base.NewThrottleCheckResult(true, "leaving hibernation", base.LeavingHibernationThrottleReasonHint)) + atomic.StoreInt64(&thlr.migrationContext.HibernateUntil, 0) }() return nil } - if criticalLoadMet && this.migrationContext.CriticalLoadIntervalMilliseconds == 0 { + if criticalLoadMet && thlr.migrationContext.CriticalLoadIntervalMilliseconds == 0 { // Use helper to prevent deadlock if listenOnPanicAbort already exited - _ = base.SendWithContext(this.migrationContext.GetContext(), this.migrationContext.PanicAbort, fmt.Errorf("critical-load met: %s=%d, >=%d", variableName, value, threshold)) + _ = base.SendWithContext(thlr.migrationContext.GetContext(), thlr.migrationContext.PanicAbort, fmt.Errorf("critical-load met: %s=%d, >=%d", variableName, value, threshold)) return nil } - if criticalLoadMet && this.migrationContext.CriticalLoadIntervalMilliseconds > 0 { - this.migrationContext.Log.Errorf("critical-load met once: %s=%d, >=%d. Will check again in %d millis", variableName, value, threshold, this.migrationContext.CriticalLoadIntervalMilliseconds) + if criticalLoadMet && thlr.migrationContext.CriticalLoadIntervalMilliseconds > 0 { + thlr.migrationContext.Log.Errorf("critical-load met once: %s=%d, >=%d. 
Will check again in %d millis", variableName, value, threshold, thlr.migrationContext.CriticalLoadIntervalMilliseconds) go func() { - timer := time.NewTimer(time.Millisecond * time.Duration(this.migrationContext.CriticalLoadIntervalMilliseconds)) + timer := time.NewTimer(time.Millisecond * time.Duration(thlr.migrationContext.CriticalLoadIntervalMilliseconds)) <-timer.C - if criticalLoadMetAgain, variableName, value, threshold, _ := this.criticalLoadIsMet(); criticalLoadMetAgain { + if criticalLoadMetAgain, variableName, value, threshold, _ := thlr.criticalLoadIsMet(); criticalLoadMetAgain { // Use helper to prevent deadlock if listenOnPanicAbort already exited - _ = base.SendWithContext(this.migrationContext.GetContext(), this.migrationContext.PanicAbort, fmt.Errorf("critical-load met again after %d millis: %s=%d, >=%d", this.migrationContext.CriticalLoadIntervalMilliseconds, variableName, value, threshold)) + _ = base.SendWithContext(thlr.migrationContext.GetContext(), thlr.migrationContext.PanicAbort, fmt.Errorf("critical-load met again after %d millis: %s=%d, >=%d", thlr.migrationContext.CriticalLoadIntervalMilliseconds, variableName, value, threshold)) } }() } @@ -406,25 +404,25 @@ func (this *Throttler) collectGeneralThrottleMetrics() error { // Back to throttle considerations // User-based throttle - if atomic.LoadInt64(&this.migrationContext.ThrottleCommandedByUser) > 0 { + if atomic.LoadInt64(&thlr.migrationContext.ThrottleCommandedByUser) > 0 { return setThrottle(true, "commanded by user", base.UserCommandThrottleReasonHint) } - if this.migrationContext.ThrottleFlagFile != "" { - if base.FileExists(this.migrationContext.ThrottleFlagFile) { + if thlr.migrationContext.ThrottleFlagFile != "" { + if base.FileExists(thlr.migrationContext.ThrottleFlagFile) { // Throttle file defined and exists! 
return setThrottle(true, "flag-file", base.NoThrottleReasonHint) } } - if this.migrationContext.ThrottleAdditionalFlagFile != "" { - if base.FileExists(this.migrationContext.ThrottleAdditionalFlagFile) { + if thlr.migrationContext.ThrottleAdditionalFlagFile != "" { + if base.FileExists(thlr.migrationContext.ThrottleAdditionalFlagFile) { // 2nd Throttle file defined and exists! return setThrottle(true, "flag-file", base.NoThrottleReasonHint) } } - maxLoad := this.migrationContext.GetMaxLoad() + maxLoad := thlr.migrationContext.GetMaxLoad() for variableName, threshold := range maxLoad { - value, err := this.applier.ShowStatusVariable(variableName) + value, err := thlr.applier.ShowStatusVariable(variableName) if err != nil { return setThrottle(true, fmt.Sprintf("%s %s", variableName, err), base.NoThrottleReasonHint) } @@ -432,8 +430,8 @@ func (this *Throttler) collectGeneralThrottleMetrics() error { return setThrottle(true, fmt.Sprintf("max-load %s=%d >= %d", variableName, value, threshold), base.NoThrottleReasonHint) } } - if this.migrationContext.GetThrottleQuery() != "" { - if res, _ := this.applier.ExecuteThrottleQuery(); res > 0 { + if thlr.migrationContext.GetThrottleQuery() != "" { + if res, _ := thlr.applier.ExecuteThrottleQuery(); res > 0 { return setThrottle(true, "throttle-query", base.NoThrottleReasonHint) } } @@ -444,43 +442,43 @@ func (this *Throttler) collectGeneralThrottleMetrics() error { // initiateThrottlerCollection initiates the various processes that collect measurements // that may affect throttling. There are several components, all running independently, // that collect such metrics. 
-func (this *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan<- bool) { - go this.collectReplicationLag(firstThrottlingCollected) - go this.collectControlReplicasLag() - go this.collectThrottleHTTPStatus(firstThrottlingCollected) +func (thlr *Throttler) initiateThrottlerCollection(firstThrottlingCollected chan<- bool) { + go thlr.collectReplicationLag(firstThrottlingCollected) + go thlr.collectControlReplicasLag() + go thlr.collectThrottleHTTPStatus(firstThrottlingCollected) go func() { - this.collectGeneralThrottleMetrics() + thlr.collectGeneralThrottleMetrics() firstThrottlingCollected <- true ticker := time.NewTicker(time.Second) defer ticker.Stop() for range ticker.C { - if atomic.LoadInt64(&this.finishedMigrating) > 0 { + if atomic.LoadInt64(&thlr.finishedMigrating) > 0 { return } - this.collectGeneralThrottleMetrics() + thlr.collectGeneralThrottleMetrics() } }() } // initiateThrottlerChecks initiates the throttle ticker and sets the basic behavior of throttling. 
-func (this *Throttler) initiateThrottlerChecks() { +func (thlr *Throttler) initiateThrottlerChecks() { throttlerFunction := func() { - alreadyThrottling, currentReason, _ := this.migrationContext.IsThrottled() - shouldThrottle, throttleReason, throttleReasonHint := this.shouldThrottle() + alreadyThrottling, currentReason, _ := thlr.migrationContext.IsThrottled() + shouldThrottle, throttleReason, throttleReasonHint := thlr.shouldThrottle() if shouldThrottle && !alreadyThrottling { // New throttling - this.applier.WriteAndLogChangelog("throttle", throttleReason) + thlr.applier.WriteAndLogChangelog("throttle", throttleReason) } else if shouldThrottle && alreadyThrottling && (currentReason != throttleReason) { // Change of reason - this.applier.WriteAndLogChangelog("throttle", throttleReason) + thlr.applier.WriteAndLogChangelog("throttle", throttleReason) } else if alreadyThrottling && !shouldThrottle { // End of throttling - this.applier.WriteAndLogChangelog("throttle", "done throttling") + thlr.applier.WriteAndLogChangelog("throttle", "done throttling") } - this.migrationContext.SetThrottled(shouldThrottle, throttleReason, throttleReasonHint) + thlr.migrationContext.SetThrottled(shouldThrottle, throttleReason, throttleReasonHint) } throttlerFunction() @@ -488,7 +486,7 @@ func (this *Throttler) initiateThrottlerChecks() { defer ticker.Stop() for { // Check for context cancellation each iteration - ctx := this.migrationContext.GetContext() + ctx := thlr.migrationContext.GetContext() select { case <-ctx.Done(): return @@ -496,7 +494,7 @@ func (this *Throttler) initiateThrottlerChecks() { // Process throttle check } - if atomic.LoadInt64(&this.finishedMigrating) > 0 { + if atomic.LoadInt64(&thlr.finishedMigrating) > 0 { return } throttlerFunction() @@ -505,11 +503,11 @@ func (this *Throttler) initiateThrottlerChecks() { // throttle sees if throttling needs take place, and if so, continuously sleeps (blocks) // until throttling reasons are gone -func (this *Throttler) 
throttle(onThrottled func()) { +func (thlr *Throttler) throttle(onThrottled func()) { for { // IsThrottled() is non-blocking; the throttling decision making takes place asynchronously. // Therefore calling IsThrottled() is cheap - if shouldThrottle, _, _ := this.migrationContext.IsThrottled(); !shouldThrottle { + if shouldThrottle, _, _ := thlr.migrationContext.IsThrottled(); !shouldThrottle { return } if onThrottled != nil { @@ -519,7 +517,7 @@ func (this *Throttler) throttle(onThrottled func()) { } } -func (this *Throttler) Teardown() { - this.migrationContext.Log.Debugf("Tearing down...") - atomic.StoreInt64(&this.finishedMigrating, 1) +func (thlr *Throttler) Teardown() { + thlr.migrationContext.Log.Debugf("Tearing down...") + atomic.StoreInt64(&thlr.finishedMigrating, 1) } diff --git a/go/mysql/binlog_file.go b/go/mysql/binlog_file.go index b9df215bf..a3cf5ad88 100644 --- a/go/mysql/binlog_file.go +++ b/go/mysql/binlog_file.go @@ -49,68 +49,68 @@ func ParseFileBinlogCoordinates(logFileLogPos string) (*FileBinlogCoordinates, e } // DisplayString returns a user-friendly string representation of these coordinates -func (this *FileBinlogCoordinates) DisplayString() string { - return fmt.Sprintf("%s:%d", this.LogFile, this.LogPos) +func (fbc *FileBinlogCoordinates) DisplayString() string { + return fmt.Sprintf("%s:%d", fbc.LogFile, fbc.LogPos) } // String returns a user-friendly string representation of these coordinates -func (this FileBinlogCoordinates) String() string { - return this.DisplayString() +func (fbc FileBinlogCoordinates) String() string { + return fbc.DisplayString() } // Equals tests equality of this coordinate and another one. 
-func (this *FileBinlogCoordinates) Equals(other BinlogCoordinates) bool { +func (fbc *FileBinlogCoordinates) Equals(other BinlogCoordinates) bool { coord, ok := other.(*FileBinlogCoordinates) if !ok || other == nil { return false } - return this.LogFile == coord.LogFile && this.LogPos == coord.LogPos + return fbc.LogFile == coord.LogFile && fbc.LogPos == coord.LogPos } // IsEmpty returns true if the log file is empty, unnamed -func (this *FileBinlogCoordinates) IsEmpty() bool { - return this.LogFile == "" +func (fbc *FileBinlogCoordinates) IsEmpty() bool { + return fbc.LogFile == "" } // SmallerThan returns true if this coordinate is strictly smaller than the other. -func (this *FileBinlogCoordinates) SmallerThan(other BinlogCoordinates) bool { +func (fbc *FileBinlogCoordinates) SmallerThan(other BinlogCoordinates) bool { coord, ok := other.(*FileBinlogCoordinates) if !ok || other == nil { return false } - fileNumberDist := this.FileNumberDistance(coord) + fileNumberDist := fbc.FileNumberDistance(coord) if fileNumberDist == 0 { - return this.LogPos < coord.LogPos + return fbc.LogPos < coord.LogPos } return fileNumberDist > 0 } // SmallerThanOrEquals returns true if this coordinate is the same or equal to the other one. -// We do NOT compare the type so we can not use this.Equals() -func (this *FileBinlogCoordinates) SmallerThanOrEquals(other BinlogCoordinates) bool { +// We do NOT compare the type so we can not use fbc.Equals() +func (fbc *FileBinlogCoordinates) SmallerThanOrEquals(other BinlogCoordinates) bool { coord, ok := other.(*FileBinlogCoordinates) if !ok || other == nil { return false } - if this.SmallerThan(other) { + if fbc.SmallerThan(other) { return true } - return this.LogFile == coord.LogFile && this.LogPos == coord.LogPos // No Type comparison + return fbc.LogFile == coord.LogFile && fbc.LogPos == coord.LogPos // No Type comparison } // FileNumberDistance returns the numeric distance between this coordinate's file number and the other's. 
// Effectively it means "how many rotates/FLUSHes would make these coordinates's file reach the other's" -func (this *FileBinlogCoordinates) FileNumberDistance(other *FileBinlogCoordinates) int { - thisNumber, _ := this.FileNumber() +func (fbc *FileBinlogCoordinates) FileNumberDistance(other *FileBinlogCoordinates) int { + fbcNumber, _ := fbc.FileNumber() otherNumber, _ := other.FileNumber() - return otherNumber - thisNumber + return otherNumber - fbcNumber } // FileNumber returns the numeric value of the file, and the length in characters representing the number in the filename. // Example: FileNumber() of mysqld.log.000789 is (789, 6) -func (this *FileBinlogCoordinates) FileNumber() (int, int) { - tokens := strings.Split(this.LogFile, ".") +func (fbc *FileBinlogCoordinates) FileNumber() (int, int) { + tokens := strings.Split(fbc.LogFile, ".") numPart := tokens[len(tokens)-1] numLen := len(numPart) fileNum, err := strconv.Atoi(numPart) @@ -121,55 +121,55 @@ func (this *FileBinlogCoordinates) FileNumber() (int, int) { } // PreviousFileCoordinatesBy guesses the filename of the previous binlog/relaylog, by given offset (number of files back) -func (this *FileBinlogCoordinates) PreviousFileCoordinatesBy(offset int) (BinlogCoordinates, error) { +func (fbc *FileBinlogCoordinates) PreviousFileCoordinatesBy(offset int) (BinlogCoordinates, error) { result := &FileBinlogCoordinates{} - fileNum, numLen := this.FileNumber() + fileNum, numLen := fbc.FileNumber() if fileNum == 0 { - return result, errors.New("Log file number is zero, cannot detect previous file") + return result, errors.New("log file number is zero, cannot detect previous file") } newNumStr := fmt.Sprintf("%d", (fileNum - offset)) newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr - tokens := strings.Split(this.LogFile, ".") + tokens := strings.Split(fbc.LogFile, ".") tokens[len(tokens)-1] = newNumStr result.LogFile = strings.Join(tokens, ".") return result, nil } // PreviousFileCoordinates 
guesses the filename of the previous binlog/relaylog -func (this *FileBinlogCoordinates) PreviousFileCoordinates() (BinlogCoordinates, error) { - return this.PreviousFileCoordinatesBy(1) +func (fbc *FileBinlogCoordinates) PreviousFileCoordinates() (BinlogCoordinates, error) { + return fbc.PreviousFileCoordinatesBy(1) } // PreviousFileCoordinates guesses the filename of the previous binlog/relaylog -func (this *FileBinlogCoordinates) NextFileCoordinates() (BinlogCoordinates, error) { +func (fbc *FileBinlogCoordinates) NextFileCoordinates() (BinlogCoordinates, error) { result := &FileBinlogCoordinates{} - fileNum, numLen := this.FileNumber() + fileNum, numLen := fbc.FileNumber() newNumStr := fmt.Sprintf("%d", (fileNum + 1)) newNumStr = strings.Repeat("0", numLen-len(newNumStr)) + newNumStr - tokens := strings.Split(this.LogFile, ".") + tokens := strings.Split(fbc.LogFile, ".") tokens[len(tokens)-1] = newNumStr result.LogFile = strings.Join(tokens, ".") return result, nil } // FileSmallerThan returns true if this coordinate's file is strictly smaller than the other's. 
-func (this *FileBinlogCoordinates) DetachedCoordinates() (isDetached bool, detachedLogFile string, detachedLogPos string) { - detachedCoordinatesSubmatch := detachPattern.FindStringSubmatch(this.LogFile) +func (fbc *FileBinlogCoordinates) DetachedCoordinates() (isDetached bool, detachedLogFile string, detachedLogPos string) { + detachedCoordinatesSubmatch := detachPattern.FindStringSubmatch(fbc.LogFile) if len(detachedCoordinatesSubmatch) == 0 { return false, "", "" } return true, detachedCoordinatesSubmatch[1], detachedCoordinatesSubmatch[2] } -func (this *FileBinlogCoordinates) Clone() BinlogCoordinates { +func (fbc *FileBinlogCoordinates) Clone() BinlogCoordinates { return &FileBinlogCoordinates{ - LogPos: this.LogPos, - LogFile: this.LogFile, - EventSize: this.EventSize, + LogPos: fbc.LogPos, + LogFile: fbc.LogFile, + EventSize: fbc.EventSize, } } @@ -178,7 +178,7 @@ func (this *FileBinlogCoordinates) Clone() BinlogCoordinates { // https://github.com/go-mysql-org/go-mysql/blob/master/replication/event.go // https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_replication_binlog_event.html#sect_protocol_replication_binlog_event_header // Issue: https://github.com/github/gh-ost/issues/1366 -func (this *FileBinlogCoordinates) IsLogPosOverflowBeyond4Bytes(preCoordinate *FileBinlogCoordinates) bool { +func (fbc *FileBinlogCoordinates) IsLogPosOverflowBeyond4Bytes(preCoordinate *FileBinlogCoordinates) bool { if preCoordinate == nil { return false } @@ -186,11 +186,11 @@ func (this *FileBinlogCoordinates) IsLogPosOverflowBeyond4Bytes(preCoordinate *F return false } - if this.LogFile != preCoordinate.LogFile { + if fbc.LogFile != preCoordinate.LogFile { return false } - if preCoordinate.LogPos+this.EventSize >= 1<<32 { + if preCoordinate.LogPos+fbc.EventSize >= 1<<32 { // Unexpected rows event, the previous binlog log_pos + current binlog event_size is overflow 4 bytes return true } diff --git a/go/mysql/binlog_gtid.go b/go/mysql/binlog_gtid.go index 
d7b86c04f..edd027a73 100644 --- a/go/mysql/binlog_gtid.go +++ b/go/mysql/binlog_gtid.go @@ -24,21 +24,21 @@ func NewGTIDBinlogCoordinates(gtidSet string) (*GTIDBinlogCoordinates, error) { } // DisplayString returns a user-friendly string representation of these current UUID set or the full GTID set. -func (this *GTIDBinlogCoordinates) DisplayString() string { - if this.UUIDSet != nil { - return this.UUIDSet.String() +func (coord *GTIDBinlogCoordinates) DisplayString() string { + if coord.UUIDSet != nil { + return coord.UUIDSet.String() } - return this.String() + return coord.String() } // String returns a user-friendly string representation of these full GTID set. -func (this GTIDBinlogCoordinates) String() string { - return this.GTIDSet.String() +func (coord GTIDBinlogCoordinates) String() string { + return coord.GTIDSet.String() } // Equals tests equality of this coordinate and another one. -func (this *GTIDBinlogCoordinates) Equals(other BinlogCoordinates) bool { - if other == nil || this.IsEmpty() || other.IsEmpty() { +func (coord *GTIDBinlogCoordinates) Equals(other BinlogCoordinates) bool { + if other == nil || coord.IsEmpty() || other.IsEmpty() { return false } @@ -47,17 +47,17 @@ func (this *GTIDBinlogCoordinates) Equals(other BinlogCoordinates) bool { return false } - return this.GTIDSet.Equal(otherCoords.GTIDSet) + return coord.GTIDSet.Equal(otherCoords.GTIDSet) } // IsEmpty returns true if the GTID set is empty. -func (this *GTIDBinlogCoordinates) IsEmpty() bool { - return this.GTIDSet == nil +func (coord *GTIDBinlogCoordinates) IsEmpty() bool { + return coord.GTIDSet == nil } // SmallerThan returns true if this coordinate is strictly smaller than the other. 
-func (this *GTIDBinlogCoordinates) SmallerThan(other BinlogCoordinates) bool { - if other == nil || this.IsEmpty() || other.IsEmpty() { +func (coord *GTIDBinlogCoordinates) SmallerThan(other BinlogCoordinates) bool { + if other == nil || coord.IsEmpty() || other.IsEmpty() { return false } otherCoords, ok := other.(*GTIDBinlogCoordinates) @@ -65,23 +65,23 @@ func (this *GTIDBinlogCoordinates) SmallerThan(other BinlogCoordinates) bool { return false } - // if 'this' does not contain the same sets we assume we are behind 'other'. + // if 'coord' does not contain the same sets we assume we are behind 'other'. // there are probably edge cases where this isn't true - return !this.GTIDSet.Contain(otherCoords.GTIDSet) + return !coord.GTIDSet.Contain(otherCoords.GTIDSet) } // SmallerThanOrEquals returns true if this coordinate is the same or equal to the other one. -func (this *GTIDBinlogCoordinates) SmallerThanOrEquals(other BinlogCoordinates) bool { - return this.Equals(other) || this.SmallerThan(other) +func (coord *GTIDBinlogCoordinates) SmallerThanOrEquals(other BinlogCoordinates) bool { + return coord.Equals(other) || coord.SmallerThan(other) } -func (this *GTIDBinlogCoordinates) Clone() BinlogCoordinates { +func (coord *GTIDBinlogCoordinates) Clone() BinlogCoordinates { out := &GTIDBinlogCoordinates{} - if this.GTIDSet != nil { - out.GTIDSet = this.GTIDSet.Clone().(*gomysql.MysqlGTIDSet) + if coord.GTIDSet != nil { + out.GTIDSet = coord.GTIDSet.Clone().(*gomysql.MysqlGTIDSet) } - if this.UUIDSet != nil { - out.UUIDSet = this.UUIDSet.Clone() + if coord.UUIDSet != nil { + out.UUIDSet = coord.UUIDSet.Clone() } return out } diff --git a/go/mysql/connection.go b/go/mysql/connection.go index f728fc7fe..1bd961cc0 100644 --- a/go/mysql/connection.go +++ b/go/mysql/connection.go @@ -42,23 +42,23 @@ func NewConnectionConfig() *ConnectionConfig { } // DuplicateCredentials creates a new connection config with given key and with same credentials as this config -func (this 
*ConnectionConfig) DuplicateCredentials(key InstanceKey) *ConnectionConfig { +func (con *ConnectionConfig) DuplicateCredentials(key InstanceKey) *ConnectionConfig { config := &ConnectionConfig{ Key: key, - User: this.User, - Password: this.Password, - tlsConfig: this.tlsConfig, - Timeout: this.Timeout, - TransactionIsolation: this.TransactionIsolation, - Charset: this.Charset, + User: con.User, + Password: con.Password, + tlsConfig: con.tlsConfig, + Timeout: con.Timeout, + TransactionIsolation: con.TransactionIsolation, + Charset: con.Charset, } - if this.tlsConfig != nil { + if con.tlsConfig != nil { config.tlsConfig = &tls.Config{ ServerName: key.Hostname, - Certificates: this.tlsConfig.Certificates, - RootCAs: this.tlsConfig.RootCAs, - InsecureSkipVerify: this.tlsConfig.InsecureSkipVerify, + Certificates: con.tlsConfig.Certificates, + RootCAs: con.tlsConfig.RootCAs, + InsecureSkipVerify: con.tlsConfig.InsecureSkipVerify, } } @@ -66,19 +66,19 @@ func (this *ConnectionConfig) DuplicateCredentials(key InstanceKey) *ConnectionC return config } -func (this *ConnectionConfig) Duplicate() *ConnectionConfig { - return this.DuplicateCredentials(this.Key) +func (con *ConnectionConfig) Duplicate() *ConnectionConfig { + return con.DuplicateCredentials(con.Key) } -func (this *ConnectionConfig) String() string { - return fmt.Sprintf("%s, user=%s, usingTLS=%t", this.Key.DisplayString(), this.User, this.tlsConfig != nil) +func (con *ConnectionConfig) String() string { + return fmt.Sprintf("%s, user=%s, usingTLS=%t", con.Key.DisplayString(), con.User, con.tlsConfig != nil) } -func (this *ConnectionConfig) Equals(other *ConnectionConfig) bool { - return this.Key.Equals(&other.Key) || this.ImpliedKey.Equals(other.ImpliedKey) +func (con *ConnectionConfig) Equals(other *ConnectionConfig) bool { + return con.Key.Equals(&other.Key) || con.ImpliedKey.Equals(other.ImpliedKey) } -func (this *ConnectionConfig) UseTLS(caCertificatePath, clientCertificate, clientKey string, allowInsecure 
bool) error { +func (con *ConnectionConfig) UseTLS(caCertificatePath, clientCertificate, clientKey string, allowInsecure bool) error { var rootCertPool *x509.CertPool var certs []tls.Certificate var err error @@ -106,35 +106,35 @@ func (this *ConnectionConfig) UseTLS(caCertificatePath, clientCertificate, clien certs = []tls.Certificate{cert} } - this.tlsConfig = &tls.Config{ - ServerName: this.Key.Hostname, + con.tlsConfig = &tls.Config{ + ServerName: con.Key.Hostname, Certificates: certs, RootCAs: rootCertPool, InsecureSkipVerify: allowInsecure, } - return this.RegisterTLSConfig() + return con.RegisterTLSConfig() } -func (this *ConnectionConfig) RegisterTLSConfig() error { - if this.tlsConfig == nil { +func (con *ConnectionConfig) RegisterTLSConfig() error { + if con.tlsConfig == nil { return nil } - if this.tlsConfig.ServerName == "" { + if con.tlsConfig.ServerName == "" { return errors.New("tlsConfig.ServerName cannot be empty") } - var tlsOption = GetDBTLSConfigKey(this.tlsConfig.ServerName) + var tlsOption = GetDBTLSConfigKey(con.tlsConfig.ServerName) - return mysql.RegisterTLSConfig(tlsOption, this.tlsConfig) + return mysql.RegisterTLSConfig(tlsOption, con.tlsConfig) } -func (this *ConnectionConfig) TLSConfig() *tls.Config { - return this.tlsConfig +func (con *ConnectionConfig) TLSConfig() *tls.Config { + return con.tlsConfig } -func (this *ConnectionConfig) GetDBUri(databaseName string) string { - hostname := this.Key.Hostname +func (con *ConnectionConfig) GetDBUri(databaseName string) string { + hostname := con.Key.Hostname var ip = net.ParseIP(hostname) if (ip != nil) && (ip.To4() == nil) { // Wrap IPv6 literals in square brackets @@ -144,26 +144,26 @@ func (this *ConnectionConfig) GetDBUri(databaseName string) string { // go-mysql-driver defaults to false if tls param is not provided; explicitly setting here to // simplify construction of the DSN below. 
tlsOption := "false" - if this.tlsConfig != nil { - tlsOption = GetDBTLSConfigKey(this.tlsConfig.ServerName) + if con.tlsConfig != nil { + tlsOption = GetDBTLSConfigKey(con.tlsConfig.ServerName) } - if this.Charset == "" { - this.Charset = "utf8mb4,utf8,latin1" + if con.Charset == "" { + con.Charset = "utf8mb4,utf8,latin1" } connectionParams := []string{ "autocommit=true", "interpolateParams=true", - fmt.Sprintf("charset=%s", this.Charset), + fmt.Sprintf("charset=%s", con.Charset), fmt.Sprintf("tls=%s", tlsOption), - fmt.Sprintf("transaction_isolation=%q", this.TransactionIsolation), - fmt.Sprintf("timeout=%fs", this.Timeout), - fmt.Sprintf("readTimeout=%fs", this.Timeout), - fmt.Sprintf("writeTimeout=%fs", this.Timeout), + fmt.Sprintf("transaction_isolation=%q", con.TransactionIsolation), + fmt.Sprintf("timeout=%fs", con.Timeout), + fmt.Sprintf("readTimeout=%fs", con.Timeout), + fmt.Sprintf("writeTimeout=%fs", con.Timeout), } - return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?%s", this.User, this.Password, hostname, this.Key.Port, databaseName, strings.Join(connectionParams, "&")) + return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?%s", con.User, con.Password, hostname, con.Key.Port, databaseName, strings.Join(connectionParams, "&")) } func GetDBTLSConfigKey(tlsServerName string) string { diff --git a/go/mysql/instance_key.go b/go/mysql/instance_key.go index 3d2bff114..a321ed9e0 100644 --- a/go/mysql/instance_key.go +++ b/go/mysql/instance_key.go @@ -20,7 +20,7 @@ var ( ipv4HostRegexp = regexp.MustCompile("^([^:]+)$") // e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308 - ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") //nolint:gosimple + ipv6HostPortRegexp = regexp.MustCompile(`^\[([0-9a-fA-F:]+)\]:(\d{1,5})$`) //nolint:gosimple // e.g. 
2001:db8:1f70::999:de8:7648:6e8 ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$") ) @@ -47,13 +47,13 @@ func NewRawInstanceKey(hostPort string) (*InstanceKey, error) { } else if submatch := ipv6HostRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 { hostname = submatch[1] } else { - return nil, fmt.Errorf("Cannot parse address: %s", hostPort) + return nil, fmt.Errorf("cannot parse address: %s", hostPort) } instanceKey := &InstanceKey{Hostname: hostname, Port: DefaultInstancePort} if port != "" { var err error if instanceKey.Port, err = strconv.Atoi(port); err != nil { - return instanceKey, fmt.Errorf("Invalid port: %s", port) + return instanceKey, fmt.Errorf("invalid port: %s", port) } } @@ -67,68 +67,68 @@ func ParseInstanceKey(hostPort string) (*InstanceKey, error) { } // Equals tests equality between this key and another key -func (this *InstanceKey) Equals(other *InstanceKey) bool { +func (ik *InstanceKey) Equals(other *InstanceKey) bool { if other == nil { return false } - return this.Hostname == other.Hostname && this.Port == other.Port + return ik.Hostname == other.Hostname && ik.Port == other.Port } // SmallerThan returns true if this key is dictionary-smaller than another. // This is used for consistent sorting/ordering; there's nothing magical about it. 
-func (this *InstanceKey) SmallerThan(other *InstanceKey) bool { - if this.Hostname < other.Hostname { +func (ik *InstanceKey) SmallerThan(other *InstanceKey) bool { + if ik.Hostname < other.Hostname { return true } - if this.Hostname == other.Hostname && this.Port < other.Port { + if ik.Hostname == other.Hostname && ik.Port < other.Port { return true } return false } // IsDetached returns 'true' when this hostname is logically "detached" -func (this *InstanceKey) IsDetached() bool { - return strings.HasPrefix(this.Hostname, detachHint) +func (ik *InstanceKey) IsDetached() bool { + return strings.HasPrefix(ik.Hostname, detachHint) } // IsValid uses simple heuristics to see whether this key represents an actual instance -func (this *InstanceKey) IsValid() bool { - if this.Hostname == "_" { +func (ik *InstanceKey) IsValid() bool { + if ik.Hostname == "_" { return false } - if this.IsDetached() { + if ik.IsDetached() { return false } - return len(this.Hostname) > 0 && this.Port > 0 + return len(ik.Hostname) > 0 && ik.Port > 0 } // DetachedKey returns an instance key whose hostname is detached: invalid, but recoverable -func (this *InstanceKey) DetachedKey() *InstanceKey { - if this.IsDetached() { - return this +func (ik *InstanceKey) DetachedKey() *InstanceKey { + if ik.IsDetached() { + return ik } - return &InstanceKey{Hostname: fmt.Sprintf("%s%s", detachHint, this.Hostname), Port: this.Port} + return &InstanceKey{Hostname: fmt.Sprintf("%s%s", detachHint, ik.Hostname), Port: ik.Port} } // ReattachedKey returns an instance key whose hostname is detached: invalid, but recoverable -func (this *InstanceKey) ReattachedKey() *InstanceKey { - if !this.IsDetached() { - return this +func (ik *InstanceKey) ReattachedKey() *InstanceKey { + if !ik.IsDetached() { + return ik } - return &InstanceKey{Hostname: this.Hostname[len(detachHint):], Port: this.Port} + return &InstanceKey{Hostname: ik.Hostname[len(detachHint):], Port: ik.Port} } // StringCode returns an official string 
representation of this key -func (this *InstanceKey) StringCode() string { - return fmt.Sprintf("%s:%d", this.Hostname, this.Port) +func (ik *InstanceKey) StringCode() string { + return fmt.Sprintf("%s:%d", ik.Hostname, ik.Port) } // DisplayString returns a user-friendly string representation of this key -func (this *InstanceKey) DisplayString() string { - return this.StringCode() +func (ik *InstanceKey) DisplayString() string { + return ik.StringCode() } // String returns a user-friendly string representation of this key -func (this InstanceKey) String() string { - return this.StringCode() +func (ik InstanceKey) String() string { + return ik.StringCode() } diff --git a/go/mysql/instance_key_map.go b/go/mysql/instance_key_map.go index 1065fb935..d38287145 100644 --- a/go/mysql/instance_key_map.go +++ b/go/mysql/instance_key_map.go @@ -17,76 +17,76 @@ func NewInstanceKeyMap() *InstanceKeyMap { return &InstanceKeyMap{} } -func (this *InstanceKeyMap) Len() int { - return len(*this) +func (ikm *InstanceKeyMap) Len() int { + return len(*ikm) } // AddKey adds a single key to this map -func (this *InstanceKeyMap) AddKey(key InstanceKey) { - (*this)[key] = true +func (ikm *InstanceKeyMap) AddKey(key InstanceKey) { + (*ikm)[key] = true } // AddKeys adds all given keys to this map -func (this *InstanceKeyMap) AddKeys(keys []InstanceKey) { +func (ikm *InstanceKeyMap) AddKeys(keys []InstanceKey) { for _, key := range keys { - this.AddKey(key) + ikm.AddKey(key) } } // HasKey checks if given key is within the map -func (this *InstanceKeyMap) HasKey(key InstanceKey) bool { - _, ok := (*this)[key] +func (ikm *InstanceKeyMap) HasKey(key InstanceKey) bool { + _, ok := (*ikm)[key] return ok } // GetInstanceKeys returns keys in this map in the form of an array -func (this *InstanceKeyMap) GetInstanceKeys() []InstanceKey { +func (ikm *InstanceKeyMap) GetInstanceKeys() []InstanceKey { res := []InstanceKey{} - for key := range *this { + for key := range *ikm { res = append(res, key) } 
return res } // MarshalJSON will marshal this map as JSON -func (this *InstanceKeyMap) MarshalJSON() ([]byte, error) { - return json.Marshal(this.GetInstanceKeys()) +func (ikm *InstanceKeyMap) MarshalJSON() ([]byte, error) { + return json.Marshal(ikm.GetInstanceKeys()) } // ToJSON will marshal this map as JSON -func (this *InstanceKeyMap) ToJSON() (string, error) { - bytes, err := this.MarshalJSON() +func (ikm *InstanceKeyMap) ToJSON() (string, error) { + bytes, err := ikm.MarshalJSON() return string(bytes), err } // ToJSONString will marshal this map as JSON -func (this *InstanceKeyMap) ToJSONString() string { - s, _ := this.ToJSON() +func (ikm *InstanceKeyMap) ToJSONString() string { + s, _ := ikm.ToJSON() return s } // ToCommaDelimitedList will export this map in comma delimited format -func (this *InstanceKeyMap) ToCommaDelimitedList() string { +func (ikm *InstanceKeyMap) ToCommaDelimitedList() string { keyDisplays := []string{} - for key := range *this { + for key := range *ikm { keyDisplays = append(keyDisplays, key.DisplayString()) } return strings.Join(keyDisplays, ",") } // ReadJson unmarshalls a json into this map -func (this *InstanceKeyMap) ReadJson(jsonString string) error { +func (ikm *InstanceKeyMap) ReadJson(jsonString string) error { var keys []InstanceKey err := json.Unmarshal([]byte(jsonString), &keys) if err != nil { return err } - this.AddKeys(keys) + ikm.AddKeys(keys) return err } // ReadJson unmarshalls a json into this map -func (this *InstanceKeyMap) ReadCommaDelimitedList(list string) error { +func (ikm *InstanceKeyMap) ReadCommaDelimitedList(list string) error { if list == "" { return nil } @@ -96,7 +96,7 @@ func (this *InstanceKeyMap) ReadCommaDelimitedList(list string) error { if err != nil { return err } - this.AddKey(*key) + ikm.AddKey(*key) } return nil } diff --git a/go/mysql/utils.go b/go/mysql/utils.go index 9619e1e9f..654cb099e 100644 --- a/go/mysql/utils.go +++ b/go/mysql/utils.go @@ -40,8 +40,8 @@ func 
NewNoReplicationLagResult() *ReplicationLagResult { return &ReplicationLagResult{Lag: 0, Err: nil} } -func (this *ReplicationLagResult) HasLag() bool { - return this.Lag > 0 +func (rlg *ReplicationLagResult) HasLag() bool { + return rlg.Lag > 0 } // knownDBs is a DB cache by uri @@ -110,7 +110,7 @@ func GetMasterKeyFromSlaveStatus(dbVersion string, connectionConfig *ConnectionC slaveSQLRunning := rowMap.GetString(sqlRunningTerm) if slaveIORunning != "Yes" || slaveSQLRunning != "Yes" { - return fmt.Errorf("Replication on %+v is broken: %s: %s, %s: %s. Please make sure replication runs before using gh-ost.", + return fmt.Errorf("replication on %+v is broken: %s: %s, %s: %s. Please make sure replication runs before using gh-ost", connectionConfig.Key, ioRunningTerm, slaveIORunning, @@ -153,7 +153,7 @@ func GetMasterConnectionConfigSafe(dbVersion string, connectionConfig *Connectio if allowMasterMaster { return connectionConfig, nil } - return nil, fmt.Errorf("There seems to be a master-master setup at %+v. This is unsupported. Bailing out", masterConfig.Key) + return nil, fmt.Errorf("there seems to be a master-master setup at %+v. This is unsupported. Bailing out", masterConfig.Key) } visitedKeys.AddKey(masterConfig.Key) return GetMasterConnectionConfigSafe(dbVersion, masterConfig, visitedKeys, allowMasterMaster) @@ -232,7 +232,7 @@ func GetTableColumns(db *gosql.DB, databaseName, tableName string) (*sql.ColumnL return nil, nil, err } if len(columnNames) == 0 { - return nil, nil, log.Errorf("Found 0 columns on %s.%s. Bailing out", + return nil, nil, log.Errorf("found 0 columns on %s.%s. 
Bailing out", sql.EscapeName(databaseName), sql.EscapeName(tableName), ) diff --git a/go/sql/builder.go b/go/sql/builder.go index 940ca4ca3..6e41eb4e1 100644 --- a/go/sql/builder.go +++ b/go/sql/builder.go @@ -82,10 +82,10 @@ func duplicateNames(names []string) []string { func BuildValueComparison(column string, value string, comparisonSign ValueComparisonSign) (result string, err error) { if column == "" { - return "", fmt.Errorf("Empty column in GetValueComparison") + return "", fmt.Errorf("empty column in BuildValueComparison") } if value == "" { - return "", fmt.Errorf("Empty value in GetValueComparison") + return "", fmt.Errorf("empty value in BuildValueComparison") } comparison := fmt.Sprintf("(%s %s %s)", EscapeName(column), string(comparisonSign), value) return comparison, err @@ -93,10 +93,10 @@ func BuildValueComparison(column string, value string, comparisonSign ValueCompa func BuildEqualsComparison(columns []string, values []string) (result string, err error) { if len(columns) == 0 { - return "", fmt.Errorf("Got 0 columns in GetEqualsComparison") + return "", fmt.Errorf("got 0 columns in BuildEqualsComparison") } if len(columns) != len(values) { - return "", fmt.Errorf("Got %d columns but %d values in GetEqualsComparison", len(columns), len(values)) + return "", fmt.Errorf("got %d columns but %d values in BuildEqualsComparison", len(columns), len(values)) } comparisons := []string{} for i, column := range columns { @@ -125,7 +125,7 @@ type CheckpointInsertQueryBuilder struct { func NewCheckpointQueryBuilder(databaseName, tableName string, uniqueKeyColumns *ColumnList) (*CheckpointInsertQueryBuilder, error) { if uniqueKeyColumns.Len() == 0 { - return nil, fmt.Errorf("Got 0 columns in BuildSetCheckpointInsertQuery") + return nil, fmt.Errorf("got 0 columns in BuildSetCheckpointInsertQuery") } values := buildColumnsPreparedValues(uniqueKeyColumns) minUniqueColNames := []string{} @@ -181,7 +181,7 @@ func (b *CheckpointInsertQueryBuilder) 
BuildQuery(uniqueKeyArgs []interface{}) ( func BuildSetPreparedClause(columns *ColumnList) (result string, err error) { if columns.Len() == 0 { - return "", fmt.Errorf("Got 0 columns in BuildSetPreparedClause") + return "", fmt.Errorf("got 0 columns in BuildSetPreparedClause") } setTokens := []string{} for _, column := range columns.Columns() { @@ -202,13 +202,13 @@ func BuildSetPreparedClause(columns *ColumnList) (result string, err error) { func BuildRangeComparison(columns []string, values []string, args []interface{}, comparisonSign ValueComparisonSign) (result string, explodedArgs []interface{}, err error) { if len(columns) == 0 { - return "", explodedArgs, fmt.Errorf("Got 0 columns in GetRangeComparison") + return "", explodedArgs, fmt.Errorf("got 0 columns in BuildRangeComparison") } if len(columns) != len(values) { - return "", explodedArgs, fmt.Errorf("Got %d columns but %d values in GetEqualsComparison", len(columns), len(values)) + return "", explodedArgs, fmt.Errorf("got %d columns but %d values in BuildRangeComparison", len(columns), len(values)) } if len(columns) != len(args) { - return "", explodedArgs, fmt.Errorf("Got %d columns but %d args in GetEqualsComparison", len(columns), len(args)) + return "", explodedArgs, fmt.Errorf("got %d columns but %d args in BuildRangeComparison", len(columns), len(args)) } includeEquals := false if comparisonSign == LessThanOrEqualsComparisonSign { @@ -262,7 +262,7 @@ func BuildRangePreparedComparison(columns *ColumnList, args []interface{}, compa func BuildRangeInsertQuery(databaseName, originalTableName, ghostTableName string, sharedColumns []string, mappedSharedColumns []string, uniqueKey string, uniqueKeyColumns *ColumnList, rangeStartValues, rangeEndValues []string, rangeStartArgs, rangeEndArgs []interface{}, includeRangeStartValues bool, transactionalTable bool, noWait bool) (result string, explodedArgs []interface{}, err error) { if len(sharedColumns) == 0 { - return "", explodedArgs, fmt.Errorf("Got 0 shared 
columns in BuildRangeInsertQuery") + return "", explodedArgs, fmt.Errorf("got 0 shared columns in BuildRangeInsertQuery") } databaseName = EscapeName(databaseName) originalTableName = EscapeName(originalTableName) @@ -281,7 +281,7 @@ func BuildRangeInsertQuery(databaseName, originalTableName, ghostTableName strin sharedColumnsListing := strings.Join(sharedColumns, ", ") uniqueKey = EscapeName(uniqueKey) - var minRangeComparisonSign ValueComparisonSign = GreaterThanComparisonSign + var minRangeComparisonSign = GreaterThanComparisonSign if includeRangeStartValues { minRangeComparisonSign = GreaterThanOrEqualsComparisonSign } @@ -331,12 +331,12 @@ func BuildRangeInsertPreparedQuery(databaseName, originalTableName, ghostTableNa func BuildUniqueKeyRangeEndPreparedQueryViaOffset(databaseName, tableName string, uniqueKeyColumns *ColumnList, rangeStartArgs, rangeEndArgs []interface{}, chunkSize int64, includeRangeStartValues bool, hint string) (result string, explodedArgs []interface{}, err error) { if uniqueKeyColumns.Len() == 0 { - return "", explodedArgs, fmt.Errorf("Got 0 columns in BuildUniqueKeyRangeEndPreparedQuery") + return "", explodedArgs, fmt.Errorf("got 0 columns in BuildUniqueKeyRangeEndPreparedQuery") } databaseName = EscapeName(databaseName) tableName = EscapeName(tableName) - var startRangeComparisonSign ValueComparisonSign = GreaterThanComparisonSign + var startRangeComparisonSign = GreaterThanComparisonSign if includeRangeStartValues { startRangeComparisonSign = GreaterThanOrEqualsComparisonSign } @@ -384,12 +384,12 @@ func BuildUniqueKeyRangeEndPreparedQueryViaOffset(databaseName, tableName string func BuildUniqueKeyRangeEndPreparedQueryViaTemptable(databaseName, tableName string, uniqueKeyColumns *ColumnList, rangeStartArgs, rangeEndArgs []interface{}, chunkSize int64, includeRangeStartValues bool, hint string) (result string, explodedArgs []interface{}, err error) { if uniqueKeyColumns.Len() == 0 { - return "", explodedArgs, fmt.Errorf("Got 0 columns 
in BuildUniqueKeyRangeEndPreparedQuery") + return "", explodedArgs, fmt.Errorf("got 0 columns in BuildUniqueKeyRangeEndPreparedQuery") } databaseName = EscapeName(databaseName) tableName = EscapeName(tableName) - var startRangeComparisonSign ValueComparisonSign = GreaterThanComparisonSign + var startRangeComparisonSign = GreaterThanComparisonSign if includeRangeStartValues { startRangeComparisonSign = GreaterThanOrEqualsComparisonSign } @@ -452,7 +452,7 @@ func BuildUniqueKeyMaxValuesPreparedQuery(databaseName, tableName string, unique func buildUniqueKeyMinMaxValuesPreparedQuery(databaseName, tableName string, uniqueKey *UniqueKey, order string) (string, error) { if uniqueKey.Columns.Len() == 0 { - return "", fmt.Errorf("Got 0 columns in BuildUniqueKeyMinMaxValuesPreparedQuery") + return "", fmt.Errorf("got 0 columns in BuildUniqueKeyMinMaxValuesPreparedQuery") } databaseName = EscapeName(databaseName) tableName = EscapeName(tableName) diff --git a/go/sql/builder_test.go b/go/sql/builder_test.go index 7f80005b0..0d10b75e7 100644 --- a/go/sql/builder_test.go +++ b/go/sql/builder_test.go @@ -24,7 +24,7 @@ func init() { } func normalizeQuery(name string) string { - name = strings.Replace(name, "`", "", -1) + name = strings.ReplaceAll(name, "`", "") name = spacesRegexp.ReplaceAllString(name, " ") name = strings.TrimSpace(name) return name diff --git a/go/sql/parser.go b/go/sql/parser.go index 2ddc60f50..72e44ea62 100644 --- a/go/sql/parser.go +++ b/go/sql/parser.go @@ -62,7 +62,7 @@ func NewParserFromAlterStatement(alterStatement string) *AlterTableParser { return parser } -func (this *AlterTableParser) tokenizeAlterStatement(alterStatement string) (tokens []string) { +func (atp *AlterTableParser) tokenizeAlterStatement(alterStatement string) (tokens []string) { terminatingQuote := rune(0) f := func(c rune) bool { switch { @@ -89,13 +89,13 @@ func (this *AlterTableParser) tokenizeAlterStatement(alterStatement string) (tok return tokens } -func (this *AlterTableParser) 
sanitizeQuotesFromAlterStatement(alterStatement string) (strippedStatement string) { +func (atp *AlterTableParser) sanitizeQuotesFromAlterStatement(alterStatement string) (strippedStatement string) { strippedStatement = alterStatement strippedStatement = sanitizeQuotesRegexp.ReplaceAllString(strippedStatement, "''") return strippedStatement } -func (this *AlterTableParser) parseAlterToken(alterToken string) { +func (atp *AlterTableParser) parseAlterToken(alterToken string) { { // rename allStringSubmatch := renameColumnRegexp.FindAllStringSubmatch(alterToken, -1) @@ -106,7 +106,7 @@ func (this *AlterTableParser) parseAlterToken(alterToken string) { if unquoted, err := strconv.Unquote(submatch[3]); err == nil { submatch[3] = unquoted } - this.columnRenameMap[submatch[2]] = submatch[3] + atp.columnRenameMap[submatch[2]] = submatch[3] } } { @@ -116,51 +116,51 @@ func (this *AlterTableParser) parseAlterToken(alterToken string) { if unquoted, err := strconv.Unquote(submatch[2]); err == nil { submatch[2] = unquoted } - this.droppedColumns[submatch[2]] = true + atp.droppedColumns[submatch[2]] = true } } { // rename table if renameTableRegexp.MatchString(alterToken) { - this.isRenameTable = true + atp.isRenameTable = true } } { // auto_increment if autoIncrementRegexp.MatchString(alterToken) { - this.isAutoIncrementDefined = true + atp.isAutoIncrementDefined = true } } } -func (this *AlterTableParser) ParseAlterStatement(alterStatement string) (err error) { - this.alterStatementOptions = alterStatement +func (atp *AlterTableParser) ParseAlterStatement(alterStatement string) (err error) { + atp.alterStatementOptions = alterStatement for _, alterTableRegexp := range alterTableExplicitSchemaTableRegexps { - if submatch := alterTableRegexp.FindStringSubmatch(this.alterStatementOptions); len(submatch) > 0 { - this.explicitSchema = submatch[1] - this.explicitTable = submatch[2] - this.alterStatementOptions = submatch[3] + if submatch := 
alterTableRegexp.FindStringSubmatch(atp.alterStatementOptions); len(submatch) > 0 { + atp.explicitSchema = submatch[1] + atp.explicitTable = submatch[2] + atp.alterStatementOptions = submatch[3] break } } for _, alterTableRegexp := range alterTableExplicitTableRegexps { - if submatch := alterTableRegexp.FindStringSubmatch(this.alterStatementOptions); len(submatch) > 0 { - this.explicitTable = submatch[1] - this.alterStatementOptions = submatch[2] + if submatch := alterTableRegexp.FindStringSubmatch(atp.alterStatementOptions); len(submatch) > 0 { + atp.explicitTable = submatch[1] + atp.alterStatementOptions = submatch[2] break } } - for _, alterToken := range this.tokenizeAlterStatement(this.alterStatementOptions) { - alterToken = this.sanitizeQuotesFromAlterStatement(alterToken) - this.parseAlterToken(alterToken) - this.alterTokens = append(this.alterTokens, alterToken) + for _, alterToken := range atp.tokenizeAlterStatement(atp.alterStatementOptions) { + alterToken = atp.sanitizeQuotesFromAlterStatement(alterToken) + atp.parseAlterToken(alterToken) + atp.alterTokens = append(atp.alterTokens, alterToken) } return nil } -func (this *AlterTableParser) GetNonTrivialRenames() map[string]string { +func (atp *AlterTableParser) GetNonTrivialRenames() map[string]string { result := make(map[string]string) - for column, renamed := range this.columnRenameMap { + for column, renamed := range atp.columnRenameMap { if column != renamed { result[column] = renamed } @@ -168,40 +168,40 @@ func (this *AlterTableParser) GetNonTrivialRenames() map[string]string { return result } -func (this *AlterTableParser) HasNonTrivialRenames() bool { - return len(this.GetNonTrivialRenames()) > 0 +func (atp *AlterTableParser) HasNonTrivialRenames() bool { + return len(atp.GetNonTrivialRenames()) > 0 } -func (this *AlterTableParser) DroppedColumnsMap() map[string]bool { - return this.droppedColumns +func (atp *AlterTableParser) DroppedColumnsMap() map[string]bool { + return atp.droppedColumns } 
-func (this *AlterTableParser) IsRenameTable() bool { - return this.isRenameTable +func (atp *AlterTableParser) IsRenameTable() bool { + return atp.isRenameTable } -func (this *AlterTableParser) IsAutoIncrementDefined() bool { - return this.isAutoIncrementDefined +func (atp *AlterTableParser) IsAutoIncrementDefined() bool { + return atp.isAutoIncrementDefined } -func (this *AlterTableParser) GetExplicitSchema() string { - return this.explicitSchema +func (atp *AlterTableParser) GetExplicitSchema() string { + return atp.explicitSchema } -func (this *AlterTableParser) HasExplicitSchema() bool { - return this.GetExplicitSchema() != "" +func (atp *AlterTableParser) HasExplicitSchema() bool { + return atp.GetExplicitSchema() != "" } -func (this *AlterTableParser) GetExplicitTable() string { - return this.explicitTable +func (atp *AlterTableParser) GetExplicitTable() string { + return atp.explicitTable } -func (this *AlterTableParser) HasExplicitTable() bool { - return this.GetExplicitTable() != "" +func (atp *AlterTableParser) HasExplicitTable() bool { + return atp.GetExplicitTable() != "" } -func (this *AlterTableParser) GetAlterStatementOptions() string { - return this.alterStatementOptions +func (atp *AlterTableParser) GetAlterStatementOptions() string { + return atp.alterStatementOptions } func ParseEnumValues(enumColumnType string) string { diff --git a/go/sql/types.go b/go/sql/types.go index 1a8f8a2e2..3f7cd1b78 100644 --- a/go/sql/types.go +++ b/go/sql/types.go @@ -57,7 +57,7 @@ type Column struct { MySQLType string } -func (this *Column) convertArg(arg interface{}) interface{} { +func (cl *Column) convertArg(arg interface{}) interface{} { var arg2Bytes []byte if s, ok := arg.(string); ok { arg2Bytes = []byte(s) @@ -68,9 +68,9 @@ func (this *Column) convertArg(arg interface{}) interface{} { } if arg2Bytes != nil { - if this.Charset != "" && this.charsetConversion == nil { + if cl.Charset != "" && cl.charsetConversion == nil { arg = arg2Bytes - } else if 
this.Charset == "" && (strings.Contains(this.MySQLType, "binary") || strings.HasSuffix(this.MySQLType, "blob")) { + } else if cl.Charset == "" && (strings.Contains(cl.MySQLType, "binary") || strings.HasSuffix(cl.MySQLType, "blob")) { // varbinary/binary/blob column: no charset means binary storage. Return []byte so // the MySQL driver sends MYSQL_TYPE_BLOB (binary) rather than MYSQL_TYPE_VAR_STRING // (text with the connection's charset/collation metadata, often utf8mb4), which would @@ -78,17 +78,17 @@ func (this *Column) convertArg(arg interface{}) interface{} { // invalid in that charset. arg = arg2Bytes } else { - if encoding, ok := charsetEncodingMap[this.Charset]; ok { + if encoding, ok := charsetEncodingMap[cl.Charset]; ok { decodedBytes, _ := encoding.NewDecoder().Bytes(arg2Bytes) arg = string(decodedBytes) } } - if this.Type == BinaryColumnType { + if cl.Type == BinaryColumnType { size := len(arg2Bytes) - if uint(size) < this.BinaryOctetLength { + if uint(size) < cl.BinaryOctetLength { buf := bytes.NewBuffer(arg2Bytes) - for i := uint(0); i < (this.BinaryOctetLength - uint(size)); i++ { + for i := uint(0); i < (cl.BinaryOctetLength - uint(size)); i++ { buf.Write([]byte{0}) } arg = buf.Bytes() @@ -98,7 +98,7 @@ func (this *Column) convertArg(arg interface{}) interface{} { return arg } - if this.IsUnsigned { + if cl.IsUnsigned { if i, ok := arg.(int8); ok { return uint8(i) } @@ -106,7 +106,7 @@ func (this *Column) convertArg(arg interface{}) interface{} { return uint16(i) } if i, ok := arg.(int32); ok { - if this.Type == MediumIntColumnType { + if cl.Type == MediumIntColumnType { // problem with mediumint is that it's a 3-byte type. There is no compatible golang type to match that. 
// So to convert from negative to positive we'd need to convert the value manually if i >= 0 { @@ -179,85 +179,85 @@ func ParseColumnList(names string) *ColumnList { return result } -func (this *ColumnList) Columns() []Column { - return this.columns +func (cl *ColumnList) Columns() []Column { + return cl.columns } -func (this *ColumnList) Names() []string { - names := make([]string, len(this.columns)) - for i := range this.columns { - names[i] = this.columns[i].Name +func (cl *ColumnList) Names() []string { + names := make([]string, len(cl.columns)) + for i := range cl.columns { + names[i] = cl.columns[i].Name } return names } -func (this *ColumnList) GetColumn(columnName string) *Column { - if ordinal, ok := this.Ordinals[columnName]; ok { - return &this.columns[ordinal] +func (cl *ColumnList) GetColumn(columnName string) *Column { + if ordinal, ok := cl.Ordinals[columnName]; ok { + return &cl.columns[ordinal] } return nil } -func (this *ColumnList) SetUnsigned(columnName string) { - this.GetColumn(columnName).IsUnsigned = true +func (cl *ColumnList) SetUnsigned(columnName string) { + cl.GetColumn(columnName).IsUnsigned = true } -func (this *ColumnList) IsUnsigned(columnName string) bool { - return this.GetColumn(columnName).IsUnsigned +func (cl *ColumnList) IsUnsigned(columnName string) bool { + return cl.GetColumn(columnName).IsUnsigned } -func (this *ColumnList) SetCharset(columnName string, charset string) { - this.GetColumn(columnName).Charset = charset +func (cl *ColumnList) SetCharset(columnName string, charset string) { + cl.GetColumn(columnName).Charset = charset } -func (this *ColumnList) GetCharset(columnName string) string { - return this.GetColumn(columnName).Charset +func (cl *ColumnList) GetCharset(columnName string) string { + return cl.GetColumn(columnName).Charset } -func (this *ColumnList) SetColumnType(columnName string, columnType ColumnType) { - this.GetColumn(columnName).Type = columnType +func (cl *ColumnList) SetColumnType(columnName 
string, columnType ColumnType) { + cl.GetColumn(columnName).Type = columnType } -func (this *ColumnList) GetColumnType(columnName string) ColumnType { - return this.GetColumn(columnName).Type +func (cl *ColumnList) GetColumnType(columnName string) ColumnType { + return cl.GetColumn(columnName).Type } -func (this *ColumnList) SetConvertDatetimeToTimestamp(columnName string, toTimezone string) { - this.GetColumn(columnName).timezoneConversion = &TimezoneConversion{ToTimezone: toTimezone} +func (cl *ColumnList) SetConvertDatetimeToTimestamp(columnName string, toTimezone string) { + cl.GetColumn(columnName).timezoneConversion = &TimezoneConversion{ToTimezone: toTimezone} } -func (this *ColumnList) HasTimezoneConversion(columnName string) bool { - return this.GetColumn(columnName).timezoneConversion != nil +func (cl *ColumnList) HasTimezoneConversion(columnName string) bool { + return cl.GetColumn(columnName).timezoneConversion != nil } -func (this *ColumnList) SetEnumToTextConversion(columnName string) { - this.GetColumn(columnName).enumToTextConversion = true +func (cl *ColumnList) SetEnumToTextConversion(columnName string) { + cl.GetColumn(columnName).enumToTextConversion = true } -func (this *ColumnList) IsEnumToTextConversion(columnName string) bool { - return this.GetColumn(columnName).enumToTextConversion +func (cl *ColumnList) IsEnumToTextConversion(columnName string) bool { + return cl.GetColumn(columnName).enumToTextConversion } -func (this *ColumnList) SetEnumValues(columnName string, enumValues string) { - this.GetColumn(columnName).EnumValues = enumValues +func (cl *ColumnList) SetEnumValues(columnName string, enumValues string) { + cl.GetColumn(columnName).EnumValues = enumValues } -func (this *ColumnList) String() string { - return strings.Join(this.Names(), ",") +func (cl *ColumnList) String() string { + return strings.Join(cl.Names(), ",") } -func (this *ColumnList) Equals(other *ColumnList) bool { - return reflect.DeepEqual(this.Columns, other.Columns) 
+func (cl *ColumnList) Equals(other *ColumnList) bool { + return reflect.DeepEqual(cl.Columns, other.Columns) } -func (this *ColumnList) EqualsByNames(other *ColumnList) bool { - return reflect.DeepEqual(this.Names(), other.Names()) +func (cl *ColumnList) EqualsByNames(other *ColumnList) bool { + return reflect.DeepEqual(cl.Names(), other.Names()) } -// IsSubsetOf returns 'true' when column names of this list are a subset of +// IsSubsetOf returns 'true' when column names of this list are a subset of // another list, in arbitrary order (order agnostic) -func (this *ColumnList) IsSubsetOf(other *ColumnList) bool { - for _, column := range this.columns { +func (cl *ColumnList) IsSubsetOf(other *ColumnList) bool { + for _, column := range cl.columns { if _, exists := other.Ordinals[column.Name]; !exists { return false } @@ -265,22 +265,22 @@ func (this *ColumnList) IsSubsetOf(other *ColumnList) bool { return true } -func (this *ColumnList) FilterBy(f func(Column) bool) *ColumnList { - filteredCols := make([]Column, 0, len(this.columns)) - for _, column := range this.columns { +func (cl *ColumnList) FilterBy(f func(Column) bool) *ColumnList { + filteredCols := make([]Column, 0, len(cl.columns)) + for _, column := range cl.columns { if f(column) { filteredCols = append(filteredCols, column) } } - return &ColumnList{Ordinals: this.Ordinals, columns: filteredCols} + return &ColumnList{Ordinals: cl.Ordinals, columns: filteredCols} } -func (this *ColumnList) Len() int { - return len(this.columns) +func (cl *ColumnList) Len() int { + return len(cl.columns) } -func (this *ColumnList) SetCharsetConversion(columnName string, fromCharset string, toCharset string) { - this.GetColumn(columnName).charsetConversion = &CharacterSetConversion{FromCharset: fromCharset, 
ToCharset: toCharset} } // UniqueKey is the combination of a key's name and columns @@ -293,20 +293,20 @@ type UniqueKey struct { } // IsPrimary checks if this unique key is primary -func (this *UniqueKey) IsPrimary() bool { - return this.Name == "PRIMARY" +func (uk *UniqueKey) IsPrimary() bool { + return uk.Name == "PRIMARY" } -func (this *UniqueKey) Len() int { - return this.Columns.Len() +func (uk *UniqueKey) Len() int { + return uk.Columns.Len() } -func (this *UniqueKey) String() string { - description := this.Name - if this.IsAutoIncrement { +func (uk *UniqueKey) String() string { + description := uk.Name + if uk.IsAutoIncrement { description = fmt.Sprintf("%s (auto_increment)", description) } - return fmt.Sprintf("%s: %s; has nullable: %+v", description, this.Columns.Names(), this.HasNullable) + return fmt.Sprintf("%s: %s; has nullable: %+v", description, uk.Columns.Names(), uk.HasNullable) } type ColumnValues struct { @@ -338,28 +338,28 @@ func ToColumnValues(abstractValues []interface{}) *ColumnValues { return result } -func (this *ColumnValues) AbstractValues() []interface{} { - return this.abstractValues +func (cv *ColumnValues) AbstractValues() []interface{} { + return cv.abstractValues } -func (this *ColumnValues) StringColumn(index int) string { - val := this.AbstractValues()[index] +func (cv *ColumnValues) StringColumn(index int) string { + val := cv.AbstractValues()[index] if ints, ok := val.([]uint8); ok { return fmt.Sprintf("%x", ints) } return fmt.Sprintf("%+v", val) } -func (this *ColumnValues) String() string { +func (cv *ColumnValues) String() string { stringValues := []string{} - for i := range this.AbstractValues() { - stringValues = append(stringValues, this.StringColumn(i)) + for i := range cv.AbstractValues() { + stringValues = append(stringValues, cv.StringColumn(i)) } return strings.Join(stringValues, ",") } -func (this *ColumnValues) Clone() *ColumnValues { - cv := NewColumnValues(len(this.abstractValues)) - copy(cv.abstractValues, 
this.abstractValues) - return cv +func (cv *ColumnValues) Clone() *ColumnValues { + newCv := NewColumnValues(len(cv.abstractValues)) + copy(newCv.abstractValues, cv.abstractValues) + return newCv } diff --git a/localtests/datetime-to-timestamp-pk-fail/expect_failure b/localtests/datetime-to-timestamp-pk-fail/expect_failure index 98ddf4a0e..404c3449b 100644 --- a/localtests/datetime-to-timestamp-pk-fail/expect_failure +++ b/localtests/datetime-to-timestamp-pk-fail/expect_failure @@ -1 +1 @@ -No support at this time for converting a column from DATETIME to TIMESTAMP that is also part of the chosen unique key +no support at this time for converting a column from DATETIME to TIMESTAMP that is also part of the chosen unique key diff --git a/localtests/fail-drop-pk/expect_failure b/localtests/fail-drop-pk/expect_failure index 021ae87d5..21e0692f7 100644 --- a/localtests/fail-drop-pk/expect_failure +++ b/localtests/fail-drop-pk/expect_failure @@ -1 +1 @@ -No PRIMARY nor UNIQUE key found in table +no PRIMARY nor UNIQUE key found in table diff --git a/localtests/fail-existing-datetime-with-zero/expect_failure b/localtests/fail-existing-datetime-with-zero/expect_failure index 79356a144..f3a34bd28 100644 --- a/localtests/fail-existing-datetime-with-zero/expect_failure +++ b/localtests/fail-existing-datetime-with-zero/expect_failure @@ -1 +1 @@ -Invalid default value for 'dt' +FATAL Error 1067 (42000): Invalid default value for 'dt' diff --git a/localtests/fail-float-unique-key/expect_failure b/localtests/fail-float-unique-key/expect_failure index 4373a4ed8..b8eefdb82 100644 --- a/localtests/fail-float-unique-key/expect_failure +++ b/localtests/fail-float-unique-key/expect_failure @@ -1 +1 @@ -No shared unique key can be found +no shared unique key can be found diff --git a/localtests/fail-no-shared-uk/expect_failure b/localtests/fail-no-shared-uk/expect_failure index d3ef0f159..605ff31a0 100644 --- a/localtests/fail-no-shared-uk/expect_failure +++ 
b/localtests/fail-no-shared-uk/expect_failure @@ -1 +1 @@ -No shared unique key can be found after ALTER +no shared unique key can be found after ALTER diff --git a/localtests/fail-no-unique-key/expect_failure b/localtests/fail-no-unique-key/expect_failure index 021ae87d5..21e0692f7 100644 --- a/localtests/fail-no-unique-key/expect_failure +++ b/localtests/fail-no-unique-key/expect_failure @@ -1 +1 @@ -No PRIMARY nor UNIQUE key found in table +no PRIMARY nor UNIQUE key found in table diff --git a/localtests/fail-rename-table/expect_failure b/localtests/fail-rename-table/expect_failure index e444c172e..15c8dec91 100644 --- a/localtests/fail-rename-table/expect_failure +++ b/localtests/fail-rename-table/expect_failure @@ -1 +1 @@ -ALTER statement seems to RENAME the table +alter statement seems to RENAME the table diff --git a/localtests/modify-change-case-pk/expect_failure b/localtests/modify-change-case-pk/expect_failure index d3ef0f159..605ff31a0 100644 --- a/localtests/modify-change-case-pk/expect_failure +++ b/localtests/modify-change-case-pk/expect_failure @@ -1 +1 @@ -No shared unique key can be found after ALTER +no shared unique key can be found after ALTER diff --git a/localtests/trigger-ghost-name-conflict/expect_failure b/localtests/trigger-ghost-name-conflict/expect_failure index 1a4c997f3..01c1df4bf 100644 --- a/localtests/trigger-ghost-name-conflict/expect_failure +++ b/localtests/trigger-ghost-name-conflict/expect_failure @@ -1 +1 @@ -Found gh-ost triggers \ No newline at end of file +found gh-ost triggers