From 198cc5882d16a1c1d3715160584bb27694b2ad34 Mon Sep 17 00:00:00 2001 From: wk989898 Date: Tue, 21 Apr 2026 03:13:03 +0000 Subject: [PATCH 1/7] init Signed-off-by: wk989898 --- tools/workload/app.go | 4 + tools/workload/config.go | 2 +- tools/workload/ddl_runner.go | 38 ++- .../examples/ddl_partition_table_mixed.toml | 10 + .../examples/ddl_truncate_table_mixed.toml | 17 + tools/workload/readme.md | 55 +++- tools/workload/schema/bank4/bank.go | 308 ++++++++++++++++++ 7 files changed, 421 insertions(+), 13 deletions(-) create mode 100644 tools/workload/examples/ddl_partition_table_mixed.toml create mode 100644 tools/workload/examples/ddl_truncate_table_mixed.toml create mode 100644 tools/workload/schema/bank4/bank.go diff --git a/tools/workload/app.go b/tools/workload/app.go index e70f48e024..49bc36f915 100644 --- a/tools/workload/app.go +++ b/tools/workload/app.go @@ -30,6 +30,7 @@ import ( pbank "workload/schema/bank" pbank2 "workload/schema/bank2" pbank3 "workload/schema/bank3" + pbank4 "workload/schema/bank4" "workload/schema/bankupdate" pcrawler "workload/schema/crawler" pdc "workload/schema/dc" @@ -81,6 +82,7 @@ const ( crawler = "crawler" bank2 = "bank2" bank3 = "bank3" + bank4 = "bank4" bankUpdate = "bank_update" dc = "dc" wideTableWithJSON = "wide_table_with_json" @@ -140,6 +142,8 @@ func (app *WorkloadApp) createWorkload() schema.Workload { workload = pbank2.NewBank2Workload() case bank3: workload = pbank3.NewBankWorkload(app.Config.Partitioned) + case bank4: + workload = pbank4.NewBankWorkload(app.Config.Partitioned) case bankUpdate: workload = bankupdate.NewBankUpdateWorkload(app.Config.TotalRowCount, app.Config.UpdateLargeColumnSize) case dc: diff --git a/tools/workload/config.go b/tools/workload/config.go index 76af65d6c8..dd7fb7cd4f 100644 --- a/tools/workload/config.go +++ b/tools/workload/config.go @@ -140,7 +140,7 @@ func (c *WorkloadConfig) ParseFlags() error { flag.Float64Var(&c.PercentageForDelete, "percentage-for-delete", 
c.PercentageForDelete, "percentage for delete: [0, 1.0]") flag.BoolVar(&c.SkipCreateTable, "skip-create-table", c.SkipCreateTable, "do not create tables") flag.StringVar(&c.Action, "action", c.Action, "action of the workload: [prepare, insert, update, delete, write, ddl, cleanup]") - flag.StringVar(&c.WorkloadType, "workload-type", c.WorkloadType, "workload type: [bank, sysbench, large_row, shop_item, uuu, bank2, bank3, bank_update, crawler, dc, wide_table_with_json]") + flag.StringVar(&c.WorkloadType, "workload-type", c.WorkloadType, "workload type: [bank, sysbench, large_row, shop_item, uuu, bank2, bank3, bank4, bank_update, crawler, dc, wide_table_with_json]") flag.StringVar(&c.DBHost, "database-host", c.DBHost, "database host") flag.StringVar(&c.DBUser, "database-user", c.DBUser, "database user") flag.StringVar(&c.DBPassword, "database-password", c.DBPassword, "database password") diff --git a/tools/workload/ddl_runner.go b/tools/workload/ddl_runner.go index 5cdcf99d2b..9d1844f474 100644 --- a/tools/workload/ddl_runner.go +++ b/tools/workload/ddl_runner.go @@ -112,24 +112,40 @@ func (r *DDLRunner) startTypeScheduler(ddlType DDLType, perMinute int) { return } + interval := schedulerInterval(perMinute) go func() { - ticker := time.NewTicker(time.Minute) + r.enqueueTask(ddlType) + + ticker := time.NewTicker(interval) defer ticker.Stop() - for { - for i := 0; i < perMinute; i++ { - table, ok := r.selector.Next() - if !ok { - r.app.Stats.DDLSkipped.Add(1) - continue - } - r.taskCh <- DDLTask{Type: ddlType, Table: table} - } - <-ticker.C + for range ticker.C { + r.enqueueTask(ddlType) } }() } +func schedulerInterval(perMinute int) time.Duration { + if perMinute <= 0 { + return 0 + } + + interval := time.Minute / time.Duration(perMinute) + if interval <= 0 { + return time.Nanosecond + } + return interval +} + +func (r *DDLRunner) enqueueTask(ddlType DDLType) { + table, ok := r.selector.Next() + if !ok { + r.app.Stats.DDLSkipped.Add(1) + return + } + r.taskCh <- 
DDLTask{Type: ddlType, Table: table} +} + func (r *DDLRunner) startRandomTableRefresh() { go func() { ticker := time.NewTicker(time.Minute) diff --git a/tools/workload/examples/ddl_partition_table_mixed.toml b/tools/workload/examples/ddl_partition_table_mixed.toml new file mode 100644 index 0000000000..33b6be4d17 --- /dev/null +++ b/tools/workload/examples/ddl_partition_table_mixed.toml @@ -0,0 +1,10 @@ +# Use this with a dedicated database that only contains partitioned bank4 tables. +# bank4 creates 126 monthly partitions per table (2021-07 through 2031-12). +mode = "random" + +[rate_per_minute] +truncate_table = 1 +add_column = 6 +drop_column = 6 +add_index = 3 +drop_index = 3 diff --git a/tools/workload/examples/ddl_truncate_table_mixed.toml b/tools/workload/examples/ddl_truncate_table_mixed.toml new file mode 100644 index 0000000000..6c447d1e5a --- /dev/null +++ b/tools/workload/examples/ddl_truncate_table_mixed.toml @@ -0,0 +1,17 @@ +# Periodically run TRUNCATE TABLE while other DDLs keep changing schema. +# The scheduler spreads each rate evenly over a minute. +mode = "fixed" + +tables = [ + "test.sbtest1", + "test.sbtest2", + "test.sbtest3", + "test.sbtest4", +] + +[rate_per_minute] +truncate_table = 1 +add_column = 6 +drop_column = 6 +add_index = 3 +drop_index = 3 diff --git a/tools/workload/readme.md b/tools/workload/readme.md index 815a7689aa..78c9b7746f 100644 --- a/tools/workload/readme.md +++ b/tools/workload/readme.md @@ -31,6 +31,9 @@ Run DDL workload based on a TOML config file: -ddl-timeout 2m ``` +Each DDL type is scheduled evenly across the minute instead of being burst-enqueued at the minute boundary. +For example, `truncate_table = 1` runs about once every 60s, and `add_column = 6` runs about once every 10s. 
+ `ddl.toml` example (fixed mode): ```toml @@ -62,6 +65,55 @@ drop_index = 5 truncate_table = 0 ``` +Prebuilt examples: + +- `examples/ddl_truncate_table_mixed.toml`: periodically runs `TRUNCATE TABLE` while add/drop column and add/drop index continue in parallel. +- `examples/ddl_partition_table_mixed.toml`: targets partitioned `bank4` tables and mixes `TRUNCATE TABLE`, add/drop column, and add/drop index. + +Truncate-table mixed DDL example: + +```bash +./bin/workload -action write \ + -database-host 127.0.0.1 \ + -database-port 4000 \ + -database-db-name test \ + -workload-type sysbench \ + -table-count 4 \ + -thread 32 \ + -batch-size 64 \ + -ddl-config ./examples/ddl_truncate_table_mixed.toml \ + -ddl-worker 1 \ + -ddl-timeout 2m +``` + +Partition-table mixed DDL example (prepare 126-partition `bank4` tables first): + +```bash +./bin/workload -action prepare \ + -database-host 127.0.0.1 \ + -database-port 4000 \ + -database-db-name partition_ddl \ + -workload-type bank4 \ + -partitioned=true \ + -table-count 4 \ + -total-row-count 0 + +./bin/workload -action write \ + -database-host 127.0.0.1 \ + -database-port 4000 \ + -database-db-name partition_ddl \ + -workload-type bank4 \ + -partitioned=true \ + -table-count 4 \ + -thread 16 \ + -batch-size 64 \ + -percentage-for-update 0.5 \ + -percentage-for-delete 0.1 \ + -ddl-config ./examples/ddl_partition_table_mixed.toml \ + -ddl-worker 1 \ + -ddl-timeout 2m +``` + ### 1. Sysbench-style Data Insertion Insert test data using sysbench-compatible schema: @@ -201,5 +253,6 @@ Generate writes for `wide_table_with_json_primary` and `wide_table_with_json_sec - Adjust the thread and batch-size parameters based on your needs. - Use `-batch-in-txn` to wrap each batch in a single explicit transaction (BEGIN/COMMIT). - `wide_table_with_json` always generates JSON-like payload data. -- For workloads that support partitioned tables (e.g. bank3), set `-partitioned=false` to create non-partitioned tables. 
+- For workloads that support partitioned tables (e.g. bank3, bank4), set `-partitioned=false` to create non-partitioned tables. +- `bank4` partitioned mode creates 126 monthly partitions per table, which is suitable for partition-heavy DDL stress. - `-bank3-partitioned` is deprecated; use `-partitioned`. diff --git a/tools/workload/schema/bank4/bank.go b/tools/workload/schema/bank4/bank.go new file mode 100644 index 0000000000..3de2edeecf --- /dev/null +++ b/tools/workload/schema/bank4/bank.go @@ -0,0 +1,308 @@ +// Copyright 2026 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bank4 + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + + "workload/schema" +) + +const createBankTableBase = ` +create table if not exists bank4_%d ( + col1 varchar(3) DEFAULT NULL, + col2 varchar(2) DEFAULT NULL, + col3 varchar(180) NOT NULL, + col4 datetime NOT NULL, + col5 varchar(2) DEFAULT NULL, + col6 varchar(90) DEFAULT NULL, + col7 varchar(2) DEFAULT NULL, + col8 varchar(60) DEFAULT NULL, + col9 varchar(14) DEFAULT NULL, + col10 decimal(2,0) DEFAULT NULL, + col11 decimal(4,0) DEFAULT NULL, + col12 varchar(60) DEFAULT NULL, + col13 decimal(2,0) DEFAULT NULL, + col14 varchar(18) DEFAULT NULL, + col15 varchar(14) DEFAULT NULL, + col16 varchar(20) DEFAULT NULL, + col17 varchar(180) DEFAULT NULL, + col18 varchar(1) DEFAULT NULL, + col19 varchar(1) DEFAULT NULL, + col20 varchar(1) DEFAULT NULL, + col21 varchar(80) DEFAULT NULL, + col22 varchar(4) DEFAULT NULL, + col23 decimal(15,0) DEFAULT NULL, + col24 varchar(5) DEFAULT NULL, + col25 varchar(26) DEFAULT NULL, + col26 varchar(2) DEFAULT NULL, + col27 datetime DEFAULT NULL, + col28 decimal(3,0) DEFAULT NULL, + col29 decimal(3,0) DEFAULT NULL, + col30 decimal(3,0) DEFAULT NULL, + auto_id bigint(20) NOT NULL AUTO_INCREMENT, + KEY idx1 (col14), + KEY idx2 (col27,col24,col22), + KEY idx3 (col3,col24,col4), + KEY idx4 (col21,col23), + KEY idx5 (col15), + PRIMARY KEY (auto_id,col3,col4) /*T![clustered_index] NONCLUSTERED */, + KEY idx6 (col3) +) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +` + +const ( + bankPartitionStartYear = 2021 + bankPartitionStartMonth = 7 + bankPartitionEndYear = 2031 + bankPartitionEndMonth = 12 +) + +var createBankTablePartition = buildCreateBankTablePartition() + +type BankWorkload struct { + isPartitioned bool +} + +func NewBankWorkload(isPartitioned bool) schema.Workload { + return &BankWorkload{isPartitioned: isPartitioned} +} + +func (c *BankWorkload) BuildCreateTableStatement(n int) string { + createSQL := fmt.Sprintf(createBankTableBase, n) + if c.isPartitioned { + 
return createSQL + createBankTablePartition + ";" + } + return createSQL + ";" +} + +func (c *BankWorkload) BuildInsertSql(tableN int, batchSize int) string { + if batchSize <= 0 { + return "" + } + + tableName := getBankTableName(tableN) + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf( + "insert into %s (col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11,col12,col13,col14,col15,col16,col17,col18,col19,col20,col21,col22,col23,col24,col25,col26,col27,col28,col29,col30) values", + tableName, + )) + + for i := 0; i < batchSize; i++ { + if i > 0 { + buf.WriteString(",") + } + + n := rand.Int63() + col1Value := "A01" + col2Value := "B2" + col3Value := fmt.Sprintf("acct-%d", n) + col4Value := randomBankDatetime() + + col5Value := fmt.Sprintf("%02d", rand.Intn(100)) + col6Value := fmt.Sprintf("desc-%d", n%1000000) + col7Value := fmt.Sprintf("%02d", rand.Intn(100)) + col8Value := fmt.Sprintf("branch-%d", n%1000000) + col9Value := fmt.Sprintf("%014d", n%100000000000000) + col10Value := rand.Intn(100) + col11Value := rand.Intn(10000) + col12Value := fmt.Sprintf("note-%d", n%1000000) + col13Value := rand.Intn(100) + col14Value := "acct" + col15Value := fmt.Sprintf("i%013d", n%10000000000000) + col16Value := fmt.Sprintf("type-%d", n%100000) + col17Value := fmt.Sprintf("memo-%d", n) + col18Value := fmt.Sprintf("%d", rand.Intn(2)) + col19Value := fmt.Sprintf("%d", rand.Intn(2)) + col20Value := fmt.Sprintf("%d", rand.Intn(2)) + col21Value := fmt.Sprintf("customer-%d", n%1000000) + col22Value := "A001" + col23Value := n % 1000000000000000 + col24Value := "B0001" + col25Value := fmt.Sprintf("ref-%d", n%1000000000000000000) + col26Value := "E1" + col27Value := randomBankDatetime() + col28Value := rand.Intn(1000) + col29Value := rand.Intn(1000) + col30Value := rand.Intn(1000) + + buf.WriteString(fmt.Sprintf( + "('%s','%s','%s','%s','%s','%s','%s','%s','%s',%d,%d,'%s',%d,'%s','%s','%s','%s','%s','%s','%s','%s','%s',%d,'%s','%s','%s','%s',%d,%d,%d)", + col1Value, + 
col2Value, + col3Value, + col4Value, + col5Value, + col6Value, + col7Value, + col8Value, + col9Value, + col10Value, + col11Value, + col12Value, + col13Value, + col14Value, + col15Value, + col16Value, + col17Value, + col18Value, + col19Value, + col20Value, + col21Value, + col22Value, + col23Value, + col24Value, + col25Value, + col26Value, + col27Value, + col28Value, + col29Value, + col30Value, + )) + } + return buf.String() +} + +func (c *BankWorkload) BuildUpdateSql(opts schema.UpdateOption) string { + if opts.Batch <= 0 { + return "" + } + + tableName := getBankTableName(opts.TableIndex) + startTime, endTime := randomBankMonthRange() + newCol30 := rand.Intn(1000) + newCol15 := fmt.Sprintf("u%013d", rand.Int63n(10000000000000)) + newCol27 := randomBankDatetime() + + return fmt.Sprintf( + `UPDATE %[1]s +SET col30 = %[2]d, col15 = '%[3]s', col27 = '%[4]s' +WHERE auto_id IN ( + SELECT auto_id FROM ( + SELECT auto_id FROM %[1]s + WHERE col4 >= '%[5]s' AND col4 < '%[6]s' + ORDER BY auto_id DESC LIMIT %[7]d + ) t +)`, + tableName, + newCol30, + newCol15, + newCol27, + startTime, + endTime, + opts.Batch, + ) +} + +func (c *BankWorkload) BuildDeleteSql(opts schema.DeleteOption) string { + if opts.Batch <= 0 { + return "" + } + + deleteType := rand.Intn(3) + tableName := getBankTableName(opts.TableIndex) + + switch deleteType { + case 0: + return fmt.Sprintf( + `DELETE FROM %[1]s +WHERE auto_id IN ( + SELECT auto_id FROM ( + SELECT auto_id FROM %[1]s ORDER BY auto_id DESC LIMIT %[2]d + ) t +)`, + tableName, opts.Batch, + ) + case 1: + startTime, endTime := randomBankMonthRange() + return fmt.Sprintf( + "DELETE FROM %s WHERE col4 >= '%s' AND col4 < '%s' LIMIT %d", + tableName, startTime, endTime, opts.Batch, + ) + case 2: + return fmt.Sprintf( + "DELETE FROM %s WHERE col14 = 'acct' AND col22 = 'A001' LIMIT %d", + tableName, opts.Batch, + ) + default: + return "" + } +} + +func (c *BankWorkload) BuildDDLSql(opts schema.DDLOption) string { + tableName := 
getBankTableName(opts.TableIndex) + return fmt.Sprintf("truncate table %s;", tableName) +} + +func getBankTableName(n int) string { + return fmt.Sprintf("bank4_%d", n) +} + +func buildCreateBankTablePartition() string { + startTotal, endTotal := bankPartitionMonthRange() + + var builder strings.Builder + builder.WriteString("PARTITION BY RANGE COLUMNS(col4)\n(") + for total := startTotal; total <= endTotal; total++ { + year, month := monthIndexToYearMonth(total) + nextYear, nextMonth := monthIndexToYearMonth(total + 1) + if total > startTotal { + builder.WriteString(",\n ") + } + builder.WriteString(fmt.Sprintf( + "PARTITION p_%04d%02d VALUES LESS THAN ('%04d-%02d-01')", + year, month, nextYear, nextMonth, + )) + } + builder.WriteString(")") + return builder.String() +} + +func bankPartitionMonthRange() (startTotal int, endTotal int) { + startTotal = bankPartitionStartYear*12 + (bankPartitionStartMonth - 1) + endTotal = bankPartitionEndYear*12 + (bankPartitionEndMonth - 1) + return startTotal, endTotal +} + +func monthIndexToYearMonth(total int) (year int, month int) { + return total / 12, total%12 + 1 +} + +func randomBankDatetime() string { + startTotal, endTotal := bankPartitionMonthRange() + + total := startTotal + rand.Intn(endTotal-startTotal+1) + year, month := monthIndexToYearMonth(total) + + day := rand.Intn(28) + 1 + hour := rand.Intn(24) + minute := rand.Intn(60) + second := rand.Intn(60) + return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second) +} + +func randomBankMonthRange() (start string, end string) { + startTotal, endTotal := bankPartitionMonthRange() + + total := startTotal + rand.Intn(endTotal-startTotal+1) + year, month := monthIndexToYearMonth(total) + start = fmt.Sprintf("%04d-%02d-01 00:00:00", year, month) + + endYearValue, endMonthValue := monthIndexToYearMonth(total + 1) + end = fmt.Sprintf("%04d-%02d-01 00:00:00", endYearValue, endMonthValue) + return start, end +} From 
7c1cac56a8a7ee5928bb5e49cac1b5b67cedff8b Mon Sep 17 00:00:00 2001 From: wk989898 Date: Tue, 21 Apr 2026 03:35:06 +0000 Subject: [PATCH 2/7] update Signed-off-by: wk989898 --- tools/workload/app.go | 4 + tools/workload/config.go | 2 +- tools/workload/readme.md | 19 +- .../schema/table_info_sharing/readme.md | 33 ++ .../table_info_sharing/table_info_sharing.go | 448 ++++++++++++++++++ 5 files changed, 504 insertions(+), 2 deletions(-) create mode 100644 tools/workload/schema/table_info_sharing/readme.md create mode 100644 tools/workload/schema/table_info_sharing/table_info_sharing.go diff --git a/tools/workload/app.go b/tools/workload/app.go index 49bc36f915..e7232766ba 100644 --- a/tools/workload/app.go +++ b/tools/workload/app.go @@ -37,6 +37,7 @@ import ( "workload/schema/largerow" "workload/schema/shop" psysbench "workload/schema/sysbench" + ptableinfosharing "workload/schema/table_info_sharing" puuu "workload/schema/uuu" pwidetablewithjson "workload/schema/wide_table_with_json" ) @@ -85,6 +86,7 @@ const ( bank4 = "bank4" bankUpdate = "bank_update" dc = "dc" + tableInfoSharing = "table_info_sharing" wideTableWithJSON = "wide_table_with_json" ) @@ -148,6 +150,8 @@ func (app *WorkloadApp) createWorkload() schema.Workload { workload = bankupdate.NewBankUpdateWorkload(app.Config.TotalRowCount, app.Config.UpdateLargeColumnSize) case dc: workload = pdc.NewDCWorkload() + case tableInfoSharing: + workload = ptableinfosharing.NewTableInfoSharingWorkload(app.Config.TableCount, app.Config.TableStartIndex) case wideTableWithJSON: workload = pwidetablewithjson.NewWideTableWithJSONWorkload(app.Config.RowSize, app.Config.TableCount, app.Config.TableStartIndex, app.Config.TotalRowCount) default: diff --git a/tools/workload/config.go b/tools/workload/config.go index dd7fb7cd4f..b809fe244c 100644 --- a/tools/workload/config.go +++ b/tools/workload/config.go @@ -140,7 +140,7 @@ func (c *WorkloadConfig) ParseFlags() error { flag.Float64Var(&c.PercentageForDelete, 
"percentage-for-delete", c.PercentageForDelete, "percentage for delete: [0, 1.0]") flag.BoolVar(&c.SkipCreateTable, "skip-create-table", c.SkipCreateTable, "do not create tables") flag.StringVar(&c.Action, "action", c.Action, "action of the workload: [prepare, insert, update, delete, write, ddl, cleanup]") - flag.StringVar(&c.WorkloadType, "workload-type", c.WorkloadType, "workload type: [bank, sysbench, large_row, shop_item, uuu, bank2, bank3, bank4, bank_update, crawler, dc, wide_table_with_json]") + flag.StringVar(&c.WorkloadType, "workload-type", c.WorkloadType, "workload type: [bank, sysbench, large_row, shop_item, uuu, bank2, bank3, bank4, bank_update, crawler, dc, table_info_sharing, wide_table_with_json]") flag.StringVar(&c.DBHost, "database-host", c.DBHost, "database host") flag.StringVar(&c.DBUser, "database-user", c.DBUser, "database user") flag.StringVar(&c.DBPassword, "database-password", c.DBPassword, "database password") diff --git a/tools/workload/readme.md b/tools/workload/readme.md index 78c9b7746f..b54950a6a7 100644 --- a/tools/workload/readme.md +++ b/tools/workload/readme.md @@ -228,7 +228,24 @@ Run insert and update concurrently, and execute DDL in parallel: -ddl-timeout 2m ``` -### 7. Wide Table With JSON Workload +### 7. Table Info Sharing Workload + +Generate multiple tables with the same column layout and index layout, while making selected default values differ by table index. This workload covers a broad set of column types including numeric, bit, string, binary, temporal, enum/set, and JSON. + +```bash +./workload -action write \ + -database-host 127.0.0.1 \ + -database-port 4000 \ + -database-db-name table_info \ + -table-count 16 \ + -workload-type table_info_sharing \ + -thread 32 \ + -batch-size 32 \ + -percentage-for-update 0.5 \ + -percentage-for-delete 0.1 +``` + +### 8. Wide Table With JSON Workload Generate writes for `wide_table_with_json_primary` and `wide_table_with_json_secondary` (two tables per shard). 
Use `-row-size` to control payload width and `-table-count` to control shard count. diff --git a/tools/workload/schema/table_info_sharing/readme.md b/tools/workload/schema/table_info_sharing/readme.md new file mode 100644 index 0000000000..adc1a99d90 --- /dev/null +++ b/tools/workload/schema/table_info_sharing/readme.md @@ -0,0 +1,33 @@ +# `table_info_sharing` workload + +This workload creates many tables with the same column layout and index layout, while making selected default values differ by table index. + +It is intended for cases that want to stress table-info reuse/sharing with: + +- Multiple tables that look structurally the same +- A wide mix of column types +- Different default values on selected columns across tables + +Covered column families include: + +- Integer types: `TINYINT`, `SMALLINT`, `MEDIUMINT`, `INT`, `BIGINT`, `BIGINT UNSIGNED` +- Numeric types: `DECIMAL`, `FLOAT`, `DOUBLE` +- Logical/bit types: `BOOLEAN`, `BIT` +- String/binary types: `CHAR`, `VARCHAR`, `TEXT`, `BLOB`, `BINARY`, `VARBINARY` +- Temporal types: `DATE`, `DATETIME`, `TIMESTAMP`, `TIME`, `YEAR` +- Collection/document types: `ENUM`, `SET`, `JSON` + +Example: + +```bash +./workload -action write \ + -database-host 127.0.0.1 \ + -database-port 4000 \ + -database-db-name test \ + -workload-type table_info_sharing \ + -table-count 16 \ + -thread 32 \ + -batch-size 32 \ + -percentage-for-update 0.5 \ + -percentage-for-delete 0.1 +``` diff --git a/tools/workload/schema/table_info_sharing/table_info_sharing.go b/tools/workload/schema/table_info_sharing/table_info_sharing.go new file mode 100644 index 0000000000..3475abdc12 --- /dev/null +++ b/tools/workload/schema/table_info_sharing/table_info_sharing.go @@ -0,0 +1,448 @@ +// Copyright 2026 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package tableinfosharing + +import ( + "bytes" + "encoding/hex" + "fmt" + "math/rand" + "strings" + "sync/atomic" + "time" + + "workload/schema" +) + +const createTableFormat = ` +CREATE TABLE IF NOT EXISTS %s ( + id BIGINT NOT NULL, + c_tinyint TINYINT NOT NULL DEFAULT %d, + c_smallint SMALLINT NOT NULL DEFAULT %d, + c_mediumint MEDIUMINT NOT NULL DEFAULT %d, + c_int INT NOT NULL DEFAULT %d, + c_bigint BIGINT NOT NULL DEFAULT %d, + c_unsigned BIGINT UNSIGNED NOT NULL DEFAULT %d, + c_decimal DECIMAL(20,6) NOT NULL DEFAULT %s, + c_float FLOAT NOT NULL DEFAULT %s, + c_double DOUBLE NOT NULL DEFAULT %s, + c_bool BOOLEAN NOT NULL DEFAULT %d, + c_bit BIT(8) NOT NULL DEFAULT b'%s', + c_char CHAR(8) NOT NULL DEFAULT '%s', + c_varchar VARCHAR(64) NOT NULL DEFAULT '%s', + c_date DATE NOT NULL DEFAULT '%s', + c_datetime DATETIME NOT NULL DEFAULT '%s', + c_timestamp TIMESTAMP NOT NULL DEFAULT '%s', + c_time TIME NOT NULL DEFAULT '%s', + c_year YEAR NOT NULL DEFAULT %d, + c_enum ENUM('red','green','blue') NOT NULL DEFAULT '%s', + c_set SET('x','y','z') NOT NULL DEFAULT '%s', + c_json JSON NULL, + c_text TEXT NULL, + c_blob BLOB NULL, + c_binary BINARY(8) NULL, + c_varbinary VARBINARY(16) NULL, + uk_token VARCHAR(64) NOT NULL, + PRIMARY KEY (id), + UNIQUE KEY uk_token (uk_token), + KEY idx_temporal (c_date, c_datetime), + KEY idx_enum_set (c_enum, c_set) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +` + +const ( + defaultRecentRowWindow = 256 +) + +type tableSpec struct { + tableIndex int +} + +type TableInfoSharingWorkload struct { + tableStartIndex int + seq []atomic.Uint64 +} + +func 
NewTableInfoSharingWorkload(tableCount int, tableStartIndex int) schema.Workload { + if tableCount <= 0 { + tableCount = 1 + } + return &TableInfoSharingWorkload{ + tableStartIndex: tableStartIndex, + seq: make([]atomic.Uint64, tableCount), + } +} + +func (w *TableInfoSharingWorkload) BuildCreateTableStatement(n int) string { + spec := newTableSpec(n) + return fmt.Sprintf( + createTableFormat, + tableName(n), + spec.tinyintDefault(), + spec.smallintDefault(), + spec.mediumintDefault(), + spec.intDefault(), + spec.bigintDefault(), + spec.unsignedDefault(), + spec.decimalDefault(), + spec.floatDefault(), + spec.doubleDefault(), + spec.boolDefault(), + spec.bitDefault(), + spec.charDefault(), + spec.varcharDefault(), + spec.dateDefault(), + spec.datetimeDefault(), + spec.timestampDefault(), + spec.timeDefault(), + spec.yearDefault(), + spec.enumDefault(), + spec.setDefault(), + ) +} + +func (w *TableInfoSharingWorkload) BuildInsertSql(tableN int, batchSize int) string { + if batchSize <= 0 { + return "" + } + + startID, ok := w.allocateIDs(tableN, batchSize) + if !ok { + return "" + } + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf( + "INSERT INTO %s (id, uk_token, c_json, c_text, c_blob, c_binary, c_varbinary) VALUES ", + tableName(tableN), + )) + + for i := 0; i < batchSize; i++ { + if i > 0 { + buf.WriteString(",") + } + + rowID := startID + uint64(i) + jsonValue := fmt.Sprintf(`{"table":"%s","mode":"insert","id":%d}`, tableName(tableN), rowID) + textValue := fmt.Sprintf("text-%02d-%06d", tableN, rowID) + blobValue := []byte(fmt.Sprintf("blob-%02d-%06d", tableN, rowID)) + binaryValue := []byte(fmt.Sprintf("%08d", rowID%100000000)) + varbinaryValue := []byte(fmt.Sprintf("vb-%02d-%06d", tableN, rowID)) + + buf.WriteString(fmt.Sprintf( + "(%d,'%s',CAST('%s' AS JSON),'%s',x'%s',x'%s',x'%s')", + rowID, + quoteString(uniqueToken(tableN, rowID)), + quoteString(jsonValue), + quoteString(textValue), + hex.EncodeToString(blobValue), + hex.EncodeToString(binaryValue), 
+ hex.EncodeToString(varbinaryValue), + )) + } + return buf.String() +} + +func (w *TableInfoSharingWorkload) BuildUpdateSql(opts schema.UpdateOption) string { + if opts.Batch <= 0 { + return "" + } + + current, ok := w.currentMaxID(opts.TableIndex) + if !ok || current == 0 { + return "" + } + + spec := newTableSpec(opts.TableIndex) + startID, endID := recentRowRange(current, opts.Batch) + marker := time.Now().UnixNano() + + return fmt.Sprintf( + `UPDATE %s +SET c_int = c_int + 17, + c_decimal = c_decimal + 1.250000, + c_bool = 1 - c_bool, + c_bit = b'%s', + c_varchar = '%s', + c_date = '%s', + c_datetime = '%s', + c_timestamp = '%s', + c_time = '%s', + c_year = %d, + c_enum = '%s', + c_set = '%s', + c_json = CAST('%s' AS JSON), + c_text = '%s', + c_blob = x'%s', + c_binary = x'%s', + c_varbinary = x'%s' +WHERE id BETWEEN %d AND %d`, + tableName(opts.TableIndex), + spec.updateBitValue(marker), + quoteString(fmt.Sprintf("updated-%02d-%d", opts.TableIndex, marker)), + spec.updateDateValue(), + spec.updateDatetimeValue(), + spec.updateTimestampValue(), + spec.updateTimeValue(), + spec.updateYearValue(), + spec.updateEnumValue(), + spec.updateSetValue(), + quoteString(fmt.Sprintf(`{"table":"%s","mode":"update","marker":%d}`, tableName(opts.TableIndex), marker)), + quoteString(fmt.Sprintf("updated-text-%02d-%d", opts.TableIndex, marker)), + hex.EncodeToString([]byte(fmt.Sprintf("updated-blob-%02d-%d", opts.TableIndex, marker))), + hex.EncodeToString([]byte(fmt.Sprintf("%08d", marker%100000000))), + hex.EncodeToString([]byte(fmt.Sprintf("updated-vb-%02d", opts.TableIndex))), + startID, + endID, + ) +} + +func (w *TableInfoSharingWorkload) BuildDeleteSql(opts schema.DeleteOption) string { + if opts.Batch <= 0 { + return "" + } + + current, ok := w.currentMaxID(opts.TableIndex) + if !ok || current == 0 { + return "" + } + + startID, endID := recentRowRange(current, opts.Batch) + return fmt.Sprintf( + "DELETE FROM %s WHERE id BETWEEN %d AND %d LIMIT %d", + 
tableName(opts.TableIndex), + startID, + endID, + opts.Batch, + ) +} + +func (w *TableInfoSharingWorkload) allocateIDs(tableN int, batchSize int) (uint64, bool) { + slot, ok := w.slot(tableN) + if !ok { + return 0, false + } + next := w.seq[slot].Add(uint64(batchSize)) + return next - uint64(batchSize) + 1, true +} + +func (w *TableInfoSharingWorkload) currentMaxID(tableN int) (uint64, bool) { + slot, ok := w.slot(tableN) + if !ok { + return 0, false + } + return w.seq[slot].Load(), true +} + +func (w *TableInfoSharingWorkload) slot(tableN int) (int, bool) { + slot := tableN - w.tableStartIndex + if slot < 0 || slot >= len(w.seq) { + return 0, false + } + return slot, true +} + +func newTableSpec(tableIndex int) tableSpec { + return tableSpec{tableIndex: tableIndex} +} + +func (s tableSpec) tinyintDefault() int { + return (s.tableIndex % 63) + 1 +} + +func (s tableSpec) smallintDefault() int { + return s.tableIndex*10 + 1 +} + +func (s tableSpec) mediumintDefault() int { + return s.tableIndex*100 + 1 +} + +func (s tableSpec) intDefault() int { + return s.tableIndex*1000 + 1 +} + +func (s tableSpec) bigintDefault() int64 { + return int64(s.tableIndex)*10000 + 1 +} + +func (s tableSpec) unsignedDefault() uint64 { + return uint64(s.tableIndex)*100000 + 1 +} + +func (s tableSpec) decimalDefault() string { + return fmt.Sprintf("%d.%06d", s.tableIndex, (s.tableIndex*111111)%1000000) +} + +func (s tableSpec) floatDefault() string { + return fmt.Sprintf("%d.25", s.tableIndex) +} + +func (s tableSpec) doubleDefault() string { + return fmt.Sprintf("%d.125", s.tableIndex) +} + +func (s tableSpec) boolDefault() int { + return s.tableIndex % 2 +} + +func (s tableSpec) bitDefault() string { + return rotateBits(s.tableIndex) +} + +func (s tableSpec) charDefault() string { + return fmt.Sprintf("c%07d", s.tableIndex%10000000) +} + +func (s tableSpec) varcharDefault() string { + return fmt.Sprintf("varchar-%02d", s.tableIndex) +} + +func (s tableSpec) dateDefault() string { + month 
:= monthValue(s.tableIndex) + day := dayValue(s.tableIndex) + return fmt.Sprintf("2026-%02d-%02d", month, day) +} + +func (s tableSpec) datetimeDefault() string { + month := monthValue(s.tableIndex) + day := dayValue(s.tableIndex) + hour, minute, second := clockValue(s.tableIndex) + return fmt.Sprintf("2026-%02d-%02d %02d:%02d:%02d", month, day, hour, minute, second) +} + +func (s tableSpec) timestampDefault() string { + return s.datetimeDefault() +} + +func (s tableSpec) timeDefault() string { + hour, minute, second := clockValue(s.tableIndex) + return fmt.Sprintf("%02d:%02d:%02d", hour, minute, second) +} + +func (s tableSpec) yearDefault() int { + return 2020 + (s.tableIndex % 10) +} + +func (s tableSpec) enumDefault() string { + values := []string{"red", "green", "blue"} + return values[s.tableIndex%len(values)] +} + +func (s tableSpec) setDefault() string { + values := []string{"x", "x,y", "y,z", "x,z"} + return values[s.tableIndex%len(values)] +} + +func (s tableSpec) updateDateValue() string { + month := monthValue(s.tableIndex + 5) + day := dayValue(s.tableIndex + 7) + return fmt.Sprintf("2027-%02d-%02d", month, day) +} + +func (s tableSpec) updateDatetimeValue() string { + month := monthValue(s.tableIndex + 5) + day := dayValue(s.tableIndex + 7) + hour, minute, second := clockValue(s.tableIndex + 9) + return fmt.Sprintf("2027-%02d-%02d %02d:%02d:%02d", month, day, hour, minute, second) +} + +func (s tableSpec) updateTimestampValue() string { + return s.updateDatetimeValue() +} + +func (s tableSpec) updateTimeValue() string { + hour, minute, second := clockValue(s.tableIndex + 9) + return fmt.Sprintf("%02d:%02d:%02d", hour, minute, second) +} + +func (s tableSpec) updateYearValue() int { + return 2030 + (s.tableIndex % 10) +} + +func (s tableSpec) updateEnumValue() string { + values := []string{"green", "blue", "red"} + return values[s.tableIndex%len(values)] +} + +func (s tableSpec) updateSetValue() string { + values := []string{"y", "x,z", "x,y", "y,z"} + 
return values[s.tableIndex%len(values)] +} + +func (s tableSpec) updateBitValue(marker int64) string { + return rotateBits(int(marker%251) + s.tableIndex) +} + +func tableName(n int) string { + return fmt.Sprintf("table_info_sharing_%d", n) +} + +func uniqueToken(tableN int, rowID uint64) string { + return fmt.Sprintf("tis-%02d-%020d", tableN, rowID) +} + +func quoteString(v string) string { + return strings.ReplaceAll(v, "'", "''") +} + +func recentRowRange(current uint64, batch int) (uint64, uint64) { + if current == 0 { + return 0, 0 + } + + window := minInt(defaultRecentRowWindow, int(current)) + offset := 0 + if window > 1 { + offset = rand.Intn(window) + } + end := current - uint64(offset) + start := uint64(1) + if end >= uint64(batch) { + start = end - uint64(batch) + 1 + } + return start, end +} + +func rotateBits(seed int) string { + var builder strings.Builder + builder.Grow(8) + for i := 0; i < 8; i++ { + if (seed+i)%2 == 0 { + builder.WriteByte('1') + } else { + builder.WriteByte('0') + } + } + return builder.String() +} + +func monthValue(seed int) int { + return seed%12 + 1 +} + +func dayValue(seed int) int { + return seed%28 + 1 +} + +func clockValue(seed int) (int, int, int) { + return seed % 24, (seed * 3) % 60, (seed * 7) % 60 +} + +func minInt(a int, b int) int { + if a < b { + return a + } + return b +} From e1e59462191c6f1def73f1ec11fd3fe9acd0eca0 Mon Sep 17 00:00:00 2001 From: wk989898 Date: Tue, 21 Apr 2026 06:01:07 +0000 Subject: [PATCH 3/7] update Signed-off-by: wk989898 --- tools/workload/ddl_config.go | 16 +++- tools/workload/ddl_runner.go | 75 +++++++++++++++++++ .../examples/ddl_truncate_table_mixed.toml | 15 ++-- tools/workload/readme.md | 32 ++++++++ 4 files changed, 126 insertions(+), 12 deletions(-) diff --git a/tools/workload/ddl_config.go b/tools/workload/ddl_config.go index cf9f0f9cc7..daf27d8e9c 100644 --- a/tools/workload/ddl_config.go +++ b/tools/workload/ddl_config.go @@ -31,6 +31,7 @@ type DDLConfig struct { Mode string 
`toml:"mode"` RatePerMinute DDLRatePerMinute `toml:"rate_per_minute"` Tables []string `toml:"tables"` + TablePatterns []string `toml:"table_patterns"` } type DDLRatePerMinute struct { @@ -68,7 +69,7 @@ func LoadDDLConfig(path string) (*DDLConfig, error) { func (c *DDLConfig) normalize() { c.Mode = strings.ToLower(strings.TrimSpace(c.Mode)) if c.Mode == "" { - if len(c.Tables) > 0 { + if len(c.Tables) > 0 || len(c.TablePatterns) > 0 { c.Mode = ddlModeFixed } else { c.Mode = ddlModeRandom @@ -84,14 +85,23 @@ func (c *DDLConfig) normalize() { } } c.Tables = tables + + patterns := make([]string, 0, len(c.TablePatterns)) + for _, pattern := range c.TablePatterns { + pattern = strings.TrimSpace(pattern) + if pattern != "" { + patterns = append(patterns, pattern) + } + } + c.TablePatterns = patterns } func (c *DDLConfig) validate() error { if c.Mode != ddlModeFixed && c.Mode != ddlModeRandom { return errors.Errorf("unsupported ddl mode: %s", c.Mode) } - if c.Mode == ddlModeFixed && len(c.Tables) == 0 { - return errors.New("ddl mode fixed requires tables") + if c.Mode == ddlModeFixed && len(c.Tables) == 0 && len(c.TablePatterns) == 0 { + return errors.New("ddl mode fixed requires tables or table_patterns") } if err := validateRate("add_column", c.RatePerMinute.AddColumn); err != nil { diff --git a/tools/workload/ddl_runner.go b/tools/workload/ddl_runner.go index 9d1844f474..5fc81abb8c 100644 --- a/tools/workload/ddl_runner.go +++ b/tools/workload/ddl_runner.go @@ -17,6 +17,7 @@ import ( "context" "database/sql" "math/rand" + "regexp" "sync" "sync/atomic" "time" @@ -72,6 +73,11 @@ func NewDDLRunner(app *WorkloadApp, cfg *DDLConfig) (*DDLRunner, error) { if err != nil { return nil, err } + patternTables, err := r.resolvePatternTables(cfg.TablePatterns) + if err != nil { + return nil, err + } + tables = mergeTableNames(tables, patternTables) r.selector = newFixedTableSelector(tables) case ddlModeRandom: if app.Config.DBPrefix != "" || app.Config.DBNum != 1 { @@ -299,3 +305,72 
@@ func parseTableList(rawTables []string, defaultSchema string) ([]TableName, erro } return out, nil } + +func mergeTableNames(left []TableName, right []TableName) []TableName { + seen := make(map[string]struct{}, len(left)+len(right)) + out := make([]TableName, 0, len(left)+len(right)) + for _, table := range left { + key := table.String() + if _, ok := seen[key]; ok { + continue + } + seen[key] = struct{}{} + out = append(out, table) + } + for _, table := range right { + key := table.String() + if _, ok := seen[key]; ok { + continue + } + seen[key] = struct{}{} + out = append(out, table) + } + return out +} + +func (r *DDLRunner) resolvePatternTables(patterns []string) ([]TableName, error) { + if len(patterns) == 0 { + return nil, nil + } + if r.app.Config.DBPrefix != "" || r.app.Config.DBNum != 1 { + return nil, errors.New("ddl table_patterns only support single database connection") + } + + dbs := r.app.DBManager.GetDBs() + if len(dbs) == 0 { + return nil, errors.New("no database connections available") + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + tableNames, err := fetchBaseTables(ctx, dbs[0].DB, r.app.Config.DBName) + if err != nil { + return nil, err + } + + out := make([]TableName, 0) + for _, pattern := range patterns { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, errors.Annotatef(err, "compile table pattern failed: %s", pattern) + } + + matched := false + for _, name := range tableNames { + if !re.MatchString(name) { + continue + } + out = append(out, TableName{ + Schema: r.app.Config.DBName, + Name: name, + }) + matched = true + } + if !matched { + return nil, errors.Errorf("table pattern matched no tables: %s", pattern) + } + } + + return out, nil +} diff --git a/tools/workload/examples/ddl_truncate_table_mixed.toml b/tools/workload/examples/ddl_truncate_table_mixed.toml index 6c447d1e5a..7b266c21bd 100644 --- a/tools/workload/examples/ddl_truncate_table_mixed.toml +++ 
b/tools/workload/examples/ddl_truncate_table_mixed.toml @@ -3,15 +3,12 @@ mode = "fixed" tables = [ - "test.sbtest1", - "test.sbtest2", - "test.sbtest3", - "test.sbtest4", + "^sbtest[0-9]+$", ] [rate_per_minute] -truncate_table = 1 -add_column = 6 -drop_column = 6 -add_index = 3 -drop_index = 3 +truncate_table = 100 +add_column = 600 +drop_column = 600 +add_index = 300 +drop_index = 300 diff --git a/tools/workload/readme.md b/tools/workload/readme.md index b54950a6a7..9c19407001 100644 --- a/tools/workload/readme.md +++ b/tools/workload/readme.md @@ -52,6 +52,23 @@ drop_index = 5 truncate_table = 1 ``` +`ddl.toml` example (fixed mode with regex-matched tables in one schema): + +```toml +mode = "fixed" + +table_patterns = [ + "^sbtest[0-9]+$", +] + +[rate_per_minute] +truncate_table = 60 +add_column = 120 +drop_column = 120 +add_index = 30 +drop_index = 30 +``` + `ddl.toml` example (random mode, omit `tables`): ```toml @@ -70,6 +87,21 @@ Prebuilt examples: - `examples/ddl_truncate_table_mixed.toml`: periodically runs `TRUNCATE TABLE` while add/drop column and add/drop index continue in parallel. - `examples/ddl_partition_table_mixed.toml`: targets partitioned `bank4` tables and mixes `TRUNCATE TABLE`, add/drop column, and add/drop index. +DDL table selection notes: + +- `tables = [...]` uses explicit table names. +- `table_patterns = [...]` uses Go regular expressions to match table names inside `-database-db-name`. +- `table_patterns` currently supports only a single database connection (`-db-num=1` and no `-db-prefix`). +- You can mix `tables` and `table_patterns` in the same fixed-mode config. + +DDL rate notes: + +- `rate_per_minute` is the total rate for that DDL type across the selected table set, not a per-table rate. +- In fixed mode, tasks are distributed round-robin across the matched tables. +- Example: if you select 60 tables and set `truncate_table = 60`, each table will be truncated about once per minute on average. 
+- Example: if you select 200 tables and want each table truncated about once every 5 minutes, set `truncate_table = 40`. +- `add/drop column` and `add/drop index` may be skipped on some tables depending on current schema state, so scheduled rate and successful execution rate can differ. + Truncate-table mixed DDL example: ```bash From e56e0437346bdfa10abf1bdd5b5fbb1240f25dab Mon Sep 17 00:00:00 2001 From: wk989898 Date: Tue, 21 Apr 2026 06:04:49 +0000 Subject: [PATCH 4/7] . Signed-off-by: wk989898 --- tools/workload/examples/ddl_truncate_table_mixed.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/workload/examples/ddl_truncate_table_mixed.toml b/tools/workload/examples/ddl_truncate_table_mixed.toml index 7b266c21bd..7d7b6ab968 100644 --- a/tools/workload/examples/ddl_truncate_table_mixed.toml +++ b/tools/workload/examples/ddl_truncate_table_mixed.toml @@ -2,7 +2,7 @@ # The scheduler spreads each rate evenly over a minute. mode = "fixed" -tables = [ +table_patterns = [ "^sbtest[0-9]+$", ] From 056f97e10cd472accbc968cc8a3d2f3476978e97 Mon Sep 17 00:00:00 2001 From: wk989898 Date: Tue, 21 Apr 2026 06:48:38 +0000 Subject: [PATCH 5/7] fix Signed-off-by: wk989898 --- tools/workload/ddl_executor.go | 15 +++++++++++++++ tools/workload/schema/interface.go | 6 ++++++ tools/workload/schema/sysbench/sysbench.go | 20 ++++++++++++++++++-- 3 files changed, 39 insertions(+), 2 deletions(-) diff --git a/tools/workload/ddl_executor.go b/tools/workload/ddl_executor.go index 6b4c7c768c..774b2b1c70 100644 --- a/tools/workload/ddl_executor.go +++ b/tools/workload/ddl_executor.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/errors" plog "github.com/pingcap/log" "go.uber.org/zap" + "workload/schema" ) const ( @@ -135,6 +136,7 @@ func (r *DDLRunner) executeTask(conn *sql.Conn, task DDLTask) error { } r.app.Stats.DDLSucceeded.Add(1) + r.onDDLExecuted(task) plog.Debug("ddl executed", zap.String("ddlType", task.Type.String()), zap.String("table", 
task.Table.String()), @@ -142,6 +144,19 @@ func (r *DDLRunner) executeTask(conn *sql.Conn, task DDLTask) error { return nil } +func (r *DDLRunner) onDDLExecuted(task DDLTask) { + if task.Type != ddlTruncateTable { + return + } + + workload, ok := r.app.Workload.(schema.TableLifecycleAwareWorkload) + if !ok { + return + } + + workload.OnTableTruncated(task.Table.Schema, task.Table.Name) +} + func (r *DDLRunner) buildDDL(ctx context.Context, conn *sql.Conn, task DDLTask) (sqlStr string, skipped bool, reason string, err error) { switch task.Type { case ddlAddColumn: diff --git a/tools/workload/schema/interface.go b/tools/workload/schema/interface.go index 30ee5597f2..ce0675ef0f 100644 --- a/tools/workload/schema/interface.go +++ b/tools/workload/schema/interface.go @@ -29,6 +29,12 @@ type DDLWorkload interface { BuildDDLSql(opt DDLOption) string } +// TableLifecycleAwareWorkload is an optional interface implemented by workloads +// that need to react to DDL side effects such as TRUNCATE TABLE. 
+type TableLifecycleAwareWorkload interface { + OnTableTruncated(schemaName string, tableName string) +} + type Workload interface { // BuildCreateTableStatement returns the create-table sql of the table n BuildCreateTableStatement(n int) string diff --git a/tools/workload/schema/sysbench/sysbench.go b/tools/workload/schema/sysbench/sysbench.go index d53cc24b6c..94a7a0b585 100644 --- a/tools/workload/schema/sysbench/sysbench.go +++ b/tools/workload/schema/sysbench/sysbench.go @@ -17,6 +17,7 @@ import ( "bytes" "context" "database/sql" + "errors" "fmt" "math/rand" "sync" @@ -46,6 +47,7 @@ const ( var ( cachePadString = make(map[int]string) cacheIdx atomic.Int64 + errTableEmpty = errors.New("table is empty") ) // InitPadStringCache initializes the cache with random pad strings @@ -139,6 +141,9 @@ func (c *SysbenchWorkload) getOrCreateCache(conn *sql.Conn, tableIndex int, opts // Initialize ranges if err := c.initializeRanges(conn, cache, tableIndex, opts.Batch); err != nil { + if errors.Is(err, errTableEmpty) { + return nil + } log.Error("failed to initialize ranges", zap.Error(err)) return nil } @@ -161,8 +166,8 @@ func (c *SysbenchWorkload) initializeRanges(conn *sql.Conn, cache *schema.TableU tableName := fmt.Sprintf("sbtest%d", tableIndex) if len(ids) == 0 { - log.Warn("no records found in table", zap.String("tableName", tableName)) - return fmt.Errorf("no records found in table %s", tableName) + log.Debug("skip sysbench update on empty table", zap.String("tableName", tableName)) + return errTableEmpty } c.divideIntoRanges(cache, ids, tableIndex) @@ -225,6 +230,17 @@ func (c *SysbenchWorkload) divideIntoRanges(cache *schema.TableUpdateRangeCache, } } +func (c *SysbenchWorkload) OnTableTruncated(_ string, tableName string) { + var tableIndex int + if _, err := fmt.Sscanf(tableName, "sbtest%d", &tableIndex); err != nil { + return + } + + c.mu.Lock() + delete(c.tableUpdateRangesCache, tableIndex) + c.mu.Unlock() +} + // buildRangeUpdateSQL builds the final update 
SQL for a range func (c *SysbenchWorkload) buildRangeUpdateSQL(tableIndex int, updateRange *schema.TableUpdateRange) string { var buf bytes.Buffer From 28342b32f15f435c5ab00befb4844f39dd7d0c4b Mon Sep 17 00:00:00 2001 From: wk989898 Date: Tue, 21 Apr 2026 07:57:05 +0000 Subject: [PATCH 6/7] update Signed-off-by: wk989898 --- .../ddl_partition_table_mixed.generated.toml | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 tools/workload/examples/ddl_partition_table_mixed.generated.toml diff --git a/tools/workload/examples/ddl_partition_table_mixed.generated.toml b/tools/workload/examples/ddl_partition_table_mixed.generated.toml new file mode 100644 index 0000000000..087d507802 --- /dev/null +++ b/tools/workload/examples/ddl_partition_table_mixed.generated.toml @@ -0,0 +1,20 @@ +# Use this with a dedicated database that contains 1000 partitioned bank4 tables. +# Run workload with -workload-type bank4 -partitioned=true -table-count 1000. +# bank4 creates 126 monthly partitions per table (2021-07 through 2031-12). +# rate_per_minute is the total rate across all matched tables. +# Current values target approximately: +# - truncate_table: each table once every 10 minutes +# - add/drop column: each table once per minute +# - add/drop index: each table once every 2 minutes +mode = "fixed" + +table_patterns = [ + "^bank4_[0-9]+$", +] + +[rate_per_minute] +truncate_table = 100 +add_column = 1000 +drop_column = 1000 +add_index = 500 +drop_index = 500 From 6267b5e722edea7362e6aa0d1995dfd671a56b6f Mon Sep 17 00:00:00 2001 From: wk989898 Date: Tue, 21 Apr 2026 07:58:31 +0000 Subject: [PATCH 7/7] . 
Signed-off-by: wk989898 
---
 .../examples/ddl_partition_table_mixed.generated.toml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tools/workload/examples/ddl_partition_table_mixed.generated.toml b/tools/workload/examples/ddl_partition_table_mixed.generated.toml
index 087d507802..01fe836b05 100644
--- a/tools/workload/examples/ddl_partition_table_mixed.generated.toml
+++ b/tools/workload/examples/ddl_partition_table_mixed.generated.toml
@@ -5,4 +5,4 @@
 # Current values target approximately:
-# - truncate_table: each table once every 10 minutes
+# - truncate_table: each table about three times per minute
 # - add/drop column: each table once per minute
-# - add/drop index: each table once every 2 minutes
+# - add/drop index: each table about once per minute
@@ -13,8 +13,8 @@ table_patterns = [
 ]
 
 [rate_per_minute]
-truncate_table = 100
+truncate_table = 3000
 add_column = 1000
 drop_column = 1000
-add_index = 500
-drop_index = 500
+add_index = 1000
+drop_index = 1000