Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 5 additions & 3 deletions pkg/domain/domain.go
Original file line number Diff line number Diff line change
Expand Up @@ -3483,18 +3483,19 @@ func (do *Domain) planCacheEvictTrigger() {
// SetupWorkloadBasedLearningWorker sets up all of the workload based learning workers.
func (do *Domain) SetupWorkloadBasedLearningWorker() {
wbLearningHandle := workloadlearning.NewWorkloadLearningHandle(do.sysSessionPool)
wbCacheWorker := workloadlearning.NewWLCacheWorker(do.sysSessionPool)
// Start the workload based learning worker to analyze the read workload by statement_summary.
do.wg.Run(
func() {
do.readTableCostWorker(wbLearningHandle)
do.readTableCostWorker(wbLearningHandle, wbCacheWorker)
},
"readTableCostWorker",
)
// TODO: Add more workers for other workload based learning tasks.
}

// readTableCostWorker is a background worker that periodically analyze the read path table cost by statement_summary.
func (do *Domain) readTableCostWorker(wbLearningHandle *workloadlearning.Handle) {
func (do *Domain) readTableCostWorker(wbLearningHandle *workloadlearning.Handle, wbCacheWorker *workloadlearning.WLCacheWorker) {
// Recover the panic and log the error when worker exit.
defer util.Recover(metrics.LabelDomain, "readTableCostWorker", nil, false)
readTableCostTicker := time.NewTicker(vardef.WorkloadBasedLearningInterval.Load())
Expand All @@ -3506,7 +3507,8 @@ func (do *Domain) readTableCostWorker(wbLearningHandle *workloadlearning.Handle)
select {
case <-readTableCostTicker.C:
if vardef.EnableWorkloadBasedLearning.Load() && do.statsOwner.IsOwner() {
wbLearningHandle.HandleReadTableCost(do.InfoSchema())
wbLearningHandle.HandleTableReadCost(do.InfoSchema())
wbCacheWorker.UpdateTableReadCostCache()
}
case <-do.exit:
return
Expand Down
7 changes: 6 additions & 1 deletion pkg/workloadlearning/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "workloadlearning",
srcs = [
"cache.go",
"handle.go",
"metrics.go",
],
Expand All @@ -24,8 +25,12 @@ go_library(
go_test(
name = "workloadlearning_test",
timeout = "short",
srcs = ["handle_test.go"],
srcs = [
"cache_test.go",
"handle_test.go",
],
flaky = True,
shard_count = 3,
deps = [
":workloadlearning",
"//pkg/parser/ast",
Expand Down
154 changes: 154 additions & 0 deletions pkg/workloadlearning/cache.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,154 @@
// Copyright 2025 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package workloadlearning

import (
"context"
"encoding/json"
"sync"

"github.com/pingcap/tidb/pkg/kv"
"github.com/pingcap/tidb/pkg/sessionctx"
"github.com/pingcap/tidb/pkg/util"
"github.com/pingcap/tidb/pkg/util/logutil"
"go.uber.org/zap"
)

// TableReadCostCache stores the cached workload learning metrics
type TableReadCostCache struct {
	// TableReadCostMetrics maps a table ID to its latest read-cost metrics
	// loaded from mysql.tidb_workload_values.
	TableReadCostMetrics map[int64]*TableReadCostMetrics // key: tableID
	// Version is the storage version at which the metrics were loaded; a
	// refresh is skipped when storage has no newer version.
	Version uint64
}

// WLCacheWorker the worker to cache all workload-related metrics
// Now it is also used to save the cache data of table cost metrics.
type WLCacheWorker struct {
	// sysSessionPool provides system sessions used to query mysql.tidb_workload_values.
	sysSessionPool util.DestroyableSessionPool
	// tableReadCostCache is the in-memory copy of the table read-cost metrics.
	tableReadCostCache *TableReadCostCache
	// The embedded RWMutex guards tableReadCostCache: readers take RLock
	// (GetTableReadCostMetrics), the refresh path takes Lock. Because a mutex
	// must not be copied, WLCacheWorker is always used via pointer.
	sync.RWMutex
}

// NewWLCacheWorker Create a new workload learning cache worker to cache all workload-related metrics
// from storage mysql.tidb_workload_values to memory.
// The returned worker starts with an empty cache at version 0, so the first
// UpdateTableReadCostCache call always attempts a load from storage.
func NewWLCacheWorker(pool util.DestroyableSessionPool) *WLCacheWorker {
	// Use keyed fields so the literal stays valid if fields are reordered or
	// added; the embedded RWMutex is usable at its zero value and needs no
	// explicit initialization.
	return &WLCacheWorker{
		sysSessionPool: pool,
		tableReadCostCache: &TableReadCostCache{
			TableReadCostMetrics: make(map[int64]*TableReadCostMetrics),
			Version:              0,
		},
	}
}
Comment on lines +50 to +52
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
return &WLCacheWorker{
pool, cache, sync.RWMutex{}}
}
return &WLCacheWorker{
pool, cache, sync.RWMutex{},
}
}


// UpdateTableReadCostCache refreshes the cached workload learning metrics
// from mysql.tidb_workload_values. It is a no-op when the latest version in
// storage is not newer than the cached version.
func (cw *WLCacheWorker) UpdateTableReadCostCache() {
	// Get latest metrics from storage
	se, err := cw.sysSessionPool.Get()
	if err != nil {
		logutil.BgLogger().Warn("Get system session failed when updating table cost cache", zap.Error(err))
		return
	}
	defer func() {
		if err == nil { // only recycle when no error
			cw.sysSessionPool.Put(se)
		} else {
			// Note: Otherwise, the session will be leaked.
			cw.sysSessionPool.Destroy(se)
		}
	}()

	sctx := se.(sessionctx.Context)
	exec := sctx.GetRestrictedSQLExecutor()
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnWorkloadLearning)

	// Whether to update table cost metrics:
	// get the latest version in the storage first.
	// TODO(elsa): Add the index of (category, type, version) to mysql.tidb_workload_values
	sql := `SELECT version FROM mysql.tidb_workload_values
			WHERE category = %? AND type = %?
			ORDER BY version DESC LIMIT 1`
	rows, _, err := exec.ExecRestrictedSQL(ctx, nil, sql, feedbackCategory, tableReadCost)
	if err != nil {
		logutil.ErrVerboseLogger().Warn("Failed to get the latest table cost version", zap.Error(err))
		return
	}
	// Case: no metrics belongs to this feedback category and type
	if len(rows) != 1 {
		logutil.BgLogger().Warn("The result of latest table cost version query is not 1",
			zap.Int("result_rows", len(rows)))
		return
	}
	latestVersionInStorage := rows[0].GetUint64(0)
	// Read the cached version under the read lock: Version is written under
	// Lock in updateTableReadCostCacheWithMetrics, so an unsynchronized read
	// here would race if this method were ever invoked concurrently.
	cw.RWMutex.RLock()
	cachedVersion := cw.tableReadCostCache.Version
	cw.RWMutex.RUnlock()
	// If the storage version is not newer than the cached version, no need to update.
	if latestVersionInStorage <= cachedVersion {
		logutil.BgLogger().Info("The latest table cost version in storage is not newer than the cached version, no need to update",
			zap.Uint64("latest_version_in_storage", latestVersionInStorage),
			zap.Uint64("cached_version", cachedVersion))
		return
	}

	// Get the latest table cost of metrics
	sql = `SELECT table_id, value FROM mysql.tidb_workload_values
			WHERE category = %? AND type = %? AND version = %?`
	rows, _, err = exec.ExecRestrictedSQL(ctx, nil, sql, feedbackCategory, tableReadCost, latestVersionInStorage)
	if err != nil {
		logutil.ErrVerboseLogger().Warn("Failed to get the latest table cost metrics",
			zap.Error(err))
		return
	}
	newMetrics := make(map[int64]*TableReadCostMetrics, len(rows))
	for _, row := range rows {
		tableID := row.GetInt64(0)
		value := row.GetJSON(1).String()

		metric := &TableReadCostMetrics{}
		// Skip (but log) rows whose JSON payload cannot be decoded so one bad
		// row does not abort the whole refresh.
		if err := json.Unmarshal([]byte(value), metric); err != nil {
			logutil.ErrVerboseLogger().Warn("Failed to unmarshal table cost metrics",
				zap.Int64("table_id", tableID),
				zap.String("value", value),
				zap.Error(err))
			continue
		}
		newMetrics[tableID] = metric
	}

	// Update cache atomically
	cw.updateTableReadCostCacheWithMetrics(newMetrics, latestVersionInStorage)
}

// updateTableReadCostCacheWithMetrics swaps the cached metrics map and version
// in one critical section so readers never observe a half-updated cache.
func (cw *WLCacheWorker) updateTableReadCostCacheWithMetrics(metrics map[int64]*TableReadCostMetrics,
	version uint64) {
	cw.Lock()
	defer cw.Unlock()
	cw.tableReadCostCache.TableReadCostMetrics = metrics
	cw.tableReadCostCache.Version = version
}

// GetTableReadCostMetrics returns a copy of the cached metrics for the given
// table ID, or nil when the table has no cached entry.
func (cw *WLCacheWorker) GetTableReadCostMetrics(tableID int64) *TableReadCostMetrics {
	cw.RWMutex.RLock()
	defer cw.RWMutex.RUnlock()
	metric, exists := cw.tableReadCostCache.TableReadCostMetrics[tableID]
	if !exists {
		return nil
	}
	// Copy the whole struct to protect the cache from caller mutation.
	// The previous field-by-field copy silently dropped DbName/TableName;
	// a value copy keeps every field and stays correct if fields are added.
	cloned := *metric
	return &cloned
}
79 changes: 79 additions & 0 deletions pkg/workloadlearning/cache_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
// Copyright 2025 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package workloadlearning_test

import (
"strconv"
"testing"
"time"

"github.com/pingcap/tidb/pkg/parser/ast"
"github.com/pingcap/tidb/pkg/testkit"
"github.com/pingcap/tidb/pkg/workloadlearning"
"github.com/stretchr/testify/require"
)

// TestUpdateTableCostCache saves one table's read-cost metrics to storage and
// verifies that UpdateTableReadCostCache loads them into the in-memory cache.
func TestUpdateTableCostCache(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)

	// Create test table and insert test metrics
	tk.MustExec(`use test`)
	tk.MustExec("create table test (a int, b int, index idx(a))")

	// Create a workload learning handle to save metrics
	handle := workloadlearning.NewWorkloadLearningHandle(dom.SysSessionPool())

	// Create test metrics
	readTableCostMetrics := &workloadlearning.TableReadCostMetrics{
		DbName:        ast.CIStr{O: "test", L: "test"},
		TableName:     ast.CIStr{O: "test", L: "test"},
		TableScanTime: 10.0,
		TableMemUsage: 10.0,
		ReadFrequency: 10,
		TableReadCost: 1.0,
	}
	tableCostMetrics := map[ast.CIStr]*workloadlearning.TableReadCostMetrics{
		{O: "test", L: "test"}: readTableCostMetrics,
	}

	// Save metrics to storage
	handle.SaveTableReadCostMetrics(tableCostMetrics, time.Now(), time.Now(), dom.InfoSchema())

	// Create cache worker and test UpdateTableReadCostCache
	worker := workloadlearning.NewWLCacheWorker(dom.SysSessionPool())
	worker.UpdateTableReadCostCache()

	// Get table ID for verification; fail loudly on a malformed ID instead of
	// silently ignoring the conversion error as before.
	rs := tk.MustQuery("select tidb_table_id from information_schema.tables where table_schema = 'test' and table_name = 'test'")
	tableID, err := strconv.ParseInt(rs.Rows()[0][0].(string), 10, 64)
	require.NoError(t, err)

	// Verify cached metrics
	metrics := worker.GetTableReadCostMetrics(tableID)
	require.NotNil(t, metrics)
	require.Equal(t, 10.0, metrics.TableScanTime)
	require.Equal(t, 10.0, metrics.TableMemUsage)
	require.Equal(t, int64(10), metrics.ReadFrequency)
	require.Equal(t, 1.0, metrics.TableReadCost)
}

// TestGetTableReadCacheMetricsWithNoData checks that a lookup on a freshly
// created (never refreshed) cache worker returns nil.
func TestGetTableReadCacheMetricsWithNoData(t *testing.T) {
	_, dom := testkit.CreateMockStoreAndDomain(t)
	// No metrics were saved, so any table ID must miss the cache.
	cacheWorker := workloadlearning.NewWLCacheWorker(dom.SysSessionPool())
	require.Nil(t, cacheWorker.GetTableReadCostMetrics(1))
}
Loading