executor: avoid initializing chunk with max chunk size in some places (
tiancaiamao authored Jan 27, 2025
1 parent 00ac9f8 commit 8b2ef95
Showing 3 changed files with 14 additions and 6 deletions.
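
What the change does: instead of pre-sizing each inner worker's chunk for MaxChunkSize() rows (1024 by default), the chunk is now allocated at the executor's InitCap() (32 by default) and grows on demand up to the max chunk size. The following is a minimal, self-contained Go sketch of that allocation pattern; the Chunk type and NewChunk function here are illustrative stand-ins, not TiDB's actual chunk package API.

    package main

    import "fmt"

    // Chunk is an illustrative stand-in for a columnar row batch: a flat
    // buffer of values plus the number of rows it may eventually hold.
    type Chunk struct {
        rows    []int64
        maxRows int
    }

    // NewChunk allocates the backing buffer at initCap rows only; maxRows is
    // recorded as a growth limit, but no memory is reserved for it up front.
    func NewChunk(initCap, maxRows int) *Chunk {
        return &Chunk{rows: make([]int64, 0, initCap), maxRows: maxRows}
    }

    // AppendInt64 grows the buffer lazily (append doubles capacity as needed)
    // instead of paying for maxRows rows at allocation time.
    func (c *Chunk) AppendInt64(v int64) bool {
        if len(c.rows) >= c.maxRows {
            return false // chunk is full; the caller should start a new one
        }
        c.rows = append(c.rows, v)
        return true
    }

    func main() {
        // Before the change: capacity for 1024 rows is reserved even if only a few are used.
        eager := NewChunk(1024, 1024)
        // After the change: start at the init capacity (e.g. 32) and grow on demand.
        lazy := NewChunk(32, 1024)

        for i := int64(0); i < 10; i++ {
            eager.AppendInt64(i)
            lazy.AppendInt64(i)
        }
        fmt.Printf("eager cap=%d rows, lazy cap=%d rows\n", cap(eager.rows), cap(lazy.rows))
        // Output: eager cap=1024 rows, lazy cap=32 rows
    }
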
2 changes: 1 addition & 1 deletion pkg/executor/join/index_lookup_hash_join.go
@@ -478,7 +478,7 @@ func (e *IndexNestedLoopHashJoin) newInnerWorker(taskCh chan *indexHashJoinTask,
 	InnerCtx: e.InnerCtx,
 	outerCtx: e.OuterCtx,
 	ctx: e.Ctx(),
-	executorChk: e.AllocPool.Alloc(e.InnerCtx.RowTypes, e.MaxChunkSize(), e.MaxChunkSize()),
+	executorChk: e.AllocPool.Alloc(e.InnerCtx.RowTypes, e.InitCap(), e.MaxChunkSize()),
 	indexRanges: copiedRanges,
 	keyOff2IdxOff: e.KeyOff2IdxOff,
 	stats: innerStats,
4 changes: 2 additions & 2 deletions pkg/executor/join/index_lookup_join.go
@@ -238,7 +238,7 @@ func (e *IndexLookUpJoin) newInnerWorker(taskCh chan *lookUpJoinTask) *innerWork
 	outerCtx: e.OuterCtx,
 	taskCh: taskCh,
 	ctx: e.Ctx(),
-	executorChk: e.AllocPool.Alloc(e.InnerCtx.RowTypes, e.MaxChunkSize(), e.MaxChunkSize()),
+	executorChk: e.AllocPool.Alloc(e.InnerCtx.RowTypes, e.InitCap(), e.MaxChunkSize()),
 	indexRanges: copiedRanges,
 	keyOff2IdxOff: e.KeyOff2IdxOff,
 	stats: innerStats,
@@ -453,7 +453,7 @@ func (ow *outerWorker) buildTask(ctx context.Context) (*lookUpJoinTask, error) {
 	}
 	maxChunkSize := ow.ctx.GetSessionVars().MaxChunkSize
 	for requiredRows > task.outerResult.Len() {
-		chk := ow.executor.NewChunkWithCapacity(ow.OuterCtx.RowTypes, maxChunkSize, maxChunkSize)
+		chk := ow.executor.NewChunkWithCapacity(ow.OuterCtx.RowTypes, requiredRows, maxChunkSize)
 		chk = chk.SetRequiredRows(requiredRows, maxChunkSize)
 		err := exec.Next(ctx, ow.executor, chk)
 		if err != nil {
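
The second hunk in this file applies the same idea to the outer worker: buildTask already knows how many more rows it needs (requiredRows), so each chunk it requests can be sized for that need rather than for maxChunkSize, and SetRequiredRows tells the child executor when to stop filling it. Below is a rough, self-contained sketch of that pattern; newBatch and fetch are hypothetical stand-ins for the chunk allocation and exec.Next calls, not real TiDB APIs.

    package main

    import "fmt"

    const maxChunkSize = 1024

    // batch is a toy stand-in for chunk.Chunk: a row buffer plus the number
    // of rows the producer is asked to deliver before returning.
    type batch struct {
        rows         []int
        requiredRows int
    }

    // newBatch allocates the row buffer at the given capacity; the old code
    // always used maxChunkSize here, the new code uses the rows still needed.
    func newBatch(capacity, required int) *batch {
        if required > maxChunkSize {
            required = maxChunkSize
        }
        return &batch{rows: make([]int, 0, capacity), requiredRows: required}
    }

    // fetch stands in for exec.Next: it copies up to requiredRows rows from
    // src into the batch and reports how many rows were delivered.
    func fetch(src []int, b *batch) int {
        n := b.requiredRows
        if n > len(src) {
            n = len(src)
        }
        b.rows = append(b.rows, src[:n]...)
        return n
    }

    func main() {
        src := []int{1, 2, 3, 4, 5}
        collected := 0
        taskSize := 5 // rows this task still needs (e.g. the index join batch size)

        for collected < taskSize {
            need := taskSize - collected
            chk := newBatch(need, need) // was: newBatch(maxChunkSize, need)
            n := fetch(src[collected:], chk)
            if n == 0 {
                break
            }
            collected += n
            fmt.Printf("allocated cap=%d, got %d rows\n", cap(chk.rows), n)
        }
    }
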
14 changes: 11 additions & 3 deletions pkg/executor/test/jointest/join_test.go
@@ -777,13 +777,21 @@ func TestIssue30211(t *testing.T) {
 	}()
 	tk.MustExec("insert into t1 values(1),(2);")
 	tk.MustExec("insert into t2 values(1),(1),(2),(2);")
 	tk.MustExec("set @@tidb_mem_quota_query=8000;")
+
+	// the memory used in planner stage is less than the memory used in executor stage, so we have to use
+	// the Plan Cache so that the query will not be canceled during compilation.
+	tk.MustExec("prepare stmt1 from 'select /*+ inl_join(t1) */ * from t1 join t2 on t1.a = t2.a';")
+	tk.MustExec("prepare stmt2 from 'select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a';")
+	tk.MustQuery("execute stmt1;").Check(testkit.Rows("1 1", "1 1", "2 2", "2 2"))
+	tk.MustQuery("execute stmt2;").Check(testkit.Rows("1 1", "1 1", "2 2", "2 2"))
+
+	tk.MustExec("set @@tidb_mem_quota_query=1000;")
 	tk.MustExec("set tidb_index_join_batch_size = 1;")
 	tk.MustExec("SET GLOBAL tidb_mem_oom_action = 'CANCEL'")
 	defer tk.MustExec("SET GLOBAL tidb_mem_oom_action='LOG'")
-	err := tk.QueryToErr("select /*+ inl_join(t1) */ * from t1 join t2 on t1.a = t2.a;")
+	err := tk.QueryToErr("execute stmt1")
 	require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err))
-	err = tk.QueryToErr("select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a;")
+	err = tk.QueryToErr("execute stmt2")
 	require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err))
 }
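For context on what the updated test exercises: chunk allocations are charged against the session memory quota (tidb_mem_quota_query), and with tidb_mem_oom_action set to CANCEL a query is aborted with a memory-exceeded error once the quota is crossed. Smaller initial chunks let the prepared statements finish under the 8000-byte quota, while the 1000-byte quota still forces cancellation. A toy sketch of that accounting follows; Tracker, Consume and ErrMemoryExceeded are made-up names, not TiDB's memory package.

    package main

    import (
        "errors"
        "fmt"
    )

    // ErrMemoryExceeded is an illustrative stand-in for TiDB's
    // "memory exceeds quota, query canceled" error.
    var ErrMemoryExceeded = errors.New("memory usage exceeds quota, cancel query")

    // Tracker is a toy memory tracker: every chunk allocation is charged here,
    // and the CANCEL action surfaces as an error instead of an OOM.
    type Tracker struct {
        used  int64
        quota int64
    }

    func (t *Tracker) Consume(bytes int64) error {
        t.used += bytes
        if t.used > t.quota {
            return ErrMemoryExceeded
        }
        return nil
    }

    func main() {
        const bytesPerRow = 8
        tracker := &Tracker{quota: 1000}

        // A chunk pre-sized for 1024 rows blows a 1000-byte quota immediately;
        // a 32-row initial chunk does not.
        if err := tracker.Consume(1024 * bytesPerRow); err != nil {
            fmt.Println("max-size chunk:", err)
        }

        tracker = &Tracker{quota: 1000}
        if err := tracker.Consume(32 * bytesPerRow); err == nil {
            fmt.Println("init-size chunk: within quota")
        }
    }
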
