Merge pull request #24170 from taosdata/szhou/fix/TS-4382

fix: the pre-allocated column has no data to copy
dapan1121 2023-12-22 12:31:07 +08:00 committed by GitHub
commit ceedd5b051
3 changed files with 20 additions and 2 deletions


@@ -475,6 +475,7 @@ static void appendOneRowToDataBlock(SSDataBlock* pBlock, const SSDataBlock* pSou
     if (isNull) {
       colDataSetVal(pColInfo, pBlock->info.rows, NULL, true);
     } else {
+      if (!pSrcColInfo->pData) continue;
       char* pData = colDataGetData(pSrcColInfo, *rowIndex);
       colDataSetVal(pColInfo, pBlock->info.rows, pData, false);
     }
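
For context, a minimal, self-contained sketch of the failure mode this guard addresses. The types and names (Column, copyRow) are simplified stand-ins, not the actual TDengine structs: when a column has been pre-allocated but never populated, its data pointer is still NULL, so copying a row out of it would dereference NULL; the added check skips such a column instead.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for a column buffer. In the real code this role is
 * played by SColumnInfoData, whose pData can still be NULL when the column
 * was pre-allocated but never filled. */
typedef struct {
  char  *pData;   /* NULL when pre-allocated but empty */
  size_t rowSize; /* fixed-width row size for this illustration */
} Column;

/* Copy one row from src to dst, mirroring the guard added above:
 * a source column with no backing buffer is skipped, not dereferenced. */
static void copyRow(Column *dst, const Column *src, size_t dstRow, size_t srcRow) {
  if (src->pData == NULL) {  /* analogous to: if (!pSrcColInfo->pData) continue; */
    return;
  }
  memcpy(dst->pData + dstRow * dst->rowSize,
         src->pData + srcRow * src->rowSize,
         src->rowSize);
}

int main(void) {
  char   dstBuf[16] = {0};
  Column dst = {dstBuf, 4};
  Column emptySrc = {NULL, 4};     /* pre-allocated column with no data */
  copyRow(&dst, &emptySrc, 0, 0);  /* safe: nothing copied, no crash */
  printf("dst untouched: %d\n", dstBuf[0]);
  return 0;
}
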
@@ -900,7 +901,7 @@ static int32_t getPageBufIncForRow(SSDataBlock* blk, int32_t row, int32_t rowIdx
     for (int32_t i = 0; i < numCols; ++i) {
       SColumnInfoData* pColInfoData = TARRAY_GET_ELEM(blk->pDataBlock, i);
       if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
-        if (pColInfoData->varmeta.offset[row] != -1) {
+        if ((pColInfoData->varmeta.offset[row] != -1) && (pColInfoData->pData)) {
           char* p = colDataGetData(pColInfoData, row);
           sz += varDataTLen(p);
         }
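
Along the same lines, a hedged sketch of why the size-accounting path needs the extra condition; VarColumn, varLenAt and sizeIncForRow below are hypothetical stand-ins for the real SColumnInfoData / varDataTLen machinery, not the library API. A row offset can look valid even though the backing buffer was never given data, so the size increment must only be computed when pData exists.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified var-length column: offset[row] points into pData,
 * and -1 marks a NULL value (loosely modeled on varmeta.offset above). */
typedef struct {
  int32_t *offset; /* per-row offset into pData, or -1 for NULL */
  char    *pData;  /* may still be NULL for a pre-allocated, empty column */
} VarColumn;

/* Stored length of one value: 2-byte length header plus payload,
 * a rough analogue of varDataTLen in the real code. */
static int32_t varLenAt(const VarColumn *col, int32_t row) {
  const char *p = col->pData + col->offset[row];
  uint16_t payload;
  memcpy(&payload, p, sizeof(payload));  /* read the length header */
  return (int32_t)(sizeof(uint16_t) + payload);
}

/* Buffer-size increment for one row, with the patched condition:
 * the offset must be valid AND the buffer must exist before it is read. */
static int32_t sizeIncForRow(const VarColumn *col, int32_t row) {
  if (col->offset[row] != -1 && col->pData != NULL) {
    return varLenAt(col, row);
  }
  return 0; /* NULL value or empty pre-allocated column adds nothing */
}

int main(void) {
  int32_t   offs[1] = {0};
  VarColumn empty = {offs, NULL};                 /* offset looks valid, no buffer */
  printf("inc = %d\n", sizeIncForRow(&empty, 0)); /* 0 instead of a NULL read */
  return 0;
}
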
@@ -970,7 +971,6 @@ static int32_t sortBlocksToExtSource(SSortHandle* pHandle, SArray* aBlk, SBlockO
           lastPageBufTs = ((int64_t*)tsCol->pData)[pHandle->pDataBlock->info.rows - 1];
           appendDataBlockToPageBuf(pHandle, pHandle->pDataBlock, aPgId);
           nMergedRows += pHandle->pDataBlock->info.rows;
-
           blockDataCleanup(pHandle->pDataBlock);
           blkPgSz = pgHeaderSz;
           bufInc = getPageBufIncForRow(minBlk, minRow, 0);


@@ -1069,6 +1069,7 @@
 ,,y,script,./test.sh -f tsim/query/unionall_as_table.sim
 ,,y,script,./test.sh -f tsim/query/multi_order_by.sim
 ,,y,script,./test.sh -f tsim/query/sys_tbname.sim
+,,y,script,./test.sh -f tsim/query/sort-pre-cols.sim
 ,,y,script,./test.sh -f tsim/query/groupby.sim
 ,,y,script,./test.sh -f tsim/query/groupby_distinct.sim
 ,,y,script,./test.sh -f tsim/query/event.sim


@@ -0,0 +1,17 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+sql create database d
+sql use d
+sql create table st(ts timestamp, v int) tags(lj json)
+sql insert into ct1 using st tags('{"instance":"200"}') values(now, 1)(now+1s, 2);
+sql insert into ct2 using st tags('{"instance":"200"}') values(now+2s, 3)(now+3s, 4);
+sql select to_char(ts, 'yyyy-mm-dd hh24:mi:ss') as time, irate(v) from st group by to_char(ts, 'yyyy-mm-dd hh24:mi:ss'), lj->'instance' order by time;
+print $data01
+if $data01 != 0.000000000 then
+  return -1
+endi
+system sh/exec.sh -n dnode1 -s stop -x SIGINT