refactor: optimize table scan performance.

Replace per-row colDataAppend() loops with a batched colDataAppendNItems()
that reserves space once and fills duplicate values with doubling memcpy calls.
parent 2531ee909c
commit 24218752ab
@@ -184,7 +184,7 @@ static FORCE_INLINE void colDataAppendDouble(SColumnInfoData* pColumnInfoData, u
 int32_t getJsonValueLen(const char* data);
 
 int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, bool isNull);
 int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize);
+int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows);
 int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
                         const SColumnInfoData* pSource, int32_t numOfRow2);
 int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows,
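The added declaration is the batch counterpart of colDataAppend(). A hedged usage sketch (hypothetical call site, not from this diff; error handling follows the colDataReserve() pattern the codebase already uses):

/* Hypothetical: write one value into rows [0, numOfRows) of a column. */
int32_t code = colDataAppendNItems(pColumnInfoData, 0, pData, numOfRows);
if (code != TSDB_CODE_SUCCESS) {
  return code;  /* internal reservation failed; nothing was appended */
}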
@@ -140,6 +140,54 @@ int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) {
   return TSDB_CODE_SUCCESS;
 }
 
+static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t currentRow, const char* pData, int32_t itemLen, int32_t numOfRows) {
+  ASSERT(pColumnInfoData->info.bytes >= itemLen);
+  size_t start = 1;
+
+  // the first item
+  memcpy(pColumnInfoData->pData, pData, itemLen);
+
+  int32_t t = 0;
+  int32_t count = log(numOfRows)/log(2);
+  while(t < count) {
+    int32_t xlen = 1 << t;
+    memcpy(pColumnInfoData->pData + start * itemLen + pColumnInfoData->varmeta.length, pColumnInfoData->pData, xlen * itemLen);
+    t += 1;
+    start += xlen;
+  }
+
+  // the tail part
+  if (numOfRows > start) {
+    memcpy(pColumnInfoData->pData + start * itemLen + currentRow * itemLen, pColumnInfoData->pData, (numOfRows - start) * itemLen);
+  }
+
+  if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
+    for(int32_t i = 0; i < numOfRows; ++i) {
+      pColumnInfoData->varmeta.offset[i + currentRow] = pColumnInfoData->varmeta.length + i * itemLen;
+    }
+
+    pColumnInfoData->varmeta.length += numOfRows * itemLen;
+  }
+}
+
+int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows) {
+  ASSERT(pData != NULL && pColumnInfoData != NULL);
+
+  int32_t len = pColumnInfoData->info.bytes;
+  if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
+    len = varDataTLen(pData);
+    if (pColumnInfoData->varmeta.allocLen < (numOfRows + currentRow) * len) {
+      int32_t code = colDataReserve(pColumnInfoData, (numOfRows + currentRow) * len);
+      if (code != TSDB_CODE_SUCCESS) {
+        return code;
+      }
+    }
+  }
+
+  doCopyNItems(pColumnInfoData, currentRow, pData, len, numOfRows);
+  return TSDB_CODE_SUCCESS;
+}
+
 static void doBitmapMerge(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, const SColumnInfoData* pSource,
                           int32_t numOfRow2) {
   if (numOfRow2 <= 0) return;
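The heart of the change is doCopyNItems(): write one seed item, then repeatedly memcpy the already-filled prefix onto the tail, doubling the initialized region, so n duplicates cost about log2(n) memcpy calls instead of n. As committed, the pass count comes from log(numOfRows)/log(2), which pulls in math.h and truncates toward zero, with the remainder handled by the tail copy. A minimal self-contained sketch of the same fill pattern (illustrative names, not the TDengine source; it avoids floating point by sizing each copy from the count already written):

#include <string.h>

/* Fill dst with n copies of the itemLen-byte item at src by region
 * doubling: after pass k, 2^k items are in place, so about log2(n)
 * memcpy calls suffice. Source and destination never overlap because
 * the source prefix ends at or before where the destination begins. */
static void fillNCopies(char* dst, const char* src, size_t itemLen, size_t n) {
  if (n == 0) return;
  memcpy(dst, src, itemLen);  /* seed: the first item */
  size_t done = 1;            /* items already written */
  while (done < n) {
    size_t chunk = (done <= n - done) ? done : (n - done);
    memcpy(dst + done * itemLen, dst, chunk * itemLen);
    done += chunk;
  }
}

For n = 8 this is the seed plus three copies of 1, 2, and 4 items; for n = 1000, the seed plus ten copies instead of a thousand.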
@@ -399,22 +399,17 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int
     const char* p = metaGetTableTagVal(&mr.me, pColInfoData->info.type, &tagVal);
 
     char* data = NULL;
-    int32_t len = 0;
     if (pColInfoData->info.type != TSDB_DATA_TYPE_JSON && p != NULL) {
       data = tTagValToData((const STagVal*)p, false);
-      len = varDataTLen(data);
-      code = colDataReserve(pColInfoData, len * pBlock->info.rows);
-      if (code != TSDB_CODE_SUCCESS) {
-        terrno = code;
-        return code;
-      }
     } else {
       data = (char*)p;
     }
 
-    for (int32_t i = 0; i < pBlock->info.rows; ++i) {
-      colDataAppend(pColInfoData, i, data,
-                    (data == NULL) || (pColInfoData->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data)));
-    }
+    bool isNullVal = (data == NULL) || (pColInfoData->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data));
+    if (isNullVal) {
+      colDataAppendNNULL(pColInfoData, 0, pBlock->info.rows);
+    } else {
+      colDataAppendNItems(pColInfoData, 0, data, pBlock->info.rows);
+    }
 
     if (data && (pColInfoData->info.type != TSDB_DATA_TYPE_JSON) && p != NULL &&
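Two things change here. The explicit colDataReserve()/terrno bookkeeping goes away because colDataAppendNItems() reserves internally for var-length types (previous hunk). And the null test is hoisted out of the per-row loop: a tag value is constant across the block, so the column is either all NULL or all one value, and one batched call covers every row. On the NULL side, a batched fill can also touch the validity bitmap a byte at a time instead of one bit per colDataAppend() call — a hedged sketch of that idea (hypothetical MSB-first bitmap layout, not the colDataAppendNNULL() source):

#include <stdint.h>
#include <string.h>

/* Mark rows [0, n) as NULL in a 1-bit-per-row bitmap: memset the whole
 * bytes in one call, then set the leftover bits individually. */
static void markAllNull(uint8_t* bitmap, int32_t n) {
  memset(bitmap, 0xFF, n / 8);
  for (int32_t i = n & ~7; i < n; ++i) {
    bitmap[i / 8] |= (uint8_t)(1u << (7 - (i % 8)));
  }
}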
@@ -1727,17 +1727,11 @@ int32_t qTbnameFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pO
   char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
   metaGetTableNameByUid(pInput->param, uid, str);
-
-  colDataReserve(pOutput->columnData, varDataTLen(str) * (pInput->numOfRows + pOutput->numOfRows));
-
-  for(int32_t i = 0; i < pInput->numOfRows; ++i) {
-    colDataAppend(pOutput->columnData, pOutput->numOfRows + i, str, false);
-  }
-
+  colDataAppendNItems(pOutput->columnData, pOutput->numOfRows, str, pInput->numOfRows);
   pOutput->numOfRows += pInput->numOfRows;
   return TSDB_CODE_SUCCESS;
 }
 
 
 /** Aggregation functions **/
 int32_t countScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
   SColumnInfoData *pInputData = pInput->columnData;
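qTbnameFunction() repeats the pattern: the manual reserve plus per-row loop collapses into one colDataAppendNItems() call, since the table name is the same for every input row. To get a feel for what batching buys, here is a self-contained micro-benchmark pitting a per-row memcpy loop against the doubling fill sketched earlier (illustrative only; timings depend on machine, libc, and cache sizes, so measure rather than trust):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define N_ROWS (1 << 20)  /* 1M rows */
#define ITEM_LEN 64       /* bytes per item */

/* Old shape: one memcpy per row. */
static void fillPerRow(char* dst, const char* src) {
  for (size_t i = 0; i < N_ROWS; ++i) {
    memcpy(dst + i * ITEM_LEN, src, ITEM_LEN);
  }
}

/* New shape: doubling fill, about log2(N_ROWS) memcpy calls. */
static void fillDoubling(char* dst, const char* src) {
  memcpy(dst, src, ITEM_LEN);
  size_t done = 1;
  while (done < N_ROWS) {
    size_t chunk = (done <= N_ROWS - done) ? done : (N_ROWS - done);
    memcpy(dst + done * ITEM_LEN, dst, chunk * ITEM_LEN);
    done += chunk;
  }
}

int main(void) {
  char item[ITEM_LEN];
  memset(item, 'x', sizeof(item));
  char* buf = malloc((size_t)N_ROWS * ITEM_LEN);
  if (buf == NULL) return 1;

  clock_t t0 = clock();
  fillPerRow(buf, item);
  clock_t t1 = clock();
  fillDoubling(buf, item);
  clock_t t2 = clock();

  printf("per-row:  %.1f ms\n", 1000.0 * (t1 - t0) / CLOCKS_PER_SEC);
  printf("doubling: %.1f ms\n", 1000.0 * (t2 - t1) / CLOCKS_PER_SEC);
  free(buf);
  return 0;
}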
@@ -31,6 +31,7 @@ void swapStr(char* j, char* J, int width) {
 }
 #endif
 
+// todo refactor: 1) move away; 2) use merge sort instead; 3) qsort is not a stable sort actually.
 void taosSort(void* arr, int64_t sz, int64_t width, __compar_fn_t compar) {
 #ifdef WINDOWS
   int64_t i, j;
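The added todo is accurate: the C standard leaves qsort() stability unspecified. Until a real merge sort lands, one common workaround is to sort pointers to the elements and break comparator ties on original position, which falls out of the pointer order within a single array. A hedged sketch (illustrative names, not from the TDengine tree; the file-scope comparator is not thread-safe):

#include <stdlib.h>
#include <string.h>

typedef int (*cmp_fn_t)(const void*, const void*);

static cmp_fn_t g_elemCmp;  /* element comparator, set before sorting */

/* Order by the caller's comparator first; on a tie, the lower address is
 * the earlier original position, so equal elements keep their input order. */
static int tagCmp(const void* a, const void* b) {
  const char* x = *(const char* const*)a;
  const char* y = *(const char* const*)b;
  int c = g_elemCmp(x, y);
  if (c != 0) return c;
  return (x < y) ? -1 : (x > y ? 1 : 0);
}

/* Stable sort on top of qsort(): sort pointers into arr, then permute
 * the elements through a scratch buffer. */
static void stableSort(void* arr, size_t n, size_t width, cmp_fn_t cmp) {
  const char* base = (const char*)arr;
  const char** tags = malloc(n * sizeof(*tags));
  char* tmp = malloc(n * width);
  if (tags == NULL || tmp == NULL) { free(tags); free(tmp); return; }

  for (size_t i = 0; i < n; ++i) tags[i] = base + i * width;
  g_elemCmp = cmp;
  qsort(tags, n, sizeof(*tags), tagCmp);

  for (size_t i = 0; i < n; ++i) memcpy(tmp + i * width, tags[i], width);
  memcpy(arr, tmp, n * width);
  free(tags);
  free(tmp);
}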