Merge remote-tracking branch 'origin/3.0' into enh/opt-transport
commit a7035daf17
@@ -169,11 +169,48 @@ ELSE ()
    SET(COMPILER_SUPPORT_AVX512VL false)
  ELSE()
    CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA)
    CHECK_C_COMPILER_FLAG("-mavx" COMPILER_SUPPORT_AVX)
    CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2)
    CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F)
    CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI)
    CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL)

    INCLUDE(CheckCSourceRuns)
    SET(CMAKE_REQUIRED_FLAGS "-mavx")
    check_c_source_runs("
      #include <immintrin.h>
      int main() {
        __m256d a, b, c;
        double buf[4] = {0};
        a = _mm256_loadu_pd(buf);
        b = _mm256_loadu_pd(buf);
        c = _mm256_add_pd(a, b);
        _mm256_storeu_pd(buf, c);
        for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) {
          if (buf[i] != 0) {
            return 1;
          }
        }
        return 0;
      }
    " COMPILER_SUPPORT_AVX)

    SET(CMAKE_REQUIRED_FLAGS "-mavx2")
    check_c_source_runs("
      #include <immintrin.h>
      int main() {
        __m256i a, b, c;
        int buf[8] = {0};
        a = _mm256_loadu_si256((__m256i *)buf);
        b = _mm256_loadu_si256((__m256i *)buf);
        c = _mm256_and_si256(a, b);
        _mm256_storeu_si256((__m256i *)buf, c);
        for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) {
          if (buf[i] != 0) {
            return 1;
          }
        }
        return 0;
      }
    " COMPILER_SUPPORT_AVX2)
  ENDIF()

  IF (COMPILER_SUPPORT_SSE42)
@@ -153,7 +153,7 @@ SELECT * from information_schema.`ins_streams`;

Because window closing is determined by event time, the event time cannot advance if the event stream is interrupted or continuously delayed, which may prevent the latest computation results from being produced.

Therefore, stream processing provides the MAX_DELAY trigger mode, which combines event time with processing time.
Therefore, stream processing provides the MAX_DELAY trigger mode, which combines event time with processing time. The minimum MAX_DELAY is 5s; specifying a smaller value causes an error when the stream is created.

In MAX_DELAY mode, computation is triggered immediately when a window closes. In addition, once data has been written, computation is also triggered immediately whenever the elapsed time since the last trigger exceeds the interval specified by max delay.
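The following is a minimal sketch of creating a stream that uses the MAX_DELAY trigger mode through the TDengine C client; the connection parameters and the database, table, and stream names are placeholders, and the 5s value reflects the minimum MAX_DELAY described above.

// Minimal sketch: create a stream with the MAX_DELAY trigger mode via the C client.
// The database "power", table "meters", and stream/target names are hypothetical.
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "power", 6030);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect to server\n");
    return 1;
  }

  // MAX_DELAY must be at least 5s; a smaller value makes stream creation fail.
  const char *sql =
      "CREATE STREAM IF NOT EXISTS avg_vol_stream "
      "TRIGGER MAX_DELAY 5s "
      "INTO avg_vol AS "
      "SELECT _wstart, AVG(voltage) FROM meters INTERVAL(1m)";

  TAOS_RES *res = taos_query(conn, sql);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "create stream failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);

  taos_close(conn);
  taos_cleanup();
  return 0;
}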
@@ -10,7 +10,7 @@ description: 3.3.3.0 Release Notes
4. TDengine supports the macOS client for the Enterprise Edition [Enterprise Edition]
5. taosX logs are no longer written to syslog by default [Enterprise Edition]
6. The server records all slow-query information in the log database
7. The server version number is added to the result of show cluster machines
7. The server version number is added to the result of show cluster machines [Enterprise Edition]
8. The reserved keywords LEVEL/ENCODE/COMPRESS are removed and can now be used as column, table, or database names
9. Dynamically changing the temporary directory is no longer allowed
10. round function: supports specifying the rounding precision (see the sketch after this list)
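As a quick illustration of item 10, below is a minimal sketch using the C client; the connection parameters are placeholders, and it assumes the server accepts a constant SELECT without a FROM clause (otherwise apply ROUND to a column of an existing table).

// Minimal sketch: call the round function with an explicit precision argument.
// Connection parameters are placeholders for a local TDengine instance.
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect to server\n");
    return 1;
  }

  TAOS_RES *res = taos_query(conn, "SELECT ROUND(3.14159, 2)");
  if (taos_errno(res) == 0) {
    TAOS_ROW row = taos_fetch_row(res);
    if (row != NULL) {
      char buf[64] = {0};
      // taos_print_row formats every column regardless of its data type.
      (void)taos_print_row(buf, row, taos_fetch_fields(res), taos_num_fields(res));
      printf("%s\n", buf);  // expected: 3.14
    }
  } else {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);

  taos_close(conn);
  taos_cleanup();
  return 0;
}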
@@ -29,6 +29,7 @@ struct SqlFunctionCtx;
struct SResultRowEntryInfo;

struct SFunctionNode;
struct SExprSupp;
typedef struct SScalarParam SScalarParam;
typedef struct SStreamState SStreamState;

@@ -43,6 +44,7 @@ typedef int32_t (*FExecProcess)(struct SqlFunctionCtx *pCtx);
typedef int32_t (*FExecFinalize)(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock);
typedef int32_t (*FScalarExecProcess)(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
typedef int32_t (*FExecCombine)(struct SqlFunctionCtx *pDestCtx, struct SqlFunctionCtx *pSourceCtx);
typedef int32_t (*FExecDecode)(struct SqlFunctionCtx *pCtx, const char *buf, struct SResultRowEntryInfo *pResultCellInfo, int32_t version);
typedef int32_t (*processFuncByRow)(SArray* pCtx); // array of SqlFunctionCtx

typedef struct SScalarFuncExecFuncs {

@@ -57,6 +59,7 @@ typedef struct SFuncExecFuncs {
  FExecFinalize finalize;
  FExecCombine combine;
  FExecCleanUp cleanup;
  FExecDecode decode;
  processFuncByRow processFuncByRow;
} SFuncExecFuncs;

@@ -65,6 +68,8 @@ typedef struct SFuncExecFuncs {
#define TOP_BOTTOM_QUERY_LIMIT 100
#define FUNCTIONS_NAME_MAX_LENGTH 32

#define FUNCTION_RESULT_INFO_VERSION 1

typedef struct SResultRowEntryInfo {
  bool initialized : 1;  // output buffer has been initialized
  bool complete : 1;     // query has completed

@@ -165,6 +170,11 @@ typedef struct STdbState {
  void *txn;
} STdbState;

typedef struct SResultRowStore {
  int32_t (*resultRowPut)(struct SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize);
  int32_t (*resultRowGet)(struct SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize);
} SResultRowStore;

struct SStreamState {
  STdbState *pTdbState;
  struct SStreamFileState *pFileState;

@@ -175,6 +185,8 @@ struct SStreamState {
  int64_t streamBackendRid;
  int8_t dump;
  int32_t tsIndex;
  SResultRowStore pResultRowStore;
  struct SExprSupp *pExprSupp;
};

typedef struct SFunctionStateStore {
@@ -236,7 +236,7 @@ typedef struct {
  void* vnode; // not available to encoder and decoder
  FTbSink* tbSinkFunc;
  STSchema* pTSchema;
  SSHashObj* pTblInfo;
  SSHashObj* pTbInfo;
} STaskSinkTb;

typedef struct {

@@ -754,7 +754,7 @@ int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
void streamMetaAcquireOneTask(SStreamTask* pTask);
int32_t streamMetaAcquireOneTask(SStreamTask* pTask);
void streamMetaClear(SStreamMeta* pMeta);
void streamMetaInitBackend(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
@@ -50,6 +50,7 @@ typedef struct {
  int32_t rollPeriod; // secs
  int64_t retentionSize;
  int64_t segSize;
  int64_t committed;
  EWalType level; // wal level
  int32_t encryptAlgorithm;
  char encryptKey[ENCRYPT_KEY_LEN + 1];
@ -89,9 +89,9 @@ int32_t taosResetTerminalMode();
|
|||
snprintf(array[size], STACKSIZE, "0x%lx : (%s+0x%lx) [0x%lx]\n", (long)pc, fname, (long)offset, (long)pc); \
|
||||
} \
|
||||
if (ignoreNum < size && size > 0) { \
|
||||
offset = snprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? size - ignoreNum : size); \
|
||||
offset = tsnprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? size - ignoreNum : size); \
|
||||
for (int32_t i = (ignoreNum > 0) ? ignoreNum : 0; i < size; i++) { \
|
||||
offset += snprintf(buf + offset, bufSize - 1 - offset, "frame:%d, %s\n", (ignoreNum > 0) ? i - ignoreNum : i, \
|
||||
offset += tsnprintf(buf + offset, bufSize - 1 - offset, "frame:%d, %s\n", (ignoreNum > 0) ? i - ignoreNum : i, \
|
||||
array[i]); \
|
||||
} \
|
||||
} \
|
||||
|
@ -140,9 +140,9 @@ int32_t taosResetTerminalMode();
|
|||
char **strings = backtrace_symbols(array, size); \
|
||||
int32_t offset = 0; \
|
||||
if (strings != NULL) { \
|
||||
offset = snprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? size - ignoreNum : size); \
|
||||
offset = tsnprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? size - ignoreNum : size); \
|
||||
for (int32_t i = (ignoreNum > 0) ? ignoreNum : 0; i < size; i++) { \
|
||||
offset += snprintf(buf + offset, bufSize - 1 - offset, "frame:%d, %s\n", (ignoreNum > 0) ? i - ignoreNum : i, \
|
||||
offset += tsnprintf(buf + offset, bufSize - 1 - offset, "frame:%d, %s\n", (ignoreNum > 0) ? i - ignoreNum : i, \
|
||||
strings[i]); \
|
||||
} \
|
||||
} \
|
||||
|
@ -193,7 +193,7 @@ int32_t taosResetTerminalMode();
|
|||
snprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? frames - ignoreNum : frames); \
|
||||
for (i = (ignoreNum > 0) ? ignoreNum : 0; i < frames; i++) { \
|
||||
SymFromAddr(process, (DWORD64)(stack[i]), 0, symbol); \
|
||||
offset += snprintf(buf + offset, bufSize - 1 - offset, "frame:%i, %s - 0x%0X\n", \
|
||||
offset += tsnprintf(buf + offset, bufSize - 1 - offset, "frame:%i, %s - 0x%0X\n", \
|
||||
(ignoreNum > 0) ? i - ignoreNum : i, symbol->Name, symbol->Address); \
|
||||
} \
|
||||
} \
|
||||
|
|
|
@ -156,9 +156,9 @@ int32_t getWordLength(char type);
|
|||
int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, char *const output, const char type);
|
||||
int32_t tsDecompressFloatImpAvx2(const char *input, int32_t nelements, char *output);
|
||||
int32_t tsDecompressDoubleImpAvx2(const char *input, int32_t nelements, char *output);
|
||||
void tsDecompressTimestampAvx2(const char *input, int32_t nelements, char *output, bool bigEndian);
|
||||
#endif
|
||||
#ifdef __AVX512VL__
|
||||
void tsDecompressTimestampAvx2(const char *input, int32_t nelements, char *output, bool bigEndian);
|
||||
void tsDecompressTimestampAvx512(const char *const input, const int32_t nelements, char *const output, bool bigEndian);
|
||||
#endif
|
||||
|
||||
|
|
|
@ -199,6 +199,7 @@ typedef struct {
|
|||
SArray *preLineTagKV;
|
||||
SArray *maxTagKVs;
|
||||
SArray *maxColKVs;
|
||||
SArray *escapedStringList;
|
||||
|
||||
SSmlLineInfo preLine;
|
||||
STableMeta *currSTableMeta;
|
||||
|
|
|
@ -84,7 +84,7 @@ void taos_cleanup(void) {
|
|||
taosCloseRef(id);
|
||||
|
||||
nodesDestroyAllocatorSet();
|
||||
cleanupAppInfo();
|
||||
// cleanupAppInfo();
|
||||
rpcCleanup();
|
||||
tscDebug("rpc cleanup");
|
||||
|
||||
|
@ -492,53 +492,53 @@ int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD
|
|||
}
|
||||
|
||||
if (row[i] == NULL) {
|
||||
len += snprintf(str + len, size - len, "%s", TSDB_DATA_NULL_STR);
|
||||
len += tsnprintf(str + len, size - len, "%s", TSDB_DATA_NULL_STR);
|
||||
continue;
|
||||
}
|
||||
|
||||
switch (fields[i].type) {
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
len += snprintf(str + len, size - len, "%d", *((int8_t *)row[i]));
|
||||
len += tsnprintf(str + len, size - len, "%d", *((int8_t *)row[i]));
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_UTINYINT:
|
||||
len += snprintf(str + len, size - len, "%u", *((uint8_t *)row[i]));
|
||||
len += tsnprintf(str + len, size - len, "%u", *((uint8_t *)row[i]));
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_SMALLINT:
|
||||
len += snprintf(str + len, size - len, "%d", *((int16_t *)row[i]));
|
||||
len += tsnprintf(str + len, size - len, "%d", *((int16_t *)row[i]));
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_USMALLINT:
|
||||
len += snprintf(str + len, size - len, "%u", *((uint16_t *)row[i]));
|
||||
len += tsnprintf(str + len, size - len, "%u", *((uint16_t *)row[i]));
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_INT:
|
||||
len += snprintf(str + len, size - len, "%d", *((int32_t *)row[i]));
|
||||
len += tsnprintf(str + len, size - len, "%d", *((int32_t *)row[i]));
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_UINT:
|
||||
len += snprintf(str + len, size - len, "%u", *((uint32_t *)row[i]));
|
||||
len += tsnprintf(str + len, size - len, "%u", *((uint32_t *)row[i]));
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_BIGINT:
|
||||
len += snprintf(str + len, size - len, "%" PRId64, *((int64_t *)row[i]));
|
||||
len += tsnprintf(str + len, size - len, "%" PRId64, *((int64_t *)row[i]));
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_UBIGINT:
|
||||
len += snprintf(str + len, size - len, "%" PRIu64, *((uint64_t *)row[i]));
|
||||
len += tsnprintf(str + len, size - len, "%" PRIu64, *((uint64_t *)row[i]));
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_FLOAT: {
|
||||
float fv = 0;
|
||||
fv = GET_FLOAT_VAL(row[i]);
|
||||
len += snprintf(str + len, size - len, "%f", fv);
|
||||
len += tsnprintf(str + len, size - len, "%f", fv);
|
||||
} break;
|
||||
|
||||
case TSDB_DATA_TYPE_DOUBLE: {
|
||||
double dv = 0;
|
||||
dv = GET_DOUBLE_VAL(row[i]);
|
||||
len += snprintf(str + len, size - len, "%lf", dv);
|
||||
len += tsnprintf(str + len, size - len, "%lf", dv);
|
||||
} break;
|
||||
|
||||
case TSDB_DATA_TYPE_VARBINARY: {
|
||||
|
@ -576,11 +576,11 @@ int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD
|
|||
} break;
|
||||
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
len += snprintf(str + len, size - len, "%" PRId64, *((int64_t *)row[i]));
|
||||
len += tsnprintf(str + len, size - len, "%" PRId64, *((int64_t *)row[i]));
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_BOOL:
|
||||
len += snprintf(str + len, size - len, "%d", *((int8_t *)row[i]));
|
||||
len += tsnprintf(str + len, size - len, "%d", *((int8_t *)row[i]));
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -479,6 +479,7 @@ int32_t smlParseEndLine(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs)
|
|||
}
|
||||
|
||||
clearColValArraySml(info->currTableDataCtx->pValues);
|
||||
taosArrayClearP(info->escapedStringList, taosMemoryFree);
|
||||
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
|
||||
return ret;
|
||||
|
@ -1608,6 +1609,7 @@ void smlDestroyInfo(SSmlHandle *info) {
|
|||
taosArrayDestroy(info->valueJsonArray);
|
||||
|
||||
taosArrayDestroyEx(info->preLineTagKV, freeSSmlKv);
|
||||
taosArrayDestroyP(info->escapedStringList, taosMemoryFree);
|
||||
|
||||
if (!info->dataFormat) {
|
||||
for (int i = 0; i < info->lineNum; i++) {
|
||||
|
@ -1667,8 +1669,9 @@ int32_t smlBuildSmlInfo(TAOS *taos, SSmlHandle **handle) {
|
|||
info->tagJsonArray = taosArrayInit(8, POINTER_BYTES);
|
||||
info->valueJsonArray = taosArrayInit(8, POINTER_BYTES);
|
||||
info->preLineTagKV = taosArrayInit(8, sizeof(SSmlKv));
|
||||
|
||||
if (info->tagJsonArray == NULL || info->valueJsonArray == NULL || info->preLineTagKV == NULL) {
|
||||
info->escapedStringList = taosArrayInit(8, POINTER_BYTES);
|
||||
if (info->tagJsonArray == NULL || info->valueJsonArray == NULL ||
|
||||
info->preLineTagKV == NULL || info->escapedStringList == NULL) {
|
||||
uError("SML:0x%" PRIx64 " failed to allocate memory", info->id);
|
||||
code = terrno;
|
||||
goto FAILED;
|
||||
|
@ -1949,6 +1952,7 @@ int32_t smlClearForRerun(SSmlHandle *info) {
|
|||
}
|
||||
}
|
||||
|
||||
taosArrayClearP(info->escapedStringList, taosMemoryFree);
|
||||
(void)memset(&info->preLine, 0, sizeof(SSmlLineInfo));
|
||||
info->currSTableMeta = NULL;
|
||||
info->currTableDataCtx = NULL;
|
||||
|
|
|
@ -451,6 +451,13 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
|
|||
|
||||
if (info->dataFormat) {
|
||||
bool isAligned = isSmlColAligned(info, cnt, &kv);
|
||||
if (kv.type == TSDB_DATA_TYPE_BINARY && valueEscaped) {
|
||||
if (taosArrayPush(info->escapedStringList, &kv.value) == NULL){
|
||||
freeSSmlKv(&kv);
|
||||
return terrno;
|
||||
}
|
||||
kv.value = NULL;
|
||||
}
|
||||
freeSSmlKv(&kv);
|
||||
if(!isAligned){
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -459,10 +466,12 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
|
|||
if (currElement->colArray == NULL) {
|
||||
currElement->colArray = taosArrayInit_s(sizeof(SSmlKv), 1);
|
||||
if (currElement->colArray == NULL) {
|
||||
freeSSmlKv(&kv);
|
||||
return terrno;
|
||||
}
|
||||
}
|
||||
if (taosArrayPush(currElement->colArray, &kv) == NULL){ // reserve for timestamp
|
||||
freeSSmlKv(&kv);
|
||||
return terrno;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1200,6 +1200,22 @@ static int stmtAddBatch2(TAOS_STMT2* stmt) {
|
|||
|
||||
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_ADD_BATCH));
|
||||
|
||||
if (pStmt->sql.stbInterlaceMode) {
|
||||
int64_t startUs2 = taosGetTimestampUs();
|
||||
pStmt->stat.addBatchUs += startUs2 - startUs;
|
||||
|
||||
pStmt->sql.siInfo.tableColsReady = false;
|
||||
|
||||
SStmtQNode* param = NULL;
|
||||
STMT_ERR_RET(stmtAllocQNodeFromBuf(&pStmt->sql.siInfo.tbBuf, (void**)¶m));
|
||||
param->restoreTbCols = true;
|
||||
param->next = NULL;
|
||||
|
||||
stmtEnqueue(pStmt, param);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
STMT_ERR_RET(stmtCacheBlock(pStmt));
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -1403,9 +1419,9 @@ int stmtBindBatch2(TAOS_STMT2* stmt, TAOS_STMT2_BIND* bind, int32_t colIdx) {
|
|||
|
||||
if (pStmt->sql.stbInterlaceMode) {
|
||||
STMT_ERR_RET(stmtAppendTablePostHandle(pStmt, param));
|
||||
}
|
||||
|
||||
} else {
|
||||
STMT_ERR_RET(stmtAddBatch2(pStmt));
|
||||
}
|
||||
|
||||
pStmt->stat.bindDataUs4 += taosGetTimestampUs() - startUs4;
|
||||
|
||||
|
@ -1609,24 +1625,12 @@ int stmtExec2(TAOS_STMT2* stmt, int* affected_rows) {
|
|||
return pStmt->errCode;
|
||||
}
|
||||
|
||||
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_EXECUTE));
|
||||
|
||||
if (pStmt->sql.stbInterlaceMode) {
|
||||
int64_t startUs2 = taosGetTimestampUs();
|
||||
pStmt->stat.addBatchUs += startUs2 - startUs;
|
||||
|
||||
pStmt->sql.siInfo.tableColsReady = false;
|
||||
|
||||
SStmtQNode* param = NULL;
|
||||
STMT_ERR_RET(stmtAllocQNodeFromBuf(&pStmt->sql.siInfo.tbBuf, (void**)¶m));
|
||||
param->restoreTbCols = true;
|
||||
param->next = NULL;
|
||||
|
||||
stmtEnqueue(pStmt, param);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
STMT_ERR_RET(stmtAddBatch2(pStmt));
|
||||
}
|
||||
|
||||
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_EXECUTE));
|
||||
|
||||
if (STMT_TYPE_QUERY != pStmt->sql.type) {
|
||||
if (pStmt->sql.stbInterlaceMode) {
|
||||
int64_t startTs = taosGetTimestampUs();
|
||||
|
|
|
@ -2625,6 +2625,8 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf
|
|||
uError("func %s failed to convert to ucs charset since %s", __func__, tstrerror(code));
|
||||
lino = __LINE__;
|
||||
goto _exit;
|
||||
} else { // reset the length value
|
||||
code = TSDB_CODE_SUCCESS;
|
||||
}
|
||||
len += tsnprintf(dumpBuf + len, size - len, " %15s |", pBuf);
|
||||
if (len >= size - 1) goto _exit;
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
#define _DEFAULT_SOURCE
|
||||
#include "dmMgmt.h"
|
||||
#include "mnode.h"
|
||||
#include "osFile.h"
|
||||
#include "tconfig.h"
|
||||
#include "tglobal.h"
|
||||
#include "version.h"
|
||||
|
@ -181,6 +182,7 @@ static void dmSetSignalHandle() {
|
|||
}
|
||||
#endif
|
||||
}
|
||||
extern bool generateNewMeta;
|
||||
|
||||
static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
|
||||
global.startTime = taosGetTimestampMs();
|
||||
|
@ -220,6 +222,8 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
|
|||
global.dumpSdb = true;
|
||||
} else if (strcmp(argv[i], "-dTxn") == 0) {
|
||||
global.deleteTrans = true;
|
||||
} else if (strcmp(argv[i], "-r") == 0) {
|
||||
generateNewMeta = true;
|
||||
} else if (strcmp(argv[i], "-E") == 0) {
|
||||
if (i < argc - 1) {
|
||||
if (strlen(argv[++i]) >= PATH_MAX) {
|
||||
|
@ -415,6 +419,9 @@ int mainWindows(int argc, char **argv) {
|
|||
return code;
|
||||
}
|
||||
int ret = dmUpdateEncryptKey(global.encryptKey, toLogFile);
|
||||
if (taosCloseFile(&pFile) != 0) {
|
||||
encryptError("failed to close file:%p", pFile);
|
||||
}
|
||||
taosCloseLog();
|
||||
taosCleanupArgs();
|
||||
return ret;
|
||||
|
|
|
@ -123,6 +123,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
|
|||
int32_t code = 0;
|
||||
SStatusReq req = {0};
|
||||
|
||||
dDebug("send status req to mnode, statusSeq:%d, begin to mgnt lock", pMgmt->statusSeq);
|
||||
(void)taosThreadRwlockRdlock(&pMgmt->pData->lock);
|
||||
req.sver = tsVersion;
|
||||
req.dnodeVer = pMgmt->pData->dnodeVer;
|
||||
|
@ -161,14 +162,17 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
|
|||
memcpy(req.clusterCfg.charset, tsCharset, TD_LOCALE_LEN);
|
||||
(void)taosThreadRwlockUnlock(&pMgmt->pData->lock);
|
||||
|
||||
dDebug("send status req to mnode, statusSeq:%d, begin to get vnode loads", pMgmt->statusSeq);
|
||||
SMonVloadInfo vinfo = {0};
|
||||
(*pMgmt->getVnodeLoadsFp)(&vinfo);
|
||||
req.pVloads = vinfo.pVloads;
|
||||
|
||||
dDebug("send status req to mnode, statusSeq:%d, begin to get mnode loads", pMgmt->statusSeq);
|
||||
SMonMloadInfo minfo = {0};
|
||||
(*pMgmt->getMnodeLoadsFp)(&minfo);
|
||||
req.mload = minfo.load;
|
||||
|
||||
dDebug("send status req to mnode, statusSeq:%d, begin to get qnode loads", pMgmt->statusSeq);
|
||||
(*pMgmt->getQnodeLoadsFp)(&req.qload);
|
||||
|
||||
pMgmt->statusSeq++;
|
||||
|
@ -206,6 +210,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
|
|||
int8_t epUpdated = 0;
|
||||
(void)dmGetMnodeEpSet(pMgmt->pData, &epSet);
|
||||
|
||||
dDebug("send status req to mnode, statusSeq:%d, begin to send rpc msg", pMgmt->statusSeq);
|
||||
code =
|
||||
rpcSendRecvWithTimeout(pMgmt->msgCb.statusRpc, &epSet, &rpcMsg, &rpcRsp, &epUpdated, tsStatusInterval * 5 * 1000);
|
||||
if (code != 0) {
|
||||
|
|
|
@ -208,7 +208,7 @@ SArray *mmGetMsgHandles() {
|
|||
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_VIEW, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_VIEW_META, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_STATIS, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_COMPACT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_COMPACT, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_CONFIG_CLUSTER, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_COMPACT_PROGRESS_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_ENCRYPT_KEY, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
|
||||
|
|
|
@ -415,14 +415,24 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
// taosThreadMutexLock(&pMgmt->createLock);
|
||||
code = taosThreadMutexLock(&pMgmt->createLock);
|
||||
if (code != 0) {
|
||||
dError("vgId:%d, failed to lock since %s", req.vgId, tstrerror(code));
|
||||
goto _OVER;
|
||||
}
|
||||
code = vmWriteVnodeListToFile(pMgmt);
|
||||
if (code != 0) {
|
||||
code = terrno != 0 ? terrno : code;
|
||||
// taosThreadMutexUnlock(&pMgmt->createLock);
|
||||
int32_t ret = taosThreadMutexUnlock(&pMgmt->createLock);
|
||||
if (ret != 0) {
|
||||
dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(ret));
|
||||
}
|
||||
goto _OVER;
|
||||
}
|
||||
// taosThreadMutexUnlock(&pMgmt->createLock);
|
||||
int32_t ret = taosThreadMutexUnlock(&pMgmt->createLock);
|
||||
if (ret != 0) {
|
||||
dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(ret));
|
||||
}
|
||||
|
||||
_OVER:
|
||||
if (code != 0) {
|
||||
|
@ -1037,7 +1047,7 @@ SArray *vmGetMsgHandles() {
|
|||
if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_VND_TRIM, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_VND_S3MIGRATE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE, vmPutMsgToMultiMgmtQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_VNODE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_VNODE_TYPE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
|
||||
|
|
|
@ -515,6 +515,7 @@ static int32_t mndInitWal(SMnode *pMnode) {
|
|||
.fsyncPeriod = 0,
|
||||
.rollPeriod = -1,
|
||||
.segSize = -1,
|
||||
.committed = -1,
|
||||
.retentionPeriod = 0,
|
||||
.retentionSize = 0,
|
||||
.level = TAOS_WAL_FSYNC,
|
||||
|
|
|
@ -2252,7 +2252,7 @@ static int32_t mndRetrieveTSMA(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlo
|
|||
if (nodeType(pFunc) == QUERY_NODE_FUNCTION) {
|
||||
SFunctionNode *pFuncNode = (SFunctionNode *)pFunc;
|
||||
if (!fmIsTSMASupportedFunc(pFuncNode->funcId)) continue;
|
||||
len += snprintf(start, TSDB_MAX_SAVED_SQL_LEN - len, "%s%s", start != buf + VARSTR_HEADER_SIZE ? "," : "",
|
||||
len += tsnprintf(start, TSDB_MAX_SAVED_SQL_LEN - len, "%s%s", start != buf + VARSTR_HEADER_SIZE ? "," : "",
|
||||
((SExprNode *)pFunc)->userAlias);
|
||||
if (len >= TSDB_MAX_SAVED_SQL_LEN) {
|
||||
len = TSDB_MAX_SAVED_SQL_LEN;
|
||||
|
|
|
@ -1811,6 +1811,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
mInfo("stream:%s,%" PRId64 " start to resume stream from pause", resumeReq.name, pStream->uid);
|
||||
if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) {
|
||||
sdbRelease(pMnode->pSdb, pStream);
|
||||
return -1;
|
||||
|
|
|
@ -61,7 +61,6 @@ static int32_t doSetPauseAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTa
|
|||
static int32_t doSetDropAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask) {
|
||||
SVDropStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVDropStreamTaskReq));
|
||||
if (pReq == NULL) {
|
||||
// terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
|
@ -93,7 +92,6 @@ static int32_t doSetResumeAction(STrans *pTrans, SMnode *pMnode, SStreamTask *pT
|
|||
if (pReq == NULL) {
|
||||
mError("failed to malloc in resume stream, size:%" PRIzu ", code:%s", sizeof(SVResumeStreamTaskReq),
|
||||
tstrerror(TSDB_CODE_OUT_OF_MEMORY));
|
||||
// terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
|
@ -106,19 +104,18 @@ static int32_t doSetResumeAction(STrans *pTrans, SMnode *pMnode, SStreamTask *pT
|
|||
bool hasEpset = false;
|
||||
int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId);
|
||||
if (code != TSDB_CODE_SUCCESS || (!hasEpset)) {
|
||||
terrno = code;
|
||||
taosMemoryFree(pReq);
|
||||
return terrno;
|
||||
return code;
|
||||
}
|
||||
|
||||
code = setTransAction(pTrans, pReq, sizeof(SVResumeStreamTaskReq), TDMT_STREAM_TASK_RESUME, &epset, 0, TSDB_CODE_VND_INVALID_VGROUP_ID);
|
||||
if (code != 0) {
|
||||
taosMemoryFree(pReq);
|
||||
return terrno;
|
||||
return code;
|
||||
}
|
||||
|
||||
mDebug("set the resume action for trans:%d", pTrans->id);
|
||||
return 0;
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t doSetDropActionFromId(SMnode *pMnode, STrans *pTrans, SOrphanTask* pTask) {
|
||||
|
|
|
@ -474,6 +474,21 @@ static int32_t mndTransActionInsert(SSdb *pSdb, STrans *pTrans) {
|
|||
// pTrans->startFunc = 0;
|
||||
}
|
||||
|
||||
if (pTrans->stage == TRN_STAGE_COMMIT) {
|
||||
pTrans->stage = TRN_STAGE_COMMIT_ACTION;
|
||||
mInfo("trans:%d, stage from commit to commitAction since perform update action", pTrans->id);
|
||||
}
|
||||
|
||||
if (pTrans->stage == TRN_STAGE_ROLLBACK) {
|
||||
pTrans->stage = TRN_STAGE_UNDO_ACTION;
|
||||
mInfo("trans:%d, stage from rollback to undoAction since perform update action", pTrans->id);
|
||||
}
|
||||
|
||||
if (pTrans->stage == TRN_STAGE_PRE_FINISH) {
|
||||
pTrans->stage = TRN_STAGE_FINISH;
|
||||
mInfo("trans:%d, stage from pre-finish to finished since perform update action", pTrans->id);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -563,17 +578,17 @@ static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) {
|
|||
|
||||
if (pOld->stage == TRN_STAGE_COMMIT) {
|
||||
pOld->stage = TRN_STAGE_COMMIT_ACTION;
|
||||
mTrace("trans:%d, stage from commit to commitAction since perform update action", pNew->id);
|
||||
mInfo("trans:%d, stage from commit to commitAction since perform update action", pNew->id);
|
||||
}
|
||||
|
||||
if (pOld->stage == TRN_STAGE_ROLLBACK) {
|
||||
pOld->stage = TRN_STAGE_UNDO_ACTION;
|
||||
mTrace("trans:%d, stage from rollback to undoAction since perform update action", pNew->id);
|
||||
mInfo("trans:%d, stage from rollback to undoAction since perform update action", pNew->id);
|
||||
}
|
||||
|
||||
if (pOld->stage == TRN_STAGE_PRE_FINISH) {
|
||||
pOld->stage = TRN_STAGE_FINISH;
|
||||
mTrace("trans:%d, stage from pre-finish to finished since perform update action", pNew->id);
|
||||
mInfo("trans:%d, stage from pre-finish to finished since perform update action", pNew->id);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -1252,8 +1267,9 @@ int32_t mndTransProcessRsp(SRpcMsg *pRsp) {
|
|||
pAction->errCode = pRsp->code;
|
||||
pTrans->lastErrorNo = pRsp->code;
|
||||
|
||||
mInfo("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x retry:0x%x", transId,
|
||||
mndTransStr(pAction->stage), action, pRsp->code, pAction->acceptableCode, pAction->retryCode);
|
||||
mInfo("trans:%d, %s:%d response is received, received code:0x%x(%s), accept:0x%x(%s) retry:0x%x(%s)", transId,
|
||||
mndTransStr(pAction->stage), action, pRsp->code, tstrerror(pRsp->code), pAction->acceptableCode,
|
||||
tstrerror(pAction->acceptableCode), pAction->retryCode, tstrerror(pAction->retryCode));
|
||||
} else {
|
||||
mInfo("trans:%d, invalid action, index:%d, code:0x%x", transId, action, pRsp->code);
|
||||
}
|
||||
|
@ -1294,7 +1310,7 @@ static void mndTransResetActions(SMnode *pMnode, STrans *pTrans, SArray *pArray)
|
|||
}
|
||||
}
|
||||
|
||||
// execute at bottom half
|
||||
// execute in sync context
|
||||
static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransAction *pAction, bool topHalf) {
|
||||
if (pAction->rawWritten) return 0;
|
||||
if (topHalf) {
|
||||
|
@ -1320,7 +1336,7 @@ static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransActi
|
|||
TAOS_RETURN(code);
|
||||
}
|
||||
|
||||
// execute at top half
|
||||
// execute in trans context
|
||||
static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction, bool topHalf) {
|
||||
if (pAction->msgSent) return 0;
|
||||
if (mndCannotExecuteTransAction(pMnode, topHalf)) {
|
||||
|
@ -1469,8 +1485,8 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA
|
|||
static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans, bool topHalf) {
|
||||
int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->redoActions, topHalf);
|
||||
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) {
|
||||
mError("trans:%d, failed to execute redoActions since:%s, code:0x%x, topHalf:%d", pTrans->id, terrstr(), terrno,
|
||||
topHalf);
|
||||
mError("trans:%d, failed to execute redoActions since:%s, code:0x%x, topHalf(TransContext):%d", pTrans->id,
|
||||
terrstr(), terrno, topHalf);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
@ -1478,7 +1494,8 @@ static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans, bool t
|
|||
static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans, bool topHalf) {
|
||||
int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->undoActions, topHalf);
|
||||
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) {
|
||||
mError("trans:%d, failed to execute undoActions since %s. topHalf:%d", pTrans->id, terrstr(), topHalf);
|
||||
mError("trans:%d, failed to execute undoActions since %s. topHalf(TransContext):%d", pTrans->id, terrstr(),
|
||||
topHalf);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
@ -1486,7 +1503,8 @@ static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans, bool t
|
|||
static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans, bool topHalf) {
|
||||
int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->commitActions, topHalf);
|
||||
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) {
|
||||
mError("trans:%d, failed to execute commitActions since %s. topHalf:%d", pTrans->id, terrstr(), topHalf);
|
||||
mError("trans:%d, failed to execute commitActions since %s. topHalf(TransContext):%d", pTrans->id, terrstr(),
|
||||
topHalf);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
@ -1500,11 +1518,15 @@ static int32_t mndTransExecuteActionsSerial(SMnode *pMnode, STrans *pTrans, SArr
|
|||
return code;
|
||||
}
|
||||
|
||||
mInfo("trans:%d, execute %d actions serial, current action:%d", pTrans->id, numOfActions, pTrans->actionPos);
|
||||
mInfo("trans:%d, execute %d actions serial, begin at action:%d, stage:%s", pTrans->id, numOfActions,
|
||||
pTrans->actionPos, mndTransStr(pTrans->stage));
|
||||
|
||||
for (int32_t action = pTrans->actionPos; action < numOfActions; ++action) {
|
||||
STransAction *pAction = taosArrayGet(pActions, action);
|
||||
|
||||
mInfo("trans:%d, current action:%d, stage:%s, actionType(0:log,1:msg):%d", pTrans->id, pTrans->actionPos,
|
||||
mndTransStr(pAction->stage), pAction->actionType);
|
||||
|
||||
code = mndTransExecSingleAction(pMnode, pTrans, pAction, topHalf);
|
||||
if (code == 0) {
|
||||
if (pAction->msgSent) {
|
||||
|
@ -1536,8 +1558,8 @@ static int32_t mndTransExecuteActionsSerial(SMnode *pMnode, STrans *pTrans, SArr
|
|||
if (mndCannotExecuteTransAction(pMnode, topHalf)) {
|
||||
pTrans->lastErrorNo = code;
|
||||
pTrans->code = code;
|
||||
mInfo("trans:%d, %s:%d, topHalf:%d, not execute next action, code:%s", pTrans->id, mndTransStr(pAction->stage),
|
||||
action, topHalf, tstrerror(code));
|
||||
mInfo("trans:%d, %s:%d, topHalf(TransContext):%d, not execute next action, code:%s", pTrans->id,
|
||||
mndTransStr(pAction->stage), action, topHalf, tstrerror(code));
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1561,7 +1583,8 @@ static int32_t mndTransExecuteActionsSerial(SMnode *pMnode, STrans *pTrans, SArr
|
|||
break;
|
||||
} else if (code == pAction->retryCode || code == TSDB_CODE_SYN_PROPOSE_NOT_READY ||
|
||||
code == TSDB_CODE_SYN_RESTORING || code == TSDB_CODE_SYN_NOT_LEADER) {
|
||||
mInfo("trans:%d, %s:%d receive code:0x%x and retry", pTrans->id, mndTransStr(pAction->stage), pAction->id, code);
|
||||
mInfo("trans:%d, %s:%d receive code:0x%x(%s) and retry", pTrans->id, mndTransStr(pAction->stage), pAction->id,
|
||||
code, tstrerror(code));
|
||||
pTrans->lastErrorNo = code;
|
||||
taosMsleep(300);
|
||||
action--;
|
||||
|
@ -1570,8 +1593,8 @@ static int32_t mndTransExecuteActionsSerial(SMnode *pMnode, STrans *pTrans, SArr
|
|||
terrno = code;
|
||||
pTrans->lastErrorNo = code;
|
||||
pTrans->code = code;
|
||||
mInfo("trans:%d, %s:%d receive code:0x%x and wait another schedule, failedTimes:%d", pTrans->id,
|
||||
mndTransStr(pAction->stage), pAction->id, code, pTrans->failedTimes);
|
||||
mInfo("trans:%d, %s:%d receive code:0x%x(%s) and wait another schedule, failedTimes:%d", pTrans->id,
|
||||
mndTransStr(pAction->stage), pAction->id, code, tstrerror(code), pTrans->failedTimes);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -1647,8 +1670,8 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans, bool
|
|||
} else {
|
||||
continueExec = false;
|
||||
}
|
||||
mInfo("trans:%d, cannot execute redo action stage, topHalf:%d, continueExec:%d, code:%s", pTrans->id, topHalf,
|
||||
continueExec, tstrerror(code));
|
||||
mInfo("trans:%d, cannot execute redo action stage, topHalf(TransContext):%d, continueExec:%d, code:%s", pTrans->id,
|
||||
topHalf, continueExec, tstrerror(code));
|
||||
|
||||
return continueExec;
|
||||
}
|
||||
|
@ -1680,7 +1703,9 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans, bool
|
|||
}
|
||||
|
||||
pTrans->stage = TRN_STAGE_ROLLBACK;
|
||||
mError("trans:%d, stage from redoAction to rollback since %s", pTrans->id, terrstr());
|
||||
pTrans->actionPos = 0;
|
||||
mError("trans:%d, stage from redoAction to rollback since %s, and set actionPos to %d", pTrans->id, terrstr(),
|
||||
pTrans->actionPos);
|
||||
continueExec = true;
|
||||
} else {
|
||||
mError("trans:%d, stage keep on redoAction since %s, failedTimes:%d", pTrans->id, terrstr(), pTrans->failedTimes);
|
||||
|
@ -1691,6 +1716,7 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans, bool
|
|||
return continueExec;
|
||||
}
|
||||
|
||||
// in trans context
|
||||
static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans, bool topHalf) {
|
||||
if (mndCannotExecuteTransAction(pMnode, topHalf)) return false;
|
||||
|
||||
|
@ -1765,6 +1791,7 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans, bool
|
|||
return continueExec;
|
||||
}
|
||||
|
||||
// in trans context
|
||||
static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans, bool topHalf) {
|
||||
if (mndCannotExecuteTransAction(pMnode, topHalf)) return false;
|
||||
|
||||
|
@ -1773,8 +1800,6 @@ static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans, bool to
|
|||
|
||||
if (code == 0) {
|
||||
pTrans->stage = TRN_STAGE_UNDO_ACTION;
|
||||
pTrans->actionPos = 0;
|
||||
mInfo("trans:%d, stage from rollback to undoAction, actionPos:%d", pTrans->id, pTrans->actionPos);
|
||||
continueExec = true;
|
||||
} else {
|
||||
pTrans->failedTimes++;
|
||||
|
@ -1829,7 +1854,7 @@ void mndTransExecuteImp(SMnode *pMnode, STrans *pTrans, bool topHalf) {
|
|||
bool continueExec = true;
|
||||
|
||||
while (continueExec) {
|
||||
mInfo("trans:%d, continue to execute, stage:%s createTime:%" PRId64 " topHalf:%d", pTrans->id,
|
||||
mInfo("trans:%d, continue to execute, stage:%s createTime:%" PRId64 " topHalf(TransContext):%d", pTrans->id,
|
||||
mndTransStr(pTrans->stage), pTrans->createdTime, topHalf);
|
||||
pTrans->lastExecTime = taosGetTimestampMs();
|
||||
switch (pTrans->stage) {
|
||||
|
|
|
@ -81,6 +81,9 @@ typedef struct SCommitInfo SCommitInfo;
|
|||
typedef struct SCompactInfo SCompactInfo;
|
||||
typedef struct SQueryNode SQueryNode;
|
||||
|
||||
#define VNODE_META_TMP_DIR "meta.tmp"
|
||||
#define VNODE_META_BACKUP_DIR "meta.backup"
|
||||
|
||||
#define VNODE_META_DIR "meta"
|
||||
#define VNODE_TSDB_DIR "tsdb"
|
||||
#define VNODE_TQ_DIR "tq"
|
||||
|
|
|
@ -133,7 +133,7 @@ static void doScan(SMeta *pMeta) {
|
|||
}
|
||||
}
|
||||
|
||||
int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
|
||||
static int32_t metaOpenImpl(SVnode *pVnode, SMeta **ppMeta, const char *metaDir, int8_t rollback) {
|
||||
SMeta *pMeta = NULL;
|
||||
int32_t code = 0;
|
||||
int32_t lino;
|
||||
|
@ -144,7 +144,11 @@ int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
|
|||
// create handle
|
||||
vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, path, TSDB_FILENAME_LEN);
|
||||
offset = strlen(path);
|
||||
snprintf(path + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, VNODE_META_DIR);
|
||||
snprintf(path + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, metaDir);
|
||||
|
||||
if (strncmp(metaDir, VNODE_META_TMP_DIR, strlen(VNODE_META_TMP_DIR)) == 0) {
|
||||
taosRemoveDir(path);
|
||||
}
|
||||
|
||||
if ((pMeta = taosMemoryCalloc(1, sizeof(*pMeta) + strlen(path) + 1)) == NULL) {
|
||||
TSDB_CHECK_CODE(code = terrno, lino, _exit);
|
||||
|
@ -245,6 +249,188 @@ _exit:
|
|||
return code;
|
||||
}
|
||||
|
||||
bool generateNewMeta = false;
|
||||
|
||||
static int32_t metaGenerateNewMeta(SMeta **ppMeta) {
|
||||
SMeta *pNewMeta = NULL;
|
||||
SMeta *pMeta = *ppMeta;
|
||||
SVnode *pVnode = pMeta->pVnode;
|
||||
|
||||
metaInfo("vgId:%d start to generate new meta", TD_VID(pMeta->pVnode));
|
||||
|
||||
// Open a new meta for organization
|
||||
int32_t code = metaOpenImpl(pMeta->pVnode, &pNewMeta, VNODE_META_TMP_DIR, false);
|
||||
if (code) {
|
||||
return code;
|
||||
}
|
||||
|
||||
code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL);
|
||||
if (code) {
|
||||
return code;
|
||||
}
|
||||
|
||||
// i == 0, scan super table
|
||||
// i == 1, scan normal table and child table
|
||||
for (int i = 0; i < 2; i++) {
|
||||
TBC *uidCursor = NULL;
|
||||
int32_t counter = 0;
|
||||
|
||||
code = tdbTbcOpen(pMeta->pUidIdx, &uidCursor, NULL);
|
||||
if (code) {
|
||||
metaError("vgId:%d failed to open uid index cursor, reason:%s", TD_VID(pVnode), tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
code = tdbTbcMoveToFirst(uidCursor);
|
||||
if (code) {
|
||||
metaError("vgId:%d failed to move to first, reason:%s", TD_VID(pVnode), tstrerror(code));
|
||||
tdbTbcClose(uidCursor);
|
||||
return code;
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
const void *pKey;
|
||||
int kLen;
|
||||
const void *pVal;
|
||||
int vLen;
|
||||
|
||||
if (tdbTbcGet(uidCursor, &pKey, &kLen, &pVal, &vLen) < 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
tb_uid_t uid = *(tb_uid_t *)pKey;
|
||||
SUidIdxVal *pUidIdxVal = (SUidIdxVal *)pVal;
|
||||
if ((i == 0 && (pUidIdxVal->suid && pUidIdxVal->suid == uid)) // super table
|
||||
|| (i == 1 && (pUidIdxVal->suid == 0 || pUidIdxVal->suid != uid)) // normal table and child table
|
||||
) {
|
||||
counter++;
|
||||
if (i == 0) {
|
||||
metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter, "super", uid);
|
||||
} else {
|
||||
metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter,
|
||||
pUidIdxVal->suid == 0 ? "normal" : "child", uid);
|
||||
}
|
||||
|
||||
// fetch table entry
|
||||
void *value = NULL;
|
||||
int valueSize = 0;
|
||||
if (tdbTbGet(pMeta->pTbDb,
|
||||
&(STbDbKey){
|
||||
.version = pUidIdxVal->version,
|
||||
.uid = uid,
|
||||
},
|
||||
sizeof(uid), &value, &valueSize) == 0) {
|
||||
SDecoder dc = {0};
|
||||
SMetaEntry me = {0};
|
||||
tDecoderInit(&dc, value, valueSize);
|
||||
if (metaDecodeEntry(&dc, &me) == 0) {
|
||||
if (metaHandleEntry(pNewMeta, &me) != 0) {
|
||||
metaError("vgId:%d failed to handle entry, uid:%" PRId64, TD_VID(pVnode), uid);
|
||||
}
|
||||
}
|
||||
tDecoderClear(&dc);
|
||||
}
|
||||
tdbFree(value);
|
||||
}
|
||||
|
||||
code = tdbTbcMoveToNext(uidCursor);
|
||||
if (code) {
|
||||
metaError("vgId:%d failed to move to next, reason:%s", TD_VID(pVnode), tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
tdbTbcClose(uidCursor);
|
||||
}
|
||||
|
||||
code = metaCommit(pNewMeta, pNewMeta->txn);
|
||||
if (code) {
|
||||
metaError("vgId:%d failed to commit, reason:%s", TD_VID(pVnode), tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
code = metaFinishCommit(pNewMeta, pNewMeta->txn);
|
||||
if (code) {
|
||||
metaError("vgId:%d failed to finish commit, reason:%s", TD_VID(pVnode), tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
if ((code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL)) != 0) {
|
||||
metaError("vgId:%d failed to begin new meta, reason:%s", TD_VID(pVnode), tstrerror(code));
|
||||
}
|
||||
metaClose(&pNewMeta);
|
||||
metaInfo("vgId:%d finish to generate new meta", TD_VID(pVnode));
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
|
||||
if (generateNewMeta) {
|
||||
char path[TSDB_FILENAME_LEN] = {0};
|
||||
char oldMetaPath[TSDB_FILENAME_LEN] = {0};
|
||||
char newMetaPath[TSDB_FILENAME_LEN] = {0};
|
||||
char backupMetaPath[TSDB_FILENAME_LEN] = {0};
|
||||
|
||||
vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, path, TSDB_FILENAME_LEN);
|
||||
snprintf(oldMetaPath, sizeof(oldMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_DIR);
|
||||
snprintf(newMetaPath, sizeof(newMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_TMP_DIR);
|
||||
snprintf(backupMetaPath, sizeof(backupMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_BACKUP_DIR);
|
||||
|
||||
bool oldMetaExist = taosCheckExistFile(oldMetaPath);
|
||||
bool newMetaExist = taosCheckExistFile(newMetaPath);
|
||||
bool backupMetaExist = taosCheckExistFile(backupMetaPath);
|
||||
|
||||
if ((!backupMetaExist && !oldMetaExist && newMetaExist) // case 2
|
||||
|| (backupMetaExist && !oldMetaExist && !newMetaExist) // case 4
|
||||
|| (backupMetaExist && oldMetaExist && newMetaExist) // case 8
|
||||
) {
|
||||
metaError("vgId:%d invalid meta state, please check", TD_VID(pVnode));
|
||||
return TSDB_CODE_FAILED;
|
||||
} else if ((backupMetaExist && oldMetaExist && !newMetaExist) // case 7
|
||||
|| (!backupMetaExist && !oldMetaExist && !newMetaExist) // case 1
|
||||
) {
|
||||
return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
|
||||
} else if (backupMetaExist && !oldMetaExist && newMetaExist) {
|
||||
if (taosRenameFile(newMetaPath, oldMetaPath) != 0) {
|
||||
metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno));
|
||||
return terrno;
|
||||
}
|
||||
return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
|
||||
} else {
|
||||
int32_t code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
|
||||
if (code) {
|
||||
return code;
|
||||
}
|
||||
|
||||
code = metaGenerateNewMeta(ppMeta);
|
||||
if (code) {
|
||||
metaError("vgId:%d failed to generate new meta, reason:%s", TD_VID(pVnode), tstrerror(code));
|
||||
}
|
||||
|
||||
metaClose(ppMeta);
|
||||
if (taosRenameFile(oldMetaPath, backupMetaPath) != 0) {
|
||||
metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno));
|
||||
return terrno;
|
||||
}
|
||||
|
||||
// rename the new meta to old meta
|
||||
if (taosRenameFile(newMetaPath, oldMetaPath) != 0) {
|
||||
metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno));
|
||||
return terrno;
|
||||
}
|
||||
code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false);
|
||||
if (code) {
|
||||
metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t metaUpgrade(SVnode *pVnode, SMeta **ppMeta) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino;
|
||||
|
|
|
@ -2985,9 +2985,6 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
|
|||
}
|
||||
}
|
||||
end:
|
||||
if (terrno != 0) {
|
||||
ret = terrno;
|
||||
}
|
||||
tDecoderClear(&dc);
|
||||
tdbFree(pData);
|
||||
return ret;
|
||||
|
|
|
@ -746,13 +746,13 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV
|
|||
return terrno;
|
||||
}
|
||||
|
||||
pOutputInfo->tbSink.pTblInfo = tSimpleHashInit(10240, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
|
||||
if (pOutputInfo->tbSink.pTblInfo == NULL) {
|
||||
pOutputInfo->tbSink.pTbInfo = tSimpleHashInit(10240, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
|
||||
if (pOutputInfo->tbSink.pTbInfo == NULL) {
|
||||
tqError("vgId:%d failed init sink tableInfo, code:%s", vgId, tstrerror(terrno));
|
||||
return terrno;
|
||||
}
|
||||
|
||||
tSimpleHashSetFreeFp(pOutputInfo->tbSink.pTblInfo, freePtr);
|
||||
tSimpleHashSetFreeFp(pOutputInfo->tbSink.pTbInfo, freePtr);
|
||||
}
|
||||
|
||||
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
|
||||
|
|
|
@ -18,6 +18,8 @@
|
|||
#include "tmsg.h"
|
||||
#include "tq.h"
|
||||
|
||||
#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1))
|
||||
|
||||
typedef struct STableSinkInfo {
|
||||
uint64_t uid;
|
||||
tstr name;
|
||||
|
@ -35,16 +37,22 @@ static int32_t doConvertRows(SSubmitTbData* pTableData, const STSchema* pTSchema
|
|||
int64_t earlyTs, const char* id);
|
||||
static int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkInfo* pTableSinkInfo,
|
||||
const char* dstTableName, int64_t* uid);
|
||||
static int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId,
|
||||
const char* id);
|
||||
static int32_t doRemoveFromCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id);
|
||||
|
||||
static bool isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const char* ctbName, int64_t suid);
|
||||
static int32_t initCreateTableMsg(SVCreateTbReq* pCreateTableReq, uint64_t suid, const char* stbFullName,
|
||||
int32_t numOfTags);
|
||||
static int32_t createDefaultTagColName(SArray** pColNameList);
|
||||
static int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName,
|
||||
int64_t gid, bool newSubTableRule);
|
||||
static int32_t doCreateSinkInfo(const char* pDstTableName, STableSinkInfo** pInfo);
|
||||
static int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock,
|
||||
const char* stbFullName, int64_t gid, bool newSubTableRule);
|
||||
static int32_t doCreateSinkTableInfo(const char* pDstTableName, STableSinkInfo** pInfo);
|
||||
static int32_t doPutSinkTableInfoIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId,
|
||||
const char* id);
|
||||
static bool doGetSinkTableInfoFromCache(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo);
|
||||
static int32_t doRemoveSinkTableInfoInCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id);
|
||||
static int32_t checkTagSchema(SStreamTask* pTask, SVnode* pVnode);
|
||||
static void reubuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs);
|
||||
static int32_t handleResultBlockMsg(SStreamTask* pTask, SSDataBlock* pDataBlock, int32_t index, SVnode* pVnode,
|
||||
int64_t earlyTs);
|
||||
|
||||
int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq,
|
||||
const char* pIdStr, bool newSubTableRule) {
|
||||
|
@ -81,7 +89,8 @@ int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* p
|
|||
|
||||
memcpy(name, varDataVal(varTbName), varDataLen(varTbName));
|
||||
name[varDataLen(varTbName)] = '\0';
|
||||
if (newSubTableRule && !isAutoTableName(name) && !alreadyAddGroupId(name, groupId) && groupId != 0 && stbFullName) {
|
||||
if (newSubTableRule && !isAutoTableName(name) && !alreadyAddGroupId(name, groupId) && groupId != 0 &&
|
||||
stbFullName) {
|
||||
int32_t code = buildCtbNameAddGroupId(stbFullName, name, groupId, cap);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
|
@ -161,16 +170,6 @@ end:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static bool tqGetTableInfo(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo) {
|
||||
void* pVal = tSimpleHashGet(pTableInfoMap, &groupId, sizeof(uint64_t));
|
||||
if (pVal) {
|
||||
*pInfo = *(STableSinkInfo**)pVal;
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbBatchReq* pReqs) {
|
||||
void* buf = NULL;
|
||||
int32_t tlen = 0;
|
||||
|
@ -245,18 +244,17 @@ int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock*
|
|||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
// tqDebug("gen name from:%s", pDataBlock->info.parTbName);
|
||||
// tqDebug("gen name from:%s", pDataBlock->info.parTbName);
|
||||
} else {
|
||||
pCreateTableReq->name = taosStrdup(pDataBlock->info.parTbName);
|
||||
if (pCreateTableReq->name == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
// tqDebug("copy name:%s", pDataBlock->info.parTbName);
|
||||
// tqDebug("copy name:%s", pDataBlock->info.parTbName);
|
||||
}
|
||||
} else {
|
||||
int32_t code = buildCtbNameByGroupId(stbFullName, gid, &pCreateTableReq->name);
|
||||
return code;
|
||||
// tqDebug("gen name from stbFullName:%s gid:%"PRId64, stbFullName, gid);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -266,14 +264,18 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
|
|||
SStreamTask* pTask, int64_t suid) {
|
||||
STSchema* pTSchema = pTask->outputInfo.tbSink.pTSchema;
|
||||
int32_t rows = pDataBlock->info.rows;
|
||||
SArray* tagArray = taosArrayInit(4, sizeof(STagVal));
|
||||
SArray* tagArray = NULL;
|
||||
const char* id = pTask->id.idStr;
|
||||
int32_t vgId = pTask->pMeta->vgId;
|
||||
int32_t code = 0;
|
||||
STableSinkInfo* pInfo = NULL;
|
||||
SVCreateTbBatchReq reqs = {0};
|
||||
SArray* crTblArray = NULL;
|
||||
|
||||
tqDebug("s-task:%s build create %d table(s) msg", id, rows);
|
||||
SVCreateTbBatchReq reqs = {0};
|
||||
SArray* crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq));
|
||||
|
||||
tagArray = taosArrayInit(4, sizeof(STagVal));
|
||||
crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq));
|
||||
if ((NULL == reqs.pArray) || (tagArray == NULL)) {
|
||||
tqError("s-task:%s failed to init create table msg, code:%s", id, tstrerror(terrno));
|
||||
code = terrno;
|
||||
|
@ -291,6 +293,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
|
|||
tqError("s-task:%s vgId:%d failed to init create table msg", id, vgId);
|
||||
continue;
|
||||
}
|
||||
|
||||
taosArrayClear(tagArray);
|
||||
|
||||
if (size == 2) {
|
||||
|
@ -356,8 +359,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
|
|||
}
|
||||
}
|
||||
|
||||
code = setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid,
|
||||
pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1);
|
||||
code = setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid, IS_NEW_SUBTB_RULE(pTask));
|
||||
if (code) {
|
||||
goto _end;
|
||||
}
|
||||
|
@ -368,16 +370,15 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
|
|||
goto _end;
|
||||
}
|
||||
|
||||
STableSinkInfo* pInfo = NULL;
|
||||
bool alreadyCached = tqGetTableInfo(pTask->outputInfo.tbSink.pTblInfo, gid, &pInfo);
|
||||
bool alreadyCached = doGetSinkTableInfoFromCache(pTask->outputInfo.tbSink.pTbInfo, gid, &pInfo);
|
||||
if (!alreadyCached) {
|
||||
code = doCreateSinkInfo(pCreateTbReq->name, &pInfo);
|
||||
code = doCreateSinkTableInfo(pCreateTbReq->name, &pInfo);
|
||||
if (code) {
|
||||
tqError("vgId:%d failed to create sink tableInfo for table:%s, s-task:%s", vgId, pCreateTbReq->name, id);
|
||||
continue;
|
||||
}
|
||||
|
||||
code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pInfo, gid, id);
|
||||
code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pInfo, gid, id);
|
||||
if (code) {
|
||||
tqError("vgId:%d failed to put sink tableInfo:%s into cache, s-task:%s", vgId, pCreateTbReq->name, id);
|
||||
}
|
||||
|
@ -527,8 +528,8 @@ int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, c
|
|||
taosArrayDestroy(pExisted->aRowP);
|
||||
pExisted->aRowP = pFinal;
|
||||
|
||||
tqTrace("s-task:%s rows merged, final rows:%d, pk:%d uid:%" PRId64 ", existed auto-create table:%d, new-block:%d",
|
||||
id, (int32_t)taosArrayGetSize(pFinal), numOfPk, pExisted->uid, (pExisted->pCreateTbReq != NULL),
|
||||
tqTrace("s-task:%s rows merged, final rows:%d, pk:%d uid:%" PRId64 ", existed auto-create table:%d, new-block:%d", id,
|
||||
(int32_t)taosArrayGetSize(pFinal), numOfPk, pExisted->uid, (pExisted->pCreateTbReq != NULL),
|
||||
(pNew->pCreateTbReq != NULL));
|
||||
|
||||
tdDestroySVCreateTbReq(pNew->pCreateTbReq);
|
||||
|
@ -806,7 +807,7 @@ int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkI
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t doCreateSinkInfo(const char* pDstTableName, STableSinkInfo** pInfo) {
|
||||
int32_t doCreateSinkTableInfo(const char* pDstTableName, STableSinkInfo** pInfo) {
|
||||
int32_t nameLen = strlen(pDstTableName);
|
||||
(*pInfo) = taosMemoryCalloc(1, sizeof(STableSinkInfo) + nameLen + 1);
|
||||
if (*pInfo == NULL) {
|
||||
|
@ -830,7 +831,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
|
|||
STableSinkInfo* pTableSinkInfo = NULL;
|
||||
int32_t code = 0;
|
||||
|
||||
bool alreadyCached = tqGetTableInfo(pTask->outputInfo.tbSink.pTblInfo, groupId, &pTableSinkInfo);
|
||||
bool alreadyCached = doGetSinkTableInfoFromCache(pTask->outputInfo.tbSink.pTbInfo, groupId, &pTableSinkInfo);
|
||||
|
||||
if (alreadyCached) {
|
||||
if (dstTableName[0] == 0) { // data block does not set the destination table name
|
||||
|
@ -870,7 +871,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
|
|||
}
|
||||
}
|
||||
|
||||
code = doCreateSinkInfo(dstTableName, &pTableSinkInfo);
|
||||
code = doCreateSinkTableInfo(dstTableName, &pTableSinkInfo);
|
||||
if (code == 0) {
|
||||
tqDebug("s-task:%s build new sinkTableInfo to add cache, dstTable:%s", id, dstTableName);
|
||||
} else {
|
||||
|
@ -906,14 +907,14 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
|
|||
|
||||
SArray* pTagArray = taosArrayInit(pTSchema->numOfCols + 1, sizeof(STagVal));
|
||||
if (pTagArray == NULL) {
|
||||
tqError("s-task:%s failed to build auto create submit msg in sink, vgId:%d, due to %s", id, vgId,
|
||||
tstrerror(terrno));
|
||||
return terrno;
|
||||
}
|
||||
|
||||
pTableData->flags = SUBMIT_REQ_AUTO_CREATE_TABLE;
|
||||
code =
|
||||
buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray,
|
||||
(pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1),
|
||||
&pTableData->pCreateTbReq);
|
||||
code = buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray,
|
||||
IS_NEW_SUBTB_RULE(pTask), &pTableData->pCreateTbReq);
|
||||
taosArrayDestroy(pTagArray);
|
||||
|
||||
if (code) {
|
||||
|
@ -923,12 +924,12 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
|
|||
}
|
||||
|
||||
pTableSinkInfo->uid = 0;
|
||||
code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pTableSinkInfo, groupId, id);
|
||||
code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pTableSinkInfo, groupId, id);
|
||||
} else {
|
||||
metaReaderClear(&mr);
|
||||
|
||||
tqError("s-task:%s vgId:%d dst-table:%s not auto-created, and not create in tsdb, discard data", id,
|
||||
vgId, dstTableName);
|
||||
tqError("s-task:%s vgId:%d dst-table:%s not auto-created, and not create in tsdb, discard data", id, vgId,
|
||||
dstTableName);
|
||||
return TSDB_CODE_TDB_TABLE_NOT_EXIST;
|
||||
}
|
||||
} else {
|
||||
|
@ -944,7 +945,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
|
|||
pTableSinkInfo->uid = mr.me.uid;
|
||||
|
||||
metaReaderClear(&mr);
|
||||
code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pTableSinkInfo, groupId, id);
|
||||
code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pTableSinkInfo, groupId, id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -952,7 +953,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock,
|
||||
int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema* pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock,
|
||||
SSubmitTbData* pTableData, int64_t earlyTs, const char* id) {
|
||||
int32_t numOfRows = pDataBlock->info.rows;
|
||||
char* dstTableName = pDataBlock->info.parTbName;
|
||||
|
@ -975,6 +976,43 @@ int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t checkTagSchema(SStreamTask* pTask, SVnode* pVnode) {
int32_t code = TSDB_CODE_SUCCESS;
const char* id = pTask->id.idStr;
STaskOutputInfo* pOutputInfo = &pTask->outputInfo;
int32_t vgId = pTask->pMeta->vgId;

if (pTask->outputInfo.tbSink.pTagSchema == NULL) {
SMetaReader mer1 = {0};
metaReaderDoInit(&mer1, pVnode->pMeta, META_READER_LOCK);

code = metaReaderGetTableEntryByUid(&mer1, pOutputInfo->tbSink.stbUid);
if (code != TSDB_CODE_SUCCESS) {
tqError("s-task:%s vgId:%d failed to get the dst stable, failed to sink results", id, vgId);
metaReaderClear(&mer1);
return code;
}

pOutputInfo->tbSink.pTagSchema = tCloneSSchemaWrapper(&mer1.me.stbEntry.schemaTag);
metaReaderClear(&mer1);

if (pOutputInfo->tbSink.pTagSchema == NULL) {
tqError("s-task:%s failed to clone tag schema, code:%s, failed to sink results", id, tstrerror(terrno));
return terrno;
}

SSchemaWrapper* pTagSchema = pOutputInfo->tbSink.pTagSchema;
SSchema* pCol1 = &pTagSchema->pSchema[0];
if (pTagSchema->nCols == 1 && pCol1->type == TSDB_DATA_TYPE_UBIGINT && strcmp(pCol1->name, "group_id") == 0) {
pOutputInfo->tbSink.autoCreateCtb = true;
} else {
pOutputInfo->tbSink.autoCreateCtb = false;
}
}

return code;
}
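
For readers skimming the refactor: the auto-create decision above depends only on the shape of the destination stable's tag schema. A minimal, self-contained sketch of that predicate (hypothetical helper and types, not part of this commit; the expected type id is passed in rather than assumed):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

// Hypothetical stand-ins for the schema fields consulted by checkTagSchema().
typedef struct { int8_t type; char name[64]; } TagColSketch;

// A sink stable qualifies for auto-created child tables only when its sole tag
// is a UBIGINT named "group_id", i.e. the schema a stream produces by default.
static bool allowsAutoCreateCtb(const TagColSketch *pTags, int32_t nTags, int8_t ubigintTypeId) {
  return nTags == 1 && pTags[0].type == ubigintTypeId && strcmp(pTags[0].name, "group_id") == 0;
}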
|
||||
|
||||
void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
|
||||
const SArray* pBlocks = (const SArray*)data;
|
||||
SVnode* pVnode = (SVnode*)vnode;
|
||||
|
@ -988,29 +1026,11 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
|
|||
int64_t earlyTs = tsdbGetEarliestTs(pVnode->pTsdb);
|
||||
STaskOutputInfo* pOutputInfo = &pTask->outputInfo;
|
||||
|
||||
if (pTask->outputInfo.tbSink.pTagSchema == NULL) {
|
||||
SMetaReader mer1 = {0};
|
||||
metaReaderDoInit(&mer1, pVnode->pMeta, META_READER_LOCK);
|
||||
|
||||
code = metaReaderGetTableEntryByUid(&mer1, pOutputInfo->tbSink.stbUid);
|
||||
code = checkTagSchema(pTask, pVnode);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("s-task:%s vgId:%d failed to get the dst stable, failed to sink results", id, vgId);
|
||||
metaReaderClear(&mer1);
|
||||
return;
|
||||
}
|
||||
|
||||
pOutputInfo->tbSink.pTagSchema = tCloneSSchemaWrapper(&mer1.me.stbEntry.schemaTag);
|
||||
metaReaderClear(&mer1);
|
||||
|
||||
SSchemaWrapper* pTagSchema = pOutputInfo->tbSink.pTagSchema;
|
||||
SSchema* pCol1 = &pTagSchema->pSchema[0];
|
||||
if (pTagSchema->nCols == 1 && pCol1->type == TSDB_DATA_TYPE_UBIGINT && strcmp(pCol1->name, "group_id") == 0) {
|
||||
pOutputInfo->tbSink.autoCreateCtb = true;
|
||||
} else {
|
||||
pOutputInfo->tbSink.autoCreateCtb = false;
|
||||
}
|
||||
}
|
||||
|
||||
bool onlySubmitData = hasOnlySubmitData(pBlocks, numOfBlocks);
|
||||
if (!onlySubmitData) {
|
||||
tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, has delete block, submit one-by-one", vgId, id,
|
||||
|
@ -1033,45 +1053,127 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
|
|||
} else if (pDataBlock->info.type == STREAM_CHECKPOINT) {
|
||||
continue;
|
||||
} else {
|
||||
pTask->execInfo.sink.numOfBlocks += 1;
|
||||
|
||||
SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
|
||||
if (submitReq.aSubmitTbData == NULL) {
|
||||
code = terrno;
|
||||
tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code));
|
||||
return;
|
||||
}
|
||||
|
||||
SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP};
|
||||
code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("vgId:%d s-task:%s dst-table not exist, stb:%s discard stream results", vgId, id, stbFullName);
|
||||
continue;
|
||||
}
|
||||
|
||||
code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id);
|
||||
if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) {
|
||||
if (tbData.pCreateTbReq != NULL) {
|
||||
tdDestroySVCreateTbReq(tbData.pCreateTbReq);
|
||||
(void) doRemoveFromCache(pTask->outputInfo.tbSink.pTblInfo, pDataBlock->info.id.groupId, id);
|
||||
tbData.pCreateTbReq = NULL;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData);
|
||||
if (p == NULL) {
|
||||
tqDebug("vgId:%d, s-task:%s failed to build submit msg, data lost", vgId, id);
|
||||
}
|
||||
|
||||
code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, 1);
|
||||
if (code) { // failed and continue
|
||||
tqDebug("vgId:%d, s-task:%s submit msg failed, data lost", vgId, id);
|
||||
}
|
||||
code = handleResultBlockMsg(pTask, pDataBlock, i, pVnode, earlyTs);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, merge submit msg", vgId, id, numOfBlocks);
|
||||
if (streamTaskShouldStop(pTask)) {
|
||||
return;
|
||||
}
|
||||
|
||||
reubuildAndSendMultiResBlock(pTask, pBlocks, pVnode, earlyTs);
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
bool hasOnlySubmitData(const SArray* pBlocks, int32_t numOfBlocks) {
|
||||
for (int32_t i = 0; i < numOfBlocks; ++i) {
|
||||
SSDataBlock* p = taosArrayGet(pBlocks, i);
|
||||
if (p == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (p->info.type == STREAM_DELETE_RESULT || p->info.type == STREAM_CREATE_CHILD_TABLE) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int32_t doPutSinkTableInfoIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, const char* id) {
int32_t code = tSimpleHashPut(pSinkTableMap, &groupId, sizeof(uint64_t), &pTableSinkInfo, POINTER_BYTES);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFreeClear(pTableSinkInfo);
} else {
tqDebug("s-task:%s new dst table:%s(uid:%" PRIu64 ") added into cache, total:%d", id, pTableSinkInfo->name.data,
pTableSinkInfo->uid, tSimpleHashGetSize(pSinkTableMap));
}

return code;
}

bool doGetSinkTableInfoFromCache(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo) {
void* pVal = tSimpleHashGet(pTableInfoMap, &groupId, sizeof(uint64_t));
if (pVal) {
*pInfo = *(STableSinkInfo**)pVal;
return true;
}

return false;
}

int32_t doRemoveSinkTableInfoInCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id) {
if (tSimpleHashGetSize(pSinkTableMap) == 0) {
return TSDB_CODE_SUCCESS;
}

int32_t code = tSimpleHashRemove(pSinkTableMap, &groupId, sizeof(groupId));
if (code == 0) {
tqDebug("s-task:%s remove cached table meta for groupId:%" PRId64, id, groupId);
} else {
tqError("s-task:%s failed to remove table meta from hashmap, groupId:%" PRId64, id, groupId);
}
return code;
}
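
Taken together, the three renamed helpers form a small lookup → create → publish lifecycle keyed by the stream group id; the remove helper is the error-path counterpart used when a prepared row payload has to be discarded. A minimal sketch of the intended combination (ensureSinkTableCached is a hypothetical wrapper, not a function added by this commit):

static int32_t ensureSinkTableCached(SSHashObj* pTbInfo, uint64_t groupId, const char* dstTableName,
                                     const char* id, STableSinkInfo** ppInfo) {
  // fast path: the destination table for this group id is already cached
  if (doGetSinkTableInfoFromCache(pTbInfo, groupId, ppInfo)) {
    return TSDB_CODE_SUCCESS;
  }

  // build a new sink-table info from the destination table name
  int32_t code = doCreateSinkTableInfo(dstTableName, ppInfo);
  if (code != TSDB_CODE_SUCCESS) {
    return code;
  }

  // publish it; on failure doPutSinkTableInfoIntoCache frees the entry itself
  return doPutSinkTableInfoIntoCache(pTbInfo, *ppInfo, groupId, id);
}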
|
||||
|
||||
int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
|
||||
int64_t suid) {
|
||||
SBatchDeleteReq deleteReq = {.suid = suid, .deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq))};
|
||||
if (deleteReq.deleteReqs == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
|
||||
int32_t code =
|
||||
tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr, IS_NEW_SUBTB_RULE(pTask));
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
if (taosArrayGetSize(deleteReq.deleteReqs) == 0) {
|
||||
taosArrayDestroy(deleteReq.deleteReqs);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t len;
|
||||
tEncodeSize(tEncodeSBatchDeleteReq, &deleteReq, len, code);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("s-task:%s failed to encode delete request", pTask->id.idStr);
|
||||
return code;
|
||||
}
|
||||
|
||||
SEncoder encoder = {0};
|
||||
void* serializedDeleteReq = rpcMallocCont(len + sizeof(SMsgHead));
|
||||
void* abuf = POINTER_SHIFT(serializedDeleteReq, sizeof(SMsgHead));
|
||||
tEncoderInit(&encoder, abuf, len);
|
||||
code = tEncodeSBatchDeleteReq(&encoder, &deleteReq);
|
||||
tEncoderClear(&encoder);
|
||||
taosArrayDestroy(deleteReq.deleteReqs);
|
||||
|
||||
if (code) {
|
||||
return code;
|
||||
}
|
||||
|
||||
((SMsgHead*)serializedDeleteReq)->vgId = TD_VID(pVnode);
|
||||
|
||||
SRpcMsg msg = {.msgType = TDMT_VND_BATCH_DEL, .pCont = serializedDeleteReq, .contLen = len + sizeof(SMsgHead)};
|
||||
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
|
||||
tqDebug("failed to put delete req into write-queue since %s", terrstr());
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void reubuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs) {
|
||||
int32_t code = 0;
|
||||
const char* id = pTask->id.idStr;
|
||||
int32_t vgId = pTask->pMeta->vgId;
|
||||
int32_t numOfBlocks = taosArrayGetSize(pBlocks);
|
||||
int64_t suid = pTask->outputInfo.tbSink.stbUid;
|
||||
STSchema* pTSchema = pTask->outputInfo.tbSink.pTSchema;
|
||||
char* stbFullName = pTask->outputInfo.tbSink.stbFullName;
|
||||
|
||||
SHashObj* pTableIndexMap =
|
||||
taosHashInit(numOfBlocks, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
|
||||
|
||||
|
@ -1085,12 +1187,6 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
|
|||
|
||||
bool hasSubmit = false;
|
||||
for (int32_t i = 0; i < numOfBlocks; i++) {
|
||||
if (streamTaskShouldStop(pTask)) {
|
||||
taosHashCleanup(pTableIndexMap);
|
||||
tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
|
||||
return;
|
||||
}
|
||||
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
|
||||
if (pDataBlock == NULL) {
|
||||
continue;
|
||||
|
@ -1118,7 +1214,7 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
|
|||
if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) {
|
||||
if (tbData.pCreateTbReq != NULL) {
|
||||
tdDestroySVCreateTbReq(tbData.pCreateTbReq);
|
||||
(void) doRemoveFromCache(pTask->outputInfo.tbSink.pTblInfo, groupId, id);
|
||||
(void)doRemoveSinkTableInfoInCache(pTask->outputInfo.tbSink.pTbInfo, groupId, id);
|
||||
tbData.pCreateTbReq = NULL;
|
||||
}
|
||||
continue;
|
||||
|
@ -1171,94 +1267,52 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
|
|||
tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
|
||||
tqDebug("vgId:%d, s-task:%s write results completed", vgId, id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
bool hasOnlySubmitData(const SArray* pBlocks, int32_t numOfBlocks) {
|
||||
for (int32_t i = 0; i < numOfBlocks; ++i) {
|
||||
SSDataBlock* p = taosArrayGet(pBlocks, i);
|
||||
if (p == NULL) {
|
||||
continue;
|
||||
}
|
||||
int32_t handleResultBlockMsg(SStreamTask* pTask, SSDataBlock* pDataBlock, int32_t index, SVnode* pVnode, int64_t earlyTs) {
|
||||
int32_t code = 0;
|
||||
STSchema* pTSchema = pTask->outputInfo.tbSink.pTSchema;
|
||||
int64_t suid = pTask->outputInfo.tbSink.stbUid;
|
||||
const char* id = pTask->id.idStr;
|
||||
int32_t vgId = TD_VID(pVnode);
|
||||
char* stbFullName = pTask->outputInfo.tbSink.stbFullName;
|
||||
|
||||
if (p->info.type == STREAM_DELETE_RESULT || p->info.type == STREAM_CREATE_CHILD_TABLE) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
pTask->execInfo.sink.numOfBlocks += 1;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, const char* id) {
|
||||
int32_t code = tSimpleHashPut(pSinkTableMap, &groupId, sizeof(uint64_t), &pTableSinkInfo, POINTER_BYTES);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
taosMemoryFreeClear(pTableSinkInfo);
|
||||
} else {
|
||||
tqDebug("s-task:%s new dst table:%s(uid:%" PRIu64 ") added into cache, total:%d", id, pTableSinkInfo->name.data,
|
||||
pTableSinkInfo->uid, tSimpleHashGetSize(pSinkTableMap));
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t doRemoveFromCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id) {
|
||||
if (tSimpleHashGetSize(pSinkTableMap) == 0) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t code = tSimpleHashRemove(pSinkTableMap, &groupId, sizeof(groupId));
|
||||
if (code == 0) {
|
||||
tqDebug("s-task:%s remove cached table meta for groupId:%" PRId64, id, groupId);
|
||||
} else {
|
||||
tqError("s-task:%s failed to remove table meta from hashmap, groupId:%" PRId64, id, groupId);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
|
||||
int64_t suid) {
|
||||
SBatchDeleteReq deleteReq = {.suid = suid, .deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq))};
|
||||
if (deleteReq.deleteReqs == NULL) {
|
||||
SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
|
||||
if (submitReq.aSubmitTbData == NULL) {
|
||||
tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(terrno));
|
||||
return terrno;
|
||||
}
|
||||
|
||||
int32_t code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr,
|
||||
pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1);
|
||||
SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP};
|
||||
code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("vgId:%d s-task:%s dst-table not exist, stb:%s discard stream results", vgId, id, stbFullName);
|
||||
return code;
|
||||
}
|
||||
|
||||
if (taosArrayGetSize(deleteReq.deleteReqs) == 0) {
|
||||
taosArrayDestroy(deleteReq.deleteReqs);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
code = tqSetDstTableDataPayload(suid, pTSchema, index, pDataBlock, &tbData, earlyTs, id);
|
||||
if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) {
|
||||
if (tbData.pCreateTbReq != NULL) {
|
||||
tdDestroySVCreateTbReq(tbData.pCreateTbReq);
|
||||
(void)doRemoveSinkTableInfoInCache(pTask->outputInfo.tbSink.pTbInfo, pDataBlock->info.id.groupId, id);
|
||||
tbData.pCreateTbReq = NULL;
|
||||
}
|
||||
|
||||
int32_t len;
|
||||
tEncodeSize(tEncodeSBatchDeleteReq, &deleteReq, len, code);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("s-task:%s failed to encode delete request", pTask->id.idStr);
|
||||
return code;
|
||||
}
|
||||
|
||||
SEncoder encoder = {0};
|
||||
void* serializedDeleteReq = rpcMallocCont(len + sizeof(SMsgHead));
|
||||
void* abuf = POINTER_SHIFT(serializedDeleteReq, sizeof(SMsgHead));
|
||||
tEncoderInit(&encoder, abuf, len);
|
||||
code = tEncodeSBatchDeleteReq(&encoder, &deleteReq);
|
||||
tEncoderClear(&encoder);
|
||||
taosArrayDestroy(deleteReq.deleteReqs);
|
||||
void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData);
|
||||
if (p == NULL) {
|
||||
tqDebug("vgId:%d, s-task:%s failed to build submit msg, code:%s, data lost", vgId, id, tstrerror(terrno));
|
||||
return terrno;
|
||||
}
|
||||
|
||||
code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, 1);
|
||||
if (code) { // failed and continue
|
||||
tqDebug("vgId:%d, s-task:%s submit msg failed, code:%s data lost", vgId, id, tstrerror(code));
|
||||
}
|
||||
|
||||
if (code) {
|
||||
return code;
|
||||
}
|
||||
|
||||
((SMsgHead*)serializedDeleteReq)->vgId = TD_VID(pVnode);
|
||||
|
||||
SRpcMsg msg = {.msgType = TDMT_VND_BATCH_DEL, .pCont = serializedDeleteReq, .contLen = len + sizeof(SMsgHead)};
|
||||
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
|
||||
tqDebug("failed to put delete req into write-queue since %s", terrstr());
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
|
|
@ -692,7 +692,7 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen
|
|||
STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId};
|
||||
SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
|
||||
if ((ppTask != NULL) && ((*ppTask) != NULL)) {
|
||||
streamMetaAcquireOneTask(*ppTask);
|
||||
int32_t unusedRetRef = streamMetaAcquireOneTask(*ppTask);
|
||||
SStreamTask* pTask = *ppTask;
|
||||
|
||||
if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
|
||||
|
@ -1119,10 +1119,6 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t
|
|||
int32_t vgId = pMeta->vgId;
|
||||
int32_t code = 0;
|
||||
|
||||
if (pTask == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
streamTaskResume(pTask);
|
||||
ETaskStatus status = streamTaskGetStatus(pTask).state;
|
||||
|
||||
|
@ -1150,7 +1146,6 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t
|
|||
}
|
||||
}
|
||||
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -1173,6 +1168,7 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m
|
|||
|
||||
code = tqProcessTaskResumeImpl(handle, pTask, sversion, pReq->igUntreated, fromVnode);
|
||||
if (code != 0) {
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -1186,6 +1182,7 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m
|
|||
streamMutexUnlock(&pHTask->lock);
|
||||
|
||||
code = tqProcessTaskResumeImpl(handle, pHTask, sversion, pReq->igUntreated, fromVnode);
|
||||
streamMetaReleaseTask(pMeta, pHTask);
|
||||
}
|
||||
|
||||
return code;
|
||||
|
|
|
@ -602,14 +602,14 @@ int32_t tsdbTFileSetInitRef(STsdb *pTsdb, const STFileSet *fset1, STFileSet **fs
|
|||
SSttLvl *lvl;
|
||||
code = tsdbSttLvlInitRef(pTsdb, lvl1, &lvl);
|
||||
if (code) {
|
||||
taosMemoryFree(lvl);
|
||||
tsdbSttLvlClear(&lvl);
|
||||
tsdbTFileSetClear(fset);
|
||||
return code;
|
||||
}
|
||||
|
||||
code = TARRAY2_APPEND(fset[0]->lvlArr, lvl);
|
||||
if (code) {
|
||||
taosMemoryFree(lvl);
|
||||
tsdbSttLvlClear(&lvl);
|
||||
tsdbTFileSetClear(fset);
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -855,6 +855,7 @@ static int32_t loadFileBlockBrinInfo(STsdbReader* pReader, SArray* pIndexList, S
|
|||
STableBlockScanInfo** p = taosArrayGetLast(pTableScanInfoList);
|
||||
if (p == NULL) {
|
||||
clearBrinBlockIter(&iter);
|
||||
tsdbError("invalid param, empty in tablescanInfoList, %s", pReader->idStr);
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
|
||||
|
@ -5256,7 +5257,7 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
// NOTE: the following code is used to test suspend/resume for tsdbReader when it blocks the commit
// the data should be ingested in round-robin and all the child tables should be created before ingesting data
// the version range of query will be used to identify the correctness of suspend/resume functions.
// this function will blocked before loading the SECOND block from vnode-buffer, and restart itself from sst-files
// this function will be blocked before loading the SECOND block from vnode-buffer, and restart itself from sst-files
#if SUSPEND_RESUME_TEST
if (!pReader->status.suspendInvoked && !pReader->status.loadFromFile) {
tsem_wait(&pReader->resumeAfterSuspend);
|
||||
|
@ -5909,6 +5910,7 @@ int32_t tsdbGetTableSchema(SMeta* pMeta, int64_t uid, STSchema** pSchema, int64_
|
|||
} else if (mr.me.type == TSDB_NORMAL_TABLE) { // do nothing
|
||||
} else {
|
||||
code = TSDB_CODE_INVALID_PARA;
|
||||
tsdbError("invalid mr.me.type:%d, code:%s", mr.me.type, tstrerror(code));
|
||||
metaReaderClear(&mr);
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -45,6 +45,7 @@ const SVnodeCfg vnodeCfgDefault = {.vgId = -1,
|
|||
.retentionPeriod = -1,
|
||||
.rollPeriod = 0,
|
||||
.segSize = 0,
|
||||
.committed = 0,
|
||||
.retentionSize = -1,
|
||||
.level = TAOS_WAL_WRITE,
|
||||
.clearFiles = 0,
|
||||
|
|
|
@ -257,6 +257,7 @@ int vnodeLoadInfo(const char *dir, SVnodeInfo *pInfo) {
|
|||
code = vnodeDecodeInfo(pData, pInfo);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
|
||||
pInfo->config.walCfg.committed = pInfo->state.committed;
|
||||
_exit:
|
||||
if (code) {
|
||||
if (pFile) {
|
||||
|
|
|
@ -633,47 +633,44 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg
|
|||
}
|
||||
break;
|
||||
case TDMT_STREAM_TASK_DEPLOY: {
|
||||
int32_t code = tqProcessTaskDeployReq(pVnode->pTq, ver, pReq, len);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
terrno = code;
|
||||
if ((code = tqProcessTaskDeployReq(pVnode->pTq, ver, pReq, len)) != TSDB_CODE_SUCCESS) {
|
||||
goto _err;
|
||||
}
|
||||
} break;
|
||||
case TDMT_STREAM_TASK_DROP: {
|
||||
if (tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) {
|
||||
if ((code = tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen)) < 0) {
|
||||
goto _err;
|
||||
}
|
||||
} break;
|
||||
case TDMT_STREAM_TASK_UPDATE_CHKPT: {
|
||||
if (tqProcessTaskUpdateCheckpointReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) {
|
||||
if ((code = tqProcessTaskUpdateCheckpointReq(pVnode->pTq, pMsg->pCont, pMsg->contLen)) < 0) {
|
||||
goto _err;
|
||||
}
|
||||
} break;
|
||||
case TDMT_STREAM_CONSEN_CHKPT: {
|
||||
if (pVnode->restored) {
|
||||
if (tqProcessTaskConsenChkptIdReq(pVnode->pTq, pMsg) < 0) {
|
||||
if (pVnode->restored && (code = tqProcessTaskConsenChkptIdReq(pVnode->pTq, pMsg)) < 0) {
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
} break;
|
||||
case TDMT_STREAM_TASK_PAUSE: {
|
||||
if (pVnode->restored && vnodeIsLeader(pVnode) &&
|
||||
tqProcessTaskPauseReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) {
|
||||
(code = tqProcessTaskPauseReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen)) < 0) {
|
||||
goto _err;
|
||||
}
|
||||
} break;
|
||||
case TDMT_STREAM_TASK_RESUME: {
|
||||
if (pVnode->restored && vnodeIsLeader(pVnode) &&
|
||||
tqProcessTaskResumeReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) {
|
||||
(code = tqProcessTaskResumeReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen)) < 0) {
|
||||
goto _err;
|
||||
}
|
||||
} break;
|
||||
case TDMT_VND_STREAM_TASK_RESET: {
|
||||
if (pVnode->restored && vnodeIsLeader(pVnode)) {
|
||||
if (tqProcessTaskResetReq(pVnode->pTq, pMsg) < 0) {
|
||||
if (pVnode->restored && vnodeIsLeader(pVnode) &&
|
||||
(code = tqProcessTaskResetReq(pVnode->pTq, pMsg)) < 0) {
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
} break;
|
||||
case TDMT_VND_ALTER_CONFIRM:
|
||||
needCommit = pVnode->config.hashChange;
|
||||
|
@ -693,10 +690,10 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg
|
|||
case TDMT_VND_DROP_INDEX:
|
||||
vnodeProcessDropIndexReq(pVnode, ver, pReq, len, pRsp);
|
||||
break;
|
||||
case TDMT_VND_STREAM_CHECK_POINT_SOURCE:
|
||||
case TDMT_VND_STREAM_CHECK_POINT_SOURCE: // always return true
|
||||
tqProcessTaskCheckPointSourceReq(pVnode->pTq, pMsg, pRsp);
|
||||
break;
|
||||
case TDMT_VND_STREAM_TASK_UPDATE:
|
||||
case TDMT_VND_STREAM_TASK_UPDATE: // always return true
|
||||
tqProcessTaskUpdateReq(pVnode->pTq, pMsg);
|
||||
break;
|
||||
case TDMT_VND_COMPACT:
|
||||
|
@ -752,7 +749,7 @@ _exit:
|
|||
|
||||
_err:
|
||||
vError("vgId:%d, process %s request failed since %s, ver:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
|
||||
tstrerror(terrno), ver);
|
||||
tstrerror(code), ver);
|
||||
return code;
|
||||
}
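
The rewritten cases above all follow one error-capture idiom: assign and test the callee's return value in a single expression, so the _err label can log tstrerror(code) instead of the thread-local terrno. A self-contained illustration of that idiom (processOne and dispatchSketch are stand-ins, not functions from this commit):

#include <stdint.h>
#include <stdio.h>

static int32_t processOne(int32_t req) { return req < 0 ? -1 : 0; }

static int32_t dispatchSketch(int32_t req) {
  int32_t code = 0;
  if ((code = processOne(req)) < 0) {   // capture the result where it is produced
    goto _err;
  }
  return 0;

_err:
  printf("process request failed since code:%d\n", code);   // report the captured code, not terrno
  return code;
}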
|
||||
|
||||
|
|
|
@ -188,17 +188,17 @@ do { \
#define EXPLAIN_ROW_NEW(level, ...) \
do { \
if (isVerboseLine) { \
tlen = snprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE, "%*s", (level) * 3 + 3, ""); \
tlen = tsnprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE, "%*s", (level) * 3 + 3, ""); \
} else { \
tlen = snprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE, "%*s%s", (level) * 3, "", "-> "); \
tlen = tsnprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE, "%*s%s", (level) * 3, "", "-> "); \
} \
tlen += snprintf(tbuf + VARSTR_HEADER_SIZE + tlen, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE - tlen, __VA_ARGS__); \
tlen += tsnprintf(tbuf + VARSTR_HEADER_SIZE + tlen, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE - tlen, __VA_ARGS__); \
} while (0)

#define EXPLAIN_ROW_APPEND(...) tlen += snprintf(tbuf + VARSTR_HEADER_SIZE + tlen, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE - tlen, __VA_ARGS__)
#define EXPLAIN_ROW_APPEND(...) tlen += tsnprintf(tbuf + VARSTR_HEADER_SIZE + tlen, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE - tlen, __VA_ARGS__)
#define EXPLAIN_ROW_END() do { varDataSetLen(tbuf, tlen); tlen += VARSTR_HEADER_SIZE; isVerboseLine = true; } while (0)

#define EXPLAIN_SUM_ROW_NEW(...) tlen = snprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE, __VA_ARGS__)
#define EXPLAIN_SUM_ROW_NEW(...) tlen = tsnprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE, __VA_ARGS__)
#define EXPLAIN_SUM_ROW_END() do { varDataSetLen(tbuf, tlen); tlen += VARSTR_HEADER_SIZE; } while (0)

#define EXPLAIN_ROW_APPEND_LIMIT_IMPL(_pLimit, sl) do { \
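
The hunk above swaps snprintf for tsnprintf throughout the explain-row macros. The working assumption is that tsnprintf never reports more bytes than it actually stored, so tlen stays a valid write offset even when a row is truncated; a minimal sketch of a wrapper with that contract (illustrative only — the real tsnprintf is defined elsewhere in the code base and may differ):

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

static int32_t tsnprintf_sketch(char *buf, size_t size, const char *fmt, ...) {
  if (buf == NULL || size == 0) return 0;

  va_list ap;
  va_start(ap, fmt);
  int n = vsnprintf(buf, size, fmt, ap);
  va_end(ap);

  if (n < 0) return 0;                                 // encoding error: nothing usable written
  if ((size_t)n >= size) return (int32_t)(size - 1);   // clamp to the bytes that actually fit
  return (int32_t)n;
}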
|
|
|
@ -515,7 +515,7 @@ void appendColumnFields(char* buf, int32_t* len, STableCfg* pCfg) {
|
|||
TSDB_DATA_TYPE_GEOMETRY == pSchema->type) {
|
||||
typeLen += tsnprintf(type + typeLen, LTYPE_LEN - typeLen, "(%d)", (int32_t)(pSchema->bytes - VARSTR_HEADER_SIZE));
|
||||
} else if (TSDB_DATA_TYPE_NCHAR == pSchema->type) {
|
||||
typeLen += snprintf(type + typeLen, LTYPE_LEN - typeLen, "(%d)",
|
||||
typeLen += tsnprintf(type + typeLen, LTYPE_LEN - typeLen, "(%d)",
|
||||
(int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE));
|
||||
}
|
||||
|
||||
|
@ -551,7 +551,7 @@ void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) {
|
|||
(int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE));
|
||||
}
|
||||
|
||||
*len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, sizeof(type) - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s",
|
||||
*len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s",
|
||||
((i > 0) ? ", " : ""), pSchema->name, type);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -48,6 +48,7 @@ typedef struct SGroupResInfo {
|
|||
} SGroupResInfo;
|
||||
|
||||
typedef struct SResultRow {
|
||||
int32_t version;
|
||||
int32_t pageId; // pageId & rowId is the position of current result in disk-based output buffer
|
||||
int32_t offset : 29; // row index in buffer page
|
||||
bool startInterp; // the time window start timestamp has done the interpolation already.
|
||||
|
@ -152,6 +153,9 @@ static FORCE_INLINE SResultRow* getResultRowByPos(SDiskbasedBuf* pBuf, SResultRo
|
|||
return pRow;
|
||||
}
|
||||
|
||||
int32_t getResultRowFromBuf(struct SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize);
|
||||
int32_t putResultRowToBuf(struct SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize);
|
||||
|
||||
int32_t initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, int32_t order);
|
||||
void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo);
|
||||
|
||||
|
|
|
@ -88,11 +88,116 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
|
|||
rowSize += pCtx[i].resDataInfo.interBufSize;
|
||||
}
|
||||
|
||||
rowSize += (numOfOutput * sizeof(bool));
|
||||
// expand rowSize to mark if col is null for top/bottom result(saveTupleData)
|
||||
return rowSize;
|
||||
}
|
||||
|
||||
// Convert buf read from rocksdb to result row
|
||||
int32_t getResultRowFromBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize) {
|
||||
if (inBuf == NULL || pSup == NULL) {
|
||||
qError("invalid input parameters, inBuf:%p, pSup:%p", inBuf, pSup);
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
SqlFunctionCtx *pCtx = pSup->pCtx;
|
||||
int32_t *offset = pSup->rowEntryInfoOffset;
|
||||
SResultRow *pResultRow = NULL;
|
||||
size_t processedSize = 0;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
// calculate the size of output buffer
|
||||
*outBufSize = getResultRowSize(pCtx, pSup->numOfExprs);
|
||||
*outBuf = taosMemoryMalloc(*outBufSize);
|
||||
if (*outBuf == NULL) {
|
||||
qError("failed to allocate memory for output buffer, size:%zu", *outBufSize);
|
||||
return terrno;
|
||||
}
|
||||
pResultRow = (SResultRow*)*outBuf;
|
||||
(void)memcpy(pResultRow, inBuf, sizeof(SResultRow));
|
||||
inBuf += sizeof(SResultRow);
|
||||
processedSize += sizeof(SResultRow);
|
||||
|
||||
for (int32_t i = 0; i < pSup->numOfExprs; ++i) {
|
||||
int32_t len = *(int32_t*)inBuf;
|
||||
inBuf += sizeof(int32_t);
|
||||
processedSize += sizeof(int32_t);
|
||||
if (pResultRow->version != FUNCTION_RESULT_INFO_VERSION && pCtx->fpSet.decode) {
|
||||
code = pCtx->fpSet.decode(&pCtx[i], inBuf, getResultEntryInfo(pResultRow, i, offset), pResultRow->version);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("failed to decode result row, code:%d", code);
|
||||
return code;
|
||||
}
|
||||
} else {
|
||||
(void)memcpy(getResultEntryInfo(pResultRow, i, offset), inBuf, len);
|
||||
}
|
||||
inBuf += len;
|
||||
processedSize += len;
|
||||
}
|
||||
|
||||
if (processedSize < inBufSize) {
|
||||
// stream stores extra data after result row
|
||||
size_t leftLen = inBufSize - processedSize;
|
||||
TAOS_MEMORY_REALLOC(*outBuf, *outBufSize + leftLen);
|
||||
if (*outBuf == NULL) {
|
||||
qError("failed to reallocate memory for output buffer, size:%zu", *outBufSize + leftLen);
|
||||
return terrno;
|
||||
}
|
||||
(void)memcpy(*outBuf + *outBufSize, inBuf, leftLen);
|
||||
inBuf += leftLen;
|
||||
processedSize += leftLen;
|
||||
*outBufSize += leftLen;
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// Convert result row to buf for rocksdb
|
||||
int32_t putResultRowToBuf(SExprSupp *pSup, const char* inBuf, size_t inBufSize, char **outBuf, size_t *outBufSize) {
|
||||
if (pSup == NULL || inBuf == NULL || outBuf == NULL || outBufSize == NULL) {
|
||||
qError("invalid input parameters, inBuf:%p, pSup:%p, outBufSize:%p, outBuf:%p", inBuf, pSup, outBufSize, outBuf);
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
|
||||
SqlFunctionCtx *pCtx = pSup->pCtx;
|
||||
int32_t *offset = pSup->rowEntryInfoOffset;
|
||||
SResultRow *pResultRow = (SResultRow*)inBuf;
|
||||
size_t rowSize = getResultRowSize(pCtx, pSup->numOfExprs);
|
||||
|
||||
if (rowSize > inBufSize) {
|
||||
qError("invalid input buffer size, rowSize:%zu, inBufSize:%zu", rowSize, inBufSize);
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
}
|
||||
|
||||
// calculate the size of output buffer
|
||||
*outBufSize = rowSize + sizeof(int32_t) * pSup->numOfExprs;
|
||||
if (rowSize < inBufSize) {
|
||||
*outBufSize += inBufSize - rowSize;
|
||||
}
|
||||
|
||||
*outBuf = taosMemoryMalloc(*outBufSize);
|
||||
if (*outBuf == NULL) {
|
||||
qError("failed to allocate memory for output buffer, size:%zu", *outBufSize);
|
||||
return terrno;
|
||||
}
|
||||
|
||||
char *pBuf = *outBuf;
|
||||
pResultRow->version = FUNCTION_RESULT_INFO_VERSION;
|
||||
(void)memcpy(pBuf, pResultRow, sizeof(SResultRow));
|
||||
pBuf += sizeof(SResultRow);
|
||||
for (int32_t i = 0; i < pSup->numOfExprs; ++i) {
|
||||
size_t len = sizeof(SResultRowEntryInfo) + pCtx[i].resDataInfo.interBufSize;
|
||||
*(int32_t *) pBuf = (int32_t)len;
|
||||
pBuf += sizeof(int32_t);
|
||||
(void)memcpy(pBuf, getResultEntryInfo(pResultRow, i, offset), len);
|
||||
pBuf += len;
|
||||
}
|
||||
|
||||
if (rowSize < inBufSize) {
|
||||
// stream stores extra data after result row
|
||||
size_t leftLen = inBufSize - rowSize;
|
||||
(void)memcpy(pBuf, inBuf + rowSize, leftLen);
|
||||
pBuf += leftLen;
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
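
The two converters above agree on a simple length-prefixed layout, which is what makes the version-gated decode path in getResultRowFromBuf() possible. Below is a sketch of the bytes putResultRowToBuf() emits, plus a hypothetical helper that counts them (illustrative only; the real code sizes its allocation with getResultRowSize(), which also reserves numOfExprs bool flags, so the buffer can be slightly larger than the bytes actually written):

// Layout written by putResultRowToBuf() and parsed back by getResultRowFromBuf():
//   [SResultRow header, version stamped to FUNCTION_RESULT_INFO_VERSION]
//   repeated numOfExprs times:
//     [int32_t len][SResultRowEntryInfo + interBufSize bytes of function state]
//   [optional trailing bytes appended by the stream state, copied verbatim]
static size_t countSerializedResultRowBytes(SqlFunctionCtx* pCtx, int32_t numOfExprs, size_t trailingBytes) {
  size_t total = sizeof(SResultRow);
  for (int32_t i = 0; i < numOfExprs; ++i) {
    total += sizeof(int32_t) + sizeof(SResultRowEntryInfo) + pCtx[i].resDataInfo.interBufSize;
  }
  return total + trailingBytes;
}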
|
||||
|
||||
static void freeEx(void* p) { taosMemoryFree(*(void**)p); }
|
||||
|
||||
void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo) {
|
||||
|
|
|
@ -84,9 +84,9 @@ static void logGroupCacheExecInfo(SGroupCacheOperatorInfo* pGrpCacheOperator) {
|
|||
if (NULL == buf) {
|
||||
return;
|
||||
}
|
||||
int32_t offset = snprintf(buf, bufSize, "groupCache exec info, downstreamBlkNum:");
|
||||
int32_t offset = tsnprintf(buf, bufSize, "groupCache exec info, downstreamBlkNum:");
|
||||
for (int32_t i = 0; i < pGrpCacheOperator->downstreamNum; ++i) {
|
||||
offset += snprintf(buf + offset, bufSize, " %" PRId64 , pGrpCacheOperator->execInfo.pDownstreamBlkNum[i]);
|
||||
offset += tsnprintf(buf + offset, bufSize, " %" PRId64 , pGrpCacheOperator->execInfo.pDownstreamBlkNum[i]);
|
||||
}
|
||||
qDebug("%s", buf);
|
||||
taosMemoryFree(buf);
|
||||
|
|
|
@ -2006,6 +2006,12 @@ int32_t createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiN
|
|||
pInfo->stateStore = pTaskInfo->storageAPI.stateStore;
|
||||
int32_t funResSize = getMaxFunResSize(&pOperator->exprSupp, numOfCols);
|
||||
pInfo->pState->pFileState = NULL;
|
||||
|
||||
// used for backward compatibility of function's result info
|
||||
pInfo->pState->pResultRowStore.resultRowGet = getResultRowFromBuf;
|
||||
pInfo->pState->pResultRowStore.resultRowPut = putResultRowToBuf;
|
||||
pInfo->pState->pExprSupp = &pOperator->exprSupp;
|
||||
|
||||
code =
|
||||
pAPI->stateStore.streamFileStateInit(tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize,
|
||||
compareTs, pInfo->pState, pInfo->twAggSup.deleteMark, GET_TASKID(pTaskInfo),
|
||||
|
@ -2223,6 +2229,12 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SExprSupp* pExpSup, in
|
|||
pSup->stateStore.streamStateSetNumber(pSup->pState, -1, tsIndex);
|
||||
int32_t funResSize = getMaxFunResSize(pExpSup, numOfOutput);
|
||||
pSup->pState->pFileState = NULL;
|
||||
|
||||
// used for backward compatibility of function's result info
|
||||
pSup->pState->pResultRowStore.resultRowGet = getResultRowFromBuf;
|
||||
pSup->pState->pResultRowStore.resultRowPut = putResultRowToBuf;
|
||||
pSup->pState->pExprSupp = pExpSup;
|
||||
|
||||
code = pSup->stateStore.streamFileStateInit(tsStreamBufferSize, sizeof(SSessionKey), pSup->resultRowSize, funResSize,
|
||||
sesionTs, pSup->pState, pTwAggSup->deleteMark, taskIdStr,
|
||||
pHandle->checkpointId, STREAM_STATE_BUFF_SORT, &pSup->pState->pFileState);
|
||||
|
@ -5385,6 +5397,12 @@ int32_t createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode*
|
|||
|
||||
pInfo->stateStore = pTaskInfo->storageAPI.stateStore;
|
||||
pInfo->pState->pFileState = NULL;
|
||||
|
||||
// used for backward compatibility of function's result info
|
||||
pInfo->pState->pResultRowStore.resultRowGet = getResultRowFromBuf;
|
||||
pInfo->pState->pResultRowStore.resultRowPut = putResultRowToBuf;
|
||||
pInfo->pState->pExprSupp = &pOperator->exprSupp;
|
||||
|
||||
code = pTaskInfo->storageAPI.stateStore.streamFileStateInit(
|
||||
tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize, compareTs, pInfo->pState,
|
||||
pInfo->twAggSup.deleteMark, GET_TASKID(pTaskInfo), pHandle->checkpointId, STREAM_STATE_BUFF_HASH,
|
||||
|
|
|
@ -278,7 +278,7 @@ static bool checkNullRow(SExprSupp* pExprSup, SSDataBlock* pSrcBlock, int32_t in
|
|||
}
|
||||
|
||||
static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock,
|
||||
SSDataBlock* pSrcBlock, int32_t index, bool beforeTs, SExecTaskInfo* pTaskInfo, bool genAfterBlock) {
|
||||
SSDataBlock* pSrcBlock, int32_t index, bool beforeTs, SExecTaskInfo* pTaskInfo) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
int32_t rows = pResBlock->info.rows;
|
||||
|
@ -427,7 +427,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
|
|||
break;
|
||||
}
|
||||
|
||||
if (start.key == INT64_MIN || end.key == INT64_MIN || genAfterBlock) {
|
||||
if (start.key == INT64_MIN || end.key == INT64_MIN) {
|
||||
colDataSetNULL(pDst, rows);
|
||||
break;
|
||||
}
|
||||
|
@ -463,13 +463,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
|
|||
break;
|
||||
}
|
||||
|
||||
if (genAfterBlock && rows == 0) {
|
||||
hasInterp = false;
|
||||
break;
|
||||
}
|
||||
|
||||
SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, srcSlot);
|
||||
if (pkey->isNull == false && !genAfterBlock) {
|
||||
if (pkey->isNull == false) {
|
||||
code = colDataSetVal(pDst, rows, pkey->pData, false);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
} else {
|
||||
|
@ -841,7 +836,7 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS
|
|||
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
|
||||
if (nextTs > pSliceInfo->current) {
|
||||
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, false, pTaskInfo, false) &&
|
||||
if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, false, pTaskInfo) &&
|
||||
pSliceInfo->fillType == TSDB_FILL_LINEAR) {
|
||||
break;
|
||||
} else {
|
||||
|
@ -869,7 +864,7 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS
|
|||
doKeepLinearInfo(pSliceInfo, pBlock, i);
|
||||
|
||||
while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, true, pTaskInfo, false) &&
|
||||
if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, true, pTaskInfo) &&
|
||||
pSliceInfo->fillType == TSDB_FILL_LINEAR) {
|
||||
break;
|
||||
} else {
|
||||
|
@ -914,12 +909,13 @@ static void genInterpAfterDataBlock(STimeSliceOperatorInfo* pSliceInfo, SOperato
|
|||
SSDataBlock* pResBlock = pSliceInfo->pRes;
|
||||
SInterval* pInterval = &pSliceInfo->interval;
|
||||
|
||||
if (pSliceInfo->pPrevGroupKey == NULL) {
|
||||
if (pSliceInfo->fillType == TSDB_FILL_NEXT || pSliceInfo->fillType == TSDB_FILL_LINEAR ||
|
||||
pSliceInfo->pPrevGroupKey == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
while (pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
(void)genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, NULL, index, false, pOperator->pTaskInfo, true);
|
||||
(void)genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, NULL, index, false, pOperator->pTaskInfo);
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
}
|
||||
|
|
|
@ -4734,10 +4734,10 @@ int32_t histogramFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
|||
int32_t len;
|
||||
char buf[512] = {0};
|
||||
if (!pInfo->normalized) {
|
||||
len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%" PRId64 "}",
|
||||
len = tsnprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%" PRId64 "}",
|
||||
pInfo->bins[i].lower, pInfo->bins[i].upper, pInfo->bins[i].count);
|
||||
} else {
|
||||
len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%lf}", pInfo->bins[i].lower,
|
||||
len = tsnprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%lf}", pInfo->bins[i].lower,
|
||||
pInfo->bins[i].upper, pInfo->bins[i].percentage);
|
||||
}
|
||||
varDataSetLen(buf, len);
|
||||
|
@ -6365,7 +6365,7 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
|||
compRatio = pData->totalSize * 100 / (double)totalRawSize;
|
||||
}
|
||||
|
||||
int32_t len = snprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE,
|
||||
int32_t len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE,
|
||||
"Total_Blocks=[%d] Total_Size=[%.2f KiB] Average_size=[%.2f KiB] Compression_Ratio=[%.2f %c]",
|
||||
pData->numOfBlocks, pData->totalSize / 1024.0, averageSize / 1024.0, compRatio, '%');
|
||||
|
||||
|
@ -6380,7 +6380,7 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
|||
avgRows = pData->totalRows / pData->numOfBlocks;
|
||||
}
|
||||
|
||||
len = snprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Block_Rows=[%" PRId64 "] MinRows=[%d] MaxRows=[%d] AvgRows=[%" PRId64 "]",
|
||||
len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Block_Rows=[%" PRId64 "] MinRows=[%d] MaxRows=[%d] AvgRows=[%" PRId64 "]",
|
||||
pData->totalRows, pData->minRows, pData->maxRows, avgRows);
|
||||
varDataSetLen(st, len);
|
||||
code = colDataSetVal(pColInfo, row++, st, false);
|
||||
|
@ -6388,14 +6388,14 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
|||
return code;
|
||||
}
|
||||
|
||||
len = snprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Inmem_Rows=[%d] Stt_Rows=[%d] ", pData->numOfInmemRows, pData->numOfSttRows);
|
||||
len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Inmem_Rows=[%d] Stt_Rows=[%d] ", pData->numOfInmemRows, pData->numOfSttRows);
|
||||
varDataSetLen(st, len);
|
||||
code = colDataSetVal(pColInfo, row++, st, false);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
return code;
|
||||
}
|
||||
|
||||
len = snprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Total_Tables=[%d] Total_Filesets=[%d] Total_Vgroups=[%d]", pData->numOfTables,
|
||||
len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Total_Tables=[%d] Total_Filesets=[%d] Total_Vgroups=[%d]", pData->numOfTables,
|
||||
pData->numOfFiles, pData->numOfVgroups);
|
||||
|
||||
varDataSetLen(st, len);
|
||||
|
@ -6404,7 +6404,7 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
|||
return code;
|
||||
}
|
||||
|
||||
len = snprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE,
|
||||
len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE,
|
||||
"--------------------------------------------------------------------------------");
|
||||
varDataSetLen(st, len);
|
||||
code = colDataSetVal(pColInfo, row++, st, false);
|
||||
|
@ -6431,7 +6431,7 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
|||
int32_t bucketRange = ceil(((double) (pData->defMaxRows - pData->defMinRows)) / numOfBuckets);
|
||||
|
||||
for (int32_t i = 0; i < tListLen(pData->blockRowsHisto); ++i) {
|
||||
len = snprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "%04d |", pData->defMinRows + bucketRange * (i + 1));
|
||||
len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "%04d |", pData->defMinRows + bucketRange * (i + 1));
|
||||
|
||||
int32_t num = 0;
|
||||
if (pData->blockRowsHisto[i] > 0) {
|
||||
|
@ -6439,13 +6439,13 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
|||
}
|
||||
|
||||
for (int32_t j = 0; j < num; ++j) {
|
||||
int32_t x = snprintf(varDataVal(st) + len, sizeof(st) - VARSTR_HEADER_SIZE - len, "%c", '|');
|
||||
int32_t x = tsnprintf(varDataVal(st) + len, sizeof(st) - VARSTR_HEADER_SIZE - len, "%c", '|');
|
||||
len += x;
|
||||
}
|
||||
|
||||
if (pData->blockRowsHisto[i] > 0) {
|
||||
double v = pData->blockRowsHisto[i] * 100.0 / pData->numOfBlocks;
|
||||
len += snprintf(varDataVal(st) + len, sizeof(st) - VARSTR_HEADER_SIZE - len, " %d (%.2f%c)", pData->blockRowsHisto[i], v, '%');
|
||||
len += tsnprintf(varDataVal(st) + len, sizeof(st) - VARSTR_HEADER_SIZE - len, " %d (%.2f%c)", pData->blockRowsHisto[i], v, '%');
|
||||
}
|
||||
|
||||
varDataSetLen(st, len);
|
||||
|
|
|
@ -224,19 +224,18 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value, int32_t *index

*index = -1;

if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal) {
if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal || isnan(v)) {
return TSDB_CODE_SUCCESS;
}

// divide a range of [dMinVal, dMaxVal] into 1024 buckets
double span = pBucket->range.dMaxVal - pBucket->range.dMinVal;
if (span < pBucket->numOfSlots) {
int32_t delta = (int32_t)(v - pBucket->range.dMinVal);
*index = (delta % pBucket->numOfSlots);
if (fabs(span) < DBL_EPSILON) {
*index = 0;
} else {
double slotSpan = span / pBucket->numOfSlots;
*index = (int32_t)((v - pBucket->range.dMinVal) / slotSpan);
if (v == pBucket->range.dMaxVal) {
if (fabs(v - pBucket->range.dMaxVal) < DBL_EPSILON) {
*index -= 1;
}
}
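
Extracted from the hunk above, the revised slot mapping can be read as a small pure function. This standalone sketch (hypothetical name, same logic) makes the three guards explicit: NaN and out-of-range values get no slot, a zero-width range collapses to slot 0, and the inclusive maximum is pulled back into the last slot.

#include <float.h>
#include <math.h>
#include <stdint.h>

static int32_t bucketSlotForDouble(double v, double dMinVal, double dMaxVal, int32_t numOfSlots) {
  if (isnan(v) || v < dMinVal || v > dMaxVal) {
    return -1;  // no slot: the caller leaves *index at -1 and skips the value
  }

  double span = dMaxVal - dMinVal;
  if (fabs(span) < DBL_EPSILON) {
    return 0;  // all values identical: everything lands in the first slot
  }

  double  slotSpan = span / numOfSlots;
  int32_t index = (int32_t)((v - dMinVal) / slotSpan);
  if (fabs(v - dMaxVal) < DBL_EPSILON) {
    index -= 1;  // keep the inclusive maximum inside the last slot
  }
  return index;
}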
|
||||
|
@ -583,48 +582,52 @@ int32_t getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction
|
|||
*result = getIdenticalDataVal(pMemBucket, i);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// try next round
|
||||
pMemBucket->times += 1;
|
||||
// qDebug("MemBucket:%p, start next round data bucketing, time:%d", pMemBucket, pMemBucket->times);
|
||||
|
||||
pMemBucket->range = pSlot->range;
|
||||
pMemBucket->total = 0;
|
||||
|
||||
resetSlotInfo(pMemBucket);
|
||||
|
||||
int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times - 1);
|
||||
tMemBucket *tmpBucket = NULL;
|
||||
int32_t code = tMemBucketCreate(pMemBucket->bytes, pMemBucket->type, pSlot->range.dMinVal, pSlot->range.dMaxVal,
|
||||
false, &tmpBucket);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
tMemBucketDestroy(&tmpBucket);
|
||||
return code;
|
||||
}
|
||||
int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times);
|
||||
|
||||
SArray* list;
|
||||
void *p = taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId));
|
||||
if (p != NULL) {
|
||||
list = *(SArray **)p;
|
||||
if (list == NULL || list->size <= 0) {
|
||||
tMemBucketDestroy(&tmpBucket);
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
tMemBucketDestroy(&tmpBucket);
|
||||
return -1;
|
||||
}
|
||||
|
||||
for (int32_t f = 0; f < list->size; ++f) {
|
||||
int32_t *pageId = taosArrayGet(list, f);
|
||||
if (NULL == pageId) {
|
||||
tMemBucketDestroy(&tmpBucket);
|
||||
return TSDB_CODE_OUT_OF_RANGE;
|
||||
}
|
||||
SFilePage *pg = getBufPage(pMemBucket->pBuffer, *pageId);
|
||||
if (pg == NULL) {
|
||||
tMemBucketDestroy(&tmpBucket);
|
||||
return terrno;
|
||||
}
|
||||
|
||||
int32_t code = tMemBucketPut(pMemBucket, pg->data, (int32_t)pg->num);
|
||||
code = tMemBucketPut(tmpBucket, pg->data, (int32_t)pg->num);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tMemBucketDestroy(&tmpBucket);
|
||||
return code;
|
||||
}
|
||||
setBufPageDirty(pg, true);
|
||||
releaseBufPage(pMemBucket->pBuffer, pg);
|
||||
}
|
||||
|
||||
return getPercentileImpl(pMemBucket, count - num, fraction, result);
|
||||
code = getPercentileImpl(tmpBucket, count - num, fraction, result);
|
||||
tMemBucketDestroy(&tmpBucket);
|
||||
return code;
|
||||
}
|
||||
} else {
|
||||
num += pSlot->info.size;
|
||||
|
|
|
@ -887,7 +887,8 @@ _err:
|
|||
}
|
||||
|
||||
static int32_t addParamToLogicConditionNode(SLogicConditionNode* pCond, SNode* pParam) {
|
||||
if (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam) && pCond->condType == ((SLogicConditionNode*)pParam)->condType) {
|
||||
if (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam) && pCond->condType == ((SLogicConditionNode*)pParam)->condType &&
|
||||
((SLogicConditionNode*)pParam)->condType != LOGIC_COND_TYPE_NOT) {
|
||||
int32_t code = nodesListAppendList(pCond->pParameterList, ((SLogicConditionNode*)pParam)->pParameterList);
|
||||
((SLogicConditionNode*)pParam)->pParameterList = NULL;
|
||||
nodesDestroyNode(pParam);
|
||||
|
|
|
@ -10609,6 +10609,19 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm
"Non window query only support scalar function, aggregate function is not allowed");
}

if (NULL != pStmt->pOptions->pDelay) {
SValueNode* pVal = (SValueNode*)pStmt->pOptions->pDelay;
int64_t minDelay = 0;
char* str = "5s";
if (DEAL_RES_ERROR != translateValue(pCxt, pVal) && TSDB_CODE_SUCCESS ==
parseNatualDuration(str, strlen(str), &minDelay, &pVal->unit, pVal->node.resType.precision, false)) {
if (pVal->datum.i < minDelay) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
"stream max delay must be bigger than 5 seconds");
}
}
}

return TSDB_CODE_SUCCESS;
}
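
Condensed from the new branch above, the floor check reads as a single predicate. This hypothetical wrapper mirrors the hunk (same parseNatualDuration call, with the "5s" literal interpreted in the delay value's own precision) and returns true when the supplied MAX_DELAY falls below the minimum:

static bool streamMaxDelayTooSmall(SValueNode* pVal) {
  int64_t minDelay = 0;
  char*   str = "5s";
  if (TSDB_CODE_SUCCESS != parseNatualDuration(str, strlen(str), &minDelay, &pVal->unit,
                                               pVal->node.resType.precision, false)) {
    return false;  // cannot establish the floor here; leave it to later validation
  }
  return pVal->datum.i < minDelay;
}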
|
||||
|
||||
|
|
|
@ -247,7 +247,10 @@ int32_t generateSyntaxErrMsgExt(SMsgBuf* pBuf, int32_t errCode, const char* pFor
|
|||
}
|
||||
|
||||
int32_t buildInvalidOperationMsg(SMsgBuf* pBuf, const char* msg) {
|
||||
if (pBuf->buf) {
|
||||
strncpy(pBuf->buf, msg, pBuf->len);
|
||||
}
|
||||
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
}
|
||||
|
||||
|
@ -259,7 +262,6 @@ int32_t buildInvalidOperationMsgExt(SMsgBuf* pBuf, const char* pFormat, ...) {
|
|||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
}
|
||||
|
||||
|
||||
int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char* sourceStr) {
|
||||
if (pBuf == NULL) return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
const char* msgFormat1 = "syntax error near \'%s\'";
|
||||
|
@ -328,7 +330,7 @@ STableMeta* tableMetaDup(const STableMeta* pTableMeta) {
|
|||
STableMeta* p = taosMemoryMalloc(size + schemaExtSize);
|
||||
if (NULL == p) return NULL;
|
||||
|
||||
memcpy(p, pTableMeta, schemaExtSize+size);
|
||||
memcpy(p, pTableMeta, schemaExtSize + size);
|
||||
if (hasSchemaExt) {
|
||||
p->schemaExt = (SSchemaExt*)(((char*)p) + size);
|
||||
} else {
|
||||
|
@ -816,7 +818,8 @@ int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalog
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t createSelectStmtImpl(bool isDistinct, SNodeList* pProjectionList, SNode* pTable, SNodeList* pHint, SNode** ppSelect) {
|
||||
int32_t createSelectStmtImpl(bool isDistinct, SNodeList* pProjectionList, SNode* pTable, SNodeList* pHint,
|
||||
SNode** ppSelect) {
|
||||
SSelectStmt* select = NULL;
|
||||
int32_t code = nodesMakeNode(QUERY_NODE_SELECT_STMT, (SNode**)&select);
|
||||
if (NULL == select) {
|
||||
|
@ -1299,7 +1302,7 @@ int32_t reserveTableCfgInCache(int32_t acctId, const char* pDb, const char* pTab
|
|||
return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableCfg);
|
||||
}
|
||||
|
||||
int32_t reserveTableTSMAInfoInCache(int32_t acctId, const char *pDb, const char *pTable, SParseMetaCache *pMetaCache) {
|
||||
int32_t reserveTableTSMAInfoInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
|
||||
return reserveTableReqInCache(acctId, pDb, pTable, &pMetaCache->pTableTSMAs);
|
||||
}
|
||||
|
||||
|
@ -1310,7 +1313,8 @@ int32_t reserveTSMAInfoInCache(int32_t acctId, const char* pDb, const char* pTsm
|
|||
int32_t getTableIndexFromCache(SParseMetaCache* pMetaCache, const SName* pName, SArray** pIndexes) {
|
||||
char fullName[TSDB_TABLE_FNAME_LEN];
|
||||
int32_t code = tNameExtractFullName(pName, fullName);
|
||||
if (TSDB_CODE_SUCCESS != code) return code;;
|
||||
if (TSDB_CODE_SUCCESS != code) return code;
|
||||
;
|
||||
SArray* pSmaIndexes = NULL;
|
||||
code = getMetaDataFromHash(fullName, strlen(fullName), pMetaCache->pTableIndex, (void**)&pSmaIndexes);
|
||||
if (TSDB_CODE_SUCCESS == code && NULL != pSmaIndexes) {
|
||||
|
@ -1349,7 +1353,7 @@ int32_t getTsmaFromCache(SParseMetaCache* pMetaCache, const SName* pTsmaName, ST
|
|||
return TSDB_CODE_PAR_INTERNAL_ERROR;
|
||||
}
|
||||
*pTsma = taosArrayGetP(pTsmaRsp->pTsmas, 0);
|
||||
} else if (code == TSDB_CODE_PAR_INTERNAL_ERROR){
|
||||
} else if (code == TSDB_CODE_PAR_INTERNAL_ERROR) {
|
||||
code = TSDB_CODE_MND_SMA_NOT_EXIST;
|
||||
}
|
||||
return code;
|
||||
|
|
|
@ -1769,36 +1769,36 @@ int32_t fltConverToStr(char *str, int32_t strMaxLen, int type, void *buf, int32_
|
|||
|
||||
switch (type) {
|
||||
case TSDB_DATA_TYPE_NULL:
|
||||
n = snprintf(str, strMaxLen, "null");
|
||||
n = tsnprintf(str, strMaxLen, "null");
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_BOOL:
|
||||
n = snprintf(str, strMaxLen, (*(int8_t *)buf) ? "true" : "false");
|
||||
n = tsnprintf(str, strMaxLen, (*(int8_t *)buf) ? "true" : "false");
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
n = snprintf(str, strMaxLen, "%d", *(int8_t *)buf);
|
||||
n = tsnprintf(str, strMaxLen, "%d", *(int8_t *)buf);
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_SMALLINT:
|
||||
n = snprintf(str, strMaxLen, "%d", *(int16_t *)buf);
|
||||
n = tsnprintf(str, strMaxLen, "%d", *(int16_t *)buf);
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_INT:
|
||||
n = snprintf(str, strMaxLen, "%d", *(int32_t *)buf);
|
||||
n = tsnprintf(str, strMaxLen, "%d", *(int32_t *)buf);
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_BIGINT:
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
n = snprintf(str, strMaxLen, "%" PRId64, *(int64_t *)buf);
|
||||
n = tsnprintf(str, strMaxLen, "%" PRId64, *(int64_t *)buf);
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
n = snprintf(str, strMaxLen, "%e", GET_FLOAT_VAL(buf));
|
||||
n = tsnprintf(str, strMaxLen, "%e", GET_FLOAT_VAL(buf));
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
n = snprintf(str, strMaxLen, "%e", GET_DOUBLE_VAL(buf));
|
||||
n = tsnprintf(str, strMaxLen, "%e", GET_DOUBLE_VAL(buf));
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
|
@ -1817,19 +1817,19 @@ int32_t fltConverToStr(char *str, int32_t strMaxLen, int type, void *buf, int32_
|
|||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_UTINYINT:
|
||||
n = snprintf(str, strMaxLen, "%d", *(uint8_t *)buf);
|
||||
n = tsnprintf(str, strMaxLen, "%d", *(uint8_t *)buf);
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_USMALLINT:
|
||||
n = snprintf(str, strMaxLen, "%d", *(uint16_t *)buf);
|
||||
n = tsnprintf(str, strMaxLen, "%d", *(uint16_t *)buf);
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_UINT:
|
||||
n = snprintf(str, strMaxLen, "%u", *(uint32_t *)buf);
|
||||
n = tsnprintf(str, strMaxLen, "%u", *(uint32_t *)buf);
|
||||
break;
|
||||
|
||||
case TSDB_DATA_TYPE_UBIGINT:
|
||||
n = snprintf(str, strMaxLen, "%" PRIu64, *(uint64_t *)buf);
|
||||
n = tsnprintf(str, strMaxLen, "%" PRIu64, *(uint64_t *)buf);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
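The snprintf -> tsnprintf conversions above suggest a truncation-safe wrapper whose return value is always usable as "bytes actually written". A minimal sketch of such a helper, assuming that behaviour (hypothetical reconstruction, not the project's actual implementation):

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

// hypothetical bounded snprintf: returns the number of bytes written (excluding the NUL),
// never the would-be length that plain snprintf reports when the output is truncated
static int32_t tsnprintf_sketch(char *dst, size_t size, const char *fmt, ...) {
  if (dst == NULL || size == 0) return 0;
  va_list args;
  va_start(args, fmt);
  int32_t n = vsnprintf(dst, size, fmt, args);
  va_end(args);
  if (n < 0) return 0;                               // encoding error: nothing written
  if ((size_t)n >= size) n = (int32_t)(size - 1);    // clamp on truncation
  return n;
}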
@ -1886,7 +1886,7 @@ int32_t filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t optio
|
|||
SFilterField *left = FILTER_UNIT_LEFT_FIELD(info, unit);
|
||||
SColumnNode *refNode = (SColumnNode *)left->desc;
|
||||
if (unit->compare.optr <= OP_TYPE_JSON_CONTAINS) {
|
||||
len += snprintf(str, sizeof(str), "UNIT[%d] => [%d][%d] %s [", i, refNode->dataBlockId, refNode->slotId,
|
||||
len += tsnprintf(str, sizeof(str), "UNIT[%d] => [%d][%d] %s [", i, refNode->dataBlockId, refNode->slotId,
|
||||
operatorTypeStr(unit->compare.optr));
|
||||
}
|
||||
|
||||
|
@ -1912,7 +1912,7 @@ int32_t filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t optio
|
|||
(void)strncat(str, " && ", sizeof(str) - len - 1);
|
||||
len += 4;
|
||||
if (unit->compare.optr2 <= OP_TYPE_JSON_CONTAINS) {
|
||||
len += snprintf(str + len, sizeof(str) - len, "[%d][%d] %s [", refNode->dataBlockId,
|
||||
len += tsnprintf(str + len, sizeof(str) - len, "[%d][%d] %s [", refNode->dataBlockId,
|
||||
refNode->slotId, operatorTypeStr(unit->compare.optr2));
|
||||
}
|
||||
|
||||
|
@ -4679,6 +4679,9 @@ EDealRes fltReviseRewriter(SNode **pNode, void *pContext) {
|
|||
|
||||
cell = cell->pNext;
|
||||
}
|
||||
if (node->condType == LOGIC_COND_TYPE_NOT) {
|
||||
stat->scalarMode = true;
|
||||
}
|
||||
|
||||
return DEAL_RES_CONTINUE;
|
||||
}
|
||||
|
|
|
@ -2068,7 +2068,7 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp
|
|||
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||
if (inputType == TSDB_DATA_TYPE_BOOL) {
|
||||
// NOTE: snprintf will append '\0' at the end of string
|
||||
int32_t len = snprintf(varDataVal(output), outputLen + TSDB_NCHAR_SIZE - VARSTR_HEADER_SIZE, "%.*s",
|
||||
int32_t len = tsnprintf(varDataVal(output), outputLen + TSDB_NCHAR_SIZE - VARSTR_HEADER_SIZE, "%.*s",
|
||||
(int32_t)(outputLen - VARSTR_HEADER_SIZE), *(int8_t *)input ? "true" : "false");
|
||||
varDataSetLen(output, len);
|
||||
} else if (inputType == TSDB_DATA_TYPE_BINARY) {
|
||||
|
@ -2109,7 +2109,7 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp
|
|||
int32_t len;
|
||||
if (inputType == TSDB_DATA_TYPE_BOOL) {
|
||||
char tmp[8] = {0};
|
||||
len = snprintf(tmp, sizeof(tmp), "%.*s", outputCharLen, *(int8_t *)input ? "true" : "false");
|
||||
len = tsnprintf(tmp, sizeof(tmp), "%.*s", outputCharLen, *(int8_t *)input ? "true" : "false");
|
||||
bool ret = taosMbsToUcs4(tmp, len, (TdUcs4 *)varDataVal(output), outputLen - VARSTR_HEADER_SIZE, &len);
|
||||
if (!ret) {
|
||||
code = TSDB_CODE_SCALAR_CONVERT_ERROR;
|
||||
|
@ -4515,10 +4515,10 @@ int32_t histogramScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarP
|
|||
int32_t len;
|
||||
char buf[512] = {0};
|
||||
if (!normalized) {
|
||||
len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%" PRId64 "}",
|
||||
len = tsnprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%" PRId64 "}",
|
||||
bins[k].lower, bins[k].upper, bins[k].count);
|
||||
} else {
|
||||
len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%lf}",
|
||||
len = tsnprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{\"lower_bin\":%g, \"upper_bin\":%g, \"count\":%lf}",
|
||||
bins[k].lower, bins[k].upper, bins[k].percentage);
|
||||
}
|
||||
varDataSetLen(buf, len);
|
||||
|
|
|
@ -734,7 +734,7 @@ int32_t vectorConvertToVarData(SSclVectorConvCtx *pCtx) {
|
|||
|
||||
int64_t value = 0;
|
||||
GET_TYPED_DATA(value, int64_t, pCtx->inType, colDataGetData(pInputCol, i));
|
||||
int32_t len = snprintf(varDataVal(tmp), sizeof(tmp) - VARSTR_HEADER_SIZE, "%" PRId64, value);
|
||||
int32_t len = tsnprintf(varDataVal(tmp), sizeof(tmp) - VARSTR_HEADER_SIZE, "%" PRId64, value);
|
||||
varDataLen(tmp) = len;
|
||||
if (pCtx->outType == TSDB_DATA_TYPE_NCHAR) {
|
||||
SCL_ERR_RET(varToNchar(tmp, pCtx->pOut, i, NULL));
|
||||
|
@ -751,7 +751,7 @@ int32_t vectorConvertToVarData(SSclVectorConvCtx *pCtx) {
|
|||
|
||||
uint64_t value = 0;
|
||||
GET_TYPED_DATA(value, uint64_t, pCtx->inType, colDataGetData(pInputCol, i));
|
||||
int32_t len = snprintf(varDataVal(tmp), sizeof(tmp) - VARSTR_HEADER_SIZE, "%" PRIu64, value);
|
||||
int32_t len = tsnprintf(varDataVal(tmp), sizeof(tmp) - VARSTR_HEADER_SIZE, "%" PRIu64, value);
|
||||
varDataLen(tmp) = len;
|
||||
if (pCtx->outType == TSDB_DATA_TYPE_NCHAR) {
|
||||
SCL_ERR_RET(varToNchar(tmp, pCtx->pOut, i, NULL));
|
||||
|
@ -768,7 +768,7 @@ int32_t vectorConvertToVarData(SSclVectorConvCtx *pCtx) {
|
|||
|
||||
double value = 0;
|
||||
GET_TYPED_DATA(value, double, pCtx->inType, colDataGetData(pInputCol, i));
|
||||
int32_t len = snprintf(varDataVal(tmp), sizeof(tmp) - VARSTR_HEADER_SIZE, "%lf", value);
|
||||
int32_t len = tsnprintf(varDataVal(tmp), sizeof(tmp) - VARSTR_HEADER_SIZE, "%lf", value);
|
||||
varDataLen(tmp) = len;
|
||||
if (pCtx->outType == TSDB_DATA_TYPE_NCHAR) {
|
||||
SCL_ERR_RET(varToNchar(tmp, pCtx->pOut, i, NULL));
|
||||
|
|
|
@ -169,7 +169,7 @@ int32_t streamStateGetFirst_rocksdb(SStreamState* pState, SWinKey* key);
|
|||
int32_t streamStateGetGroupKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
|
||||
int32_t streamStateAddIfNotExist_rocksdb(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
|
||||
void streamStateCurPrev_rocksdb(SStreamStateCur* pCur);
|
||||
int32_t streamStateGetKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
|
||||
int32_t streamStateGetKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
|
||||
SStreamStateCur* streamStateGetAndCheckCur_rocksdb(SStreamState* pState, SWinKey* key);
|
||||
SStreamStateCur* streamStateSeekKeyNext_rocksdb(SStreamState* pState, const SWinKey* key);
|
||||
SStreamStateCur* streamStateSeekToLast_rocksdb(SStreamState* pState);
|
||||
|
@ -191,7 +191,8 @@ SStreamStateCur* streamStateSessionSeekKeyPrev_rocksdb(SStreamState* pState, con
|
|||
SStreamStateCur* streamStateSessionSeekToLast_rocksdb(SStreamState* pState, int64_t groupId);
|
||||
int32_t streamStateSessionCurPrev_rocksdb(SStreamStateCur* pCur);
|
||||
|
||||
int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
|
||||
int32_t streamStateSessionGetKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SSessionKey* pKey,
|
||||
void** pVal, int32_t* pVLen);
|
||||
int32_t streamStateSessionGetKeyByRange_rocksdb(SStreamState* pState, const SSessionKey* key, SSessionKey* curKey);
|
||||
int32_t streamStateSessionAddIfNotExist_rocksdb(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal,
|
||||
int32_t* pVLen);
|
||||
|
@ -255,7 +256,7 @@ int32_t taskDbDestroySnap(void* arg, SArray* pSnapInfo);
|
|||
|
||||
int32_t taskDbDoCheckpoint(void* arg, int64_t chkpId, int64_t processId);
|
||||
|
||||
int32_t bkdMgtCreate(char* path, SBkdMgt **bm);
|
||||
int32_t bkdMgtCreate(char* path, SBkdMgt** bm);
|
||||
int32_t bkdMgtAddChkp(SBkdMgt* bm, char* task, char* path);
|
||||
int32_t bkdMgtGetDelta(SBkdMgt* bm, char* taskId, int64_t chkpId, SArray* list, char* name);
|
||||
int32_t bkdMgtDumpTo(SBkdMgt* bm, char* taskId, char* dname);
|
||||
|
|
|
@ -3289,13 +3289,40 @@ int32_t streamStatePut_rocksdb(SStreamState* pState, const SWinKey* key, const v
|
|||
int code = 0;
|
||||
|
||||
SStateKey sKey = {.key = *key, .opNum = pState->number};
|
||||
STREAM_STATE_PUT_ROCKSDB(pState, "state", &sKey, (void*)value, vLen);
|
||||
char* dst = NULL;
|
||||
size_t size = 0;
|
||||
if (pState->pResultRowStore.resultRowPut == NULL || pState->pExprSupp == NULL) {
|
||||
STREAM_STATE_PUT_ROCKSDB(pState, "state", &sKey, (void*)value, (int32_t)vLen);
|
||||
} else {
|
||||
code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size);
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
STREAM_STATE_PUT_ROCKSDB(pState, "state", &sKey, (void*)dst, (int32_t)size);
|
||||
taosMemoryFree(dst);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
int32_t streamStateGet_rocksdb(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
|
||||
int code = 0;
|
||||
SStateKey sKey = {.key = *key, .opNum = pState->number};
|
||||
STREAM_STATE_GET_ROCKSDB(pState, "state", &sKey, pVal, pVLen);
|
||||
|
||||
char* tVal = NULL;
|
||||
size_t tValLen = 0;
|
||||
STREAM_STATE_GET_ROCKSDB(pState, "state", &sKey, &tVal, &tValLen);
|
||||
if (code != 0) {
|
||||
taosMemoryFree(tVal);
|
||||
return code;
|
||||
}
|
||||
if (pState->pResultRowStore.resultRowGet == NULL || pState->pExprSupp == NULL) {
|
||||
*pVal = tVal;
|
||||
*pVLen = tValLen;
|
||||
return code;
|
||||
}
|
||||
size_t pValLen = 0;
|
||||
code = (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, &pValLen);
|
||||
*pVLen = (int32_t)pValLen;
|
||||
taosMemoryFree(tVal);
|
||||
return code;
|
||||
}
|
||||
int32_t streamStateDel_rocksdb(SStreamState* pState, const SWinKey* key) {
|
||||
|
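The put/get pair above now routes values through an optional row encoder/decoder whenever both the pResultRowStore callbacks and pExprSupp are set. A condensed caller-side sketch of that round trip (illustrative only; pState is assumed to be an initialized SStreamState, error handling trimmed):

// encode (if the hooks are set) happens inside streamStatePut_rocksdb
SWinKey key = {.groupId = 1, .ts = 1700000000000};
char    row[64] = {0};   // raw result row produced by the operator
int32_t code = streamStatePut_rocksdb(pState, &key, row, sizeof(row));

// decode happens inside streamStateGet_rocksdb; the caller only ever sees the plain row
void   *pVal = NULL;
int32_t vLen = 0;
if (code == 0) {
  code = streamStateGet_rocksdb(pState, &key, &pVal, &vLen);
}
if (code == 0) {
  // use pVal/vLen, then release the buffer allocated by the get path
  taosMemoryFree(pVal);
}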
@ -3351,7 +3378,7 @@ int32_t streamStateGetFirst_rocksdb(SStreamState* pState, SWinKey* key) {
|
|||
}
|
||||
|
||||
SStreamStateCur* pCur = streamStateSeekKeyNext_rocksdb(pState, &tmp);
|
||||
code = streamStateGetKVByCur_rocksdb(pCur, key, NULL, 0);
|
||||
code = streamStateGetKVByCur_rocksdb(pState, pCur, key, NULL, 0);
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
|
@ -3395,7 +3422,8 @@ void streamStateCurPrev_rocksdb(SStreamStateCur* pCur) {
|
|||
rocksdb_iter_prev(pCur->iter);
|
||||
}
|
||||
}
|
||||
int32_t streamStateGetKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) {
|
||||
int32_t streamStateGetKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SWinKey* pKey, const void** pVal,
|
||||
int32_t* pVLen) {
|
||||
if (!pCur) return -1;
|
||||
SStateKey tkey;
|
||||
SStateKey* pKtmp = &tkey;
|
||||
|
@ -3411,10 +3439,35 @@ int32_t streamStateGetKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, cons
|
|||
if (pVLen != NULL) {
|
||||
size_t vlen = 0;
|
||||
const char* valStr = rocksdb_iter_value(pCur->iter, &vlen);
|
||||
*pVLen = valueDecode((void*)valStr, vlen, NULL, (char**)pVal);
|
||||
if (*pVLen < 0) {
|
||||
char* val = NULL;
|
||||
int32_t len = valueDecode((void*)valStr, vlen, NULL, (char**)&val);
|
||||
if (len <= 0) {
|
||||
taosMemoryFree(val);
|
||||
return -1;
|
||||
}
|
||||
|
||||
char* tVal = val;
|
||||
size_t tVlen = len;
|
||||
|
||||
if (pVal != NULL) {
|
||||
if (pState != NULL && pState->pResultRowStore.resultRowGet != NULL && pState->pExprSupp != NULL) {
|
||||
int code =
|
||||
(pState->pResultRowStore.resultRowGet)(pState->pExprSupp, val, len, (char**)&tVal, (size_t*)&tVlen);
|
||||
if (code != 0) {
|
||||
taosMemoryFree(val);
|
||||
return code;
|
||||
}
|
||||
taosMemoryFree(val);
|
||||
*pVal = (char*)tVal;
|
||||
} else {
|
||||
stInfo("streamStateGetKVByCur_rocksdb, pState = %p, pResultRowStore = %p, pExprSupp = %p", pState,
|
||||
pState->pResultRowStore.resultRowGet, pState->pExprSupp);
|
||||
*pVal = (char*)tVal;
|
||||
}
|
||||
} else {
|
||||
taosMemoryFree(val);
|
||||
}
|
||||
*pVLen = (int32_t)tVlen;
|
||||
}
|
||||
|
||||
*pKey = pKtmp->key;
|
||||
|
@ -3545,13 +3598,43 @@ SStreamStateCur* streamStateGetCur_rocksdb(SStreamState* pState, const SWinKey*
|
|||
// func cf
|
||||
int32_t streamStateFuncPut_rocksdb(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen) {
|
||||
int code = 0;
|
||||
STREAM_STATE_PUT_ROCKSDB(pState, "func", key, (void*)value, vLen);
|
||||
char* dst = NULL;
|
||||
size_t size = 0;
|
||||
if (pState->pResultRowStore.resultRowPut == NULL || pState->pExprSupp == NULL) {
|
||||
STREAM_STATE_PUT_ROCKSDB(pState, "func", key, (void*)value, (int32_t)vLen);
|
||||
return code;
|
||||
}
|
||||
code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size);
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
STREAM_STATE_PUT_ROCKSDB(pState, "func", key, (void*)dst, (int32_t)size);
|
||||
taosMemoryFree(dst);
|
||||
|
||||
return code;
|
||||
}
|
||||
int32_t streamStateFuncGet_rocksdb(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen) {
|
||||
int code = 0;
|
||||
STREAM_STATE_GET_ROCKSDB(pState, "func", key, pVal, pVLen);
|
||||
return 0;
|
||||
char* tVal = NULL;
|
||||
size_t tValLen = 0;
|
||||
STREAM_STATE_GET_ROCKSDB(pState, "func", key, tVal, &tValLen);
|
||||
if (code != 0) {
|
||||
taosMemoryFree(tVal);
|
||||
return code;
|
||||
}
|
||||
|
||||
if (pState->pResultRowStore.resultRowGet == NULL || pState->pExprSupp == NULL) {
|
||||
*pVal = tVal;
|
||||
*pVLen = tValLen;
|
||||
return code;
|
||||
}
|
||||
|
||||
size_t pValLen = 0;
|
||||
code = (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)pVal, &pValLen);
|
||||
*pVLen = (int32_t)pValLen;
|
||||
|
||||
taosMemoryFree(tVal);
|
||||
return code;
|
||||
}
|
||||
int32_t streamStateFuncDel_rocksdb(SStreamState* pState, const STupleKey* key) {
|
||||
int code = 0;
|
||||
|
@ -3566,7 +3649,20 @@ int32_t streamStateSessionPut_rocksdb(SStreamState* pState, const SSessionKey* k
|
|||
if (value == NULL || vLen == 0) {
|
||||
stError("streamStateSessionPut_rocksdb val: %p, len: %d", value, vLen);
|
||||
}
|
||||
STREAM_STATE_PUT_ROCKSDB(pState, "sess", &sKey, value, vLen);
|
||||
char* dst = NULL;
|
||||
size_t size = 0;
|
||||
if (pState->pResultRowStore.resultRowPut == NULL || pState->pExprSupp == NULL) {
|
||||
STREAM_STATE_PUT_ROCKSDB(pState, "sess", &sKey, (void*)value, (int32_t)vLen);
|
||||
return code;
|
||||
}
|
||||
|
||||
code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, value, vLen, &dst, &size);
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
STREAM_STATE_PUT_ROCKSDB(pState, "sess", &sKey, dst, (int32_t)size);
|
||||
taosMemoryFree(dst);
|
||||
|
||||
return code;
|
||||
}
|
||||
int32_t streamStateSessionGet_rocksdb(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen) {
|
||||
|
@ -3577,7 +3673,7 @@ int32_t streamStateSessionGet_rocksdb(SStreamState* pState, SSessionKey* key, vo
|
|||
void* tmp = NULL;
|
||||
int32_t vLen = 0;
|
||||
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCur, &resKey, &tmp, &vLen);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, &resKey, &tmp, &vLen);
|
||||
if (code == 0 && key->win.skey == resKey.win.skey) {
|
||||
*key = resKey;
|
||||
|
||||
|
@ -3816,7 +3912,8 @@ SStreamStateCur* streamStateSessionSeekKeyPrev_rocksdb(SStreamState* pState, con
|
|||
return pCur;
|
||||
}
|
||||
|
||||
int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen) {
|
||||
int32_t streamStateSessionGetKVByCur_rocksdb(SStreamState* pState, SStreamStateCur* pCur, SSessionKey* pKey,
|
||||
void** pVal, int32_t* pVLen) {
|
||||
if (!pCur) {
|
||||
return -1;
|
||||
}
|
||||
|
@ -3850,13 +3947,27 @@ int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey*
|
|||
return -1;
|
||||
}
|
||||
|
||||
char* tVal = val;
|
||||
size_t tVlen = len;
|
||||
|
||||
if (pVal != NULL) {
|
||||
*pVal = (char*)val;
|
||||
if (pState != NULL && pState->pResultRowStore.resultRowGet != NULL && pState->pExprSupp != NULL) {
|
||||
int code = (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, val, len, (char**)&tVal, (size_t*)&tVlen);
|
||||
if (code != 0) {
|
||||
taosMemoryFree(val);
|
||||
return code;
|
||||
}
|
||||
taosMemoryFree(val);
|
||||
*pVal = (char*)tVal;
|
||||
} else {
|
||||
*pVal = (char*)tVal;
|
||||
}
|
||||
} else {
|
||||
taosMemoryFree(val);
|
||||
}
|
||||
|
||||
if (pVLen != NULL) *pVLen = len;
|
||||
if (pVLen != NULL) *pVLen = (int32_t)tVlen;
|
||||
|
||||
*pKey = pKTmp->key;
|
||||
return 0;
|
||||
}
|
||||
|
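Every caller of streamStateSessionGetKVByCur_rocksdb now passes the SStreamState handle as well, so the cursor read can apply the same optional decode hook. A minimal caller sketch of the old vs. new call (illustrative; pState may also be NULL when no decoding is wanted, as the unit-test changes further below show):

SSessionKey key  = {0};
void       *pVal = NULL;
int32_t     vLen = 0;

// old: code = streamStateSessionGetKVByCur_rocksdb(pCur, &key, &pVal, &vLen);
// new: the state handle rides along so the stored value can be decoded on the way out
int32_t code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, &key, &pVal, &vLen);
if (code == 0) {
  // key/pVal now describe the decoded session window entry
  taosMemoryFree(pVal);
}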
@ -3870,6 +3981,7 @@ int32_t streamStateFillPut_rocksdb(SStreamState* pState, const SWinKey* key, con
|
|||
|
||||
int32_t streamStateFillGet_rocksdb(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
|
||||
int code = 0;
|
||||
|
||||
STREAM_STATE_GET_ROCKSDB(pState, "fill", key, pVal, pVLen);
|
||||
return code;
|
||||
}
|
||||
|
@ -4044,7 +4156,7 @@ int32_t streamStateSessionGetKeyByRange_rocksdb(SStreamState* pState, const SSes
|
|||
c = stateSessionKeyCmpr(&sKey, sizeof(sKey), &iKey, sizeof(iKey));
|
||||
|
||||
SSessionKey resKey = *key;
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(pCur, &resKey, NULL, 0);
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, &resKey, NULL, NULL);
|
||||
if (code == 0 && sessionRangeKeyCmpr(key, &resKey) == 0) {
|
||||
*curKey = resKey;
|
||||
streamStateFreeCur(pCur);
|
||||
|
@ -4053,7 +4165,7 @@ int32_t streamStateSessionGetKeyByRange_rocksdb(SStreamState* pState, const SSes
|
|||
|
||||
if (c > 0) {
|
||||
streamStateCurNext_rocksdb(pCur);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCur, &resKey, NULL, 0);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, &resKey, NULL, NULL);
|
||||
if (code == 0 && sessionRangeKeyCmpr(key, &resKey) == 0) {
|
||||
*curKey = resKey;
|
||||
streamStateFreeCur(pCur);
|
||||
|
@ -4061,7 +4173,7 @@ int32_t streamStateSessionGetKeyByRange_rocksdb(SStreamState* pState, const SSes
|
|||
}
|
||||
} else if (c < 0) {
|
||||
streamStateCurPrev(pState, pCur);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCur, &resKey, NULL, 0);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, &resKey, NULL, NULL);
|
||||
if (code == 0 && sessionRangeKeyCmpr(key, &resKey) == 0) {
|
||||
*curKey = resKey;
|
||||
streamStateFreeCur(pCur);
|
||||
|
@ -4091,7 +4203,7 @@ int32_t streamStateSessionAddIfNotExist_rocksdb(SStreamState* pState, SSessionKe
|
|||
}
|
||||
|
||||
SStreamStateCur* pCur = streamStateSessionSeekKeyCurrentPrev_rocksdb(pState, key);
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(pCur, key, pVal, pVLen);
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, key, pVal, pVLen);
|
||||
|
||||
if (code == 0) {
|
||||
if (sessionRangeKeyCmpr(&searchKey, key) == 0) {
|
||||
|
@ -4108,7 +4220,7 @@ int32_t streamStateSessionAddIfNotExist_rocksdb(SStreamState* pState, SSessionKe
|
|||
pCur = streamStateSessionSeekKeyNext_rocksdb(pState, key);
|
||||
}
|
||||
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCur, key, pVal, pVLen);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, key, pVal, pVLen);
|
||||
if (code == 0) {
|
||||
if (sessionRangeKeyCmpr(&searchKey, key) == 0) {
|
||||
memcpy(tmp, *pVal, *pVLen);
|
||||
|
@ -4135,7 +4247,7 @@ void streamStateSessionClear_rocksdb(SStreamState* pState) {
|
|||
SSessionKey delKey = {0};
|
||||
void* buf = NULL;
|
||||
int32_t size = 0;
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(pCur, &delKey, &buf, &size);
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, &delKey, &buf, &size);
|
||||
if (code == 0 && size > 0) {
|
||||
memset(buf, 0, size);
|
||||
// refactor later
|
||||
|
@ -4163,7 +4275,7 @@ int32_t streamStateStateAddIfNotExist_rocksdb(SStreamState* pState, SSessionKey*
|
|||
}
|
||||
|
||||
SStreamStateCur* pCur = streamStateSessionSeekKeyCurrentPrev_rocksdb(pState, key);
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(pCur, key, pVal, pVLen);
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, key, pVal, pVLen);
|
||||
if (code == 0) {
|
||||
if (key->win.skey <= tmpKey.win.skey && tmpKey.win.ekey <= key->win.ekey) {
|
||||
memcpy(tmp, *pVal, valSize);
|
||||
|
@ -4183,7 +4295,7 @@ int32_t streamStateStateAddIfNotExist_rocksdb(SStreamState* pState, SSessionKey*
|
|||
pCur = streamStateSessionSeekKeyNext_rocksdb(pState, key);
|
||||
}
|
||||
taosMemoryFreeClear(*pVal);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCur, key, pVal, pVLen);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, key, pVal, pVLen);
|
||||
if (code == 0) {
|
||||
void* stateKey = (char*)(*pVal) + (valSize - keyDataLen);
|
||||
if (fn(pKeyData, stateKey) == true) {
|
||||
|
@ -4208,13 +4320,33 @@ _end:
|
|||
// partag cf
|
||||
int32_t streamStatePutParTag_rocksdb(SStreamState* pState, int64_t groupId, const void* tag, int32_t tagLen) {
|
||||
int code = 0;
|
||||
char* dst = NULL;
|
||||
size_t size = 0;
|
||||
if (pState->pResultRowStore.resultRowPut == NULL || pState->pExprSupp == NULL) {
|
||||
STREAM_STATE_PUT_ROCKSDB(pState, "partag", &groupId, tag, tagLen);
|
||||
return code;
|
||||
}
|
||||
code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, tag, tagLen, &dst, &size);
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
STREAM_STATE_PUT_ROCKSDB(pState, "partag", &groupId, dst, size);
|
||||
taosMemoryFree(dst);
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t streamStateGetParTag_rocksdb(SStreamState* pState, int64_t groupId, void** tagVal, int32_t* tagLen) {
|
||||
int code = 0;
|
||||
STREAM_STATE_GET_ROCKSDB(pState, "partag", &groupId, tagVal, tagLen);
|
||||
char* tVal = NULL;
|
||||
size_t tValLen = 0;
|
||||
STREAM_STATE_GET_ROCKSDB(pState, "partag", &groupId, &tVal, &tValLen);
|
||||
if (code != 0) {
|
||||
taosMemoryFree(tVal);
|
||||
return code;
|
||||
}
|
||||
code = (pState->pResultRowStore.resultRowGet)(pState->pExprSupp, tVal, tValLen, (char**)tagVal, (size_t*)tagLen);
|
||||
taosMemoryFree(tVal);
|
||||
|
||||
return code;
|
||||
}
|
||||
#endif
|
||||
|
@ -4351,6 +4483,7 @@ void streamStateClearBatch(void* pBatch) { rocksdb_writebatch_clear((rocksdb_
|
|||
void streamStateDestroyBatch(void* pBatch) { rocksdb_writebatch_destroy((rocksdb_writebatch_t*)pBatch); }
|
||||
int32_t streamStatePutBatch(SStreamState* pState, const char* cfKeyName, rocksdb_writebatch_t* pBatch, void* key,
|
||||
void* val, int32_t vlen, int64_t ttl) {
|
||||
int32_t code = 0;
|
||||
STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend;
|
||||
TAOS_UNUSED(atomic_add_fetch_64(&wrapper->dataWritten, 1));
|
||||
|
||||
|
@ -4380,10 +4513,23 @@ int32_t streamStatePutBatch(SStreamState* pState, const char* cfKeyName, rocksdb
|
|||
|
||||
int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb_writebatch_t* pBatch, void* key,
|
||||
void* val, int32_t vlen, int64_t ttl, void* tmpBuf) {
|
||||
int32_t code = 0;
|
||||
char buf[128] = {0};
|
||||
|
||||
char* dst = NULL;
|
||||
size_t size = 0;
|
||||
if (pState->pResultRowStore.resultRowPut == NULL || pState->pExprSupp == NULL) {
|
||||
dst = val;
|
||||
size = vlen;
|
||||
} else {
|
||||
code = (pState->pResultRowStore.resultRowPut)(pState->pExprSupp, val, vlen, &dst, &size);
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
}
|
||||
int32_t klen = ginitDict[cfIdx].enFunc((void*)key, buf);
|
||||
char* ttlV = tmpBuf;
|
||||
int32_t ttlVLen = ginitDict[cfIdx].enValueFunc(val, vlen, ttl, &ttlV);
|
||||
int32_t ttlVLen = ginitDict[cfIdx].enValueFunc(dst, size, ttl, &ttlV);
|
||||
|
||||
STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend;
|
||||
|
||||
|
@ -4392,6 +4538,8 @@ int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb
|
|||
rocksdb_column_family_handle_t* pCf = wrapper->pCf[ginitDict[cfIdx].idx];
|
||||
rocksdb_writebatch_put_cf((rocksdb_writebatch_t*)pBatch, pCf, buf, (size_t)klen, ttlV, (size_t)ttlVLen);
|
||||
|
||||
taosMemoryFree(dst);
|
||||
|
||||
if (tmpBuf == NULL) {
|
||||
taosMemoryFree(ttlV);
|
||||
}
|
||||
|
@ -4931,8 +5079,8 @@ int32_t dbChkpDumpTo(SDbChkp* p, char* dname, SArray* list) {
|
|||
}
|
||||
|
||||
char content[256] = {0};
|
||||
nBytes = tsnprintf(content, sizeof(content), META_ON_S3_FORMATE, p->pCurrent, p->curChkpId, p->pManifest, p->curChkpId,
|
||||
"processVer", processId);
|
||||
nBytes = tsnprintf(content, sizeof(content), META_ON_S3_FORMATE, p->pCurrent, p->curChkpId, p->pManifest,
|
||||
p->curChkpId, "processVer", processId);
|
||||
if (nBytes <= 0 || nBytes >= sizeof(content)) {
|
||||
code = TSDB_CODE_OUT_OF_RANGE;
|
||||
stError("chkp failed to format meta file: %s, reason: invalid msg", dstDir);
|
||||
|
|
|
@ -299,7 +299,7 @@ void streamTaskStartMonitorCheckRsp(SStreamTask* pTask) {
|
|||
return;
|
||||
}
|
||||
|
||||
/*SStreamTask* p = */ streamMetaAcquireOneTask(pTask); // add task ref here
|
||||
int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); // add task ref here
|
||||
streamTaskInitTaskCheckInfo(pInfo, &pTask->outputInfo, taosGetTimestampMs());
|
||||
|
||||
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
|
||||
|
|
|
@ -347,7 +347,8 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
|
|||
if (old == 0) {
|
||||
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
|
||||
stDebug("s-task:%s start checkpoint-trigger monitor in 10s, ref:%d ", pTask->id.idStr, ref);
|
||||
streamMetaAcquireOneTask(pTask);
|
||||
|
||||
int32_t unusedRetRef = streamMetaAcquireOneTask(pTask);
|
||||
streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId,
|
||||
"trigger-recv-monitor");
|
||||
pTmrInfo->launchChkptId = pActiveInfo->activeId;
|
||||
|
|
|
@ -1162,7 +1162,7 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) {
|
|||
if (old == 0) {
|
||||
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
|
||||
stDebug("s-task:%s start checkpoint-ready monitor in 10s, ref:%d ", pTask->id.idStr, ref);
|
||||
streamMetaAcquireOneTask(pTask);
|
||||
int32_t unusedRetRef = streamMetaAcquireOneTask(pTask);
|
||||
|
||||
streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId,
|
||||
"chkpt-ready-monitor");
|
||||
|
|
|
@ -753,12 +753,17 @@ int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t task
|
|||
return code;
|
||||
}
|
||||
|
||||
void streamMetaAcquireOneTask(SStreamTask* pTask) {
|
||||
int32_t streamMetaAcquireOneTask(SStreamTask* pTask) {
|
||||
int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1);
|
||||
stTrace("s-task:%s acquire task, ref:%d", pTask->id.idStr, ref);
|
||||
return ref;
|
||||
}
|
||||
|
||||
void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) {
|
||||
if (pTask == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
int32_t taskId = pTask->id.taskId;
|
||||
int32_t ref = atomic_sub_fetch_32(&pTask->refCnt, 1);
|
||||
|
||||
|
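With streamMetaAcquireOneTask now returning the updated reference count, call sites can log or check the count directly instead of doing a separate atomic_add_fetch_32 on refCnt. A sketch of the intended pattern (illustrative only, not lifted from the diff):

// take a task reference before arming a timer that will touch the task later
int32_t ref = streamMetaAcquireOneTask(pTask);
stDebug("s-task:%s acquire before timer start, ref:%d", pTask->id.idStr, ref);

// ... and the timer callback eventually drops it again
streamMetaReleaseTask(pTask->pMeta, pTask);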
@ -862,7 +867,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
|
|||
ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
|
||||
if (ppTask) {
|
||||
pTask = *ppTask;
|
||||
// it is an fill-history task, remove the related stream task's id that points to it
|
||||
// it is a fill-history task, remove the related stream task's id that points to it
|
||||
if (pTask->info.fillHistory == 0) {
|
||||
int32_t ret = atomic_sub_fetch_32(&pMeta->numOfStreamTasks, 1);
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@ static void streamTaskSchedHelper(void* param, void* tmrId);
|
|||
void streamSetupScheduleTrigger(SStreamTask* pTask) {
|
||||
int64_t delaySchema = pTask->info.delaySchedParam;
|
||||
if (delaySchema != 0 && pTask->info.fillHistory == 0) {
|
||||
int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1);
|
||||
int32_t ref = streamMetaAcquireOneTask(pTask);
|
||||
stDebug("s-task:%s setup scheduler trigger, ref:%d delay:%" PRId64 " ms", pTask->id.idStr, ref,
|
||||
pTask->info.delaySchedParam);
|
||||
|
||||
|
@ -63,7 +63,11 @@ int32_t streamTaskSchedTask(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int3
|
|||
pRunReq->reqType = execType;
|
||||
|
||||
SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
|
||||
return tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg);
|
||||
int32_t code = tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg);
|
||||
if (code) {
|
||||
stError("vgId:%d failed to put msg into stream queue, code:%s, %x", vgId, tstrerror(code), taskId);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
void streamTaskClearSchedIdleInfo(SStreamTask* pTask) { pTask->status.schedIdleTime = 0; }
|
||||
|
@ -76,7 +80,7 @@ void streamTaskResumeInFuture(SStreamTask* pTask) {
|
|||
pTask->status.schedIdleTime, ref);
|
||||
|
||||
// add one ref count for task
|
||||
streamMetaAcquireOneTask(pTask);
|
||||
int32_t unusedRetRef = streamMetaAcquireOneTask(pTask);
|
||||
streamTmrStart(streamTaskResumeHelper, pTask->status.schedIdleTime, pTask, streamTimer, &pTask->schedInfo.pIdleTimer,
|
||||
pTask->pMeta->vgId, "resume-task-tmr");
|
||||
}
|
||||
|
|
|
@ -343,7 +343,8 @@ _end:
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen, int32_t* pWinCode) {
|
||||
int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen,
|
||||
int32_t* pWinCode) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState);
|
||||
|
@ -575,7 +576,7 @@ static void transformCursor(SStreamFileState* pFileState, SStreamStateCur* pCur)
|
|||
static void checkAndTransformCursor(SStreamFileState* pFileState, const uint64_t groupId, SArray* pWinStates,
|
||||
SStreamStateCur** ppCur) {
|
||||
SSessionKey key = {.groupId = groupId};
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(*ppCur, &key, NULL, NULL);
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(getStateFileStore(pFileState), *ppCur, &key, NULL, NULL);
|
||||
if (taosArrayGetSize(pWinStates) > 0 &&
|
||||
(code == TSDB_CODE_FAILED || sessionStateKeyCompare(&key, pWinStates, 0) >= 0)) {
|
||||
if (!(*ppCur)) {
|
||||
|
@ -653,7 +654,7 @@ SStreamStateCur* countWinStateSeekKeyPrev(SStreamFileState* pFileState, const SS
|
|||
SSessionKey key = {0};
|
||||
void* pVal = NULL;
|
||||
int len = 0;
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(pCur, &key, &pVal, &len);
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(getStateFileStore(pFileState), pCur, &key, &pVal, &len);
|
||||
if (code == TSDB_CODE_FAILED) {
|
||||
streamStateFreeCur(pCur);
|
||||
return pBuffCur;
|
||||
|
@ -667,7 +668,7 @@ SStreamStateCur* countWinStateSeekKeyPrev(SStreamFileState* pFileState, const SS
|
|||
}
|
||||
streamStateCurPrev(pFileStore, pCur);
|
||||
while (1) {
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCur, &key, &pVal, &len);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(NULL, pCur, &key, &pVal, &len);
|
||||
if (code == TSDB_CODE_FAILED) {
|
||||
streamStateCurNext(pFileStore, pCur);
|
||||
return pCur;
|
||||
|
@ -710,7 +711,7 @@ int32_t sessionWinStateGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void
|
|||
*pKey = *(SSessionKey*)(pPos->pKey);
|
||||
} else {
|
||||
void* pData = NULL;
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCur, pKey, &pData, pVLen);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(getStateFileStore(pCur->pStreamFileState), pCur, pKey, &pData, pVLen);
|
||||
if (taosArrayGetSize(pWinStates) > 0 &&
|
||||
(code == TSDB_CODE_FAILED || sessionStateRangeKeyCompare(pKey, pWinStates, 0) >= 0)) {
|
||||
transformCursor(pCur->pStreamFileState, pCur);
|
||||
|
@ -915,7 +916,7 @@ _end:
|
|||
|
||||
int32_t getCountWinStateFromDisc(SStreamState* pState, SSessionKey* pKey, void** pVal, int32_t* pVLen) {
|
||||
SStreamStateCur* pCur = streamStateSessionSeekKeyCurrentNext_rocksdb(pState, pKey);
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(pCur, pKey, pVal, pVLen);
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, pKey, pVal, pVLen);
|
||||
streamStateFreeCur(pCur);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
|
@ -923,7 +924,7 @@ int32_t getCountWinStateFromDisc(SStreamState* pState, SSessionKey* pKey, void**
|
|||
pCur = streamStateSessionSeekKeyPrev_rocksdb(pState, pKey);
|
||||
}
|
||||
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCur, pKey, pVal, pVLen);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pState, pCur, pKey, pVal, pVLen);
|
||||
streamStateFreeCur(pCur);
|
||||
return code;
|
||||
}
|
||||
|
@ -1060,7 +1061,8 @@ _end:
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t createCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal, int32_t* pVLen) {
|
||||
int32_t createCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal,
|
||||
int32_t* pVLen) {
|
||||
SSessionKey* pWinKey = pKey;
|
||||
const TSKEY gap = 0;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
|
|
@ -302,7 +302,7 @@ SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key) {
|
|||
}
|
||||
|
||||
int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) {
|
||||
return streamStateGetKVByCur_rocksdb(pCur, pKey, pVal, pVLen);
|
||||
return streamStateGetKVByCur_rocksdb(getStateFileStore(pCur->pStreamFileState), pCur, pKey, pVal, pVLen);
|
||||
}
|
||||
|
||||
int32_t streamStateFillGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) {
|
||||
|
@ -477,6 +477,7 @@ int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal
|
|||
if (!pStr) {
|
||||
if (onlyCache && tSimpleHashGetSize(pState->parNameMap) < MAX_TABLE_NAME_NUM) {
|
||||
(*pWinCode) = TSDB_CODE_FAILED;
|
||||
goto _end;
|
||||
}
|
||||
(*pWinCode) = streamStateGetParName_rocksdb(pState, groupId, pVal);
|
||||
if ((*pWinCode) == TSDB_CODE_SUCCESS && tSimpleHashGetSize(pState->parNameMap) < MAX_TABLE_NAME_NUM) {
|
||||
|
@ -528,6 +529,9 @@ void streamStateCopyBackend(SStreamState* src, SStreamState* dst) {
|
|||
}
|
||||
dst->dump = 1;
|
||||
dst->pTdbState->pOwner->pBackend = src->pTdbState->pOwner->pBackend;
|
||||
dst->pResultRowStore.resultRowPut = src->pResultRowStore.resultRowPut;
|
||||
dst->pResultRowStore.resultRowGet = src->pResultRowStore.resultRowGet;
|
||||
dst->pExprSupp = src->pExprSupp;
|
||||
return;
|
||||
}
|
||||
SStreamStateCur* createStreamStateCursor() {
|
||||
|
|
|
@ -258,10 +258,12 @@ void tFreeStreamTask(SStreamTask* pTask) {
|
|||
|
||||
if (pTask->inputq.queue) {
|
||||
streamQueueClose(pTask->inputq.queue, pTask->id.taskId);
|
||||
pTask->inputq.queue = NULL;
|
||||
}
|
||||
|
||||
if (pTask->outputq.queue) {
|
||||
streamQueueClose(pTask->outputq.queue, pTask->id.taskId);
|
||||
pTask->outputq.queue = NULL;
|
||||
}
|
||||
|
||||
if (pTask->exec.qmsg) {
|
||||
|
@ -275,6 +277,7 @@ void tFreeStreamTask(SStreamTask* pTask) {
|
|||
|
||||
if (pTask->exec.pWalReader != NULL) {
|
||||
walCloseReader(pTask->exec.pWalReader);
|
||||
pTask->exec.pWalReader = NULL;
|
||||
}
|
||||
|
||||
streamClearChkptReadyMsg(pTask->chkInfo.pActiveInfo);
|
||||
|
@ -286,7 +289,7 @@ void tFreeStreamTask(SStreamTask* pTask) {
|
|||
if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) {
|
||||
tDeleteSchemaWrapper(pTask->outputInfo.tbSink.pSchemaWrapper);
|
||||
taosMemoryFree(pTask->outputInfo.tbSink.pTSchema);
|
||||
tSimpleHashCleanup(pTask->outputInfo.tbSink.pTblInfo);
|
||||
tSimpleHashCleanup(pTask->outputInfo.tbSink.pTbInfo);
|
||||
tDeleteSchemaWrapper(pTask->outputInfo.tbSink.pTagSchema);
|
||||
} else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
|
||||
taosArrayDestroy(pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos);
|
||||
|
|
|
@ -501,9 +501,10 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even
|
|||
if (pTrans == NULL) {
|
||||
ETaskStatus s = pSM->current.state;
|
||||
|
||||
if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__PAUSE && s != TASK_STATUS__STOP &&
|
||||
s != TASK_STATUS__UNINIT && s != TASK_STATUS__READY) {
|
||||
stError("s-task:%s invalid task status:%s on handling event:%s success", id, pSM->current.name, GET_EVT_NAME(pSM->prev.evt));
|
||||
if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__PAUSE && s != TASK_STATUS__STOP && s != TASK_STATUS__UNINIT &&
|
||||
s != TASK_STATUS__READY) {
|
||||
stError("s-task:%s invalid task status:%s on handling event:%s success", id, pSM->current.name,
|
||||
GET_EVT_NAME(pSM->prev.evt));
|
||||
}
|
||||
|
||||
// the pSM->prev.evt may be 0, so printing its event name is not appropriate.
|
||||
|
@ -521,11 +522,15 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even
|
|||
return TSDB_CODE_STREAM_INVALID_STATETRANS;
|
||||
}
|
||||
|
||||
// repeat pause will not overwrite the previous pause state
|
||||
if (pSM->current.state != TASK_STATUS__PAUSE || pTrans->next.state != TASK_STATUS__PAUSE) {
|
||||
keepPrevInfo(pSM);
|
||||
|
||||
pSM->current = pTrans->next;
|
||||
pSM->pActiveTrans = NULL;
|
||||
} else {
|
||||
stDebug("s-task:%s repeat pause evt recv, not update prev status", id);
|
||||
}
|
||||
|
||||
pSM->pActiveTrans = NULL;
|
||||
// todo remove it
|
||||
// todo: handle the error code
|
||||
// on success callback, add into lock if necessary, or maybe we should add an option for this?
|
||||
|
|
|
@ -56,7 +56,7 @@ void streamTmrStart(TAOS_TMR_CALLBACK fp, int32_t mseconds, void* pParam, void*
|
|||
}
|
||||
}
|
||||
|
||||
stDebug("vgId:%d start %s tmr succ", vgId, pMsg);
|
||||
stTrace("vgId:%d start %s tmr succ", vgId, pMsg);
|
||||
}
|
||||
|
||||
void streamTmrStop(tmr_h tmrId) {
|
||||
|
|
|
@ -698,7 +698,7 @@ void flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, boo
|
|||
|
||||
int idx = streamStateGetCfIdx(pFileState->pFileStore, pFileState->cfName);
|
||||
|
||||
int32_t len = pFileState->rowSize + sizeof(uint64_t) + sizeof(int32_t) + 64;
|
||||
int32_t len = (pFileState->rowSize + sizeof(uint64_t) + sizeof(int32_t) + 64) * 2;
|
||||
char* buf = taosMemoryCalloc(1, len);
|
||||
if (!buf) {
|
||||
code = terrno;
|
||||
|
@ -849,7 +849,7 @@ int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId) {
|
|||
void* pVal = NULL;
|
||||
int32_t vlen = 0;
|
||||
SSessionKey key = {0};
|
||||
winRes = streamStateSessionGetKVByCur_rocksdb(pCur, &key, &pVal, &vlen);
|
||||
winRes = streamStateSessionGetKVByCur_rocksdb(getStateFileStore(pFileState), pCur, &key, &pVal, &vlen);
|
||||
if (winRes != TSDB_CODE_SUCCESS) {
|
||||
break;
|
||||
}
|
||||
|
@ -903,7 +903,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState, int64_t ckId) {
|
|||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
|
||||
winCode = streamStateGetKVByCur_rocksdb(pCur, pNewPos->pKey, (const void**)&pVal, &vlen);
|
||||
winCode = streamStateGetKVByCur_rocksdb(getStateFileStore(pFileState), pCur, pNewPos->pKey, (const void**)&pVal, &vlen);
|
||||
if (winCode != TSDB_CODE_SUCCESS || pFileState->getTs(pNewPos->pKey) < pFileState->flushMark) {
|
||||
destroyRowBuffPos(pNewPos);
|
||||
SListNode* pNode = tdListPopTail(pFileState->usedBuffs);
|
||||
|
@ -912,6 +912,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState, int64_t ckId) {
|
|||
break;
|
||||
}
|
||||
if (vlen != pFileState->rowSize) {
|
||||
qError("row size mismatch, expect:%d, actual:%d", pFileState->rowSize, vlen);
|
||||
code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
|
|
|
@ -228,17 +228,17 @@ void *backendOpen() {
|
|||
memset(&key, 0, sizeof(key));
|
||||
char *val = NULL;
|
||||
int32_t vlen = 0;
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCurr, &key, (void **)&val, &vlen);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(NULL, pCurr, &key, (void **)&val, &vlen);
|
||||
ASSERT(code == 0);
|
||||
pCurr = streamStateSessionSeekKeyPrev_rocksdb(p, &key);
|
||||
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCurr, &key, (void **)&val, &vlen);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(NULL, pCurr, &key, (void **)&val, &vlen);
|
||||
ASSERT(code == 0);
|
||||
|
||||
ASSERT(key.groupId == 0 && key.win.ekey == tsArray[tsArray.size() - 2]);
|
||||
|
||||
pCurr = streamStateSessionSeekKeyNext_rocksdb(p, &key);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCurr, &key, (void **)&val, &vlen);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(NULL, pCurr, &key, (void **)&val, &vlen);
|
||||
ASSERT(code == 0);
|
||||
ASSERT(vlen == strlen("Value"));
|
||||
ASSERT(key.groupId == 0 && key.win.skey == tsArray[tsArray.size() - 1]);
|
||||
|
|
|
@ -24,7 +24,6 @@ extern "C" {
|
|||
|
||||
#define TIMER_MAX_MS 0x7FFFFFFF
|
||||
#define PING_TIMER_MS 5000
|
||||
#define HEARTBEAT_TICK_NUM 20
|
||||
|
||||
typedef struct SSyncEnv {
|
||||
uint8_t isStart;
|
||||
|
|
|
@ -977,9 +977,10 @@ static int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) {
|
|||
pData->logicClock = pSyncTimer->logicClock;
|
||||
pData->execTime = tsNow + pSyncTimer->timerMS;
|
||||
|
||||
sTrace("vgId:%d, start hb timer, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, pData->rid, pData->destId.addr);
|
||||
sTrace("vgId:%d, start hb timer, rid:%" PRId64 " addr:%" PRId64 " at %d", pSyncNode->vgId, pData->rid,
|
||||
pData->destId.addr, pSyncTimer->timerMS);
|
||||
|
||||
TAOS_CHECK_RETURN(taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM, (void*)(pData->rid),
|
||||
TAOS_CHECK_RETURN(taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS, (void*)(pData->rid),
|
||||
syncEnv()->pTimerManager, &pSyncTimer->pTimer));
|
||||
} else {
|
||||
code = TSDB_CODE_SYN_INTERNAL_ERROR;
|
||||
|
@ -2711,7 +2712,8 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) {
|
|||
return;
|
||||
}
|
||||
|
||||
sTrace("vgId:%d, eq peer hb timer, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, hbDataRid, pData->destId.addr);
|
||||
sTrace("vgId:%d, peer hb timer execution, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, hbDataRid,
|
||||
pData->destId.addr);
|
||||
|
||||
if (pSyncNode->totalReplicaNum > 1) {
|
||||
int64_t timerLogicClock = atomic_load_64(&pSyncTimer->logicClock);
|
||||
|
@ -2753,13 +2755,12 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) {
|
|||
if (ret != 0) {
|
||||
sError("vgId:%d, failed to send heartbeat since %s", pSyncNode->vgId, tstrerror(ret));
|
||||
}
|
||||
} else {
|
||||
}
|
||||
|
||||
if (syncIsInit()) {
|
||||
// sTrace("vgId:%d, reset peer hb timer", pSyncNode->vgId);
|
||||
if ((code = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM,
|
||||
(void*)hbDataRid, syncEnv()->pTimerManager, &pSyncTimer->pTimer)) != 0) {
|
||||
sTrace("vgId:%d, reset peer hb timer at %d", pSyncNode->vgId, pSyncTimer->timerMS);
|
||||
if ((code = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS, (void*)hbDataRid,
|
||||
syncEnv()->pTimerManager, &pSyncTimer->pTimer)) != 0) {
|
||||
sError("vgId:%d, reset peer hb timer error, %s", pSyncNode->vgId, tstrerror(code));
|
||||
syncNodeRelease(pSyncNode);
|
||||
syncHbTimerDataRelease(pData);
|
||||
|
|
|
@ -1446,6 +1446,9 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
|
|||
return ret;
|
||||
}
|
||||
ofpCell = tdbPageGetCell(ofp, 0);
|
||||
if (ofpCell == NULL) {
|
||||
return TSDB_CODE_INVALID_DATA_FMT;
|
||||
}
|
||||
|
||||
if (nLeft <= ofp->maxLocal - sizeof(SPgno)) {
|
||||
bytes = nLeft;
|
||||
|
|
|
@ -282,6 +282,17 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList)
|
|||
}
|
||||
|
||||
static void walAlignVersions(SWal* pWal) {
|
||||
if (pWal->cfg.committed > 0 && pWal->cfg.committed != pWal->vers.snapshotVer) {
|
||||
wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is different from commited:%" PRId64
|
||||
". in vnode/mnode. align with it.",
|
||||
pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->cfg.committed);
|
||||
pWal->vers.snapshotVer = pWal->cfg.committed;
|
||||
}
|
||||
if (pWal->vers.snapshotVer < 0 && pWal->vers.firstVer > 0) {
|
||||
wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is an invalid value. align it with firstVer:%" PRId64 ".",
|
||||
pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->vers.firstVer);
|
||||
pWal->vers.snapshotVer = pWal->vers.firstVer;
|
||||
}
|
||||
if (pWal->vers.firstVer > pWal->vers.snapshotVer + 1) {
|
||||
wWarn("vgId:%d, firstVer:%" PRId64 " is larger than snapshotVer:%" PRId64 " + 1. align with it.", pWal->cfg.vgId,
|
||||
pWal->vers.firstVer, pWal->vers.snapshotVer);
|
||||
|
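walAlignVersions above normalizes the version triplet at open time. A compact restatement of the post-conditions implied by the visible part of the hunk (a sketch with illustrative asserts, not actual code from the commit):

#include <assert.h>

// 1. a positive cfg.committed wins over whatever snapshotVer the wal recorded
// 2. a negative snapshotVer is replaced by firstVer when firstVer is valid
// 3. firstVer running ahead of snapshotVer + 1 is detected and re-aligned
//    (the alignment action itself continues past the end of this hunk)
assert(pWal->cfg.committed <= 0 || pWal->vers.snapshotVer == pWal->cfg.committed);
assert(pWal->vers.firstVer <= 0 || pWal->vers.snapshotVer >= 0);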
@ -400,6 +411,17 @@ static int32_t walTrimIdxFile(SWal* pWal, int32_t fileIdx) {
|
|||
TAOS_RETURN(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
void printFileSet(SArray* fileSet) {
|
||||
int32_t sz = taosArrayGetSize(fileSet);
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SWalFileInfo* pFileInfo = taosArrayGet(fileSet, i);
|
||||
wInfo("firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileSize:%" PRId64 ", syncedOffset:%" PRId64 ", createTs:%" PRId64
|
||||
", closeTs:%" PRId64,
|
||||
pFileInfo->firstVer, pFileInfo->lastVer, pFileInfo->fileSize, pFileInfo->syncedOffset, pFileInfo->createTs,
|
||||
pFileInfo->closeTs);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t walCheckAndRepairMeta(SWal* pWal) {
|
||||
// load log files, get first/snapshot/last version info
|
||||
int32_t code = 0;
|
||||
|
@ -460,6 +482,10 @@ int32_t walCheckAndRepairMeta(SWal* pWal) {
|
|||
|
||||
taosArraySort(actualLog, compareWalFileInfo);
|
||||
|
||||
wInfo("vgId:%d, wal path:%s, actual log file num:%d", pWal->cfg.vgId, pWal->path,
|
||||
(int32_t)taosArrayGetSize(actualLog));
|
||||
printFileSet(actualLog);
|
||||
|
||||
int metaFileNum = taosArrayGetSize(pWal->fileInfoSet);
|
||||
int actualFileNum = taosArrayGetSize(actualLog);
|
||||
int64_t firstVerPrev = pWal->vers.firstVer;
|
||||
|
@ -474,6 +500,10 @@ int32_t walCheckAndRepairMeta(SWal* pWal) {
|
|||
TAOS_RETURN(code);
|
||||
}
|
||||
|
||||
wInfo("vgId:%d, wal path:%s, meta log file num:%d", pWal->cfg.vgId, pWal->path,
|
||||
(int32_t)taosArrayGetSize(pWal->fileInfoSet));
|
||||
printFileSet(pWal->fileInfoSet);
|
||||
|
||||
int32_t sz = taosArrayGetSize(pWal->fileInfoSet);
|
||||
|
||||
// scan and determine the lastVer
|
||||
|
@ -533,6 +563,7 @@ int32_t walCheckAndRepairMeta(SWal* pWal) {
|
|||
// repair ts of files
|
||||
TAOS_CHECK_RETURN(walRepairLogFileTs(pWal, &updateMeta));
|
||||
|
||||
printFileSet(pWal->fileInfoSet);
|
||||
// update meta file
|
||||
if (updateMeta) {
|
||||
TAOS_CHECK_RETURN(walSaveMeta(pWal));
|
||||
|
@ -1124,6 +1155,10 @@ int32_t walLoadMeta(SWal* pWal) {
|
|||
(void)taosCloseFile(&pFile);
|
||||
taosMemoryFree(buf);
|
||||
|
||||
wInfo("vgId:%d, load meta file: %s, fileInfoSet size:%d", pWal->cfg.vgId, fnameStr,
|
||||
(int32_t)taosArrayGetSize(pWal->fileInfoSet));
|
||||
printFileSet(pWal->fileInfoSet);
|
||||
|
||||
TAOS_RETURN(code);
|
||||
}
|
||||
|
||||
|
|
|
@ -91,6 +91,7 @@ static int32_t walInitLock(SWal *pWal) {
|
|||
}
|
||||
|
||||
SWal *walOpen(const char *path, SWalCfg *pCfg) {
|
||||
int32_t code = 0;
|
||||
SWal *pWal = taosMemoryCalloc(1, sizeof(SWal));
|
||||
if (pWal == NULL) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
|
@ -160,17 +161,20 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) {
|
|||
pWal->writeHead.magic = WAL_MAGIC;
|
||||
|
||||
// load meta
|
||||
if (walLoadMeta(pWal) < 0) {
|
||||
wInfo("vgId:%d, failed to load meta since %s", pWal->cfg.vgId, tstrerror(terrno));
|
||||
code = walLoadMeta(pWal);
|
||||
if (code < 0) {
|
||||
wWarn("vgId:%d, failed to load meta since %s", pWal->cfg.vgId, tstrerror(code));
|
||||
}
|
||||
|
||||
if (walCheckAndRepairMeta(pWal) < 0) {
|
||||
wError("vgId:%d, cannot open wal since repair meta file failed", pWal->cfg.vgId);
|
||||
code = walCheckAndRepairMeta(pWal);
|
||||
if (code < 0) {
|
||||
wError("vgId:%d, cannot open wal since repair meta file failed since %s", pWal->cfg.vgId, tstrerror(code));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
if (walCheckAndRepairIdx(pWal) < 0) {
|
||||
wError("vgId:%d, cannot open wal since repair idx file failed", pWal->cfg.vgId);
|
||||
code = walCheckAndRepairIdx(pWal);
|
||||
if (code < 0) {
|
||||
wError("vgId:%d, cannot open wal since repair idx file failed since %s", pWal->cfg.vgId, tstrerror(code));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
|
|
|
@ -127,6 +127,7 @@ class WalRetentionEnv : public ::testing::Test {
|
|||
SWalCfg cfg;
|
||||
cfg.rollPeriod = -1;
|
||||
cfg.segSize = -1;
|
||||
cfg.committed = -1;
|
||||
cfg.retentionPeriod = -1;
|
||||
cfg.retentionSize = 0;
|
||||
cfg.rollPeriod = 0;
|
||||
|
|
|
@ -805,7 +805,7 @@ int32_t taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, i
|
|||
char *ppp = strchr(inTimezoneStr, ',');
|
||||
int indexStr;
|
||||
if (pp == NULL || ppp == NULL) {
|
||||
indexStr = snprintf(winStr, sizeof(winStr), "TZ=UTC");
|
||||
indexStr = tsnprintf(winStr, sizeof(winStr), "TZ=UTC");
|
||||
} else {
|
||||
memcpy(winStr, "TZ=", 3);
|
||||
pp++;
|
||||
|
|
|
@ -200,6 +200,7 @@ void* taosArrayPop(SArray* pArray) {
|
|||
void* taosArrayGet(const SArray* pArray, size_t index) {
|
||||
if (NULL == pArray) {
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
uError("failed to return value from array of null ptr");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -449,7 +449,9 @@ int32_t tsDecompressDoubleImpAvx2(const char *input, const int32_t nelements, ch
|
|||
}
|
||||
return (int32_t)(out - output);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if __AVX512VL__
|
||||
// decode two timestamps in one loop.
|
||||
void tsDecompressTimestampAvx2(const char *const input, const int32_t nelements, char *const output, bool bigEndian) {
|
||||
int64_t *ostream = (int64_t *)output;
|
||||
|
@ -588,9 +590,7 @@ void tsDecompressTimestampAvx2(const char *const input, const int32_t nelements,
|
|||
}
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if __AVX512VL__
|
||||
void tsDecompressTimestampAvx512(const char *const input, const int32_t nelements, char *const output,
|
||||
bool UNUSED_PARAM(bigEndian)) {
|
||||
int64_t *ostream = (int64_t *)output;
|
||||
|
|
|
@ -317,7 +317,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CLOG_IS_NULL, "Transaction commitlog
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL, "Unable to establish connection While execute transaction and will continue in the background")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_LAST_TRANS_NOT_FINISHED, "Last Transaction not finished")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_SYNC_TIMEOUT, "Sync timeout While execute transaction and will continue in the background")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CTX_SWITCH, "Transaction context switch")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CTX_SWITCH, "Wrong transaction execution context")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CONFLICT_COMPACT, "Transaction not completed due to conflict with compact")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_UNKNOW_ERROR, "Unknown transaction error")
|
||||
|
||||
|
|
|
@ -13,6 +13,7 @@ from frame.srvCtl import *
|
|||
from frame.caseBase import *
|
||||
from frame import *
|
||||
from frame.autogen import *
|
||||
from frame import epath
|
||||
# from frame.server.dnodes import *
|
||||
# from frame.server.cluster import *
|
||||
|
||||
|
@ -20,7 +21,9 @@ from frame.autogen import *
|
|||
class TDTestCase(TBase):
|
||||
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
updatecfgDict = {'dDebugFlag':131}
|
||||
super(TDTestCase, self).init(conn, logSql, replicaVar=1, checkColName="c1")
|
||||
|
||||
self.valgrind = 0
|
||||
self.db = "test"
|
||||
self.stb = "meters"
|
||||
|
@ -50,9 +53,36 @@ class TDTestCase(TBase):
|
|||
tdSql.error("create encrypt_key '12345678abcdefghi'")
|
||||
tdSql.error("create database test ENCRYPT_ALGORITHM 'sm4'")
|
||||
|
||||
def recreate_dnode_encrypt_key(self):
|
||||
"""
|
||||
Description: From the jira TS-5507, the encrypt key can be recreated.
|
||||
create:
|
||||
2024-09-23 created by Charles
|
||||
update:
|
||||
None
|
||||
"""
|
||||
# taosd path
|
||||
taosd_path = epath.binPath()
|
||||
tdLog.info(f"taosd_path: {taosd_path}")
|
||||
# dnode2 path
|
||||
dnode2_path = tdDnodes.getDnodeDir(2)
|
||||
dnode2_data_path = os.sep.join([dnode2_path, "data"])
|
||||
dnode2_cfg_path = os.sep.join([dnode2_path, "cfg"])
|
||||
tdLog.info(f"dnode2_path: {dnode2_data_path}")
|
||||
# stop dnode2
|
||||
tdDnodes.stoptaosd(2)
|
||||
tdLog.info("stop dndoe2")
|
||||
# delete dnode2 data
|
||||
cmd = f"rm -rf {dnode2_data_path}"
|
||||
os.system(cmd)
|
||||
# recreate the encrypt key for dnode2
|
||||
os.system(f"{os.sep.join([taosd_path, 'taosd'])} -y '1234567890' -c {dnode2_cfg_path}")
|
||||
tdLog.info("test case: recreate the encrypt key for dnode2 passed")
|
||||
|
||||
def run(self):
|
||||
self.create_encrypt_db_error()
|
||||
self.create_encrypt_db()
|
||||
self.recreate_dnode_encrypt_key()
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
|
|
|
@ -18,6 +18,7 @@ import time
|
|||
import socket
|
||||
import json
|
||||
import toml
|
||||
import subprocess
|
||||
from frame.boundary import DataBoundary
|
||||
import taos
|
||||
from frame.log import *
|
||||
|
@ -1830,6 +1831,51 @@ class TDCom:
|
|||
if i == 1:
|
||||
self.record_history_ts = ts_value
|
||||
|
||||
def generate_query_result(self, inputfile, test_case):
|
||||
if not os.path.exists(inputfile):
|
||||
tdLog.exit(f"Input file '{inputfile}' does not exist.")
|
||||
else:
|
||||
self.query_result_file = f"./temp_{test_case}.result"
|
||||
os.system(f"taos -f {inputfile} | grep -v 'Query OK'|grep -v 'Copyright'| grep -v 'Welcome to the TDengine Command' > {self.query_result_file} ")
|
||||
return self.query_result_file
|
||||
|
||||
def compare_result_files(self, file1, file2):
|
||||
|
||||
try:
|
||||
# use subprocess.run to execute diff/fc commands
|
||||
# print(file1, file2)
|
||||
if platform.system().lower() != 'windows':
|
||||
cmd='diff'
|
||||
result = subprocess.run([cmd, "-u", "--color", file1, file2], text=True, capture_output=True)
|
||||
else:
|
||||
cmd='fc'
|
||||
result = subprocess.run([cmd, file1, file2], text=True, capture_output=True)
|
||||
# if result is not empty, print the differences and files name. Otherwise, the files are identical.
|
||||
if result.stdout:
|
||||
tdLog.debug(f"Differences between {file1} and {file2}")
|
||||
tdLog.notice(f"\r\n{result.stdout}")
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
except FileNotFoundError:
|
||||
tdLog.debug("The 'diff' command is not found. Please make sure it's installed and available in your PATH.")
|
||||
except Exception as e:
|
||||
tdLog.debug(f"An error occurred: {e}")
|
||||
|
||||
|
||||
def compare_testcase_result(self, inputfile, expected_file, test_case):
|
||||
test_result_file = self.generate_query_result(inputfile, test_case)
|
||||
|
||||
if self.compare_result_files(expected_file, test_result_file):
|
||||
tdLog.info("Test passed: Result files are identical.")
|
||||
os.system(f"rm -f {test_reulst_file}")
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
tdLog.exit(f"{caller.lineno}(line:{caller.lineno}) failed: sqlfile is {inputfile}, expect_file:{expected_file} != reult_file:{test_reulst_file} ")
|
||||
|
||||
tdLog.exit("Test failed: Result files are different.")
|
||||
|
||||
|
||||
def is_json(msg):
|
||||
if isinstance(msg, str):
|
||||
try:
|
||||
|
@ -1864,4 +1910,6 @@ def dict2toml(in_dict: dict, file:str):
|
|||
with open(file, 'w') as f:
|
||||
toml.dump(in_dict, f)
|
||||
|
||||
|
||||
|
||||
tdCom = TDCom()
|
||||
|
|
|
@ -0,0 +1,114 @@
|
|||
|
||||
taos> select pi()
|
||||
pi() |
|
||||
============================
|
||||
3.141592653589793 |
|
||||
|
||||
taos> select pi() + 1
|
||||
pi() + 1 |
|
||||
============================
|
||||
4.141592653589793 |
|
||||
|
||||
taos> select pi() - 1
|
||||
pi() - 1 |
|
||||
============================
|
||||
2.141592653589793 |
|
||||
|
||||
taos> select pi() * 2
|
||||
pi() * 2 |
|
||||
============================
|
||||
6.283185307179586 |
|
||||
|
||||
taos> select pi() / 2
|
||||
pi() / 2 |
|
||||
============================
|
||||
1.570796326794897 |
|
||||
|
||||
taos> select pi() from ts_4893.meters limit 5
|
||||
pi() |
|
||||
============================
|
||||
3.141592653589793 |
|
||||
3.141592653589793 |
|
||||
3.141592653589793 |
|
||||
3.141592653589793 |
|
||||
3.141592653589793 |
|
||||
|
||||
taos> select pi() + 1 from ts_4893.meters limit 1
|
||||
pi() + 1 |
|
||||
============================
|
||||
4.141592653589793 |
|
||||
|
||||
taos> select pi() - 1 from ts_4893.meters limit 1
|
||||
pi() - 1 |
|
||||
============================
|
||||
2.141592653589793 |
|
||||
|
||||
taos> select pi() * 2 from ts_4893.meters limit 1
|
||||
pi() * 2 |
|
||||
============================
|
||||
6.283185307179586 |
|
||||
|
||||
taos> select pi() / 2 from ts_4893.meters limit 1
|
||||
pi() / 2 |
|
||||
============================
|
||||
1.570796326794897 |
|
||||
|
||||
taos> select pi() + pi() from ts_4893.meters limit 1
|
||||
pi() + pi() |
|
||||
============================
|
||||
6.283185307179586 |
|
||||
|
||||
taos> select pi() - pi() from ts_4893.meters limit 1
|
||||
pi() - pi() |
|
||||
============================
|
||||
0.000000000000000 |
|
||||
|
||||
taos> select pi() * pi() from ts_4893.meters limit 1
|
||||
pi() * pi() |
|
||||
============================
|
||||
9.869604401089358 |
|
||||
|
||||
taos> select pi() / pi() from ts_4893.meters limit 1
|
||||
pi() / pi() |
|
||||
============================
|
||||
1.000000000000000 |
|
||||
|
||||
taos> select pi() + id from ts_4893.meters order by ts limit 5
|
||||
pi() + id |
|
||||
============================
|
||||
3.141592653589793 |
|
||||
4.141592653589793 |
|
||||
5.141592653589793 |
|
||||
6.141592653589793 |
|
||||
7.141592653589793 |
|
||||
|
||||
taos> select abs(pi())
|
||||
abs(pi()) |
|
||||
============================
|
||||
3.141592653589793 |
|
||||
|
||||
taos> select pow(pi(), 2)
|
||||
pow(pi(), 2) |
|
||||
============================
|
||||
9.869604401089358 |
|
||||
|
||||
taos> select sqrt(pi())
|
||||
sqrt(pi()) |
|
||||
============================
|
||||
1.772453850905516 |
|
||||
|
||||
taos> select cast(pi() as int)
|
||||
cast(pi() as int) |
|
||||
====================
|
||||
3 |
|
||||
|
||||
taos> select pi()
|
||||
pi() |
|
||||
============================
|
||||
3.141592653589793 |
|
||||
|
||||
taos> select substring_index(null, '.', 2)
|
||||
substring_index(null, '.', 2) |
|
||||
================================
|
||||
NULL |
|
||||
|
|
|
@ -1,20 +1,21 @@
|
|||
select pi();
|
||||
select pi() + 1;
|
||||
select pi() - 1;
|
||||
select pi() * 2;
|
||||
select pi() / 2;
|
||||
select pi() from ts_4893.meters limit 5;
|
||||
select pi() + 1 from ts_4893.meters limit 1;
|
||||
select pi() - 1 from ts_4893.meters limit 1;
|
||||
select pi() * 2 from ts_4893.meters limit 1;
|
||||
select pi() / 2 from ts_4893.meters limit 1;
|
||||
select pi() + pi() from ts_4893.meters limit 1;
|
||||
select pi() - pi() from ts_4893.meters limit 1;
|
||||
select pi() * pi() from ts_4893.meters limit 1;
|
||||
select pi() / pi() from ts_4893.meters limit 1;
|
||||
select pi() + id from ts_4893.meters order by ts limit 5;
|
||||
select abs(pi());
|
||||
select pow(pi(), 2);
|
||||
select sqrt(pi());
|
||||
select cast(pi() as int);
|
||||
select pi();
|
||||
select pi()
|
||||
select pi() + 1
|
||||
select pi() - 1
|
||||
select pi() * 2
|
||||
select pi() / 2
|
||||
select pi() from ts_4893.meters limit 5
|
||||
select pi() + 1 from ts_4893.meters limit 1
|
||||
select pi() - 1 from ts_4893.meters limit 1
|
||||
select pi() * 2 from ts_4893.meters limit 1
|
||||
select pi() / 2 from ts_4893.meters limit 1
|
||||
select pi() + pi() from ts_4893.meters limit 1
|
||||
select pi() - pi() from ts_4893.meters limit 1
|
||||
select pi() * pi() from ts_4893.meters limit 1
|
||||
select pi() / pi() from ts_4893.meters limit 1
|
||||
select pi() + id from ts_4893.meters order by ts limit 5
|
||||
select abs(pi())
|
||||
select pow(pi(), 2)
|
||||
select sqrt(pi())
|
||||
select cast(pi() as int)
|
||||
select pi()
|
||||
select substring_index(null, '.', 2)
|
||||
|
|
|
@ -17,14 +17,15 @@ import random
|
|||
|
||||
import taos
|
||||
import frame
|
||||
import frame.etool
|
||||
|
||||
from frame.etool import *
|
||||
from frame.log import *
|
||||
from frame.cases import *
|
||||
from frame.sql import *
|
||||
from frame.caseBase import *
|
||||
from frame import *
|
||||
|
||||
from frame import etool
|
||||
from frame.common import *
|
||||
|
||||
class TDTestCase(TBase):
|
||||
updatecfgDict = {
|
||||
|
@ -84,8 +85,16 @@ class TDTestCase(TBase):
|
|||
tdSql.error(err_statement)
|
||||
err_statement = ''
|
||||
|
||||
def test_normal_query_new(self, testCase):
|
||||
# read SQL statements from the .in file and execute them
|
||||
tdLog.info(f"test normal query.")
|
||||
self.sqlFile = etool.curFile(__file__, f"in/{testCase}.in")
|
||||
self.ansFile = etool.curFile(__file__, f"ans/{testCase}_1.csv")
|
||||
|
||||
tdCom.compare_testcase_result(self.sqlFile, self.ansFile, testCase)
|
||||
|
||||
def test_pi(self):
|
||||
self.test_normal_query("pi")
|
||||
self.test_normal_query_new("pi")
|
||||
|
||||
def test_round(self):
|
||||
self.test_normal_query("round")
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
# army-test
|
||||
#
|
||||
,,y,army,./pytest.sh python3 ./test.py -f multi-level/mlevel_basic.py -N 3 -L 3 -D 2
|
||||
,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py
|
||||
,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py -N 3 -M 3
|
||||
,,n,army,python3 ./test.py -f storage/s3/s3Basic.py -N 3
|
||||
,,y,army,./pytest.sh python3 ./test.py -f cluster/snapshot.py -N 3 -L 3 -D 2
|
||||
,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_elapsed.py
|
||||
|
|
|
@ -225,6 +225,7 @@ python3 test.py -f query/distinctOneColTb.py
|
|||
python3 ./test.py -f query/filter.py
|
||||
python3 ./test.py -f query/filterCombo.py
|
||||
python3 ./test.py -f query/queryNormal.py
|
||||
python3 ./test.py -f query/not.py
|
||||
python3 ./test.py -f query/queryError.py
|
||||
python3 ./test.py -f query/filterAllIntTypes.py
|
||||
python3 ./test.py -f query/filterFloatAndDouble.py
|
||||
|
|
|
@ -139,6 +139,7 @@ python3 ./test.py -f query/querySort.py
|
|||
python3 ./test.py -f query/queryJoin.py
|
||||
python3 ./test.py -f query/filterCombo.py
|
||||
python3 ./test.py -f query/queryNormal.py
|
||||
python3 ./test.py -f query/not.py
|
||||
python3 ./test.py -f query/select_last_crash.py
|
||||
python3 ./test.py -f query/queryNullValueTest.py
|
||||
python3 ./test.py -f query/queryInsertValue.py
|
||||
|
|
|
@ -133,4 +133,17 @@ if $data13 != -111 then
|
|||
goto loop1
|
||||
endi
|
||||
|
||||
print step 2====================
|
||||
|
||||
sql create database test vgroups 1 ;
|
||||
sql use test;
|
||||
sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int);
|
||||
sql create table t1 using st tags(1,1,1);
|
||||
sql create table t2 using st tags(2,2,2);
|
||||
|
||||
sql_error create stream streams1 trigger max_delay 4000a ignore update 0 ignore expired 0 into streamtST1 as select _wstart, count(*) from st interval(5s);
|
||||
sql_error create stream streams2 trigger max_delay 4s ignore update 0 ignore expired 0 into streamtST2 as select _wstart, count(*) from st interval(5s);
|
||||
sql create stream streams3 trigger max_delay 5000a ignore update 0 ignore expired 0 into streamtST3 as select _wstart, count(*) from st interval(5s);
|
||||
sql create stream streams4 trigger max_delay 5s ignore update 0 ignore expired 0 into streamtST4 as select _wstart, count(*) from st interval(5s);
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
|
|
|
@ -48,15 +48,15 @@ sql create table t1 using st tags(1);
|
|||
sql create table t2 using st tags(2);
|
||||
|
||||
sql create stream stream2 trigger window_close into streamt2 as select _wstart, sum(a) from st interval(10s);
|
||||
sql create stream stream3 trigger max_delay 1s into streamt3 as select _wstart, sum(a) from st interval(10s);
|
||||
sql create stream stream3 trigger max_delay 5s into streamt3 as select _wstart, sum(a) from st interval(10s);
|
||||
sql create stream stream4 trigger window_close into streamt4 as select _wstart, sum(a) from t1 interval(10s);
|
||||
sql create stream stream5 trigger max_delay 1s into streamt5 as select _wstart, sum(a) from t1 interval(10s);
|
||||
sql create stream stream5 trigger max_delay 5s into streamt5 as select _wstart, sum(a) from t1 interval(10s);
|
||||
sql create stream stream6 trigger window_close into streamt6 as select _wstart, sum(a) from st session(ts, 10s);
|
||||
sql create stream stream7 trigger max_delay 1s into streamt7 as select _wstart, sum(a) from st session(ts, 10s);
|
||||
sql create stream stream7 trigger max_delay 5s into streamt7 as select _wstart, sum(a) from st session(ts, 10s);
|
||||
sql create stream stream8 trigger window_close into streamt8 as select _wstart, sum(a) from t1 session(ts, 10s);
|
||||
sql create stream stream9 trigger max_delay 1s into streamt9 as select _wstart, sum(a) from t1 session(ts, 10s);
|
||||
sql create stream stream9 trigger max_delay 5s into streamt9 as select _wstart, sum(a) from t1 session(ts, 10s);
|
||||
sql create stream stream10 trigger window_close into streamt10 as select _wstart, sum(a) from t1 state_window(b);
|
||||
sql create stream stream11 trigger max_delay 1s into streamt11 as select _wstart, sum(a) from t1 state_window(b);
|
||||
sql create stream stream11 trigger max_delay 5s into streamt11 as select _wstart, sum(a) from t1 state_window(b);
|
||||
|
||||
run tsim/stream/checkTaskStatus.sim
|
||||
|
||||
|
@ -138,12 +138,12 @@ if $rows != 2 then
|
|||
goto loop1
|
||||
endi
|
||||
|
||||
print step 1 max delay 2s
|
||||
print step 1 max delay 5s
|
||||
sql create database test3 vgroups 4;
|
||||
sql use test3;
|
||||
sql create table t1(ts timestamp, a int, b int , c int, d double);
|
||||
|
||||
sql create stream stream13 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 interval(10s);
|
||||
sql create stream stream13 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 interval(10s);
|
||||
|
||||
run tsim/stream/checkTaskStatus.sim
|
||||
|
||||
|
@ -172,8 +172,8 @@ $now02 = $data02
|
|||
$now12 = $data12
|
||||
|
||||
|
||||
print step1 max delay 2s......... sleep 3s
|
||||
sleep 3000
|
||||
print step1 max delay 5s......... sleep 6s
|
||||
sleep 6000
|
||||
|
||||
sql select * from streamt13;
|
||||
|
||||
|
@ -188,7 +188,7 @@ if $data12 != $now12 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
print step 2 max delay 2s
|
||||
print step 2 max delay 5s
|
||||
|
||||
sql create database test4 vgroups 4;
|
||||
sql use test4;
|
||||
|
@ -197,7 +197,7 @@ sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,t
|
|||
sql create table t1 using st tags(1,1,1);
|
||||
sql create table t2 using st tags(2,2,2);
|
||||
|
||||
sql create stream stream14 trigger max_delay 2s into streamt14 as select _wstart, sum(a), now from st partition by tbname interval(10s);
|
||||
sql create stream stream14 trigger max_delay 5s into streamt14 as select _wstart, sum(a), now from st partition by tbname interval(10s);
|
||||
|
||||
run tsim/stream/checkTaskStatus.sim
|
||||
|
||||
|
@ -234,8 +234,8 @@ $now12 = $data12
|
|||
$now22 = $data22
|
||||
$now32 = $data32
|
||||
|
||||
print step2 max delay 2s......... sleep 3s
|
||||
sleep 3000
|
||||
print step2 max delay 5s......... sleep 6s
|
||||
sleep 6000
|
||||
|
||||
sql select * from streamt14 order by 2;
|
||||
print $data00 $data01 $data02
|
||||
|
@ -264,8 +264,8 @@ if $data32 != $now32 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
print step2 max delay 2s......... sleep 3s
|
||||
sleep 3000
|
||||
print step2 max delay 5s......... sleep 6s
|
||||
sleep 6000
|
||||
|
||||
sql select * from streamt14 order by 2;
|
||||
print $data00 $data01 $data02
|
||||
|
@ -294,12 +294,12 @@ if $data32 != $now32 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
print step 2 max delay 2s
|
||||
print step 2 max delay 5s
|
||||
sql create database test15 vgroups 4;
|
||||
sql use test15;
|
||||
sql create table t1(ts timestamp, a int, b int , c int, d double);
|
||||
|
||||
sql create stream stream15 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 session(ts, 10s);
|
||||
sql create stream stream15 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 session(ts, 10s);
|
||||
|
||||
run tsim/stream/checkTaskStatus.sim
|
||||
|
||||
|
@ -328,8 +328,8 @@ $now02 = $data02
|
|||
$now12 = $data12
|
||||
|
||||
|
||||
print step1 max delay 2s......... sleep 3s
|
||||
sleep 3000
|
||||
print step1 max delay 5s......... sleep 6s
|
||||
sleep 6000
|
||||
|
||||
sql select * from streamt13;
|
||||
|
||||
|
@ -344,8 +344,8 @@ if $data12 != $now12 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
print step1 max delay 2s......... sleep 3s
|
||||
sleep 3000
|
||||
print step1 max delay 5s......... sleep 6s
|
||||
sleep 6000
|
||||
|
||||
sql select * from streamt13;
|
||||
|
||||
|
@ -362,12 +362,12 @@ endi
|
|||
|
||||
print session max delay over
|
||||
|
||||
print step 3 max delay 2s
|
||||
print step 3 max delay 5s
|
||||
sql create database test16 vgroups 4;
|
||||
sql use test16;
|
||||
sql create table t1(ts timestamp, a int, b int , c int, d double);
|
||||
|
||||
sql create stream stream16 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 state_window(a);
|
||||
sql create stream stream16 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 state_window(a);
|
||||
|
||||
run tsim/stream/checkTaskStatus.sim
|
||||
|
||||
|
@ -396,8 +396,8 @@ $now02 = $data02
|
|||
$now12 = $data12
|
||||
|
||||
|
||||
print step1 max delay 2s......... sleep 3s
|
||||
sleep 3000
|
||||
print step1 max delay 5s......... sleep 6s
|
||||
sleep 6000
|
||||
|
||||
sql select * from streamt13;
|
||||
|
||||
|
@ -412,8 +412,8 @@ if $data12 != $now12 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
print step1 max delay 2s......... sleep 3s
|
||||
sleep 3000
|
||||
print step1 max delay 5s......... sleep 6s
|
||||
sleep 6000
|
||||
|
||||
sql select * from streamt13;
|
||||
|
||||
|
@ -430,12 +430,12 @@ endi
|
|||
|
||||
print state max delay over
|
||||
|
||||
print step 4 max delay 2s
|
||||
print step 4 max delay 5s
|
||||
sql create database test17 vgroups 4;
|
||||
sql use test17;
|
||||
sql create table t1(ts timestamp, a int, b int , c int, d double);
|
||||
|
||||
sql create stream stream17 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 event_window start with a = 1 end with a = 9;
|
||||
sql create stream stream17 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 event_window start with a = 1 end with a = 9;
|
||||
|
||||
run tsim/stream/checkTaskStatus.sim
|
||||
|
||||
|
@ -467,8 +467,8 @@ $now02 = $data02
|
|||
$now12 = $data12
|
||||
|
||||
|
||||
print step1 max delay 2s......... sleep 3s
|
||||
sleep 3000
|
||||
print step1 max delay 5s......... sleep 6s
|
||||
sleep 6000
|
||||
|
||||
sql select * from streamt13;
|
||||
|
||||
|
@ -483,8 +483,8 @@ if $data12 != $now12 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
print step1 max delay 2s......... sleep 3s
|
||||
sleep 3000
|
||||
print step1 max delay 5s......... sleep 6s
|
||||
sleep 6000
|
||||
|
||||
sql select * from streamt13;
|
||||
|
||||
|
|
|
@ -907,7 +907,7 @@ class TDTestCase:
|
|||
|
||||
## {. . .}
|
||||
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)")
|
||||
tdSql.checkRows(13)
|
||||
tdSql.checkRows(12)
|
||||
tdSql.checkData(0, 0, 5)
|
||||
tdSql.checkData(1, 0, 5)
|
||||
tdSql.checkData(2, 0, 10)
|
||||
|
@ -920,7 +920,6 @@ class TDTestCase:
|
|||
tdSql.checkData(9, 0, 15)
|
||||
tdSql.checkData(10, 0, 15)
|
||||
tdSql.checkData(11, 0, 15)
|
||||
tdSql.checkData(12, 0, None)
|
||||
|
||||
## {} ...
|
||||
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:01', '2020-02-01 00:00:04') every(1s) fill(next)")
|
||||
|
@ -958,12 +957,10 @@ class TDTestCase:
|
|||
|
||||
## ..{.}
|
||||
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(next)")
|
||||
tdSql.checkRows(5)
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0, 0, 15)
|
||||
tdSql.checkData(1, 0, 15)
|
||||
tdSql.checkData(2, 0, 15)
|
||||
tdSql.checkData(3, 0, None)
|
||||
tdSql.checkData(4, 0, None)
|
||||
|
||||
## ... {}
|
||||
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(next)")
|
||||
|
@ -1275,7 +1272,7 @@ class TDTestCase:
|
|||
tdSql.checkData(8, 1, True)
|
||||
|
||||
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)")
|
||||
tdSql.checkRows(13)
|
||||
tdSql.checkRows(12)
|
||||
tdSql.checkCols(3)
|
||||
|
||||
tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
|
||||
|
@ -1290,7 +1287,6 @@ class TDTestCase:
|
|||
tdSql.checkData(9, 0, '2020-02-01 00:00:13.000')
|
||||
tdSql.checkData(10, 0, '2020-02-01 00:00:14.000')
|
||||
tdSql.checkData(11, 0, '2020-02-01 00:00:15.000')
|
||||
tdSql.checkData(12, 0, '2020-02-01 00:00:16.000')
|
||||
|
||||
tdSql.checkData(0, 1, True)
|
||||
tdSql.checkData(1, 1, False)
|
||||
|
@ -1304,7 +1300,6 @@ class TDTestCase:
|
|||
tdSql.checkData(9, 1, True)
|
||||
tdSql.checkData(10, 1, True)
|
||||
tdSql.checkData(11, 1, False)
|
||||
tdSql.checkData(12, 1, True)
|
||||
|
||||
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(next)")
|
||||
tdSql.checkRows(6)
|
||||
|
@ -1682,13 +1677,9 @@ class TDTestCase:
|
|||
|
||||
## | . | { | .} |
|
||||
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(next)")
|
||||
tdSql.checkRows(6)
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 0, 15)
|
||||
tdSql.checkData(1, 0, 15)
|
||||
tdSql.checkData(2, 0, None)
|
||||
tdSql.checkData(3, 0, None)
|
||||
tdSql.checkData(4, 0, None)
|
||||
tdSql.checkData(5, 0, None)
|
||||
|
||||
# test fill linear
|
||||
|
||||
|
@ -2741,7 +2732,7 @@ class TDTestCase:
|
|||
tdSql.checkData(4, i, 15)
|
||||
|
||||
tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(next)")
|
||||
tdSql.checkRows(5)
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkCols(4)
|
||||
|
||||
for i in range (tdSql.queryCols):
|
||||
|
@ -2837,7 +2828,7 @@ class TDTestCase:
|
|||
|
||||
# test fill next
|
||||
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname2} range('2020-02-02 00:00:00', '2020-02-02 00:00:18') every(1s) fill(next)")
|
||||
tdSql.checkRows(19)
|
||||
tdSql.checkRows(18)
|
||||
tdSql.checkCols(3)
|
||||
|
||||
tdSql.checkData(0, 0, '2020-02-02 00:00:00.000')
|
||||
|
@ -2860,7 +2851,6 @@ class TDTestCase:
|
|||
tdSql.checkData(15, 2, None)
|
||||
tdSql.checkData(16, 2, None)
|
||||
tdSql.checkData(17, 2, None)
|
||||
tdSql.checkData(18, 2, None)
|
||||
|
||||
tdSql.checkData(17, 0, '2020-02-02 00:00:17.000')
|
||||
|
||||
|
@ -3091,7 +3081,7 @@ class TDTestCase:
|
|||
|
||||
# test fill linear
|
||||
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname2} range('2020-02-02 00:00:00', '2020-02-02 00:00:18') every(1s) fill(linear)")
|
||||
tdSql.checkRows(18)
|
||||
tdSql.checkRows(17)
|
||||
tdSql.checkCols(3)
|
||||
|
||||
tdSql.checkData(0, 0, '2020-02-02 00:00:01.000')
|
||||
|
@ -3113,9 +3103,8 @@ class TDTestCase:
|
|||
tdSql.checkData(14, 2, None)
|
||||
tdSql.checkData(15, 2, None)
|
||||
tdSql.checkData(16, 2, None)
|
||||
tdSql.checkData(17, 2, None)
|
||||
|
||||
tdSql.checkData(17, 0, '2020-02-02 00:00:18.000')
|
||||
tdSql.checkData(16, 0, '2020-02-02 00:00:17.000')
|
||||
|
||||
tdLog.printNoPrefix("==========step13:test error cases")
|
||||
|
||||
|
@ -3231,7 +3220,7 @@ class TDTestCase:
|
|||
tdSql.checkData(17, 1, True)
|
||||
|
||||
tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)")
|
||||
tdSql.checkRows(19)
|
||||
tdSql.checkRows(18)
|
||||
|
||||
tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
|
||||
tdSql.checkData(0, 1, True)
|
||||
|
@ -3254,12 +3243,9 @@ class TDTestCase:
|
|||
tdSql.checkData(15, 2, 15)
|
||||
tdSql.checkData(16, 2, 17)
|
||||
tdSql.checkData(17, 2, 17)
|
||||
tdSql.checkData(18, 2, None)
|
||||
|
||||
tdSql.checkData(17, 0, '2020-02-01 00:00:17.000')
|
||||
tdSql.checkData(17, 1, False)
|
||||
tdSql.checkData(18, 0, '2020-02-01 00:00:18.000')
|
||||
tdSql.checkData(18, 1, True)
|
||||
|
||||
tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)")
|
||||
tdSql.checkRows(17)
|
||||
|
@ -3376,24 +3362,24 @@ class TDTestCase:
|
|||
|
||||
tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)")
|
||||
|
||||
tdSql.checkRows(57)
|
||||
for i in range(0, 19):
|
||||
tdSql.checkRows(48)
|
||||
for i in range(0, 14):
|
||||
tdSql.checkData(i, 0, 'ctb1')
|
||||
|
||||
for i in range(19, 38):
|
||||
for i in range(14, 30):
|
||||
tdSql.checkData(i, 0, 'ctb2')
|
||||
|
||||
for i in range(38, 57):
|
||||
for i in range(30, 48):
|
||||
tdSql.checkData(i, 0, 'ctb3')
|
||||
|
||||
tdSql.checkData(0, 1, '2020-02-01 00:00:00.000')
|
||||
tdSql.checkData(18, 1, '2020-02-01 00:00:18.000')
|
||||
tdSql.checkData(13, 1, '2020-02-01 00:00:13.000')
|
||||
|
||||
tdSql.checkData(19, 1, '2020-02-01 00:00:00.000')
|
||||
tdSql.checkData(37, 1, '2020-02-01 00:00:18.000')
|
||||
tdSql.checkData(14, 1, '2020-02-01 00:00:00.000')
|
||||
tdSql.checkData(29, 1, '2020-02-01 00:00:15.000')
|
||||
|
||||
tdSql.checkData(38, 1, '2020-02-01 00:00:00.000')
|
||||
tdSql.checkData(56, 1, '2020-02-01 00:00:18.000')
|
||||
tdSql.checkData(30, 1, '2020-02-01 00:00:00.000')
|
||||
tdSql.checkData(47, 1, '2020-02-01 00:00:17.000')
|
||||
|
||||
for i in range(0, 2):
|
||||
tdSql.checkData(i, 3, 1)
|
||||
|
@ -3404,33 +3390,24 @@ class TDTestCase:
|
|||
for i in range(8, 14):
|
||||
tdSql.checkData(i, 3, 13)
|
||||
|
||||
for i in range(14, 19):
|
||||
tdSql.checkData(i, 3, None)
|
||||
|
||||
for i in range(19, 23):
|
||||
for i in range(14, 18):
|
||||
tdSql.checkData(i, 3, 3)
|
||||
|
||||
for i in range(23, 29):
|
||||
for i in range(18, 24):
|
||||
tdSql.checkData(i, 3, 9)
|
||||
|
||||
for i in range(29, 35):
|
||||
for i in range(24, 30):
|
||||
tdSql.checkData(i, 3, 15)
|
||||
|
||||
for i in range(35, 38):
|
||||
tdSql.checkData(i, 3, None)
|
||||
|
||||
for i in range(38, 44):
|
||||
for i in range(30, 36):
|
||||
tdSql.checkData(i, 3, 5)
|
||||
|
||||
for i in range(44, 50):
|
||||
for i in range(36, 42):
|
||||
tdSql.checkData(i, 3, 11)
|
||||
|
||||
for i in range(50, 56):
|
||||
for i in range(42, 48):
|
||||
tdSql.checkData(i, 3, 17)
|
||||
|
||||
for i in range(56, 57):
|
||||
tdSql.checkData(i, 3, None)
|
||||
|
||||
tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)")
|
||||
|
||||
tdSql.checkRows(39)
|
||||
|
@ -3473,7 +3450,7 @@ class TDTestCase:
|
|||
tdSql.checkRows(90)
|
||||
|
||||
tdSql.query(f"select c0, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by c0 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)")
|
||||
tdSql.checkRows(171)
|
||||
tdSql.checkRows(90)
|
||||
|
||||
tdSql.query(f"select c0, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by c0 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)")
|
||||
tdSql.checkRows(9)
|
||||
|
@ -3490,7 +3467,7 @@ class TDTestCase:
|
|||
tdSql.checkRows(48)
|
||||
|
||||
tdSql.query(f"select t1, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by t1 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)")
|
||||
tdSql.checkRows(57)
|
||||
tdSql.checkRows(48)
|
||||
|
||||
tdSql.query(f"select t1, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by t1 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)")
|
||||
tdSql.checkRows(39)
|
||||
|
@ -4386,7 +4363,7 @@ class TDTestCase:
|
|||
|
||||
tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)")
|
||||
|
||||
tdSql.checkRows(11)
|
||||
tdSql.checkRows(9)
|
||||
tdSql.checkData(0, 1, False)
|
||||
tdSql.checkData(1, 1, True)
|
||||
tdSql.checkData(2, 1, False)
|
||||
|
@ -4396,8 +4373,6 @@ class TDTestCase:
|
|||
tdSql.checkData(6, 1, True)
|
||||
tdSql.checkData(7, 1, False)
|
||||
tdSql.checkData(8, 1, False)
|
||||
tdSql.checkData(9, 1, True)
|
||||
tdSql.checkData(10, 1, True)
|
||||
|
||||
tdSql.checkData(0, 2, 1)
|
||||
tdSql.checkData(1, 2, 3)
|
||||
|
@ -4408,13 +4383,11 @@ class TDTestCase:
|
|||
tdSql.checkData(6, 2, 8)
|
||||
tdSql.checkData(7, 2, 8)
|
||||
tdSql.checkData(8, 2, 9)
|
||||
tdSql.checkData(9, 2, None)
|
||||
tdSql.checkData(10, 2, None)
|
||||
|
||||
|
||||
tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} where c0 is not null range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)")
|
||||
|
||||
tdSql.checkRows(11)
|
||||
tdSql.checkRows(9)
|
||||
tdSql.checkData(0, 1, False)
|
||||
tdSql.checkData(1, 1, True)
|
||||
tdSql.checkData(2, 1, False)
|
||||
|
@ -4424,9 +4397,6 @@ class TDTestCase:
|
|||
tdSql.checkData(6, 1, True)
|
||||
tdSql.checkData(7, 1, False)
|
||||
tdSql.checkData(8, 1, False)
|
||||
tdSql.checkData(9, 1, True)
|
||||
tdSql.checkData(10, 1, True)
|
||||
|
||||
|
||||
tdSql.checkData(0, 2, 1)
|
||||
tdSql.checkData(1, 2, 3)
|
||||
|
@ -4437,8 +4407,6 @@ class TDTestCase:
|
|||
tdSql.checkData(6, 2, 8)
|
||||
tdSql.checkData(7, 2, 8)
|
||||
tdSql.checkData(8, 2, 9)
|
||||
tdSql.checkData(9, 2, None)
|
||||
tdSql.checkData(10, 2, None)
|
||||
|
||||
# super table
|
||||
tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
|
||||
|
@ -4475,7 +4443,7 @@ class TDTestCase:
|
|||
|
||||
tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
|
||||
|
||||
tdSql.checkRows(9)
|
||||
tdSql.checkRows(8)
|
||||
tdSql.checkData(0, 1, False)
|
||||
tdSql.checkData(1, 1, True)
|
||||
tdSql.checkData(2, 1, True)
|
||||
|
@ -4484,7 +4452,6 @@ class TDTestCase:
|
|||
tdSql.checkData(5, 1, True)
|
||||
tdSql.checkData(6, 1, False)
|
||||
tdSql.checkData(7, 1, False)
|
||||
tdSql.checkData(8, 1, True)
|
||||
|
||||
tdSql.checkData(0, 2, 1)
|
||||
tdSql.checkData(1, 2, 9)
|
||||
|
@ -4494,12 +4461,11 @@ class TDTestCase:
|
|||
tdSql.checkData(5, 2, 13)
|
||||
tdSql.checkData(6, 2, 13)
|
||||
tdSql.checkData(7, 2, 15)
|
||||
tdSql.checkData(8, 2, None)
|
||||
|
||||
|
||||
tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
|
||||
|
||||
tdSql.checkRows(9)
|
||||
tdSql.checkRows(8)
|
||||
tdSql.checkData(0, 1, False)
|
||||
tdSql.checkData(1, 1, True)
|
||||
tdSql.checkData(2, 1, True)
|
||||
|
@ -4508,7 +4474,6 @@ class TDTestCase:
|
|||
tdSql.checkData(5, 1, True)
|
||||
tdSql.checkData(6, 1, False)
|
||||
tdSql.checkData(7, 1, False)
|
||||
tdSql.checkData(8, 1, True)
|
||||
|
||||
tdSql.checkData(0, 2, 1)
|
||||
tdSql.checkData(1, 2, 9)
|
||||
|
@ -4518,37 +4483,36 @@ class TDTestCase:
|
|||
tdSql.checkData(5, 2, 13)
|
||||
tdSql.checkData(6, 2, 13)
|
||||
tdSql.checkData(7, 2, 15)
|
||||
tdSql.checkData(8, 2, None)
|
||||
|
||||
tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
|
||||
|
||||
tdSql.checkRows(18)
|
||||
for i in range(0, 9):
|
||||
tdSql.checkRows(15)
|
||||
for i in range(0, 7):
|
||||
tdSql.checkData(i, 0, 'ctb1_null')
|
||||
|
||||
for i in range(9, 18):
|
||||
for i in range(7, 15):
|
||||
tdSql.checkData(i, 0, 'ctb2_null')
|
||||
|
||||
tdSql.checkData(0, 1, '2020-02-01 00:00:01.000')
|
||||
tdSql.checkData(8, 1, '2020-02-01 00:00:17.000')
|
||||
tdSql.checkData(6, 1, '2020-02-01 00:00:13.000')
|
||||
|
||||
tdSql.checkData(9, 1, '2020-02-01 00:00:01.000')
|
||||
tdSql.checkData(17, 1, '2020-02-01 00:00:17.000')
|
||||
tdSql.checkData(7, 1, '2020-02-01 00:00:01.000')
|
||||
tdSql.checkData(14, 1, '2020-02-01 00:00:15.000')
|
||||
|
||||
tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
|
||||
|
||||
tdSql.checkRows(18)
|
||||
for i in range(0, 9):
|
||||
tdSql.checkRows(15)
|
||||
for i in range(0, 7):
|
||||
tdSql.checkData(i, 0, 'ctb1_null')
|
||||
|
||||
for i in range(9, 18):
|
||||
for i in range(7, 15):
|
||||
tdSql.checkData(i, 0, 'ctb2_null')
|
||||
|
||||
tdSql.checkData(0, 1, '2020-02-01 00:00:01.000')
|
||||
tdSql.checkData(8, 1, '2020-02-01 00:00:17.000')
|
||||
tdSql.checkData(6, 1, '2020-02-01 00:00:13.000')
|
||||
|
||||
tdSql.checkData(9, 1, '2020-02-01 00:00:01.000')
|
||||
tdSql.checkData(17, 1, '2020-02-01 00:00:17.000')
|
||||
tdSql.checkData(7, 1, '2020-02-01 00:00:01.000')
|
||||
tdSql.checkData(14, 1, '2020-02-01 00:00:15.000')
|
||||
|
||||
# fill linear
|
||||
# normal table
|
||||
|
|
|
@ -0,0 +1,132 @@
|
|||
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.common import tdCom
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.dbname = "db"
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def notConditionTest(self):
|
||||
dbname = "nottest"
|
||||
stbname = "st1"
|
||||
|
||||
tdsql = tdCom.newTdSql()
|
||||
tdsql.execute(f"create database if not exists {dbname}")
|
||||
|
||||
stype = ["INT", "INT UNSIGNED", "BIGINT", "BIGINT UNSIGNED", "DOUBLE", "FLOAT", "SMALLINT", "SMALLINT UNSIGNED", "TINYINT", "TINYINT UNSIGNED"]
|
||||
|
||||
for type_name in stype:
|
||||
tdsql.execute(f"drop table if exists {dbname}.{stbname}")
|
||||
tdsql.execute(f"create table if not exists {dbname}.{stbname} (ts timestamp, v1 {type_name}) tags(t1 {type_name})")
|
||||
tdsql.execute(f"insert into {dbname}.sub_1 using {dbname}.{stbname} tags(1) values({self.ts}, 10)")
|
||||
tdsql.execute(f"insert into {dbname}.sub_2 using {dbname}.{stbname} tags(2) values({self.ts + 1000}, 20)")
|
||||
tdsql.execute(f"insert into {dbname}.sub_3 using {dbname}.{stbname} tags(3) values({self.ts + 2000}, 30)")
|
||||
|
||||
# Test case 1: NOT IN
|
||||
tdsql.query(f"select t1, * from {dbname}.{stbname} where t1 not in (1, 2) order by t1")
|
||||
tdsql.checkRows(1)
|
||||
tdsql.checkData(0, 0, 3)
|
||||
|
||||
# Test case 2: NOT BETWEEN
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where v1 not between 10 and 20 order by t1")
|
||||
tdsql.checkRows(1)
|
||||
tdsql.checkData(0, 1, 30)
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not(v1 not between 10 and 20) order by t1")
|
||||
tdsql.checkRows(2)
|
||||
|
||||
# Test case 4: NOT EQUAL
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where v1 != 20 order by t1")
|
||||
tdsql.checkRows(2)
|
||||
tdsql.checkData(0, 1, 10)
|
||||
tdsql.checkData(1, 1, 30)
|
||||
|
||||
# Test case 8: NOT (v1 < 20 OR v1 > 30)
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not (v1 < 20 or v1 > 30) order by t1")
|
||||
tdsql.checkRows(2)
|
||||
tdsql.checkData(0, 1, 20)
|
||||
tdsql.checkData(1, 1, 30)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not (v1 < 20 or v1 >= 30) order by t1")
|
||||
tdsql.checkRows(1)
|
||||
|
||||
# Test case 9: NOT (t1 != 1)
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 != 1) order by t1")
|
||||
tdsql.checkRows(1)
|
||||
tdsql.checkData(0, 1, 10)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where (t1 != 1) or not (v1 == 20) order by t1")
|
||||
tdsql.checkRows(3)
|
||||
tdsql.checkData(0, 1, 10)
|
||||
tdsql.checkData(1, 1, 20)
|
||||
tdsql.checkData(2, 1, 30)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not((t1 != 1) or not (v1 == 20)) order by t1")
|
||||
tdsql.checkRows(0)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 != 1) and not (v1 != 20) order by t1")
|
||||
tdsql.checkRows(0)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 != 1) and not (v1 != 20)) order by t1")
|
||||
tdsql.checkRows(3)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 != 1) and not (v1 != 10) order by t1")
|
||||
tdsql.checkRows(1)
|
||||
tdsql.checkData(0, 1, 10)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 > 2) order by t1")
|
||||
tdsql.checkRows(2)
|
||||
tdsql.checkData(0, 1, 10)
|
||||
tdsql.checkData(1, 1, 20)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 == 2) order by t1")
|
||||
tdsql.checkRows(2)
|
||||
tdsql.checkData(0, 1, 10)
|
||||
tdsql.checkData(1, 1, 30)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not (v1 > 10 and v1 < 30) order by t1")
|
||||
tdsql.checkRows(2)
|
||||
tdsql.checkData(0, 1, 10)
|
||||
tdsql.checkData(1, 1, 30)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 > 30)) order by t1")
|
||||
tdsql.checkRows(1)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 >= 30)) order by t1")
|
||||
tdsql.checkRows(2)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 != 1)) order by t1")
|
||||
tdsql.checkRows(2)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 > 2)) order by t1")
|
||||
tdsql.checkRows(1)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 == 2)) order by t1")
|
||||
tdsql.checkRows(1)
|
||||
|
||||
tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 > 10 and v1 < 30)) order by t1")
|
||||
tdsql.checkRows(1)
|
||||
|
||||
def run(self):
|
||||
dbname = "db"
|
||||
tdSql.prepare()
|
||||
|
||||
self.notConditionTest()
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,66 @@
|
|||
import random
|
||||
import string
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.common import *
|
||||
from util.sqlset import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'slowLogThresholdTest': ''}
|
||||
updatecfgDict["slowLogThresholdTest"] = 0
|
||||
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
def getPath(self, tool="taosBenchmark"):
|
||||
if (platform.system().lower() == 'windows'):
|
||||
tool = tool + ".exe"
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
paths = []
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ((tool) in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
paths.append(os.path.join(root, tool))
|
||||
break
|
||||
if (len(paths) == 0):
|
||||
tdLog.exit("taosBenchmark not found!")
|
||||
return
|
||||
else:
|
||||
tdLog.info("taosBenchmark found in %s" % paths[0])
|
||||
return paths[0]
|
||||
|
||||
def taosBenchmark(self, param):
|
||||
binPath = self.getPath()
|
||||
cmd = f"{binPath} {param}"
|
||||
tdLog.info(cmd)
|
||||
os.system(cmd)
|
||||
|
||||
def testSlowQuery(self):
|
||||
self.taosBenchmark(" -d db -t 2 -v 2 -n 1000000 -y")
|
||||
sql = "select count(*) from db.meters"
|
||||
for i in range(10):
|
||||
tdSql.query(sql)
|
||||
tdSql.checkData(0, 0, 2 * 1000000)
|
||||
|
||||
def run(self):
|
||||
self.testSlowQuery()
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -105,6 +105,10 @@ class TDTestCase:
|
|||
tdSql.query(f"select * from ts3724.`stb2.`")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
tdSql.query(f"select * from ts5528.device_log_yuelan_cs1")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, '{"deviceId":"星宇公司-861701069493741","headers":{"_uid":"4e3599eacd62834995c77b38ad95f88d","creatorId":"1199596756811550720","deviceNmae":"861701069493741","productId":"yuelan","productName":"悦蓝cat1穿戴设备"},"messageType":"REPORT_PROPERTY","properties":{"lat":35.265527067449185,"lng":118.49713144245987,"location":"118.49713144245987,35.265527067449185"},"timestamp":1728719963230}')
|
||||
tdSql.checkData(1, 1, '{"deviceId":"星宇公司-861701069065507","headers":{"_uid":"9045d6b78b4ffaf1e2d244e912ebbff8","creatorId":"1199596756811550720","deviceNmae":"861701069065507","productId":"yuelan","productName":"悦蓝cat1穿戴设备"},"messageType":"REPORT_PROPERTY","properties":{"lat":36.788241914043425,"lng":119.15042325460891,"location":"119.15042325460891,36.788241914043425"},"timestamp":1728719964105}')
|
||||
# tdSql.query(f"select * from td24559.stb order by _ts")
|
||||
# tdSql.checkRows(4)
|
||||
# tdSql.checkData(0, 2, "POINT (4.343000 89.342000)")
|
||||
|
|
|
@ -24,45 +24,6 @@ class TDTestCase:
|
|||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
|
||||
|
||||
# def consume_TS_4674_Test(self):
|
||||
#
|
||||
# os.system("nohup taosBenchmark -y -B 1 -t 4 -S 1000 -n 1000000 -i 1000 -v 1 -a 3 > /dev/null 2>&1 &")
|
||||
# time.sleep()
|
||||
# tdSql.execute(f'create topic topic_all with meta as database test')
|
||||
# consumer_dict = {
|
||||
# "group.id": "g1",
|
||||
# "td.connect.user": "root",
|
||||
# "td.connect.pass": "taosdata",
|
||||
# "auto.offset.reset": "earliest",
|
||||
# }
|
||||
# consumer = Consumer(consumer_dict)
|
||||
#
|
||||
# try:
|
||||
# consumer.subscribe(["topic_all"])
|
||||
# except TmqError:
|
||||
# tdLog.exit(f"subscribe error")
|
||||
#
|
||||
# try:
|
||||
# while True:
|
||||
# res = consumer.poll(5)
|
||||
# if not res:
|
||||
# print(f"null")
|
||||
# continue
|
||||
# val = res.value()
|
||||
# if val is None:
|
||||
# print(f"null")
|
||||
# continue
|
||||
# cnt = 0;
|
||||
# for block in val:
|
||||
# cnt += len(block.fetchall())
|
||||
#
|
||||
# print(f"block {cnt} rows")
|
||||
#
|
||||
# finally:
|
||||
# consumer.close()
|
||||
|
||||
def get_leader(self):
|
||||
tdLog.debug("get leader")
|
||||
tdSql.query("show vnodes")
|
||||
|
@ -74,19 +35,20 @@ class TDTestCase:
|
|||
|
||||
def balance_vnode(self):
|
||||
leader_before = self.get_leader()
|
||||
|
||||
while True:
|
||||
leader_after = -1
|
||||
tdSql.query("balance vgroup leader")
|
||||
while True:
|
||||
leader_after = -1
|
||||
tdLog.debug("balancing vgroup leader")
|
||||
while True:
|
||||
tdLog.debug("get new vgroup leader")
|
||||
leader_after = self.get_leader()
|
||||
if leader_after != -1 :
|
||||
break;
|
||||
break
|
||||
else:
|
||||
time.sleep(1)
|
||||
if leader_after != leader_before:
|
||||
tdLog.debug("leader changed")
|
||||
break;
|
||||
break
|
||||
else :
|
||||
time.sleep(1)
|
||||
|
||||
|
@ -115,7 +77,7 @@ class TDTestCase:
|
|||
except TmqError:
|
||||
tdLog.exit(f"subscribe error")
|
||||
|
||||
cnt = 0;
|
||||
cnt = 0
|
||||
balance = False
|
||||
try:
|
||||
while True:
|
||||
|
|
|
@ -92,7 +92,7 @@ class TDTestCase:
|
|||
def run(self):
|
||||
for fill_history_value in [None, 1]:
|
||||
for watermark in [None, random.randint(20, 30)]:
|
||||
self.watermark_max_delay_session(session=random.randint(10, 15), watermark=watermark, max_delay=f"{random.randint(1, 3)}s", fill_history_value=fill_history_value)
|
||||
self.watermark_max_delay_session(session=random.randint(10, 15), watermark=watermark, max_delay=f"{random.randint(5, 8)}s", fill_history_value=fill_history_value)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
|
|
|
@ -245,6 +245,8 @@ python3 ./test.py -f 2-query/min.py -P
|
|||
python3 ./test.py -f 2-query/min.py -P -R
|
||||
python3 ./test.py -f 2-query/normal.py -P
|
||||
python3 ./test.py -f 2-query/normal.py -P -R
|
||||
python3 ./test.py -f 2-query/not.py -P
|
||||
python3 ./test.py -f 2-query/not.py -P -R
|
||||
python3 ./test.py -f 2-query/mode.py -P
|
||||
python3 ./test.py -f 2-query/mode.py -P -R
|
||||
python3 ./test.py -f 2-query/Now.py -P
|
||||
|
@ -427,6 +429,7 @@ python3 ./test.py -f 2-query/Today.py -P -Q 2
|
|||
python3 ./test.py -f 2-query/max.py -P -Q 2
|
||||
python3 ./test.py -f 2-query/min.py -P -Q 2
|
||||
python3 ./test.py -f 2-query/normal.py -P -Q 2
|
||||
python3 ./test.py -f 2-query/not.py -P -Q 2
|
||||
python3 ./test.py -f 2-query/mode.py -P -Q 2
|
||||
python3 ./test.py -f 2-query/count.py -P -Q 2
|
||||
python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 2
|
||||
|
@ -526,6 +529,7 @@ python3 ./test.py -f 2-query/Today.py -P -Q 3
|
|||
python3 ./test.py -f 2-query/max.py -P -Q 3
|
||||
python3 ./test.py -f 2-query/min.py -P -Q 3
|
||||
python3 ./test.py -f 2-query/normal.py -P -Q 3
|
||||
python3 ./test.py -f 2-query/not.py -P -Q 3
|
||||
python3 ./test.py -f 2-query/mode.py -P -Q 3
|
||||
python3 ./test.py -f 2-query/count.py -P -Q 3
|
||||
python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 3
|
||||
|
@ -624,6 +628,7 @@ python3 ./test.py -f 2-query/Today.py -P -Q 4
|
|||
python3 ./test.py -f 2-query/max.py -P -Q 4
|
||||
python3 ./test.py -f 2-query/min.py -P -Q 4
|
||||
python3 ./test.py -f 2-query/normal.py -P -Q 4
|
||||
python3 ./test.py -f 2-query/not.py -P -Q 4
|
||||
python3 ./test.py -f 2-query/mode.py -P -Q 4
|
||||
python3 ./test.py -f 2-query/count.py -P -Q 4
|
||||
python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 4
|
||||
|
|
|
@ -925,3 +925,4 @@ python3 ./test.py -f 99-TDcase/TD-20582.py
|
|||
python3 ./test.py -f 5-taos-tools/taosbenchmark/insertMix.py -N 3
|
||||
python3 ./test.py -f 5-taos-tools/taosbenchmark/stt.py -N 3
|
||||
python3 ./test.py -f eco-system/meta/database/keep_time_offset.py
|
||||
python3 ./test.py -f 2-query/slow_query_basic.py
|
||||
|
|
|
@ -8,15 +8,14 @@
|
|||
"confirm_parameter_prompt": "no",
|
||||
"continue_if_fail": "yes",
|
||||
"databases": "dbrate",
|
||||
"query_times": 20,
|
||||
"query_times": 5,
|
||||
"query_mode": "taosc",
|
||||
"specified_table_query": {
|
||||
"query_interval": 0,
|
||||
"concurrent": 10,
|
||||
"threads": 10,
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select count(*) from meters",
|
||||
"result": "./query_result.txt"
|
||||
"sql": "select count(*) from meters"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -16,18 +16,20 @@
|
|||
{
|
||||
"dbinfo": {
|
||||
"name": "dbrate",
|
||||
"vgroups": 1,
|
||||
"drop": "yes",
|
||||
"vgroups": 2
|
||||
"wal_retention_size": 1,
|
||||
"wal_retention_period": 1
|
||||
},
|
||||
"super_tables": [
|
||||
{
|
||||
"name": "meters",
|
||||
"child_table_exists": "no",
|
||||
"childtable_count": 10,
|
||||
"childtable_count": 1,
|
||||
"childtable_prefix": "d",
|
||||
"insert_mode": "@STMT_MODE",
|
||||
"interlace_rows": @INTERLACE_MODE,
|
||||
"insert_rows": 100000,
|
||||
"insert_rows": 10000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-10-01 00:00:00.000",
|
||||
"auto_create_table": "no",
|
||||
|
|
|
@ -34,28 +34,6 @@ def exec(command, show=True):
|
|||
print(f"exec {command}\n")
|
||||
return os.system(command)
|
||||
|
||||
# run return output and error
|
||||
def run(command, timeout = 60, show=True):
|
||||
if(show):
|
||||
print(f"run {command} timeout={timeout}s\n")
|
||||
|
||||
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
process.wait(timeout)
|
||||
|
||||
output = process.stdout.read().decode(encoding="gbk")
|
||||
error = process.stderr.read().decode(encoding="gbk")
|
||||
|
||||
return output, error
|
||||
|
||||
# return list after run
|
||||
def runRetList(command, timeout=10, first=True):
|
||||
output,error = run(command, timeout)
|
||||
if first:
|
||||
return output.splitlines()
|
||||
else:
|
||||
return error.splitlines()
|
||||
|
||||
|
||||
def readFileContext(filename):
|
||||
file = open(filename)
|
||||
context = file.read()
|
||||
|
@ -78,6 +56,27 @@ def appendFileContext(filename, context):
|
|||
except:
|
||||
print(f"appand file error context={context} .")
|
||||
|
||||
# run return output and error
|
||||
def run(command, show=True):
|
||||
# out to file
|
||||
out = "out.txt"
|
||||
err = "err.txt"
|
||||
ret = exec(command + f" 1>{out} 2>{err}", True)
|
||||
|
||||
# read from file
|
||||
output = readFileContext(out)
|
||||
error = readFileContext(err)
|
||||
|
||||
return output, error
|
||||
|
||||
# return list after run
|
||||
def runRetList(command, first=True):
|
||||
output,error = run(command)
|
||||
if first:
|
||||
return output.splitlines()
|
||||
else:
|
||||
return error.splitlines()
|
||||
|
||||
def getFolderSize(folder):
|
||||
total_size = 0
|
||||
for dirpath, dirnames, filenames in os.walk(folder):
|
||||
|
@ -134,8 +133,6 @@ def getMatch(datatype, algo):
|
|||
|
||||
|
||||
def generateJsonFile(stmt, interlace):
|
||||
print(f"doTest stmt: {stmt} interlace_rows={interlace}\n")
|
||||
|
||||
# replace datatype
|
||||
context = readFileContext(templateFile)
|
||||
# replace compress
|
||||
|
@ -202,12 +199,23 @@ def writeTemplateInfo(resultFile):
|
|||
vgroups = findContextValue(context, "vgroups")
|
||||
childCount = findContextValue(context, "childtable_count")
|
||||
insertRows = findContextValue(context, "insert_rows")
|
||||
line = f"vgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\n\n"
|
||||
bindVGroup = findContextValue(context, "thread_bind_vgroup")
|
||||
nThread = findContextValue(context, "thread_count")
|
||||
batch = findContextValue(context, "num_of_records_per_req")
|
||||
|
||||
if bindVGroup.lower().find("yes") != -1:
|
||||
nThread = vgroups
|
||||
line = f"thread_bind_vgroup = {bindVGroup}\n"
|
||||
line += f"vgroups = {vgroups}\n"
|
||||
line += f"childtable_count = {childCount}\n"
|
||||
line += f"insert_rows = {insertRows}\n"
|
||||
line += f"insertThreads = {nThread}\n"
|
||||
line += f"batchSize = {batch}\n\n"
|
||||
print(line)
|
||||
appendFileContext(resultFile, line)
|
||||
|
||||
|
||||
def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed):
|
||||
def totalCompressRate(stmt, interlace, resultFile, spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max, querySpeed):
|
||||
global Number
|
||||
# flush
|
||||
command = 'taos -s "flush database dbrate;"'
|
||||
|
@ -220,7 +228,7 @@ def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed):
|
|||
# read compress rate
|
||||
command = 'taos -s "show table distributed dbrate.meters\G;"'
|
||||
rets = runRetList(command)
|
||||
print(rets)
|
||||
#print(rets)
|
||||
|
||||
str1 = rets[5]
|
||||
arr = str1.split(" ")
|
||||
|
@ -234,7 +242,6 @@ def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed):
|
|||
str2 = arr[6]
|
||||
pos = str2.find("=[")
|
||||
rate = str2[pos+2:]
|
||||
print("rate =" + rate)
|
||||
|
||||
# total data file size
|
||||
#dataSize = getFolderSize(f"{dataDir}/vnode/")
|
||||
|
@ -242,42 +249,79 @@ def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed):
|
|||
|
||||
# appand to file
|
||||
|
||||
# %("No", "stmtMode", "interlaceRows", "spent", "spent-real", "writeSpeed", "write-real", "query-QPS", "dataSize", "rate")
|
||||
Number += 1
|
||||
context = "%10s %10s %15s %10s %10s %30s %15s\n"%( Number, stmt, interlace, str(totalSize)+" MB", rate+"%", writeSpeed + " Records/second", querySpeed)
|
||||
context = "%2s %8s %10s %10s %16s %16s %12s %12s %12s %12s %12s %12s %10s %10s %10s\n"%(
|
||||
Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + " r/s", writeReal + " r/s",
|
||||
min, avg, p90, p99, max + "ms",
|
||||
querySpeed, str(totalSize) + " MB", rate + "%")
|
||||
|
||||
showLog(context)
|
||||
appendFileContext(resultFile, context)
|
||||
|
||||
def cutEnd(line, start, endChar):
|
||||
pos = line.find(endChar, start)
|
||||
if pos == -1:
|
||||
return line[start:]
|
||||
return line[start : pos]
|
||||
|
||||
def findValue(context, pos, key, endChar,command):
|
||||
pos = context.find(key, pos)
|
||||
if pos == -1:
|
||||
print(f"error, run command={command} output not found \"{key}\" keyword. context={context}")
|
||||
exit(1)
|
||||
pos += len(key)
|
||||
value = cutEnd(context, pos, endChar)
|
||||
return (value, pos)
|
||||
|
||||
def testWrite(jsonFile):
|
||||
command = f"taosBenchmark -f {jsonFile}"
|
||||
output, context = run(command, 60000)
|
||||
print(context)
|
||||
|
||||
# SUCC: Spent 0.960248 (real 0.947154) seconds to insert rows: 100000 with 1 thread(s) into dbrate 104139.76 (real 105579.45) records/second
|
||||
|
||||
# find second real
|
||||
pos = context.find("(real ")
|
||||
# spent
|
||||
key = "Spent "
|
||||
pos = -1
|
||||
pos1 = 0
|
||||
while pos1 != -1: # find last "Spent "
|
||||
pos1 = context.find(key, pos1)
|
||||
if pos1 != -1:
|
||||
pos = pos1 # update last found
|
||||
pos1 += len(key)
|
||||
if pos == -1:
|
||||
print(f"error, run command={command} output not found first \"(real\" keyword. error={context}")
|
||||
print(f"error, run command={command} output not found \"{key}\" keyword. context={context}")
|
||||
exit(1)
|
||||
pos = context.find("(real ", pos + 5)
|
||||
pos += len(key)
|
||||
spent = cutEnd(context, pos, ".")
|
||||
|
||||
# spent-real
|
||||
spentReal, pos = findValue(context, pos, "(real ", ".", command)
|
||||
|
||||
# writeSpeed
|
||||
key = "into "
|
||||
pos = context.find(key, pos)
|
||||
if pos == -1:
|
||||
print(f"error, run command={command} output not found second \"(real\" keyword. error={context}")
|
||||
print(f"error, run command={command} output not found \"{key}\" keyword. context={context}")
|
||||
exit(1)
|
||||
pos += len(key)
|
||||
writeSpeed, pos = findValue(context, pos, " ", ".", command)
|
||||
# writeReal
|
||||
writeReal, pos = findValue(context, pos, "(real ", ".", command)
|
||||
|
||||
pos += 5
|
||||
length = len(context)
|
||||
while pos < length and context[pos] == ' ':
|
||||
pos += 1
|
||||
end = context.find(".", pos)
|
||||
if end == -1:
|
||||
print(f"error, run command={command} output not found second \".\" keyword. error={context}")
|
||||
exit(1)
|
||||
# delay
|
||||
min, pos = findValue(context, pos, "min: ", ",", command)
|
||||
avg, pos = findValue(context, pos, "avg: ", ",", command)
|
||||
p90, pos = findValue(context, pos, "p90: ", ",", command)
|
||||
p99, pos = findValue(context, pos, "p99: ", ",", command)
|
||||
max, pos = findValue(context, pos, "max: ", "ms", command)
|
||||
|
||||
speed = context[pos: end]
|
||||
#print(f"write pos ={pos} end={end} speed={speed}\n output={context} \n")
|
||||
return speed
|
||||
return (spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max)
|
||||
|
||||
def testQuery():
|
||||
command = f"taosBenchmark -f json/query.json"
|
||||
lines = runRetList(command, 60000)
|
||||
lines = runRetList(command)
|
||||
# INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485
|
||||
speed = None
|
||||
|
||||
|
@ -308,13 +352,13 @@ def doTest(stmt, interlace, resultFile):
|
|||
|
||||
# run taosBenchmark
|
||||
t1 = time.time()
|
||||
writeSpeed = testWrite(jsonFile)
|
||||
spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max = testWrite(jsonFile)
|
||||
t2 = time.time()
|
||||
# total write speed
|
||||
querySpeed = testQuery()
|
||||
|
||||
# total compress rate
|
||||
totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed)
|
||||
totalCompressRate(stmt, interlace, resultFile, spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max, querySpeed)
|
||||
|
||||
|
||||
def main():
|
||||
|
@ -333,7 +377,17 @@ def main():
|
|||
# json info
|
||||
writeTemplateInfo(resultFile)
|
||||
# head
|
||||
context = "\n%10s %10s %15s %10s %10s %30s %15s\n"%("No", "stmtMode", "interlaceRows", "dataSize", "rate", "writeSpeed", "query-QPS")
|
||||
'''
|
||||
context = "%3s %8s %10s %10s %10s %15s %15s %10s %10s %10s %10s %10s %8s %8s %8s\n"%(
|
||||
"No", "stmtMode", "interlace", "spent", "spent-real", "writeSpeed", "write-real",
|
||||
"min", "avg", "p90", "p99", "max",
|
||||
"query-QPS", "dataSize", "rate")
|
||||
'''
|
||||
context = "%2s %8s %10s %10s %16s %16s %12s %12s %12s %12s %12s %12s %10s %10s %10s\n"%(
|
||||
"No", "stmtMode", "interlace", "spent", "spent-real", "writeSpeed", "write-real",
|
||||
"min", "avg", "p90", "p99", "max",
|
||||
"query-QPS", "dataSize", "rate")
|
||||
|
||||
appendFileContext(resultFile, context)
|
||||
|
||||
|
||||
|
|
|
@ -8,15 +8,14 @@
|
|||
"confirm_parameter_prompt": "no",
|
||||
"continue_if_fail": "yes",
|
||||
"databases": "dbrate",
|
||||
"query_times": 20,
|
||||
"query_times": 5,
|
||||
"query_mode": "taosc",
|
||||
"specified_table_query": {
|
||||
"query_interval": 0,
|
||||
"concurrent": 10,
|
||||
"threads": 10,
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select * from meters",
|
||||
"result": "./query_res0.txt"
|
||||
"sql": "select * from meters"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -34,28 +34,6 @@ def exec(command, show=True):
|
|||
print(f"exec {command}\n")
|
||||
return os.system(command)
|
||||
|
||||
# run return output and error
|
||||
def run(command, timeout = 60, show=True):
|
||||
if(show):
|
||||
print(f"run {command} timeout={timeout}s\n")
|
||||
|
||||
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
process.wait(timeout)
|
||||
|
||||
output = process.stdout.read().decode(encoding="gbk")
|
||||
error = process.stderr.read().decode(encoding="gbk")
|
||||
|
||||
return output, error
|
||||
|
||||
# return list after run
|
||||
def runRetList(command, timeout=10, first=True):
|
||||
output,error = run(command, timeout)
|
||||
if first:
|
||||
return output.splitlines()
|
||||
else:
|
||||
return error.splitlines()
|
||||
|
||||
|
||||
def readFileContext(filename):
|
||||
file = open(filename)
|
||||
context = file.read()
|
||||
|
@ -78,6 +56,27 @@ def appendFileContext(filename, context):
|
|||
except:
|
||||
print(f"appand file error context={context} .")
|
||||
|
||||
# run return output and error
|
||||
def run(command, show=True):
|
||||
# out to file
|
||||
out = "out.txt"
|
||||
err = "err.txt"
|
||||
ret = exec(command + f" 1>{out} 2>{err}", True)
|
||||
|
||||
# read from file
|
||||
output = readFileContext(out)
|
||||
error = readFileContext(err)
|
||||
|
||||
return output, error
|
||||
|
||||
# return list after run
|
||||
def runRetList(command, first=True):
|
||||
output,error = run(command)
|
||||
if first:
|
||||
return output.splitlines()
|
||||
else:
|
||||
return error.splitlines()
|
||||
|
||||
def getFolderSize(folder):
|
||||
total_size = 0
|
||||
for dirpath, dirnames, filenames in os.walk(folder):
|
||||
|
@ -134,8 +133,6 @@ def getMatch(datatype, algo):
|
|||
|
||||
|
||||
def generateJsonFile(algo):
|
||||
print(f"doTest algo: {algo} \n")
|
||||
|
||||
# replace datatype
|
||||
context = readFileContext(templateFile)
|
||||
# replace compress
|
||||
|
@ -192,30 +189,34 @@ def findContextValue(context, label):
ends = [',','}',']', 0]
while context[end] not in ends:
end += 1

print(f"start = {start} end={end}\n")
return context[start:end]


def writeTemplateInfo(resultFile):
# create info
context = readFileContext(templateFile)
dbname = findContextValue(context, "name")
vgroups = findContextValue(context, "vgroups")
childCount = findContextValue(context, "childtable_count")
insertRows = findContextValue(context, "insert_rows")
line = f"vgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\n\n"
print(line)
appendFileContext(resultFile, line)
return dbname


def totalCompressRate(algo, resultFile, writeSpeed, querySpeed):
global Number
# flush
loop = 30

while loop > 0:
loop -= 1

# flush database
command = 'taos -s "flush database dbrate;"'
rets = exec(command)
command = 'taos -s "compact database dbrate;"'
rets = exec(command)
waitCompactFinish(60)
exec(command)
time.sleep(1)

# read compress rate
command = 'taos -s "show table distributed dbrate.meters\G;"'

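The loop above keeps flushing and compacting dbrate until `show table distributed` reports a non-zero compression ratio. A simplified standalone sketch of the same retry pattern (os.system stands in for the script's exec helper, and the fixed sleep stands in for waitCompactFinish):

    import os, time

    # sketch of the flush/compact retry pattern used above
    for _ in range(30):
        os.system('taos -s "flush database dbrate;"')    # requires the taos CLI
        os.system('taos -s "compact database dbrate;"')
        time.sleep(1)                                    # waitCompactFinish(60) in the real script
        # ... re-read the compression ratio here and break once it is no longer "0.00"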
@ -235,13 +236,14 @@ def totalCompressRate(algo, resultFile, writeSpeed, querySpeed):
pos = str2.find("=[")
rate = str2[pos+2:]
print("rate =" + rate)
if rate != "0.00":
break

# total data file size
#dataSize = getFolderSize(f"{dataDir}/vnode/")
#dataSizeMB = int(dataSize/1024/1024)

# appand to file

Number += 1
context = "%10s %10s %10s %10s %30s %15s\n"%( Number, algo, str(totalSize)+" MB", rate+"%", writeSpeed + " Records/second", querySpeed)
showLog(context)

@ -273,18 +275,22 @@ def testWrite(jsonFile):

speed = context[pos: end]
#print(f"write pos ={pos} end={end} speed={speed}\n output={context} \n")

# flush database
command = 'taos -s "flush database dbrate;"'
exec(command)
return speed

def testQuery():
command = f"taosBenchmark -f json/query.json"
lines = runRetList(command, 60000)
lines = runRetList(command)
# INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485
speed = None

for i in range(20, len(lines)):
for i in range(0, len(lines)):
# find second real
context = lines[i]
pos = context.find("the QPS of all threads:")
context = lines[26]
if pos == -1 :
continue
pos += 24

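The rewritten scan searches every output line for the "the QPS of all threads:" marker instead of assuming it sits at a fixed index. A minimal parsing sketch, assuming output lines like the INFO example shown in the hunk:

    # sketch: extract the QPS value from taosBenchmark output lines
    def parse_qps(lines):
        marker = "the QPS of all threads:"
        for line in lines:
            pos = line.find(marker)
            if pos != -1:
                return line[pos + len(marker):].strip()   # e.g. "1.485"
        return None

    sample = "INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485"
    print(parse_qps([sample]))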
@ -300,8 +306,6 @@ def testQuery():

def doTest(algo, resultFile):
print(f"doTest algo: {algo} \n")
#cleanAndStartTaosd()


# json
jsonFile = generateJsonFile(algo)

@ -2098,12 +2098,46 @@ int sml_td29373_Test() {
return code;
}

int sml_ts5528_test(){
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);

TAOS_RES *pRes = taos_query(taos, "drop database if exists ts5528");
taos_free_result(pRes);

pRes = taos_query(taos, "create database if not exists ts5528");
taos_free_result(pRes);

// check column name duplication
char *sql[] = {
"device_log_yuelan_cs1,deviceId=861701069493741 content=\"{\\\"deviceId\\\":\\\"星宇公司-861701069493741\\\",\\\"headers\\\":{\\\"_uid\\\":\\\"4e3599eacd62834995c77b38ad95f88d\\\",\\\"creatorId\\\":\\\"1199596756811550720\\\",\\\"deviceNmae\\\":\\\"861701069493741\\\",\\\"productId\\\":\\\"yuelan\\\",\\\"productName\\\":\\\"悦蓝cat1穿戴设备\\\"},\\\"messageType\\\":\\\"REPORT_PROPERTY\\\",\\\"properties\\\":{\\\"lat\\\":35.265527067449185,\\\"lng\\\":118.49713144245987,\\\"location\\\":\\\"118.49713144245987,35.265527067449185\\\"},\\\"timestamp\\\":1728719963230}\",createTime=1728719963230i64,id=\"4e3599eacd62834995c77b38ad95f88d\",messageId=\"\",timestamp=1728719963230i64,type=\"reportProperty\" 1728719963230",
"device_log_yuelan_cs1,deviceId=861701069065507 content=\"{\\\"deviceId\\\":\\\"星宇公司-861701069065507\\\",\\\"headers\\\":{\\\"_uid\\\":\\\"9045d6b78b4ffaf1e2d244e912ebbff8\\\",\\\"creatorId\\\":\\\"1199596756811550720\\\",\\\"deviceNmae\\\":\\\"861701069065507\\\",\\\"productId\\\":\\\"yuelan\\\",\\\"productName\\\":\\\"悦蓝cat1穿戴设备\\\"},\\\"messageType\\\":\\\"REPORT_PROPERTY\\\",\\\"properties\\\":{\\\"lat\\\":36.788241914043425,\\\"lng\\\":119.15042325460891,\\\"location\\\":\\\"119.15042325460891,36.788241914043425\\\"},\\\"timestamp\\\":1728719964105}\",createTime=1728719964105i64,id=\"9045d6b78b4ffaf1e2d244e912ebbff8\",messageId=\"\",timestamp=1728719964105i64,type=\"reportProperty\" 1728719964105",
};
pRes = taos_query(taos, "use ts5528");
taos_free_result(pRes);

for( int i = 0; i < 2; i++){
int32_t totalRows = 0;
pRes = taos_schemaless_insert_raw(taos, sql[i], strlen(sql[i]), &totalRows, TSDB_SML_LINE_PROTOCOL,
TSDB_SML_TIMESTAMP_MILLI_SECONDS);
int code = taos_errno(pRes);
taos_free_result(pRes);
if (code != 0) {
taos_close(taos);
return code;
}
}
taos_close(taos);
printf("%s result success\n", __FUNCTION__);
return 0;
}
int main(int argc, char *argv[]) {
if (argc == 2) {
taos_options(TSDB_OPTION_CONFIGDIR, argv[1]);
}

int ret = 0;
ret = sml_ts5528_test();
ASSERT(!ret);
ret = sml_td29691_Test();
ASSERT(ret);
ret = sml_td29373_Test();

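The two sql[] payloads above are schemaless line-protocol records whose content field carries a JSON document, so every inner quote has to be escaped before it is embedded (and doubled again when written as a C string literal). A small illustrative sketch of building such a record; the field values here are abbreviated stand-ins, not the test data:

    import json

    # sketch: build a line-protocol record with an escaped JSON "content" field
    payload = {"deviceId": "861701069493741", "messageType": "REPORT_PROPERTY"}
    content = json.dumps(payload).replace('"', '\\"')   # escape quotes for the field value
    ts = 1728719963230
    record = (f'device_log_yuelan_cs1,deviceId=861701069493741 '
              f'content="{content}",createTime={ts}i64,type="reportProperty" {ts}')
    print(record)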
@ -1275,10 +1275,10 @@ void* ombProduceThreadFunc(void* param) {
msgsOfSql = remainder;
}
int len = 0;
len += snprintf(sqlBuf + len, MAX_SQL_LEN - len, "insert into %s values ", ctbName);
len += tsnprintf(sqlBuf + len, MAX_SQL_LEN - len, "insert into %s values ", ctbName);
for (int j = 0; j < msgsOfSql; j++) {
int64_t timeStamp = taosGetTimestampNs();
len += snprintf(sqlBuf + len, MAX_SQL_LEN - len, "(%" PRId64 ", \"%s\")", timeStamp, g_payload);
len += tsnprintf(sqlBuf + len, MAX_SQL_LEN - len, "(%" PRId64 ", \"%s\")", timeStamp, g_payload);
sendMsgs++;
pInfo->totalProduceMsgs++;
}