diff --git a/.gitmodules b/.gitmodules index 049b39abfb..156226d544 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,6 +4,9 @@ [submodule "src/connector/grafanaplugin"] path = src/connector/grafanaplugin url = https://github.com/taosdata/grafanaplugin +[submodule "tests/examples/rust"] + path = tests/examples/rust + url = https://github.com/songtianyi/tdengine-rust-bindings.git [submodule "src/connector/hivemq-tdengine-extension"] path = src/connector/hivemq-tdengine-extension - url = https://github.com/huskar-t/hivemq-tdengine-extension.git + url = https://github.com/huskar-t/hivemq-tdengine-extension.git \ No newline at end of file diff --git a/src/client/inc/tscLocalMerge.h b/src/client/inc/tscLocalMerge.h index 0af8c8b576..ce67344b03 100644 --- a/src/client/inc/tscLocalMerge.h +++ b/src/client/inc/tscLocalMerge.h @@ -62,6 +62,7 @@ typedef struct SLocalReducer { bool hasUnprocessedRow; tOrderDescriptor * pDesc; SColumnModel * resColModel; + SColumnModel* finalModel; tExtMemBuffer ** pExtMemBuffer; // disk-based buffer SFillInfo* pFillInfo; // interpolation support structure char* pFinalRes; // result data after interpo @@ -74,7 +75,8 @@ typedef struct SLocalReducer { typedef struct SRetrieveSupport { tExtMemBuffer ** pExtMemBuffer; // for build loser tree tOrderDescriptor *pOrderDescriptor; - SColumnModel * pFinalColModel; // colModel for final result + SColumnModel* pFinalColModel; // colModel for final result + SColumnModel* pFFColModel; int32_t subqueryIndex; // index of current vnode in vnode list SSqlObj * pParentSql; tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to @@ -96,7 +98,7 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF * create local reducer to launch the second-stage reduce process at client site */ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc, - SColumnModel *finalModel, SSqlObj* pSql); + SColumnModel *finalModel, SColumnModel *pFFModel, SSqlObj* pSql); void tscDestroyLocalReducer(SSqlObj *pSql); diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index 3406dcd858..3226f70528 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -41,6 +41,8 @@ int32_t tscHandleInsertRetry(SSqlObj* pSql); void tscBuildResFromSubqueries(SSqlObj *pSql); TAOS_ROW doSetResultRowData(SSqlObj *pSql, bool finalResult); +char *getArithemicInputSrc(void *param, const char *name, int32_t colId); + #ifdef __cplusplus } #endif diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 6628e30874..223fb5d226 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -125,6 +125,7 @@ int32_t tscGetDataBlockFromList(void* pHashList, SArray* pDataBlockList, int64_t */ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo); bool tscIsTWAQuery(SQueryInfo* pQueryInfo); +bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo); bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex); bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex); @@ -158,7 +159,7 @@ SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t ind TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index); void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo); -void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo); +void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo); int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index); void 
tscFieldInfoClear(SFieldInfo* pFieldInfo); @@ -167,15 +168,15 @@ static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQue int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2); -void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex); +void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes); int32_t tscGetResRowLength(SArray* pExprList); SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, - int16_t size, int16_t interSize, bool isTagCol); + int16_t size, int16_t resColId, int16_t interSize, bool isTagCol); SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, - int16_t size, int16_t interSize, bool isTagCol); + int16_t size, int16_t resColId, int16_t interSize, bool isTagCol); SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type, int16_t size); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 4c85af4919..6b3d97d6f9 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -136,6 +136,7 @@ typedef struct SSqlExpr { int16_t numOfParams; // argument value of each function tVariant param[3]; // parameters are not more than 3 int32_t offset; // sub result column value of arithmetic expression. + int16_t resColId; // result column id } SSqlExpr; typedef struct SColumnIndex { @@ -251,6 +252,7 @@ typedef struct SQueryInfo { int64_t clauseLimit; // limit for current sub clause int64_t prjOffset; // offset value in the original sql expression, only applied at client side int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX + int16_t resColumnId; // result column id } SQueryInfo; typedef struct { @@ -462,17 +464,16 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo); -static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) { +static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex, int32_t offset) { SInternalField* pInfo = (SInternalField*) TARRAY_GET_ELEM(pFieldInfo->internalField, columnIndex); - assert(pInfo->pSqlExpr != NULL); - int32_t type = pInfo->pSqlExpr->resType; - int32_t bytes = pInfo->pSqlExpr->resBytes; + int32_t type = pInfo->field.type; + int32_t bytes = pInfo->field.bytes; - char* pData = pRes->data + (int32_t)(pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row); + char* pData = pRes->data + (int32_t)(offset * pRes->numOfRows + bytes * pRes->row); // user defined constant value output columns - if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) { + if (pInfo->pSqlExpr != NULL && TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) { if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) { pData = pInfo->pSqlExpr->param[1].pz; pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen; @@ -517,6 +518,7 @@ extern SRpcCorEpSet tscMgmtEpSet; extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo); void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables); +int16_t getNewResColId(SQueryInfo* pQueryInfo); #ifdef __cplusplus } diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c 
index 99c03c6580..e9e8214c4c 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -351,7 +351,7 @@ void tscProcessFetchRow(SSchedMsg *pMsg) { SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i); if (pSup->pSqlExpr != NULL) { - tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i); + tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i, 0); } else { // todo add } diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index f5a27311f2..35dc94f37b 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -2695,17 +2695,18 @@ static void apercentile_func_second_merge(SQLFunctionCtx *pCtx) { } SAPercentileInfo *pOutput = getAPerctInfo(pCtx); - SHistogramInfo * pHisto = pOutput->pHisto; + SHistogramInfo *pHisto = pOutput->pHisto; if (pHisto->numOfElems <= 0) { memcpy(pHisto, pInput->pHisto, sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1)); pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); } else { + //TODO(dengyihao): avoid memcpy pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); - SHistogramInfo *pRes = tHistogramMerge(pHisto, pInput->pHisto, MAX_HISTOGRAM_BIN); - tHistogramDestroy(&pOutput->pHisto); - pOutput->pHisto = pRes; + memcpy(pHisto, pRes, sizeof(SHistogramInfo) + sizeof(SHistBin) * MAX_HISTOGRAM_BIN); + pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo)); + tHistogramDestroy(&pRes); } SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 44dffab56f..310c4c6657 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -162,7 +162,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, - (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, (TSDB_COL_NAME_LEN - 1), false); + (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, -1000, (TSDB_COL_NAME_LEN - 1), false); rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE); @@ -172,7 +172,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE), - typeColLength, false); + -1000, typeColLength, false); rowLen += typeColLength + VARSTR_HEADER_SIZE; @@ -182,7 +182,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t), - sizeof(int32_t), false); + -1000, sizeof(int32_t), false); rowLen += sizeof(int32_t); @@ -192,7 +192,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols, pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE), - noteColLength, false); + -1000, noteColLength, false); rowLen += noteColLength + VARSTR_HEADER_SIZE; return rowLen; @@ -407,8 +407,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const } SInternalField* pInfo = 
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); - pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, - f.bytes, f.bytes - VARSTR_HEADER_SIZE, false); + pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false); rowLen += f.bytes; @@ -422,7 +421,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, - (int16_t)(ddlLen + VARSTR_HEADER_SIZE), ddlLen, false); + (int16_t)(ddlLen + VARSTR_HEADER_SIZE), -1000, ddlLen, false); rowLen += ddlLen + VARSTR_HEADER_SIZE; @@ -619,7 +618,11 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName, for (int32_t i = 0; i < numOfRows; ++i) { uint8_t type = pSchema[i].type; if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes); + int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE; + if (type == TSDB_DATA_TYPE_NCHAR) { + bytes = bytes/TSDB_NCHAR_SIZE; + } + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName, bytes); } else { snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName); } @@ -642,7 +645,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, for (int32_t i = 0; i < numOfRows; ++i) { uint8_t type = pSchema[i].type; if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes); + int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE; + if (type == TSDB_DATA_TYPE_NCHAR) { + bytes = bytes/TSDB_NCHAR_SIZE; + } + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes); } else { snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName); } @@ -652,7 +659,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, for (int32_t i = numOfRows; i < totalRows; i++) { uint8_t type = pSchema[i].type; if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes); + int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE; + if (type == TSDB_DATA_TYPE_NCHAR) { + bytes = bytes/TSDB_NCHAR_SIZE; + } + snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes); } else { snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName); } diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 84a4ef9a16..b07c7ca66d 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -13,14 +13,15 @@ * along with this program. If not, see . 
*/ +#include "tscLocalMerge.h" +#include "tscSubquery.h" #include "os.h" +#include "qAst.h" #include "tlosertree.h" +#include "tscLog.h" #include "tscUtil.h" #include "tschemautil.h" #include "tsclient.h" -#include "tutil.h" -#include "tscLog.h" -#include "tscLocalMerge.h" typedef struct SCompareParam { SLocalDataSource **pLocalData; @@ -29,6 +30,8 @@ typedef struct SCompareParam { int32_t groupOrderType; } SCompareParam; +static void doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize); + int32_t treeComparator(const void *pLeft, const void *pRight, void *param) { int32_t pLeftIdx = *(int32_t *)pLeft; int32_t pRightIdx = *(int32_t *)pRight; @@ -132,28 +135,41 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc } static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) { - int32_t numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); + int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo); int32_t offset = 0; SFillColInfo* pFillCol = calloc(numOfCols, sizeof(SFillColInfo)); for(int32_t i = 0; i < numOfCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - - pFillCol[i].col.bytes = pExpr->resBytes; - pFillCol[i].col.type = (int8_t)pExpr->resType; - pFillCol[i].col.colId = pExpr->colInfo.colId; - pFillCol[i].flag = pExpr->colInfo.flag; - pFillCol[i].col.offset = offset; - pFillCol[i].functionId = pExpr->functionId; - pFillCol[i].fillVal.i = pQueryInfo->fillVal[i]; - offset += pExpr->resBytes; + SInternalField* pIField = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i); + + if (pIField->pArithExprInfo == NULL) { + SSqlExpr* pExpr = pIField->pSqlExpr; + + pFillCol[i].col.bytes = pExpr->resBytes; + pFillCol[i].col.type = (int8_t)pExpr->resType; + pFillCol[i].col.colId = pExpr->colInfo.colId; + pFillCol[i].flag = pExpr->colInfo.flag; + pFillCol[i].col.offset = offset; + pFillCol[i].functionId = pExpr->functionId; + pFillCol[i].fillVal.i = pQueryInfo->fillVal[i]; + } else { + pFillCol[i].col.bytes = pIField->field.bytes; + pFillCol[i].col.type = (int8_t)pIField->field.type; + pFillCol[i].col.colId = -100; + pFillCol[i].flag = TSDB_COL_NORMAL; + pFillCol[i].col.offset = offset; + pFillCol[i].functionId = -1; + pFillCol[i].fillVal.i = pQueryInfo->fillVal[i]; + } + + offset += pFillCol[i].col.bytes; } return pFillCol; } void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc, - SColumnModel *finalmodel, SSqlObj* pSql) { + SColumnModel *finalmodel, SColumnModel *pFFModel, SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; @@ -342,8 +358,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd return; } - size_t numOfCols = tscSqlExprNumOfExprs(pQueryInfo); - pReducer->pTempBuffer->num = 0; tscCreateResPointerInfo(pRes, pQueryInfo); @@ -372,7 +386,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd if (pQueryInfo->fillType != TSDB_FILL_NONE) { SFillColInfo* pFillCol = createFillColInfo(pQueryInfo); pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols, - 4096, (int32_t)numOfCols, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit, + 4096, (int32_t)pQueryInfo->fieldsInfo.numOfOutput, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit, tinfo.precision, pQueryInfo->fillType, pFillCol, pSql); } } @@ -491,7 +505,8 @@ void tscDestroyLocalReducer(SSqlObj *pSql) { pLocalReducer->pFillInfo = 
taosDestroyFillInfo(pLocalReducer->pFillInfo); if (pLocalReducer->pCtx != NULL) { - for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { + int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + for (int32_t i = 0; i < numOfExprs; ++i) { SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i]; tVariantDestroy(&pCtx->tag); @@ -555,7 +570,8 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm if (numOfGroupByCols > 0) { if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) { - int32_t startCols = pQueryInfo->fieldsInfo.numOfOutput - pQueryInfo->groupbyExpr.numOfGroupCols; + int32_t numOfInternalOutput = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + int32_t startCols = numOfInternalOutput - pQueryInfo->groupbyExpr.numOfGroupCols; // the last "pQueryInfo->groupbyExpr.numOfGroupCols" columns are order-by columns for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) { @@ -674,6 +690,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr pSchema[i].bytes = pExpr->resBytes; pSchema[i].type = (int8_t)pExpr->resType; + tstrncpy(pSchema[i].name, pExpr->aliasName, tListLen(pSchema[i].name)); + rlen += pExpr->resBytes; } @@ -736,8 +754,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr } *pFinalModel = createColumnModel(pSchema, (int32_t)size, capacity); - tfree(pSchema); + tfree(pSchema); return TSDB_CODE_SUCCESS; } @@ -966,10 +984,11 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo); } + int32_t offset = 0; for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - int16_t offset = getColumnModelOffset(pLocalReducer->resColModel, i); memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, (size_t)(pField->bytes * pRes->numOfRows)); + offset += pField->bytes; } pRes->numOfRowsGroup += pRes->numOfRows; @@ -1222,6 +1241,10 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur tColModelCompact(pModel, pResBuf, pModel->capacity); + if (tscIsSecondStageQuery(pQueryInfo)) { + doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalReducer->finalRowSize); + } + #ifdef _DEBUG_VIEW printf("final result before interpo:\n"); // tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num); @@ -1588,3 +1611,44 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen) pRes->pLocalReducer->pResultBuf->num = numOfRes; pRes->data = pRes->pLocalReducer->pResultBuf->data; } + +void doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) { + char* pbuf = calloc(1, pOutput->num * rowSize); + + size_t size = tscNumOfFields(pQueryInfo); + SArithmeticSupport arithSup = {0}; + + // todo refactor + arithSup.offset = 0; + arithSup.numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + arithSup.exprList = pQueryInfo->exprList; + arithSup.data = calloc(arithSup.numOfCols, POINTER_BYTES); + + for(int32_t k = 0; k < arithSup.numOfCols; ++k) { + SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k); + arithSup.data[k] = (pOutput->data + pOutput->num* pExpr->offset); + } + + int32_t offset = 0; + + for (int i = 0; i < size; ++i) { + SInternalField* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); + + // calculate the result from several other columns + if 
(pSup->pArithExprInfo != NULL) { + arithSup.pArithExpr = pSup->pArithExprInfo; + tExprTreeCalcTraverse(arithSup.pArithExpr->pExpr, (int32_t) pOutput->num, pbuf + pOutput->num*offset, &arithSup, TSDB_ORDER_ASC, getArithemicInputSrc); + } else { + SSqlExpr* pExpr = pSup->pSqlExpr; + memcpy(pbuf + pOutput->num * offset, pExpr->offset * pOutput->num + pOutput->data, pExpr->resBytes * pOutput->num); + } + + offset += pSup->field.bytes; + } + + assert(finalRowSize <= rowSize); + memcpy(pOutput->data, pbuf, pOutput->num * finalRowSize); + + tfree(pbuf); + tfree(arithSup.data); +} \ No newline at end of file diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index c9115a8324..a44a158f93 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -1148,6 +1148,10 @@ int tsParseInsertSql(SSqlObj *pSql) { index = 0; sToken = tStrGetToken(str, &index, false, 0, NULL); + if (sToken.type != TK_STRING && sToken.type != TK_ID) { + code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z); + goto _error; + } str += index; if (sToken.n == 0) { code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index c13de00136..b55326bbd3 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -52,7 +52,8 @@ typedef struct SConvertFunc { int32_t originFuncId; int32_t execFuncId; } SConvertFunc; -static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t colIndex, int32_t tableIndex); + +static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tableIndex); static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo); static char* getAccountId(SSqlObj* pSql); @@ -127,6 +128,10 @@ static int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo); static int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index); static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, int64_t *uid); +int16_t getNewResColId(SQueryInfo* pQueryInfo) { + return pQueryInfo->resColumnId--; +} + static uint8_t convertOptr(SStrToken *pToken) { switch (pToken->type) { case TK_LT: @@ -1274,6 +1279,7 @@ static void tscInsertPrimaryTSSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* SColumnIndex tsCol = {.tableIndex = pIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX}; tscColumnListInsert(pQueryInfo->colList, &tsCol); } + static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t exprIndex, tSQLExprItem* pItem) { const char* msg1 = "invalid column name, illegal column type, or columns in arithmetic expression from two tables"; const char* msg2 = "invalid arithmetic expression in select clause"; @@ -1305,7 +1311,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t SColumnIndex index = {.tableIndex = tableIndex}; SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double), - sizeof(double), false); + -1000, sizeof(double), false); char* name = (pItem->aliasName != NULL)? 
pItem->aliasName:pItem->pNode->token.z; size_t len = MIN(sizeof(pExpr->aliasName), pItem->pNode->token.n + 1); @@ -1321,6 +1327,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } + // check for if there is a tag in the arithmetic express size_t numOfNode = taosArrayGetSize(colList); for(int32_t k = 0; k < numOfNode; ++k) { SColIndex* pIndex = taosArrayGet(colList, k); @@ -1346,9 +1353,9 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t char* c = tbufGetData(&bw, false); // set the serialized binary string as the parameter of arithmetic expression - addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len, index.tableIndex); - + addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len); insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr); + // add ts column tscInsertPrimaryTSSourceColumn(pQueryInfo, &index); @@ -1380,6 +1387,10 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t pArithExprInfo->interBytes = sizeof(double); pArithExprInfo->type = TSDB_DATA_TYPE_DOUBLE; + pArithExprInfo->base.functionId = TSDB_FUNC_ARITHM; + pArithExprInfo->base.numOfParams = 1; + pArithExprInfo->base.resColId = getNewResColId(pQueryInfo); + int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo, NULL, &pArithExprInfo->uid); if (ret != TSDB_CODE_SUCCESS) { tExprTreeDestroy(&pArithExprInfo->pExpr, NULL); @@ -1388,14 +1399,30 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t pInfo->pArithExprInfo = pArithExprInfo; } + + SBufferWriter bw = tbufInitWriter(NULL, false); + + TRY(0) { + exprTreeToBinary(&bw, pInfo->pArithExprInfo->pExpr); + } CATCH(code) { + tbufCloseWriter(&bw); + UNUSED(code); + // TODO: other error handling + } END_TRY + + SSqlFuncMsg* pFuncMsg = &pInfo->pArithExprInfo->base; + pFuncMsg->arg[0].argBytes = (int16_t) tbufTell(&bw); + pFuncMsg->arg[0].argValue.pz = tbufGetData(&bw, true); + pFuncMsg->arg[0].argType = TSDB_DATA_TYPE_BINARY; + +// tbufCloseWriter(&bw); // TODO there is a memory leak } return TSDB_CODE_SUCCESS; } - static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSQLExprItem* pItem) { - SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, startPos, pIndex->columnIndex, pIndex->tableIndex); + SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, pIndex->columnIndex, pIndex->tableIndex); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; @@ -1540,7 +1567,7 @@ int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnLi return TSDB_CODE_SUCCESS; } -SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t colIndex, int32_t tableIndex) { +SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tableIndex) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; int32_t numOfCols = tscGetNumOfColumns(pTableMeta); @@ -1552,20 +1579,22 @@ SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t c if (functionId == TSDB_FUNC_TAGPRJ) { index.columnIndex = colIndex - tscGetNumOfColumns(pTableMeta); - tscColumnListInsert(pTableMetaInfo->tagColList, &index); } else { index.columnIndex = colIndex; } - - return tscSqlExprAppend(pQueryInfo, 
functionId, &index, pSchema->type, pSchema->bytes, - pSchema->bytes, functionId == TSDB_FUNC_TAGPRJ); + + int16_t colId = getNewResColId(pQueryInfo); + return tscSqlExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, pSchema->bytes, + (functionId == TSDB_FUNC_TAGPRJ)); } SSqlExpr* tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex, SSchema* pColSchema, int16_t flag) { + int16_t colId = getNewResColId(pQueryInfo); + SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, outputColIndex, functionId, pIndex, pColSchema->type, - pColSchema->bytes, pColSchema->bytes, TSDB_COL_IS_TAG(flag)); + pColSchema->bytes, colId, pColSchema->bytes, TSDB_COL_IS_TAG(flag)); tstrncpy(pExpr->aliasName, pColSchema->name, sizeof(pExpr->aliasName)); SColumnList ids = getColumnList(1, pIndex->tableIndex, pIndex->columnIndex); @@ -1601,7 +1630,7 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum } for (int32_t j = 0; j < numOfTotalColumns; ++j) { - SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, startPos + j, j, pIndex->tableIndex); + SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, j, pIndex->tableIndex); tstrncpy(pExpr->aliasName, pSchema[j].name, sizeof(pExpr->aliasName)); pIndex->columnIndex = j; @@ -1710,7 +1739,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS bytes = pSchema->bytes; } - SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, bytes, false); + SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, getNewResColId(pQueryInfo), bytes, false); tstrncpy(pExpr->aliasName, name, tListLen(pExpr->aliasName)); if (cvtFunc.originFuncId == TSDB_FUNC_LAST_ROW && cvtFunc.originFuncId != functionID) { @@ -1804,7 +1833,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; - pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false); + pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false); } else if (sqlOptr == TK_INTEGER) { // select count(1) from table1 char buf[8] = {0}; int64_t val = -1; @@ -1816,7 +1845,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if (val == 1) { index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; - pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false); + pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false); } else { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } @@ -1836,12 +1865,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; - pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, isTag); + pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, isTag); } } else { // count(*) is equalled to count(primary_timestamp_key) index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize; - pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, 
TSDB_DATA_TYPE_BIGINT, size, size, false); + pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false); } pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); @@ -1928,7 +1957,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col colIndex += 1; SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0}; SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, - TSDB_KEYSIZE, false); + getNewResColId(pQueryInfo), TSDB_KEYSIZE, false); SColumnList ids = getColumnList(1, 0, 0); insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].aName, pExpr); @@ -1939,7 +1968,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); } - SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, resultType, resultSize, resultSize, false); + SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false); if (optr == TK_LEASTSQUARES) { /* set the leastsquares parameters */ @@ -1948,14 +1977,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return TSDB_CODE_TSC_INVALID_SQL; } - addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, DOUBLE_BYTES, 0); + addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, DOUBLE_BYTES); memset(val, 0, tListLen(val)); if (tVariantDump(&pParamElem[2].pNode->val, val, TSDB_DATA_TYPE_DOUBLE, true) < 0) { return TSDB_CODE_TSC_INVALID_SQL; } - addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double), 0); + addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); } SColumnList ids = {0}; @@ -2180,8 +2209,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tscInsertPrimaryTSSourceColumn(pQueryInfo, &index); colIndex += 1; // the first column is ts - pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, resultSize, false); - addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double), 0); + pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false); + addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); } else { tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true); @@ -2198,8 +2227,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col // todo REFACTOR // set the first column ts for top/bottom query SColumnIndex index1 = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX}; - pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, - TSDB_KEYSIZE, false); + pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pQueryInfo), + TSDB_KEYSIZE, false); tstrncpy(pExpr->aliasName, aAggs[TSDB_FUNC_TS].aName, sizeof(pExpr->aliasName)); const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX; @@ -2209,8 +2238,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col colIndex += 1; // the first column is ts - pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, resultSize, false); - addExprParams(pExpr, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t), 0); + pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, 
resultSize, getNewResColId(pQueryInfo), resultSize, false); + addExprParams(pExpr, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t)); } memset(pExpr->aliasName, 0, tListLen(pExpr->aliasName)); @@ -2694,7 +2723,7 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) { } } - tscFieldInfoUpdateOffsetForInterResult(pQueryInfo); + tscFieldInfoUpdateOffset(pQueryInfo); return TSDB_CODE_SUCCESS; } @@ -2922,7 +2951,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) void setColumnOffsetValueInResultset(SQueryInfo* pQueryInfo) { if (QUERY_IS_STABLE_QUERY(pQueryInfo->type)) { - tscFieldInfoUpdateOffsetForInterResult(pQueryInfo); + tscFieldInfoUpdateOffset(pQueryInfo); } else { tscFieldInfoUpdateOffset(pQueryInfo); } @@ -4437,7 +4466,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - size_t size = tscSqlExprNumOfExprs(pQueryInfo); + size_t size = tscNumOfFields(pQueryInfo); if (pQueryInfo->fillVal == NULL) { pQueryInfo->fillVal = calloc(size, sizeof(int64_t)); @@ -4451,12 +4480,8 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery } else if (strncasecmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4) { pQueryInfo->fillType = TSDB_FILL_NULL; for (int32_t i = START_INTERPO_COL_IDX; i < size; ++i) { - TAOS_FIELD* pFields = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull((char*) &pQueryInfo->fillVal[i], pFields->type); - } else { - setNull((char*)&pQueryInfo->fillVal[i], pFields->type, pFields->bytes); - }; + TAOS_FIELD* pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); + setNull((char*)&pQueryInfo->fillVal[i], pField->type, pField->bytes); } } else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) { pQueryInfo->fillType = TSDB_FILL_PREV; @@ -4487,15 +4512,15 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery int32_t j = 1; for (int32_t i = startPos; i < numOfFillVal; ++i, ++j) { - TAOS_FIELD* pFields = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); + TAOS_FIELD* pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull((char*) &pQueryInfo->fillVal[i], pFields->type); + if (pField->type == TSDB_DATA_TYPE_BINARY || pField->type == TSDB_DATA_TYPE_NCHAR) { + setVardataNull((char*) &pQueryInfo->fillVal[i], pField->type); continue; } tVariant* p = taosArrayGet(pFillToken, j); - int32_t ret = tVariantDump(p, (char*)&pQueryInfo->fillVal[i], pFields->type, true); + int32_t ret = tVariantDump(p, (char*)&pQueryInfo->fillVal[i], pField->type, true); if (ret != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg); } @@ -4505,12 +4530,12 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery tVariantListItem* lastItem = taosArrayGetLast(pFillToken); for (int32_t i = numOfFillVal; i < size; ++i) { - TAOS_FIELD* pFields = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); + TAOS_FIELD* pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) { - setVardataNull((char*) &pQueryInfo->fillVal[i], pFields->type); + if (pField->type == TSDB_DATA_TYPE_BINARY || pField->type == TSDB_DATA_TYPE_NCHAR) { + setVardataNull((char*) 
&pQueryInfo->fillVal[i], pField->type); } else { - tVariantDump(&lastItem->pVar, (char*)&pQueryInfo->fillVal[i], pFields->type, true); + tVariantDump(&lastItem->pVar, (char*)&pQueryInfo->fillVal[i], pField->type, true); } } } @@ -5447,7 +5472,7 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClau int16_t type = pTagSchema->type; int16_t bytes = pTagSchema->bytes; - pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, bytes, true); + pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true); pExpr->colInfo.flag = TSDB_COL_TAG; // NOTE: tag column does not add to source column list @@ -5750,7 +5775,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo if (TSDB_COL_IS_TAG(pColIndex->flag)) { SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex}; - SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, bytes, true); + SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true); memset(pExpr->aliasName, 0, sizeof(pExpr->aliasName)); tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName)); @@ -5913,7 +5938,7 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ SColumnIndex ind = {0}; SSqlExpr* pExpr1 = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG_DUMMY, &ind, TSDB_DATA_TYPE_INT, - tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, false); + tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, getNewResColId(pQueryInfo), tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, false); const char* name = (pExprList->a[0].aliasName != NULL)? pExprList->a[0].aliasName:functionsInfo[index].name; tstrncpy(pExpr1->aliasName, name, tListLen(pExpr1->aliasName)); @@ -6585,6 +6610,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS if (strcmp((*pExpr)->pSchema->name, p1->aliasName) == 0) { (*pExpr)->pSchema->type = (uint8_t)p1->resType; (*pExpr)->pSchema->bytes = p1->resBytes; + (*pExpr)->pSchema->colId = p1->resColId; if (uid != NULL) { *uid = p1->uid; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index dc55e3f159..e8b6cb284e 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -698,7 +698,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->queryType = htonl(pQueryInfo->type); size_t numOfOutput = tscSqlExprNumOfExprs(pQueryInfo); - pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); + pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); // this is the stage one output column number // set column list ids size_t numOfCols = taosArrayGetSize(pQueryInfo->colList); @@ -760,12 +760,15 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { return TSDB_CODE_TSC_INVALID_SQL; } + assert(pExpr->resColId < 0); + pSqlFuncExpr->colInfo.colId = htons(pExpr->colInfo.colId); pSqlFuncExpr->colInfo.colIndex = htons(pExpr->colInfo.colIndex); pSqlFuncExpr->colInfo.flag = htons(pExpr->colInfo.flag); pSqlFuncExpr->functionId = htons(pExpr->functionId); pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams); + pSqlFuncExpr->resColId = htons(pExpr->resColId); pMsg += sizeof(SSqlFuncMsg); for (int32_t j = 0; j < pExpr->numOfParams; ++j) { @@ -783,7 +786,73 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pSqlFuncExpr = (SSqlFuncMsg *)pMsg; } - + + if(tscIsSecondStageQuery(pQueryInfo)) { + size_t output = 
tscNumOfFields(pQueryInfo); + pQueryMsg->secondStageOutput = htonl((int32_t) output); + + SSqlFuncMsg *pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg; + + for (int32_t i = 0; i < output; ++i) { + SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i); + SSqlExpr *pExpr = pField->pSqlExpr; + if (pExpr != NULL) { + if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) { + tscError("%p table schema is not matched with parsed sql", pSql); + return TSDB_CODE_TSC_INVALID_SQL; + } + + pSqlFuncExpr1->colInfo.colId = htons(pExpr->colInfo.colId); + pSqlFuncExpr1->colInfo.colIndex = htons(pExpr->colInfo.colIndex); + pSqlFuncExpr1->colInfo.flag = htons(pExpr->colInfo.flag); + + pSqlFuncExpr1->functionId = htons(pExpr->functionId); + pSqlFuncExpr1->numOfParams = htons(pExpr->numOfParams); + pMsg += sizeof(SSqlFuncMsg); + + for (int32_t j = 0; j < pExpr->numOfParams; ++j) { + // todo add log + pSqlFuncExpr1->arg[j].argType = htons((uint16_t)pExpr->param[j].nType); + pSqlFuncExpr1->arg[j].argBytes = htons(pExpr->param[j].nLen); + + if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) { + memcpy(pMsg, pExpr->param[j].pz, pExpr->param[j].nLen); + pMsg += pExpr->param[j].nLen; + } else { + pSqlFuncExpr1->arg[j].argValue.i64 = htobe64(pExpr->param[j].i64Key); + } + } + + pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg; + } else { + assert(pField->pArithExprInfo != NULL); + SExprInfo* pExprInfo = pField->pArithExprInfo; + + pSqlFuncExpr1->colInfo.colId = htons(pExprInfo->base.colInfo.colId); + pSqlFuncExpr1->functionId = htons(pExprInfo->base.functionId); + pSqlFuncExpr1->numOfParams = htons(pExprInfo->base.numOfParams); + pMsg += sizeof(SSqlFuncMsg); + + for (int32_t j = 0; j < pExprInfo->base.numOfParams; ++j) { + // todo add log + pSqlFuncExpr1->arg[j].argType = htons((uint16_t)pExprInfo->base.arg[j].argType); + pSqlFuncExpr1->arg[j].argBytes = htons(pExprInfo->base.arg[j].argBytes); + + if (pExprInfo->base.arg[j].argType == TSDB_DATA_TYPE_BINARY) { + memcpy(pMsg, pExprInfo->base.arg[j].argValue.pz, pExprInfo->base.arg[j].argBytes); + pMsg += pExprInfo->base.arg[j].argBytes; + } else { + pSqlFuncExpr1->arg[j].argValue.i64 = htobe64(pExprInfo->base.arg[j].argValue.i64); + } + } + + pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg; + } + } + } else { + pQueryMsg->secondStageOutput = 0; + } + // serialize the table info (sid, uid, tags) pMsg = doSerializeTableInfo(pQueryMsg, pSql, pMsg); @@ -810,7 +879,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } if (pQueryInfo->fillType != TSDB_FILL_NONE) { - for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { + for (int32_t i = 0; i < tscSqlExprNumOfExprs(pQueryInfo); ++i) { *((int64_t *)pMsg) = htobe64(pQueryInfo->fillVal[i]); pMsg += sizeof(pQueryInfo->fillVal[0]); } @@ -1946,7 +2015,7 @@ int tscProcessShowRsp(SSqlObj *pSql) { SInternalField* pInfo = tscFieldInfoAppend(pFieldInfo, &f); pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, - pTableSchema[i].type, pTableSchema[i].bytes, pTableSchema[i].bytes, false); + pTableSchema[i].type, pTableSchema[i].bytes, getNewResColId(pQueryInfo), pTableSchema[i].bytes, false); } pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutput; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 5f8a2eb6b7..a7b859b294 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -28,7 +28,6 @@ #include "tutil.h" #include "ttimer.h" #include "tscProfile.h" -#include "ttimer.h" static bool validImpl(const char* str, size_t maxsize) { if (str == 
NULL) { @@ -482,7 +481,7 @@ int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) { assert(0); for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) { - tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i); + tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i, 0); } *rows = pRes->tsrow; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index eb32e2490a..bc522d4007 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -1642,9 +1642,9 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { } tExtMemBuffer ** pMemoryBuf = NULL; - tOrderDescriptor *pDesc = NULL; - SColumnModel * pModel = NULL; - + tOrderDescriptor *pDesc = NULL; + SColumnModel *pModel = NULL; + pRes->qhandle = 0x1; // hack the qhandle check const uint32_t nBufferSize = (1u << 16); // 64KB @@ -1707,7 +1707,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { trs->subqueryIndex = i; trs->pParentSql = pSql; trs->pFinalColModel = pModel; - + SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL); if (pNew == NULL) { tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, i, strerror(errno)); @@ -1762,10 +1762,6 @@ static void tscFreeRetrieveSup(SSqlObj *pSql) { } tscDebug("%p start to free subquery supp obj:%p", pSql, trsupport); -// int32_t index = trsupport->subqueryIndex; -// SSqlObj *pParentSql = trsupport->pParentSql; - -// assert(pSql == pParentSql->pSubs[index]); tfree(trsupport->localBuffer); tfree(trsupport); } @@ -1956,7 +1952,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, 0); tscClearInterpInfo(pPQueryInfo); - tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, pParentSql); + tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, trsupport->pFFColModel, pParentSql); tscDebug("%p build loser tree completed", pParentSql); pParentSql->res.precision = pSql->res.precision; @@ -2418,7 +2414,7 @@ static void transferNcharData(SSqlObj *pSql, int32_t columnIndex, TAOS_FIELD *pF } } -static char *getArithemicInputSrc(void *param, const char *name, int32_t colId) { +char *getArithemicInputSrc(void *param, const char *name, int32_t colId) { SArithmeticSupport *pSupport = (SArithmeticSupport *) param; int32_t index = -1; @@ -2449,48 +2445,22 @@ TAOS_ROW doSetResultRowData(SSqlObj *pSql, bool finalResult) { SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); size_t size = tscNumOfFields(pQueryInfo); + int32_t offset = 0; + for (int i = 0; i < size; ++i) { - SInternalField* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); - if (pSup->pSqlExpr != NULL) { - tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i); - } + tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i, offset); + TAOS_FIELD *pField = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); + + offset += pField->bytes; // primary key column cannot be null in interval query, no need to check if (i == 0 && pQueryInfo->interval.interval > 0) { continue; } - TAOS_FIELD *pField = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i); if (pRes->tsrow[i] != NULL && pField->type == TSDB_DATA_TYPE_NCHAR) { transferNcharData(pSql, i, pField); } - - // calculate the result from several other columns - if (pSup->pArithExprInfo != NULL) { - if (pRes->pArithSup == NULL) { - pRes->pArithSup = (SArithmeticSupport*)calloc(1, sizeof(SArithmeticSupport)); 
- } - - pRes->pArithSup->offset = 0; - pRes->pArithSup->pArithExpr = pSup->pArithExprInfo; - pRes->pArithSup->numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); - pRes->pArithSup->exprList = pQueryInfo->exprList; - pRes->pArithSup->data = calloc(pRes->pArithSup->numOfCols, POINTER_BYTES); - - if (pRes->buffer[i] == NULL) { - TAOS_FIELD* field = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); - pRes->buffer[i] = malloc(field->bytes); - } - - for(int32_t k = 0; k < pRes->pArithSup->numOfCols; ++k) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k); - pRes->pArithSup->data[k] = (pRes->data + pRes->numOfRows* pExpr->offset) + pRes->row*pExpr->resBytes; - } - - tExprTreeCalcTraverse(pRes->pArithSup->pArithExpr->pExpr, 1, pRes->buffer[i], pRes->pArithSup, - TSDB_ORDER_ASC, getArithemicInputSrc); - pRes->tsrow[i] = (unsigned char*)pRes->buffer[i]; - } } pRes->row++; // index increase one-step diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 080ef9f2d2..27824fc1ff 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -219,6 +219,24 @@ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) { return true; } +bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo) { + size_t numOfOutput = tscNumOfFields(pQueryInfo); + size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); + + if (numOfOutput == numOfExprs) { + return false; + } + + for(int32_t i = 0; i < numOfOutput; ++i) { + SExprInfo* pExprInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i)->pArithExprInfo; + if (pExprInfo != NULL) { + return true; + } + } + + return false; +} + bool tscIsTWAQuery(SQueryInfo* pQueryInfo) { size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 0; i < numOfExprs; ++i) { @@ -855,28 +873,11 @@ void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) { SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); pExpr->offset = 0; - - for (int32_t i = 1; i < numOfExprs; ++i) { - SSqlExpr* prev = taosArrayGetP(pQueryInfo->exprList, i - 1); - SSqlExpr* p = taosArrayGetP(pQueryInfo->exprList, i); - - p->offset = prev->offset + prev->resBytes; - } -} -void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo) { - if (tscSqlExprNumOfExprs(pQueryInfo) == 0) { - return; - } - - SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); - pExpr->offset = 0; - - size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo); for (int32_t i = 1; i < numOfExprs; ++i) { SSqlExpr* prev = taosArrayGetP(pQueryInfo->exprList, i - 1); SSqlExpr* p = taosArrayGetP(pQueryInfo->exprList, i); - + p->offset = prev->offset + prev->resBytes; } } @@ -944,6 +945,14 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) { if (pInfo->pArithExprInfo != NULL) { tExprTreeDestroy(&pInfo->pArithExprInfo->pExpr, NULL); + + SSqlFuncMsg* pFuncMsg = &pInfo->pArithExprInfo->base; + for(int32_t j = 0; j < pFuncMsg->numOfParams; ++j) { + if (pFuncMsg->arg[j].argType == TSDB_DATA_TYPE_BINARY) { + tfree(pFuncMsg->arg[j].argValue.pz); + } + } + tfree(pInfo->pArithExprInfo); } } @@ -955,7 +964,7 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) { } static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, - int16_t size, int16_t interSize, int32_t colType) { + int16_t size, int16_t resColId, int16_t interSize, int32_t colType) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex); SSqlExpr* pExpr = calloc(1, sizeof(SSqlExpr)); @@ -988,8 +997,9 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol 
pExpr->resType = type; pExpr->resBytes = size; + pExpr->resColId = resColId; pExpr->interBytes = interSize; - + if (pTableMetaInfo->pTableMeta) { pExpr->uid = pTableMetaInfo->pTableMeta->id.uid; } @@ -998,20 +1008,20 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol } SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type, - int16_t size, int16_t interSize, bool isTagCol) { + int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) { int32_t num = (int32_t)taosArrayGetSize(pQueryInfo->exprList); if (index == num) { - return tscSqlExprAppend(pQueryInfo, functionId, pColIndex, type, size, interSize, isTagCol); + return tscSqlExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); } - SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, interSize, isTagCol); + SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); taosArrayInsert(pQueryInfo->exprList, index, &pExpr); return pExpr; } SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, - int16_t size, int16_t interSize, bool isTagCol) { - SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, interSize, isTagCol); + int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) { + SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); taosArrayPush(pQueryInfo->exprList, &pExpr); return pExpr; } @@ -1039,16 +1049,14 @@ size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo) { return taosArrayGetSize(pQueryInfo->exprList); } -void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex) { - if (pExpr == NULL || argument == NULL || bytes == 0) { - return; - } +void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) { + assert (pExpr != NULL || argument != NULL || bytes != 0); // set parameter value // transfer to tVariant from byte data/no ascii data tVariantCreateFromBinary(&pExpr->param[pExpr->numOfParams], argument, bytes, type); - pExpr->numOfParams += 1; + assert(pExpr->numOfParams <= 3); } @@ -1601,6 +1609,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) { pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES); pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX; + pQueryInfo->resColumnId= -1000; } int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index 294544ed47..f93412ffec 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -197,7 +197,8 @@ public class TSDBConnection implements Connection { public SQLWarning getWarnings() throws SQLException { //todo: implement getWarnings according to the warning messages returned from TDengine - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + return null; +// throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } public void clearWarnings() throws SQLException { diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 25b77d6845..681fa44929 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -392,6 +392,7 @@ typedef 
struct SColIndex { typedef struct SSqlFuncMsg { int16_t functionId; int16_t numOfParams; + int16_t resColId; // result column id, id of the current output column SColIndex colInfo; struct ArgElem { @@ -461,11 +462,6 @@ typedef struct STimeWindow { TSKEY ekey; } STimeWindow; -/* - * the outputCols is equalled to or larger than numOfCols - * e.g., select min(colName), max(colName), avg(colName) from table - * the outputCols will be 3 while the numOfCols is 1. - */ typedef struct { SMsgHead head; STimeWindow window; @@ -483,13 +479,14 @@ typedef struct { uint32_t queryType; // denote another query process int16_t numOfOutput; // final output columns numbers int16_t tagNameRelType; // relation of tag criteria and tbname criteria - int16_t fillType; // interpolate type - uint64_t fillVal; // default value array list - int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed - int32_t tsLen; // total length of ts comp block - int32_t tsNumOfBlocks; // ts comp block numbers - int32_t tsOrder; // ts comp block order - int32_t numOfTags; // number of tags columns involved + int16_t fillType; // interpolate type + uint64_t fillVal; // default value array list + int32_t secondStageOutput; + int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed + int32_t tsLen; // total length of ts comp block + int32_t tsNumOfBlocks; // ts comp block numbers + int32_t tsOrder; // ts comp block order + int32_t numOfTags; // number of tags columns involved SColumnInfo colList[]; } SQueryTableMsg; diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 5d6b141a9b..88f07ee602 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -40,19 +40,22 @@ typedef struct { enum _show_db_index { TSDB_SHOW_DB_NAME_INDEX, TSDB_SHOW_DB_CREATED_TIME_INDEX, - TSDB_SHOW_DB_VGROUPS_INDEX, TSDB_SHOW_DB_NTABLES_INDEX, + TSDB_SHOW_DB_VGROUPS_INDEX, TSDB_SHOW_DB_REPLICA_INDEX, + TSDB_SHOW_DB_QUORUM_INDEX, TSDB_SHOW_DB_DAYS_INDEX, TSDB_SHOW_DB_KEEP_INDEX, - TSDB_SHOW_DB_TABLES_INDEX, - TSDB_SHOW_DB_ROWS_INDEX, TSDB_SHOW_DB_CACHE_INDEX, - TSDB_SHOW_DB_ABLOCKS_INDEX, - TSDB_SHOW_DB_TBLOCKS_INDEX, - TSDB_SHOW_DB_CTIME_INDEX, - TSDB_SHOW_DB_CLOG_INDEX, + TSDB_SHOW_DB_BLOCKS_INDEX, + TSDB_SHOW_DB_MINROWS_INDEX, + TSDB_SHOW_DB_MAXROWS_INDEX, + TSDB_SHOW_DB_WALLEVEL_INDEX, + TSDB_SHOW_DB_FSYNC_INDEX, TSDB_SHOW_DB_COMP_INDEX, + TSDB_SHOW_DB_PRECISION_INDEX, + TSDB_SHOW_DB_UPDATE_INDEX, + TSDB_SHOW_DB_STATUS_INDEX, TSDB_MAX_SHOW_DB }; @@ -90,17 +93,23 @@ extern char version[]; typedef struct { char name[TSDB_DB_NAME_LEN + 1]; - int32_t replica; - int32_t days; - int32_t keep; - int32_t tables; - int32_t rows; - int32_t cache; - int32_t ablocks; - int32_t tblocks; - int32_t ctime; - int32_t clog; - int32_t comp; + int32_t tables; + int32_t vgroups; + int16_t replications; + int16_t quorum; + int16_t daysPerFile; + int16_t daysToKeep; + int16_t daysToKeep1; + int16_t daysToKeep2; + int32_t cacheBlockSize; //MB + int32_t totalBlocks; + int32_t minRowsPerFileBlock; + int32_t maxRowsPerFileBlock; + int8_t walLevel; + int32_t fsyncPeriod; + int8_t compression; + int8_t precision; // time resolution + int8_t update; } SDbInfo; typedef struct { @@ -173,6 +182,7 @@ static struct argp_option options[] = { {"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3}, {"end-time", 'E', "END_TIME", 0, "End time to dump.", 3}, {"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. 
Default is 1.", 3}, + {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3}, {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3}, @@ -200,6 +210,7 @@ struct arguments { int64_t start_time; int64_t end_time; int32_t data_batch; + int32_t max_sql_len; int32_t table_batch; // num of table which will be dump into one output file. bool allow_sys; // other options @@ -298,6 +309,17 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { case 'N': arguments->data_batch = atoi(arg); break; + case 'L': + { + int32_t len = atoi(arg); + if (len > TSDB_MAX_ALLOWED_SQL_LEN) { + len = TSDB_MAX_ALLOWED_SQL_LEN; + } else if (len < TSDB_MAX_SQL_LEN) { + len = TSDB_MAX_SQL_LEN; + } + arguments->max_sql_len = len; + break; + } case 't': arguments->table_batch = atoi(arg); break; @@ -360,6 +382,7 @@ struct arguments tsArguments = { 0, INT64_MAX, 1, + TSDB_MAX_SQL_LEN, 1, false, // other options @@ -415,7 +438,9 @@ int main(int argc, char *argv[]) { printf("start_time: %" PRId64 "\n", tsArguments.start_time); printf("end_time: %" PRId64 "\n", tsArguments.end_time); printf("data_batch: %d\n", tsArguments.data_batch); + printf("max_sql_len: %d\n", tsArguments.max_sql_len); printf("table_batch: %d\n", tsArguments.table_batch); + printf("thread_num: %d\n", tsArguments.thread_num); printf("allow_sys: %d\n", tsArguments.allow_sys); printf("abort: %d\n", tsArguments.abort); printf("isDumpIn: %d\n", tsArguments.isDumpIn); @@ -682,8 +707,8 @@ int taosDumpOut(struct arguments *arguments) { TAOS_FIELD *fields = taos_fetch_fields(result); while ((row = taos_fetch_row(result)) != NULL) { - // sys database name : 'monitor', but subsequent version changed to 'log' - if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "monitor", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0 && + // sys database name : 'log', but subsequent version changed to 'log' + if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0 && (!arguments->allow_sys)) continue; @@ -711,20 +736,27 @@ int taosDumpOut(struct arguments *arguments) { } strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); - #if 0 - dbInfos[count]->replica = (int)(*((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX])); - dbInfos[count]->days = (int)(*((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX])); - dbInfos[count]->keep = *((int *)row[TSDB_SHOW_DB_KEEP_INDEX]); - dbInfos[count]->tables = *((int *)row[TSDB_SHOW_DB_TABLES_INDEX]); - dbInfos[count]->rows = *((int *)row[TSDB_SHOW_DB_ROWS_INDEX]); - dbInfos[count]->cache = *((int *)row[TSDB_SHOW_DB_CACHE_INDEX]); - dbInfos[count]->ablocks = *((int *)row[TSDB_SHOW_DB_ABLOCKS_INDEX]); - dbInfos[count]->tblocks = (int)(*((int16_t *)row[TSDB_SHOW_DB_TBLOCKS_INDEX])); - dbInfos[count]->ctime = *((int *)row[TSDB_SHOW_DB_CTIME_INDEX]); - dbInfos[count]->clog = (int)(*((int8_t *)row[TSDB_SHOW_DB_CLOG_INDEX])); - dbInfos[count]->comp = (int)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); +#if 0 + if (arguments->with_property) { + dbInfos[count]->tables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); + dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); + dbInfos[count]->replications = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); + dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); + 
dbInfos[count]->daysPerFile = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); + dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); + dbInfos[count]->daysToKeep1; + dbInfos[count]->daysToKeep2; + dbInfos[count]->cacheBlockSize = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); + dbInfos[count]->totalBlocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); + dbInfos[count]->minRowsPerFileBlock = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); + dbInfos[count]->maxRowsPerFileBlock = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); + dbInfos[count]->walLevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); + dbInfos[count]->fsyncPeriod = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); + dbInfos[count]->compression = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); + dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); + dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); + } #endif - count++; if (arguments->databases) { @@ -1037,10 +1069,13 @@ void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s", dbInfo->name); if (isDumpProperty) { + #if 0 pstr += sprintf(pstr, - " REPLICA %d DAYS %d KEEP %d TABLES %d ROWS %d CACHE %d ABLOCKS %d TBLOCKS %d CTIME %d CLOG %d COMP %d", - dbInfo->replica, dbInfo->days, dbInfo->keep, dbInfo->tables, dbInfo->rows, dbInfo->cache, - dbInfo->ablocks, dbInfo->tblocks, dbInfo->ctime, dbInfo->clog, dbInfo->comp); + "TABLES %d vgroups %d REPLICA %d quorum %d DAYS %d KEEP %d CACHE %d BLOCKS %d MINROWS %d MAXROWS %d WALLEVEL %d FYNC %d COMP %d PRECISION %s UPDATE %d", + dbInfo->tables, dbInfo->vgroups, dbInfo->replications, dbInfo->quorum, dbInfo->daysPerFile, dbInfo->daysToKeep, dbInfo->cacheBlockSize, + dbInfo->totalBlocks, dbInfo->minRowsPerFileBlock, dbInfo->maxRowsPerFileBlock, dbInfo->walLevel, dbInfo->fsyncPeriod, dbInfo->compression, + dbInfo->precision, dbInfo->update); + #endif } pstr += sprintf(pstr, ";"); @@ -1459,7 +1494,8 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* return -1; } - char* tmpBuffer = (char *)calloc(1, COMMAND_SIZE); + int32_t sql_buf_len = arguments->max_sql_len; + char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128); if (tmpBuffer == NULL) { fprintf(stderr, "failed to allocate memory\n"); free(tmpCommand); @@ -1502,85 +1538,83 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* return -1; } - char sqlStr[8] = "\0"; - if (arguments->mysqlFlag) { - sprintf(sqlStr, "INSERT"); - } else { - sprintf(sqlStr, "IMPORT"); - } - int rowFlag = 0; + int32_t curr_sqlstr_len = 0; + int32_t total_sqlstr_len = 0; count = 0; while ((row = taos_fetch_row(tmpResult)) != NULL) { pstr = tmpBuffer; + curr_sqlstr_len = 0; int32_t* length = taos_fetch_lengths(tmpResult); // act len if (count == 0) { - pstr += sprintf(pstr, "%s INTO %s VALUES (", sqlStr, tbname); - } else { + total_sqlstr_len = 0; + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "INSERT INTO %s VALUES (", tbname); + } else { if (arguments->mysqlFlag) { if (0 == rowFlag) { - pstr += sprintf(pstr, "("); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); rowFlag++; } else { - pstr += sprintf(pstr, ", ("); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ("); } } else { - pstr += sprintf(pstr, "("); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); } } for (int col = 0; col < numFields; col++) { - if (col != 0) pstr += sprintf(pstr, ", "); + if (col != 0) curr_sqlstr_len += 
sprintf(pstr + curr_sqlstr_len, ", "); if (row[col] == NULL) { - pstr += sprintf(pstr, "NULL"); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL"); continue; } switch (fields[col].type) { case TSDB_DATA_TYPE_BOOL: - pstr += sprintf(pstr, "%d", ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0)); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0)); break; case TSDB_DATA_TYPE_TINYINT: - pstr += sprintf(pstr, "%d", *((int8_t *)row[col])); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int8_t *)row[col])); break; case TSDB_DATA_TYPE_SMALLINT: - pstr += sprintf(pstr, "%d", *((int16_t *)row[col])); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int16_t *)row[col])); break; case TSDB_DATA_TYPE_INT: - pstr += sprintf(pstr, "%d", *((int32_t *)row[col])); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int32_t *)row[col])); break; case TSDB_DATA_TYPE_BIGINT: - pstr += sprintf(pstr, "%" PRId64 "", *((int64_t *)row[col])); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", *((int64_t *)row[col])); break; case TSDB_DATA_TYPE_FLOAT: - pstr += sprintf(pstr, "%f", GET_FLOAT_VAL(row[col])); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_FLOAT_VAL(row[col])); break; case TSDB_DATA_TYPE_DOUBLE: - pstr += sprintf(pstr, "%f", GET_DOUBLE_VAL(row[col])); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col])); break; case TSDB_DATA_TYPE_BINARY: - *(pstr++) = '\''; + //*(pstr++) = '\''; converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); - pstr = stpcpy(pstr, tbuf); - *(pstr++) = '\''; + //pstr = stpcpy(pstr, tbuf); + //*(pstr++) = '\''; + pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); break; case TSDB_DATA_TYPE_NCHAR: convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); - pstr += sprintf(pstr, "\'%s\'", tbuf); + pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); break; case TSDB_DATA_TYPE_TIMESTAMP: if (!arguments->mysqlFlag) { - pstr += sprintf(pstr, "%" PRId64 "", *(int64_t *)row[col]); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", *(int64_t *)row[col]); } else { char buf[64] = "\0"; int64_t ts = *((int64_t *)row[col]); time_t tt = (time_t)(ts / 1000); struct tm *ptm = localtime(&tt); strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); - pstr += sprintf(pstr, "\'%s.%03d\'", buf, (int)(ts % 1000)); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s.%03d\'", buf, (int)(ts % 1000)); } break; default: @@ -1588,13 +1622,15 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* } } - pstr += sprintf(pstr, ") "); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ") "); totalRows++; count++; fprintf(fp, "%s", tmpBuffer); - if (count >= arguments->data_batch) { + total_sqlstr_len += curr_sqlstr_len; + + if ((count >= arguments->data_batch) || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) { fprintf(fp, ";\n"); count = 0; } //else { diff --git a/src/os/inc/osSocket.h b/src/os/inc/osSocket.h index cbfdedef48..baf7687dd0 100644 --- a/src/os/inc/osSocket.h +++ b/src/os/inc/osSocket.h @@ -37,7 +37,7 @@ extern "C" { #endif #ifndef TAOS_OS_DEF_EPOLL - #define TAOS_EPOLL_WAIT_TIME -1 + #define TAOS_EPOLL_WAIT_TIME 500 #endif #ifdef TAOS_RANDOM_NETWORK_FAIL diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 22397d0314..6b4b188c5e 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -152,7 
+152,10 @@ typedef struct SQuery { SLimitVal limit; int32_t rowSize; SSqlGroupbyExpr* pGroupbyExpr; - SExprInfo* pSelectExpr; + SExprInfo* pExpr1; + SExprInfo* pExpr2; + int32_t numOfExpr2; + SColumnInfo* colList; SColumnInfo* tagColList; int32_t numOfFilterCols; diff --git a/src/query/inc/qHistogram.h b/src/query/inc/qHistogram.h index bb058449e8..442e61750b 100644 --- a/src/query/inc/qHistogram.h +++ b/src/query/inc/qHistogram.h @@ -43,7 +43,8 @@ typedef struct SHistogramInfo { int32_t numOfElems; int32_t numOfEntries; int32_t maxEntries; - + double min; + double max; #if defined(USE_ARRAYLIST) SHistBin* elems; #else @@ -52,9 +53,6 @@ typedef struct SHistogramInfo { int32_t maxIndex; bool ordered; #endif - - double min; - double max; } SHistogramInfo; SHistogramInfo* tHistogramCreate(int32_t numOfBins); diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index 5d649261a6..93dca42fdc 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -48,7 +48,7 @@ static FORCE_INLINE SResultRow *getResultRow(SWindowResInfo *pWindowResInfo, int } #define curTimeWindowIndex(_winres) ((_winres)->curIndex) -#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pSelectExpr[1].base.arg->argValue.i64:1) +#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pExpr1[1].base.arg->argValue.i64:1) bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot); @@ -62,7 +62,7 @@ static FORCE_INLINE char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int3 int32_t realRowId = (int32_t)(pResult->rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery)); return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * pRuntimeEnv->numOfRowsPerPage + - pQuery->pSelectExpr[columnIndex].bytes * realRowId; + pQuery->pExpr1[columnIndex].bytes * realRowId; } bool isNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval); diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h index 84ca78d822..32cbb56c62 100644 --- a/src/query/inc/tsqlfunction.h +++ b/src/query/inc/tsqlfunction.h @@ -128,7 +128,7 @@ typedef struct SArithmeticSupport { SExprInfo *pArithExpr; int32_t numOfCols; SColumnInfo *colList; - SArray* exprList; // client side used + void *exprList; // client side used int32_t offset; char** data; } SArithmeticSupport; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 59622d9213..869f57f117 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -242,7 +242,7 @@ int64_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv) { int64_t maxOutput = 0; for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int32_t functionId = pQuery->pSelectExpr[j].base.functionId; + int32_t functionId = pQuery->pExpr1[j].base.functionId; /* * ts, tag, tagprj function can not decide the output number of current query @@ -337,7 +337,7 @@ bool isSelectivityWithTagsQuery(SQuery *pQuery) { int32_t numOfSelectivity = 0; for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functId = pQuery->pSelectExpr[i].base.functionId; + int32_t functId = pQuery->pExpr1[i].base.functionId; if (functId == TSDB_FUNC_TAG_DUMMY || functId == TSDB_FUNC_TS_DUMMY) { hasTags = true; continue; @@ -357,7 +357,7 @@ bool isSelectivityWithTagsQuery(SQuery *pQuery) { bool isProjQuery(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functId = pQuery->pSelectExpr[i].base.functionId; + int32_t functId = pQuery->pExpr1[i].base.functionId; if (functId != 
TSDB_FUNC_PRJ && functId != TSDB_FUNC_TAGPRJ) { return false; } @@ -366,7 +366,7 @@ bool isProjQuery(SQuery *pQuery) { return true; } -bool isTSCompQuery(SQuery *pQuery) { return pQuery->pSelectExpr[0].base.functionId == TSDB_FUNC_TS_COMP; } +bool isTSCompQuery(SQuery *pQuery) { return pQuery->pExpr1[0].base.functionId == TSDB_FUNC_TS_COMP; } static bool limitResults(SQueryRuntimeEnv* pRuntimeEnv) { SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); @@ -387,7 +387,7 @@ static bool limitResults(SQueryRuntimeEnv* pRuntimeEnv) { static bool isTopBottomQuery(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TS) { continue; } @@ -401,12 +401,12 @@ static bool isTopBottomQuery(SQuery *pQuery) { } static bool hasTagValOutput(SQuery* pQuery) { - SExprInfo *pExprInfo = &pQuery->pSelectExpr[0]; + SExprInfo *pExprInfo = &pQuery->pExpr1[0]; if (pQuery->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP) { return true; } else { // set tag value, by which the results are aggregated. for (int32_t idx = 0; idx < pQuery->numOfOutput; ++idx) { - SExprInfo *pLocalExprInfo = &pQuery->pSelectExpr[idx]; + SExprInfo *pLocalExprInfo = &pQuery->pExpr1[idx]; // ts_comp column required the tag value for join filter if (TSDB_COL_IS_TAG(pLocalExprInfo->base.colInfo.flag)) { @@ -784,7 +784,7 @@ static void doBlockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, bool closed pCtx[k].size = forwardStep; pCtx[k].startOffset = (QUERY_IS_ASC_QUERY(pQuery)) ? offset : offset - (forwardStep - 1); - int32_t functionId = pQuery->pSelectExpr[k].base.functionId; + int32_t functionId = pQuery->pExpr1[k].base.functionId; if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) { pCtx[k].ptsList = &tsCol[pCtx[k].startOffset]; } @@ -813,7 +813,7 @@ static void doRowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, bool closed, for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { pCtx[k].nStartQueryTimestamp = pWin->skey; - int32_t functionId = pQuery->pSelectExpr[k].base.functionId; + int32_t functionId = pQuery->pExpr1[k].base.functionId; if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { aAggs[functionId].xFunctionF(&pCtx[k], offset); } @@ -922,9 +922,9 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas char *dataBlock = NULL; SQuery *pQuery = pRuntimeEnv->pQuery; - int32_t functionId = pQuery->pSelectExpr[col].base.functionId; + int32_t functionId = pQuery->pExpr1[col].base.functionId; if (functionId == TSDB_FUNC_ARITHM) { - sas->pArithExpr = &pQuery->pSelectExpr[col]; + sas->pArithExpr = &pQuery->pExpr1[col]; sas->offset = 0; sas->colList = pQuery->colList; @@ -954,9 +954,9 @@ static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas } } else { // other type of query function - SColIndex *pCol = &pQuery->pSelectExpr[col].base.colInfo; + SColIndex *pCol = &pQuery->pExpr1[col].base.colInfo; if (TSDB_COL_IS_NORMAL_COL(pCol->flag)) { - SColIndex* pColIndex = &pQuery->pSelectExpr[col].base.colInfo; + SColIndex* pColIndex = &pQuery->pExpr1[col].base.colInfo; SColumnInfoData *p = taosArrayGet(pDataBlock, pColIndex->colIndex); assert(p->info.colId == pColIndex->colId); @@ -1067,7 +1067,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * * tag_prj function are changed to be TSDB_FUNC_TAG_DUMMY */ for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { - 
int32_t functionId = pQuery->pSelectExpr[k].base.functionId; + int32_t functionId = pQuery->pExpr1[k].base.functionId; if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { aAggs[functionId].xFunction(&pCtx[k]); } @@ -1075,7 +1075,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * } for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { - if (pQuery->pSelectExpr[i].base.functionId != TSDB_FUNC_ARITHM) { + if (pQuery->pExpr1[i].base.functionId != TSDB_FUNC_ARITHM) { continue; } @@ -1375,7 +1375,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS } for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { - int32_t functionId = pQuery->pSelectExpr[k].base.functionId; + int32_t functionId = pQuery->pExpr1[k].base.functionId; if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { aAggs[functionId].xFunctionF(&pCtx[k], offset); } @@ -1404,7 +1404,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS // todo refactor: extract method for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { - if (pQuery->pSelectExpr[i].base.functionId != TSDB_FUNC_ARITHM) { + if (pQuery->pExpr1[i].base.functionId != TSDB_FUNC_ARITHM) { continue; } @@ -1464,11 +1464,11 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY *tsCol, SDataBlockInfo* pBlockInfo, SDataStatis *pStatis, void *param, int32_t colIndex, int32_t vgId) { - int32_t functionId = pQuery->pSelectExpr[colIndex].base.functionId; - int32_t colId = pQuery->pSelectExpr[colIndex].base.colInfo.colId; + int32_t functionId = pQuery->pExpr1[colIndex].base.functionId; + int32_t colId = pQuery->pExpr1[colIndex].base.colInfo.colId; SDataStatis *tpField = NULL; - pCtx->hasNull = hasNullValue(&pQuery->pSelectExpr[colIndex].base.colInfo, pStatis, &tpField); + pCtx->hasNull = hasNullValue(&pQuery->pExpr1[colIndex].base.colInfo, pStatis, &tpField); pCtx->aInputElemBuf = inputData; if (tpField != NULL) { @@ -1501,7 +1501,7 @@ void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData, TSKEY functionId == TSDB_FUNC_DIFF || (functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE)) { /* * least squares function needs two columns of input, currently, the x value of linear equation is set to - * timestamp column, and the y-value is the column specified in pQuery->pSelectExpr[i].colIdxInBuffer + * timestamp column, and the y-value is the column specified in pQuery->pExpr1[i].colIdxInBuffer * * top/bottom function needs timestamp to indicate when the * top/bottom values emerge, so does diff function @@ -1574,7 +1574,7 @@ static int32_t setCtxTagColumnInfo(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx } for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SSqlFuncMsg *pSqlFuncMsg = &pQuery->pSelectExpr[i].base; + SSqlFuncMsg *pSqlFuncMsg = &pQuery->pExpr1[i].base; if (pSqlFuncMsg->functionId == TSDB_FUNC_TAG_DUMMY || pSqlFuncMsg->functionId == TSDB_FUNC_TS_DUMMY) { tagLen += pCtx[i].outputBytes; @@ -1615,7 +1615,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order pRuntimeEnv->offset[0] = 0; for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SSqlFuncMsg *pSqlFuncMsg = &pQuery->pSelectExpr[i].base; + SSqlFuncMsg *pSqlFuncMsg = &pQuery->pExpr1[i].base; SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; SColIndex* pIndex = &pSqlFuncMsg->colInfo; @@ -1649,13 +1649,13 @@ static int32_t 
setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order assert(isValidDataType(pCtx->inputType)); pCtx->ptsOutputBuf = NULL; - pCtx->outputBytes = pQuery->pSelectExpr[i].bytes; - pCtx->outputType = pQuery->pSelectExpr[i].type; + pCtx->outputBytes = pQuery->pExpr1[i].bytes; + pCtx->outputType = pQuery->pExpr1[i].type; pCtx->order = pQuery->order.order; pCtx->functionId = pSqlFuncMsg->functionId; pCtx->stableQuery = pRuntimeEnv->stableQuery; - pCtx->interBufBytes = pQuery->pSelectExpr[i].interBytes; + pCtx->interBufBytes = pQuery->pExpr1[i].interBytes; pCtx->numOfParams = pSqlFuncMsg->numOfParams; for (int32_t j = 0; j < pCtx->numOfParams; ++j) { @@ -1672,7 +1672,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order int32_t functionId = pCtx->functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { - int32_t f = pQuery->pSelectExpr[0].base.functionId; + int32_t f = pQuery->pExpr1[0].base.functionId; assert(f == TSDB_FUNC_TS || f == TSDB_FUNC_TS_DUMMY); pCtx->param[2].i64Key = order; @@ -1685,7 +1685,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order if (i > 0) { pRuntimeEnv->offset[i] = pRuntimeEnv->offset[i - 1] + pRuntimeEnv->pCtx[i - 1].outputBytes; - pRuntimeEnv->rowCellInfoOffset[i] = pRuntimeEnv->rowCellInfoOffset[i - 1] + sizeof(SResultRowCellInfo) + pQuery->pSelectExpr[i - 1].interBytes; + pRuntimeEnv->rowCellInfoOffset[i] = pRuntimeEnv->rowCellInfoOffset[i - 1] + sizeof(SResultRowCellInfo) + pQuery->pExpr1[i - 1].interBytes; } } @@ -1779,7 +1779,7 @@ static bool isFixedOutputQuery(SQueryRuntimeEnv* pRuntimeEnv) { } for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SSqlFuncMsg *pExprMsg = &pQuery->pSelectExpr[i].base; + SSqlFuncMsg *pExprMsg = &pQuery->pExpr1[i].base; // ignore the ts_comp function if (i == 0 && pExprMsg->functionId == TSDB_FUNC_PRJ && pExprMsg->numOfParams == 1 && @@ -1802,7 +1802,7 @@ static bool isFixedOutputQuery(SQueryRuntimeEnv* pRuntimeEnv) { // todo refactor with isLastRowQuery static bool isPointInterpoQuery(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionID = pQuery->pSelectExpr[i].base.functionId; + int32_t functionID = pQuery->pExpr1[i].base.functionId; if (functionID == TSDB_FUNC_INTERP) { return true; } @@ -1814,7 +1814,7 @@ static bool isPointInterpoQuery(SQuery *pQuery) { // TODO REFACTOR:MERGE WITH CLIENT-SIDE FUNCTION static bool isSumAvgRateQuery(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TS) { continue; } @@ -1830,7 +1830,7 @@ static bool isSumAvgRateQuery(SQuery *pQuery) { static bool isFirstLastRowQuery(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionID = pQuery->pSelectExpr[i].base.functionId; + int32_t functionID = pQuery->pExpr1[i].base.functionId; if (functionID == TSDB_FUNC_LAST_ROW) { return true; } @@ -1841,7 +1841,7 @@ static bool isFirstLastRowQuery(SQuery *pQuery) { static bool needReverseScan(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG) { continue; } @@ -1852,7 +1852,7 @@ static bool needReverseScan(SQuery 
*pQuery) { if (functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_LAST_DST) { // the scan order to acquire the last result of the specified column - int32_t order = (int32_t)pQuery->pSelectExpr[i].base.arg->argValue.i64; + int32_t order = (int32_t)pQuery->pExpr1[i].base.arg->argValue.i64; if (order != pQuery->order.order) { return true; } @@ -1868,7 +1868,7 @@ static bool needReverseScan(SQuery *pQuery) { */ static bool onlyQueryTags(SQuery* pQuery) { for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SExprInfo* pExprInfo = &pQuery->pSelectExpr[i]; + SExprInfo* pExprInfo = &pQuery->pExpr1[i]; int32_t functionId = pExprInfo->base.functionId; @@ -1911,7 +1911,7 @@ static void setScanLimitationByResultBuffer(SQuery *pQuery) { } else { bool hasMultioutput = false; for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SSqlFuncMsg *pExprMsg = &pQuery->pSelectExpr[i].base; + SSqlFuncMsg *pExprMsg = &pQuery->pExpr1[i].base; if (pExprMsg->functionId == TSDB_FUNC_TS || pExprMsg->functionId == TSDB_FUNC_TS_DUMMY) { continue; } @@ -1945,7 +1945,7 @@ bool colIdCheck(SQuery *pQuery) { // the scan order is not matter static bool onlyOneQueryType(SQuery *pQuery, int32_t functId, int32_t functIdDst) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAG_DUMMY) { @@ -2175,7 +2175,7 @@ static bool needToLoadDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis *pDat if (pRuntimeEnv->topBotQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) { return topbot_datablock_filter(&pCtx[i], functionId, (char *)&pDataStatis[i].min, (char *)&pDataStatis[i].max); } @@ -2266,7 +2266,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, SWindowResInfo * pW } for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SSqlFuncMsg* pSqlFunc = &pQuery->pSelectExpr[i].base; + SSqlFuncMsg* pSqlFunc = &pQuery->pExpr1[i].base; int32_t functionId = pSqlFunc->functionId; int32_t colId = pSqlFunc->colInfo.colId; @@ -2390,7 +2390,7 @@ static void ensureOutputBufferSimple(SQueryRuntimeEnv* pRuntimeEnv, int32_t capa } for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t bytes = pQuery->pSelectExpr[i].bytes; + int32_t bytes = pQuery->pExpr1[i].bytes; assert(bytes > 0 && capacity > 0); char *tmp = realloc(pQuery->sdata[i], bytes * capacity + sizeof(tFilePage)); @@ -2421,7 +2421,7 @@ static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pB int32_t newSize = (int32_t)(pRec->capacity + (pBlockInfo->rows - remain)); for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t bytes = pQuery->pSelectExpr[i].bytes; + int32_t bytes = pQuery->pExpr1[i].bytes; assert(bytes > 0 && newSize > 0); char *tmp = realloc(pQuery->sdata[i], bytes * newSize + sizeof(tFilePage)); @@ -2435,7 +2435,7 @@ static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pB // set the pCtx output buffer position pRuntimeEnv->pCtx[i].aOutputBuf = pQuery->sdata[i]->data + pRec->rows * bytes; - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TOP || functionId == 
TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { pRuntimeEnv->pCtx[i].ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; } @@ -2599,7 +2599,7 @@ void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) { SQuery *pQuery = pRuntimeEnv->pQuery; SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); - SExprInfo *pExprInfo = &pQuery->pSelectExpr[0]; + SExprInfo *pExprInfo = &pQuery->pExpr1[0]; if (pQuery->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP) { assert(pExprInfo->base.numOfParams == 1); @@ -2610,7 +2610,7 @@ void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) { } else { // set tag value, by which the results are aggregated. for (int32_t idx = 0; idx < pQuery->numOfOutput; ++idx) { - SExprInfo* pLocalExprInfo = &pQuery->pSelectExpr[idx]; + SExprInfo* pLocalExprInfo = &pQuery->pExpr1[idx]; // ts_comp column required the tag value for join filter if (!TSDB_COL_IS_TAG(pLocalExprInfo->base.colInfo.flag)) { @@ -2652,7 +2652,7 @@ static void doMerge(SQueryRuntimeEnv *pRuntimeEnv, int64_t timestamp, SResultRow tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pageId); for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (!mergeFlag) { pCtx[i].aOutputBuf = pCtx[i].aOutputBuf + pCtx[i].outputBytes; pCtx[i].currentStage = FIRST_STAGE_MERGE; @@ -2680,7 +2680,7 @@ static void doMerge(SQueryRuntimeEnv *pRuntimeEnv, int64_t timestamp, SResultRow } for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TAG_DUMMY) { continue; } @@ -2766,25 +2766,25 @@ void UNUSED_FUNC displayInterResult(tFilePage **pdata, SQueryRuntimeEnv* pRuntim for (int32_t j = 0; j < numOfRows; ++j) { for (int32_t i = 0; i < numOfCols; ++i) { - switch (pQuery->pSelectExpr[i].type) { + switch (pQuery->pExpr1[i].type) { case TSDB_DATA_TYPE_BINARY: { - int32_t type = pQuery->pSelectExpr[i].type; - printBinaryData(pQuery->pSelectExpr[i].base.functionId, pdata[i]->data + pQuery->pSelectExpr[i].bytes * j, + int32_t type = pQuery->pExpr1[i].type; + printBinaryData(pQuery->pExpr1[i].base.functionId, pdata[i]->data + pQuery->pExpr1[i].bytes * j, type); break; } case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_BIGINT: - printf("%" PRId64 "\t", *(int64_t *)(pdata[i]->data + pQuery->pSelectExpr[i].bytes * j)); + printf("%" PRId64 "\t", *(int64_t *)(pdata[i]->data + pQuery->pExpr1[i].bytes * j)); break; case TSDB_DATA_TYPE_INT: - printf("%d\t", *(int32_t *)(pdata[i]->data + pQuery->pSelectExpr[i].bytes * j)); + printf("%d\t", *(int32_t *)(pdata[i]->data + pQuery->pExpr1[i].bytes * j)); break; case TSDB_DATA_TYPE_FLOAT: - printf("%f\t", *(float *)(pdata[i]->data + pQuery->pSelectExpr[i].bytes * j)); + printf("%f\t", *(float *)(pdata[i]->data + pQuery->pExpr1[i].bytes * j)); break; case TSDB_DATA_TYPE_DOUBLE: - printf("%lf\t", *(double *)(pdata[i]->data + pQuery->pSelectExpr[i].bytes * j)); + printf("%lf\t", *(double *)(pdata[i]->data + pQuery->pExpr1[i].bytes * j)); break; } } @@ -2951,7 +2951,7 @@ int64_t getNumOfResultWindowRes(SQueryRuntimeEnv* pRuntimeEnv, SResultRow *pResu SQuery* pQuery = pRuntimeEnv->pQuery; for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int32_t functionId = pQuery->pSelectExpr[j].base.functionId; + int32_t functionId = pQuery->pExpr1[j].base.functionId; /* * ts, tag, tagprj function 
can not decide the output number of current query @@ -3236,7 +3236,7 @@ static void disableFuncInReverseScanImpl(SQueryRuntimeEnv* pRuntimeEnv, SWindowR // open/close the specified query for each group result for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int32_t functId = pQuery->pSelectExpr[j].base.functionId; + int32_t functId = pQuery->pExpr1[j].base.functionId; SResultRowCellInfo* pInfo = getResultCell(pRuntimeEnv, pRow, j); if (((functId == TSDB_FUNC_FIRST || functId == TSDB_FUNC_FIRST_DST) && order == TSDB_ORDER_ASC) || @@ -3260,7 +3260,7 @@ void disableFuncInReverseScan(SQInfo *pQInfo) { disableFuncInReverseScanImpl(pRuntimeEnv, pWindowResInfo, order); } else { // for simple result of table query, for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { // todo refactor - int32_t functId = pQuery->pSelectExpr[j].base.functionId; + int32_t functId = pQuery->pExpr1[j].base.functionId; SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[j]; if (pCtx->resultInfo == NULL) { @@ -3331,12 +3331,12 @@ void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { pCtx->resultInfo = pCellInfo; // set the timestamp output buffer for top/bottom/diff query - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; } - memset(pQuery->sdata[i]->data, 0, (size_t)(pQuery->pSelectExpr[i].bytes * pQuery->rec.capacity)); + memset(pQuery->sdata[i]->data, 0, (size_t)(pQuery->pExpr1[i].bytes * pQuery->rec.capacity)); } initCtxOutputBuf(pRuntimeEnv); @@ -3347,7 +3347,7 @@ void forwardCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, int64_t output) { // reset the execution contexts for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int32_t functionId = pQuery->pSelectExpr[j].base.functionId; + int32_t functionId = pQuery->pExpr1[j].base.functionId; assert(functionId != TSDB_FUNC_DIFF); // set next output position @@ -3374,7 +3374,7 @@ void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int32_t functionId = pQuery->pSelectExpr[j].base.functionId; + int32_t functionId = pQuery->pExpr1[j].base.functionId; pRuntimeEnv->pCtx[j].currentStage = 0; SResultRowCellInfo* pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]); @@ -3412,7 +3412,7 @@ void skipResults(SQueryRuntimeEnv *pRuntimeEnv) { 0, pQuery->rec.rows); for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes; memmove(pQuery->sdata[i]->data, (char*)pQuery->sdata[i]->data + bytes * numOfSkip, (size_t)(pQuery->rec.rows * bytes)); @@ -3454,7 +3454,7 @@ bool needScanDataBlocksAgain(SQueryRuntimeEnv *pRuntimeEnv) { setResultOutputBuf(pRuntimeEnv, pResult); for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int16_t functId = pQuery->pSelectExpr[j].base.functionId; + int16_t functId = pQuery->pExpr1[j].base.functionId; if (functId == TSDB_FUNC_TS) { continue; } @@ -3467,7 +3467,7 @@ bool needScanDataBlocksAgain(SQueryRuntimeEnv *pRuntimeEnv) { } } else { for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - int16_t functId = pQuery->pSelectExpr[j].base.functionId; + int16_t functId = pQuery->pExpr1[j].base.functionId; if (functId == TSDB_FUNC_TS) { continue; } @@ -3680,7 +3680,7 @@ void 
finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv) { setResultOutputBuf(pRuntimeEnv, buf); for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - aAggs[pQuery->pSelectExpr[j].base.functionId].xFinalize(&pRuntimeEnv->pCtx[j]); + aAggs[pQuery->pExpr1[j].base.functionId].xFinalize(&pRuntimeEnv->pCtx[j]); } /* @@ -3692,14 +3692,14 @@ void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv) { } else { for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { - aAggs[pQuery->pSelectExpr[j].base.functionId].xFinalize(&pRuntimeEnv->pCtx[j]); + aAggs[pQuery->pExpr1[j].base.functionId].xFinalize(&pRuntimeEnv->pCtx[j]); } } } static bool hasMainOutput(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_TAG && functionId != TSDB_FUNC_TAGPRJ) { return true; @@ -3798,7 +3798,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult) { SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; pCtx->aOutputBuf = getPosInResultPage(pRuntimeEnv, i, pResult, page); - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { pCtx->ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf; } @@ -3941,7 +3941,7 @@ void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) { bool requireTimestamp(SQuery *pQuery) { for (int32_t i = 0; i < pQuery->numOfOutput; i++) { - int32_t functionId = pQuery->pSelectExpr[i].base.functionId; + int32_t functionId = pQuery->pExpr1[i].base.functionId; if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_NEED_TS) != 0) { return true; } @@ -4130,14 +4130,27 @@ bool queryHasRemainResForTableQuery(SQueryRuntimeEnv* pRuntimeEnv) { return false; } +static int16_t getNumOfFinalResCol(SQuery* pQuery) { + return pQuery->pExpr2 == NULL? pQuery->numOfOutput:pQuery->numOfExpr2; +} + static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data) { SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - for (int32_t col = 0; col < pQuery->numOfOutput; ++col) { - int32_t bytes = pQuery->pSelectExpr[col].bytes; + if (pQuery->pExpr2 == NULL) { + for (int32_t col = 0; col < pQuery->numOfOutput; ++col) { + int32_t bytes = pQuery->pExpr1[col].bytes; - memmove(data, pQuery->sdata[col]->data, bytes * numOfRows); - data += bytes * numOfRows; + memmove(data, pQuery->sdata[col]->data, bytes * numOfRows); + data += bytes * numOfRows; + } + } else { + for (int32_t col = 0; col < pQuery->numOfExpr2; ++col) { + int32_t bytes = pQuery->pExpr2[col].bytes; + + memmove(data, pQuery->sdata[col]->data, bytes * numOfRows); + data += bytes * numOfRows; + } } int32_t numOfTables = (int32_t)taosArrayGetSize(pQInfo->arrTableIdInfo); @@ -4187,10 +4200,9 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int ret -= (int32_t)pQuery->limit.offset; // todo !!!!there exactly number of interpo is not valid. 
- // todo refactor move to the beginning of buffer for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - memmove(pDst[i]->data, pDst[i]->data + pQuery->pSelectExpr[i].bytes * pQuery->limit.offset, - ret * pQuery->pSelectExpr[i].bytes); + memmove(pDst[i]->data, pDst[i]->data + pQuery->pExpr1[i].bytes * pQuery->limit.offset, + ret * pQuery->pExpr1[i].bytes); } pQuery->limit.offset = 0; @@ -4310,6 +4322,56 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { } } +static TSKEY doSkipIntervalProcess(SQueryRuntimeEnv* pRuntimeEnv, STimeWindow* win, SDataBlockInfo* pBlockInfo, STableQueryInfo* pTableQueryInfo) { + SQuery *pQuery = pRuntimeEnv->pQuery; + SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; + + assert(pQuery->limit.offset == 0); + STimeWindow tw = *win; + getNextTimeWindow(pQuery, &tw); + + if ((tw.skey <= pBlockInfo->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || + (tw.ekey >= pBlockInfo->window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { + + // load the data block and check data remaining in current data block + // TODO optimize performance + SArray * pDataBlock = tsdbRetrieveDataBlock(pRuntimeEnv->pQueryHandle, NULL); + SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0); + + tw = *win; + int32_t startPos = + getNextQualifiedWindow(pRuntimeEnv, &tw, pBlockInfo, pColInfoData->pData, binarySearchForKey, -1); + assert(startPos >= 0); + + // set the abort info + pQuery->pos = startPos; + + // reset the query start timestamp + pTableQueryInfo->win.skey = ((TSKEY *)pColInfoData->pData)[startPos]; + pQuery->window.skey = pTableQueryInfo->win.skey; + TSKEY key = pTableQueryInfo->win.skey; + + pWindowResInfo->prevSKey = tw.skey; + int32_t index = pRuntimeEnv->windowResInfo.curIndex; + + int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock); + pRuntimeEnv->windowResInfo.curIndex = index; // restore the window index + + qDebug("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%" PRId64, + GET_QINFO_ADDR(pRuntimeEnv), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes, + pQuery->current->lastKey); + + return key; + } else { // do nothing + pQuery->window.skey = tw.skey; + pWindowResInfo->prevSKey = tw.skey; + + return tw.skey; + } + + return true; +} + static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { SQuery *pQuery = pRuntimeEnv->pQuery; *start = pQuery->current->lastKey; @@ -4358,49 +4420,13 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { (win.ekey >= blockInfo.window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { pQuery->limit.offset -= 1; pWindowResInfo->prevSKey = win.skey; - - getNextTimeWindow(pQuery, &tw); - } else { // current window does not ended in current data block, try next data block - getNextTimeWindow(pQuery, &tw); } + // current window does not ended in current data block, try next data block + getNextTimeWindow(pQuery, &tw); if (pQuery->limit.offset == 0) { - if ((tw.skey <= blockInfo.window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || - (tw.ekey >= blockInfo.window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { - // load the data block and check data remaining in current data block - // TODO optimize performance - SArray * pDataBlock = tsdbRetrieveDataBlock(pRuntimeEnv->pQueryHandle, NULL); - SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0); - - tw = win; - int32_t startPos = - getNextQualifiedWindow(pRuntimeEnv, &tw, &blockInfo, pColInfoData->pData, binarySearchForKey, -1); - 
assert(startPos >= 0); - - // set the abort info - pQuery->pos = startPos; - - // reset the query start timestamp - pTableQueryInfo->win.skey = ((TSKEY *)pColInfoData->pData)[startPos]; - pQuery->window.skey = pTableQueryInfo->win.skey; - *start = pTableQueryInfo->win.skey; - - pWindowResInfo->prevSKey = tw.skey; - int32_t index = pRuntimeEnv->windowResInfo.curIndex; - - int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, NULL, binarySearchForKey, pDataBlock); - pRuntimeEnv->windowResInfo.curIndex = index; // restore the window index - - qDebug("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, - GET_QINFO_ADDR(pRuntimeEnv), blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes, pQuery->current->lastKey); - - return true; - } else { // do nothing - *start = tw.skey; - pQuery->window.skey = tw.skey; - pWindowResInfo->prevSKey = tw.skey; - return true; - } + *start = doSkipIntervalProcess(pRuntimeEnv, &win, &blockInfo, pTableQueryInfo); + return true; } /* @@ -4421,42 +4447,8 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { } if (pQuery->limit.offset == 0) { - if ((tw.skey <= blockInfo.window.ekey && QUERY_IS_ASC_QUERY(pQuery)) || - (tw.ekey >= blockInfo.window.skey && !QUERY_IS_ASC_QUERY(pQuery))) { - // load the data block and check data remaining in current data block - // TODO optimize performance - SArray * pDataBlock = tsdbRetrieveDataBlock(pRuntimeEnv->pQueryHandle, NULL); - SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0); - - tw = win; - int32_t startPos = - getNextQualifiedWindow(pRuntimeEnv, &tw, &blockInfo, pColInfoData->pData, binarySearchForKey, -1); - assert(startPos >= 0); - - // set the abort info - pQuery->pos = startPos; - - // reset the query start timestamp - pTableQueryInfo->win.skey = ((TSKEY *)pColInfoData->pData)[startPos]; - pQuery->window.skey = pTableQueryInfo->win.skey; - *start = pTableQueryInfo->win.skey; - - pWindowResInfo->prevSKey = tw.skey; - int32_t index = pRuntimeEnv->windowResInfo.curIndex; - - int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, NULL, binarySearchForKey, pDataBlock); - pRuntimeEnv->windowResInfo.curIndex = index; // restore the window index - - qDebug("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, - GET_QINFO_ADDR(pRuntimeEnv), blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes, pQuery->current->lastKey); - - return true; - } else { // do nothing - *start = tw.skey; - pQuery->window.skey = tw.skey; - pWindowResInfo->prevSKey = tw.skey; - return true; - } + *start = doSkipIntervalProcess(pRuntimeEnv, &win, &blockInfo, pTableQueryInfo); + return true; } else { tw = win; int32_t startPos = @@ -4549,8 +4541,8 @@ static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) return terrno; } -static SFillColInfo* taosCreateFillColInfo(SQuery* pQuery) { - int32_t numOfCols = pQuery->numOfOutput; +static SFillColInfo* createFillColInfo(SQuery* pQuery) { + int32_t numOfCols = getNumOfFinalResCol(pQuery); int32_t offset = 0; SFillColInfo* pFillCol = calloc(numOfCols, sizeof(SFillColInfo)); @@ -4558,8 +4550,9 @@ static SFillColInfo* taosCreateFillColInfo(SQuery* pQuery) { return NULL; } + // TODO refactor for(int32_t i = 0; i < numOfCols; ++i) { - SExprInfo* pExprInfo = &pQuery->pSelectExpr[i]; + SExprInfo* pExprInfo = (pQuery->pExpr2 == NULL)? 
&pQuery->pExpr1[i]:&pQuery->pExpr2[i]; pFillCol[i].col.bytes = pExprInfo->bytes; pFillCol[i].col.type = (int8_t)pExprInfo->type; @@ -4664,14 +4657,15 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo } if (pQuery->fillType != TSDB_FILL_NONE && !isPointInterpoQuery(pQuery)) { - SFillColInfo* pColInfo = taosCreateFillColInfo(pQuery); + SFillColInfo* pColInfo = createFillColInfo(pQuery); STimeWindow w = TSWINDOW_INITIALIZER; TSKEY sk = MIN(pQuery->window.skey, pQuery->window.ekey); TSKEY ek = MAX(pQuery->window.skey, pQuery->window.ekey); getAlignQueryTimeWindow(pQuery, pQuery->window.skey, sk, ek, &w); - pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, w.skey, 0, (int32_t)pQuery->rec.capacity, pQuery->numOfOutput, + int32_t numOfCols = getNumOfFinalResCol(pQuery); + pRuntimeEnv->pFillInfo = taosInitFillInfo(pQuery->order.order, w.skey, 0, (int32_t)pQuery->rec.capacity, numOfCols, pQuery->interval.sliding, pQuery->interval.slidingUnit, (int8_t)pQuery->precision, pQuery->fillType, pColInfo, pQInfo); } @@ -5324,6 +5318,74 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { qDebug("QInfo:%p points returned:%" PRId64 ", total:%" PRId64, pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows); } + +static char *getArithemicInputSrc(void *param, const char *name, int32_t colId) { + SArithmeticSupport *pSupport = (SArithmeticSupport *) param; + SExprInfo* pExprInfo = (SExprInfo*) pSupport->exprList; + + int32_t index = -1; + for (int32_t i = 0; i < pSupport->numOfCols; ++i) { + if (colId == pExprInfo[i].base.resColId) { + index = i; + break; + } + } + + assert(index >= 0 && index < pSupport->numOfCols); + return pSupport->data[index] + pSupport->offset * pExprInfo[index].bytes; +} + +static void doSecondaryArithmeticProcess(SQuery* pQuery) { + SArithmeticSupport arithSup = {0}; + + tFilePage **data = calloc(pQuery->numOfExpr2, POINTER_BYTES); + for (int32_t i = 0; i < pQuery->numOfExpr2; ++i) { + int32_t bytes = pQuery->pExpr2[i].bytes; + data[i] = (tFilePage *)malloc(bytes * pQuery->rec.rows + sizeof(tFilePage)); + } + + arithSup.offset = 0; + arithSup.numOfCols = (int32_t)pQuery->numOfOutput; + arithSup.exprList = pQuery->pExpr1; + arithSup.data = calloc(arithSup.numOfCols, POINTER_BYTES); + + for (int32_t k = 0; k < arithSup.numOfCols; ++k) { + arithSup.data[k] = pQuery->sdata[k]->data; + } + + for (int i = 0; i < pQuery->numOfExpr2; ++i) { + SExprInfo *pExpr = &pQuery->pExpr2[i]; + + // calculate the result from several other columns + SSqlFuncMsg* pSqlFunc = &pExpr->base; + if (pSqlFunc->functionId != TSDB_FUNC_ARITHM) { + + for (int32_t j = 0; j < pQuery->numOfOutput; ++j) { + if (pSqlFunc->functionId == pQuery->pExpr1[j].base.functionId && + pSqlFunc->colInfo.colId == pQuery->pExpr1[j].base.colInfo.colId) { + memcpy(data[i]->data, pQuery->sdata[j]->data, pQuery->pExpr1[j].bytes * pQuery->rec.rows); + break; + } + } + } else { + arithSup.pArithExpr = pExpr; + tExprTreeCalcTraverse(arithSup.pArithExpr->pExpr, (int32_t)pQuery->rec.rows, data[i]->data, &arithSup, TSDB_ORDER_ASC, + getArithemicInputSrc); + } + } + + for (int32_t i = 0; i < pQuery->numOfExpr2; ++i) { + memcpy(pQuery->sdata[i]->data, data[i]->data, pQuery->pExpr2[i].bytes * pQuery->rec.rows); + } + + for (int32_t i = 0; i < pQuery->numOfExpr2; ++i) { + tfree(data[i]); + } + + tfree(data); + tfree(arithSup.data); +} + /* * in each query, this function will be called only once, no retry for further result. 
* @@ -5343,13 +5405,14 @@ static void tableFixedOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) scanOneTableDataBlocks(pRuntimeEnv, pTableInfo->lastKey); finalizeQueryResult(pRuntimeEnv); + // since the numOfRows must be identical for all sql functions that are allowed to be executed simutaneously. + pQuery->rec.rows = getNumOfResult(pRuntimeEnv); + doSecondaryArithmeticProcess(pQuery); + if (IS_QUERY_KILLED(pQInfo)) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } - // since the numOfRows must be identical for all sql functions that are allowed to be executed simutaneously. - pQuery->rec.rows = getNumOfResult(pRuntimeEnv); - skipResults(pRuntimeEnv); limitResults(pRuntimeEnv); } @@ -5469,8 +5532,15 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { clearFirstNTimeWindow(pRuntimeEnv, pQInfo->groupIndex); } + // no result generated, abort + if (pQuery->rec.rows == 0) { + break; + } + + doSecondaryArithmeticProcess(pQuery); + // the offset is handled at prepare stage if no interpolation involved - if (pQuery->fillType == TSDB_FILL_NONE || pQuery->rec.rows == 0) { + if (pQuery->fillType == TSDB_FILL_NONE) { limitResults(pRuntimeEnv); break; } else { @@ -5693,7 +5763,7 @@ static char *createTableIdList(SQueryTableMsg *pQueryMsg, char *pMsg, SArray **p * @param pExpr * @return */ -static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, SSqlFuncMsg ***pExpr, +static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, SSqlFuncMsg ***pExpr, SSqlFuncMsg ***pSecStageExpr, char **tagCond, char** tbnameCond, SColIndex **groupbyCols, SColumnInfo** tagCols) { int32_t code = TSDB_CODE_SUCCESS; @@ -5724,6 +5794,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, pQueryMsg->tsNumOfBlocks = htonl(pQueryMsg->tsNumOfBlocks); pQueryMsg->tsOrder = htonl(pQueryMsg->tsOrder); pQueryMsg->numOfTags = htonl(pQueryMsg->numOfTags); + pQueryMsg->secondStageOutput = htonl(pQueryMsg->secondStageOutput); // query msg safety check if (!validateQueryMsg(pQueryMsg)) { @@ -5793,9 +5864,10 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, pExprMsg->colInfo.colIndex = htons(pExprMsg->colInfo.colIndex); pExprMsg->colInfo.colId = htons(pExprMsg->colInfo.colId); - pExprMsg->colInfo.flag = htons(pExprMsg->colInfo.flag); - pExprMsg->functionId = htons(pExprMsg->functionId); - pExprMsg->numOfParams = htons(pExprMsg->numOfParams); + pExprMsg->colInfo.flag = htons(pExprMsg->colInfo.flag); + pExprMsg->functionId = htons(pExprMsg->functionId); + pExprMsg->numOfParams = htons(pExprMsg->numOfParams); + pExprMsg->resColId = htons(pExprMsg->resColId); pMsg += sizeof(SSqlFuncMsg); @@ -5831,6 +5903,49 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, goto _cleanup; } + if (pQueryMsg->secondStageOutput) { + pExprMsg = (SSqlFuncMsg *)pMsg; + *pSecStageExpr = calloc(pQueryMsg->secondStageOutput, POINTER_BYTES); + + for (int32_t i = 0; i < pQueryMsg->secondStageOutput; ++i) { + (*pSecStageExpr)[i] = pExprMsg; + + pExprMsg->colInfo.colIndex = htons(pExprMsg->colInfo.colIndex); + pExprMsg->colInfo.colId = htons(pExprMsg->colInfo.colId); + pExprMsg->colInfo.flag = htons(pExprMsg->colInfo.flag); + pExprMsg->functionId = htons(pExprMsg->functionId); + pExprMsg->numOfParams = htons(pExprMsg->numOfParams); + + pMsg += sizeof(SSqlFuncMsg); + + for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) { + pExprMsg->arg[j].argType = 
htons(pExprMsg->arg[j].argType); + pExprMsg->arg[j].argBytes = htons(pExprMsg->arg[j].argBytes); + + if (pExprMsg->arg[j].argType == TSDB_DATA_TYPE_BINARY) { + pExprMsg->arg[j].argValue.pz = pMsg; + pMsg += pExprMsg->arg[j].argBytes; // one more for the string terminated char. + } else { + pExprMsg->arg[j].argValue.i64 = htobe64(pExprMsg->arg[j].argValue.i64); + } + } + + int16_t functionId = pExprMsg->functionId; + if (functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG_DUMMY) { + if (!TSDB_COL_IS_TAG(pExprMsg->colInfo.flag)) { // ignore the column index check for arithmetic expression. + code = TSDB_CODE_QRY_INVALID_MSG; + goto _cleanup; + } + } else { +// if (!validateExprColumnInfo(pQueryMsg, pExprMsg)) { +// return TSDB_CODE_QRY_INVALID_MSG; +// } + } + + pExprMsg = (SSqlFuncMsg *)pMsg; + } + } + pMsg = createTableIdList(pQueryMsg, pMsg, pTableIdList); if (pQueryMsg->numOfGroupCols > 0) { // group by tag columns @@ -5936,8 +6051,8 @@ _cleanup: return code; } -static int32_t buildAirthmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTableMsg *pQueryMsg) { - qDebug("qmsg:%p create arithmetic expr from binary string: %s", pQueryMsg, pArithExprInfo->base.arg[0].argValue.pz); +static int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTableMsg *pQueryMsg) { + qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg); tExprNode* pExprNode = NULL; TRY(TSDB_MAX_TAG_CONDITIONS) { @@ -5957,7 +6072,7 @@ static int32_t buildAirthmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTable return TSDB_CODE_SUCCESS; } -static int32_t createQFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo **pExprInfo, SSqlFuncMsg **pExprMsg, +static int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, SSqlFuncMsg **pExprMsg, SColumnInfo* pTagCols) { *pExprInfo = NULL; int32_t code = TSDB_CODE_SUCCESS; @@ -5970,7 +6085,7 @@ static int32_t createQFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo * bool isSuperTable = QUERY_IS_STABLE_QUERY(pQueryMsg->queryType); int16_t tagLen = 0; - for (int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) { + for (int32_t i = 0; i < numOfOutput; ++i) { pExprs[i].base = *pExprMsg[i]; pExprs[i].bytes = 0; @@ -5979,7 +6094,7 @@ static int32_t createQFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo * // parse the arithmetic expression if (pExprs[i].base.functionId == TSDB_FUNC_ARITHM) { - code = buildAirthmeticExprFromMsg(&pExprs[i], pQueryMsg); + code = buildArithmeticExprFromMsg(&pExprs[i], pQueryMsg); if (code != TSDB_CODE_SUCCESS) { tfree(pExprs); @@ -6032,7 +6147,7 @@ static int32_t createQFunctionExprFromMsg(SQueryTableMsg *pQueryMsg, SExprInfo * } // TODO refactor - for (int32_t i = 0; i < pQueryMsg->numOfOutput; ++i) { + for (int32_t i = 0; i < numOfOutput; ++i) { pExprs[i].base = *pExprMsg[i]; int16_t functId = pExprs[i].base.functionId; @@ -6172,10 +6287,10 @@ static int32_t createFilterInfo(void *pQInfo, SQuery *pQuery) { } static void doUpdateExprColumnIndex(SQuery *pQuery) { - assert(pQuery->pSelectExpr != NULL && pQuery != NULL); + assert(pQuery->pExpr1 != NULL && pQuery != NULL); for (int32_t k = 0; k < pQuery->numOfOutput; ++k) { - SSqlFuncMsg *pSqlExprMsg = &pQuery->pSelectExpr[k].base; + SSqlFuncMsg *pSqlExprMsg = &pQuery->pExpr1[k].base; if (pSqlExprMsg->functionId == TSDB_FUNC_ARITHM) { continue; } @@ -6230,7 +6345,7 @@ static void calResultBufSize(SQuery* pQuery) { } static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, 
SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs, - STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery) { + SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery) { int16_t numOfCols = pQueryMsg->numOfCols; int16_t numOfOutput = pQueryMsg->numOfOutput; @@ -6256,7 +6371,9 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou pQuery->limit.offset = pQueryMsg->offset; pQuery->order.order = pQueryMsg->order; pQuery->order.orderColId = pQueryMsg->orderColId; - pQuery->pSelectExpr = pExprs; + pQuery->pExpr1 = pExprs; + pQuery->pExpr2 = pSecExprs; + pQuery->numOfExpr2 = pQueryMsg->secondStageOutput; pQuery->pGroupbyExpr = pGroupbyExpr; memcpy(&pQuery->interval, &pQueryMsg->interval, sizeof(pQuery->interval)); pQuery->fillType = pQueryMsg->fillType; @@ -6296,7 +6413,15 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou for (int32_t col = 0; col < pQuery->numOfOutput; ++col) { // allocate additional memory for interResults that are usually larger then final results - size_t size = (size_t)((pQuery->rec.capacity + 1) * pExprs[col].bytes + pExprs[col].interBytes + sizeof(tFilePage)); + // TODO refactor + int16_t bytes = 0; + if (pQuery->pExpr2 == NULL || col > pQuery->numOfExpr2) { + bytes = pExprs[col].bytes; + } else { + bytes = MAX(pQuery->pExpr2[col].bytes, pExprs[col].bytes); + } + + size_t size = (size_t)((pQuery->rec.capacity + 1) * bytes + pExprs[col].interBytes + sizeof(tFilePage)); pQuery->sdata[col] = (tFilePage *)calloc(1, size); if (pQuery->sdata[col] == NULL) { goto _cleanup; @@ -6509,6 +6634,22 @@ static void doDestroyTableQueryInfo(STableGroupInfo* pTableqinfoGroupInfo) { pTableqinfoGroupInfo->numOfTables = 0; } +static void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr) { + if (pExprInfo == NULL) { + assert(numOfExpr == 0); + return NULL; + } + + for (int32_t i = 0; i < numOfExpr; ++i) { + if (pExprInfo[i].pExpr != NULL) { + tExprNodeDestroy(pExprInfo[i].pExpr, NULL); + } + } + + tfree(pExprInfo); + return NULL; +} + static void freeQInfo(SQInfo *pQInfo) { if (!isValidQInfo(pQInfo)) { return; @@ -6540,22 +6681,8 @@ static void freeQInfo(SQInfo *pQInfo) { } } - if (pQuery->pSelectExpr != NULL) { - for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - SExprInfo *pExprInfo = &pQuery->pSelectExpr[i]; - - if (pExprInfo->pExpr != NULL) { - tExprTreeDestroy(&pExprInfo->pExpr, NULL); - } - } - - tfree(pQuery->pSelectExpr); - } - - if (pQuery->pGroupbyExpr != NULL) { - taosArrayDestroy(pQuery->pGroupbyExpr->columnInfo); - tfree(pQuery->pGroupbyExpr); - } + pQuery->pExpr1 = destroyQueryFuncExpr(pQuery->pExpr1, pQuery->numOfOutput); + pQuery->pExpr2 = destroyQueryFuncExpr(pQuery->pExpr2, pQuery->numOfExpr2); tfree(pQuery->tagColList); tfree(pQuery->pFilterInfo); @@ -6568,6 +6695,11 @@ static void freeQInfo(SQInfo *pQInfo) { tfree(pQuery->colList); } + if (pQuery->pGroupbyExpr != NULL) { + taosArrayDestroy(pQuery->pGroupbyExpr->columnInfo); + tfree(pQuery->pGroupbyExpr); + } + tfree(pQuery); } @@ -6670,16 +6802,19 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi int32_t code = TSDB_CODE_SUCCESS; - char *tagCond = NULL; - char *tbnameCond = NULL; + char *tagCond = NULL; + char *tbnameCond = NULL; SArray *pTableIdList = NULL; - SSqlFuncMsg **pExprMsg = NULL; - SExprInfo *pExprs = NULL; + SSqlFuncMsg **pExprMsg = NULL; + SSqlFuncMsg **pSecExprMsg = NULL; + SExprInfo *pExprs = NULL; + SExprInfo *pSecExprs = NULL; + 
SColIndex *pGroupColIndex = NULL; SColumnInfo *pTagColumnInfo = NULL; SSqlGroupbyExpr *pGroupbyExpr = NULL; - code = convertQueryMsg(pQueryMsg, &pTableIdList, &pExprMsg, &tagCond, &tbnameCond, &pGroupColIndex, &pTagColumnInfo); + code = convertQueryMsg(pQueryMsg, &pTableIdList, &pExprMsg, &pSecExprMsg, &tagCond, &tbnameCond, &pGroupColIndex, &pTagColumnInfo); if (code != TSDB_CODE_SUCCESS) { goto _over; } @@ -6696,10 +6831,16 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi goto _over; } - if ((code = createQFunctionExprFromMsg(pQueryMsg, &pExprs, pExprMsg, pTagColumnInfo)) != TSDB_CODE_SUCCESS) { + if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->numOfOutput, &pExprs, pExprMsg, pTagColumnInfo)) != TSDB_CODE_SUCCESS) { goto _over; } + if (pSecExprMsg != NULL) { + if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->secondStageOutput, &pSecExprs, pSecExprMsg, pTagColumnInfo)) != TSDB_CODE_SUCCESS) { + goto _over; + } + } + pGroupbyExpr = createGroupbyExprFromMsg(pQueryMsg, pGroupColIndex, &code); if ((pGroupbyExpr == NULL && pQueryMsg->numOfGroupCols != 0) || code != TSDB_CODE_SUCCESS) { goto _over; @@ -6757,8 +6898,10 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi goto _over; } - (*pQInfo) = createQInfoImpl(pQueryMsg, pGroupbyExpr, pExprs, &tableGroupInfo, pTagColumnInfo, isSTableQuery); + (*pQInfo) = createQInfoImpl(pQueryMsg, pGroupbyExpr, pExprs, pSecExprs, &tableGroupInfo, pTagColumnInfo, isSTableQuery); + pExprs = NULL; + pSecExprs = NULL; pGroupbyExpr = NULL; pTagColumnInfo = NULL; @@ -6773,13 +6916,19 @@ _over: free(tagCond); free(tbnameCond); free(pGroupColIndex); + if (pGroupbyExpr != NULL) { taosArrayDestroy(pGroupbyExpr->columnInfo); free(pGroupbyExpr); } + free(pTagColumnInfo); free(pExprs); + free(pSecExprs); + free(pExprMsg); + free(pSecExprMsg); + taosArrayDestroy(pTableIdList); for (int32_t i = 0; i < pQueryMsg->numOfCols; i++) { @@ -7043,11 +7192,11 @@ static void buildTagQueryResult(SQInfo* pQInfo) { assert(num == pQInfo->tableqinfoGroupInfo.numOfTables); int32_t count = 0; - int32_t functionId = pQuery->pSelectExpr[0].base.functionId; + int32_t functionId = pQuery->pExpr1[0].base.functionId; if (functionId == TSDB_FUNC_TID_TAG) { // return the tags & table Id assert(pQuery->numOfOutput == 1); - SExprInfo* pExprInfo = &pQuery->pSelectExpr[0]; + SExprInfo* pExprInfo = &pQuery->pExpr1[0]; int32_t rsize = pExprInfo->bytes; count = 0; @@ -7121,7 +7270,7 @@ static void buildTagQueryResult(SQInfo* pQInfo) { continue; } - SExprInfo* pExprInfo = pQuery->pSelectExpr; + SExprInfo* pExprInfo = pQuery->pExpr1; STableQueryInfo* item = taosArrayGetP(pa, i); char *data = NULL, *dst = NULL; diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index 9edcdb0083..4481ff5e8f 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -225,7 +225,7 @@ tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) { tSQLExprDestroy(pLeft); tSQLExprDestroy(pRight); - } else if (pLeft->nSQLOptr == TK_FLOAT || pRight->nSQLOptr == TK_FLOAT) { + } else if ((pLeft->nSQLOptr == TK_FLOAT && pRight->nSQLOptr == TK_INTEGER) || (pLeft->nSQLOptr == TK_INTEGER && pRight->nSQLOptr == TK_FLOAT)) { pExpr->val.nType = TSDB_DATA_TYPE_DOUBLE; pExpr->nSQLOptr = TK_FLOAT; diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index f057dc1a49..f4f89a8709 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -24,7 +24,7 @@ int32_t 
getOutputInterResultBufSize(SQuery* pQuery) { int32_t size = 0; for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { - size += pQuery->pSelectExpr[i].interBytes; + size += pQuery->pExpr1[i].interBytes; } assert(size >= 0); @@ -237,7 +237,7 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pWindowRes) { SResultRowCellInfo *pResultInfo = &pWindowRes->pCellInfo[i]; char * s = getPosInResultPage(pRuntimeEnv, i, pWindowRes, page); - size_t size = pRuntimeEnv->pQuery->pSelectExpr[i].bytes; + size_t size = pRuntimeEnv->pQuery->pExpr1[i].bytes; memset(s, 0, size); RESET_RESULT_INFO(pResultInfo); @@ -280,7 +280,7 @@ void copyResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *dst, const SResult tFilePage *srcpage = getResBufPage(pRuntimeEnv->pResultBuf, src->pageId); char * srcBuf = getPosInResultPage(pRuntimeEnv, i, (SResultRow *)src, srcpage); - size_t s = pRuntimeEnv->pQuery->pSelectExpr[i].bytes; + size_t s = pRuntimeEnv->pQuery->pExpr1[i].bytes; memcpy(dstBuf, srcBuf, s); } diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index bbabb5d47b..7b8cf3cda2 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -171,40 +171,17 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread } static void taosStopTcpThread(SThreadObj* pThreadObj) { - pThreadObj->stop = true; - eventfd_t fd = -1; - - // save thread into local variable since pThreadObj is freed when thread exits + // save thread into local variable and signal thread to stop pthread_t thread = pThreadObj->thread; - - if (taosComparePthread(pThreadObj->thread, pthread_self())) { + if (!taosCheckPthreadValid(thread)) { + return; + } + pThreadObj->stop = true; + if (taosComparePthread(thread, pthread_self())) { pthread_detach(pthread_self()); return; } - - if (taosCheckPthreadValid(pThreadObj->thread)) { - // signal the thread to stop, try graceful method first, - // and use pthread_cancel when failed - struct epoll_event event = { .events = EPOLLIN }; - fd = eventfd(1, 0); - if (fd == -1) { - // failed to create eventfd, call pthread_cancel instead, which may result in data corruption: - tError("%s, failed to create eventfd(%s)", pThreadObj->label, strerror(errno)); - pThreadObj->stop = true; - pthread_cancel(pThreadObj->thread); - } else if (epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) { - // failed to call epoll_ctl, call pthread_cancel instead, which may result in data corruption: - tError("%s, failed to call epoll_ctl(%s)", pThreadObj->label, strerror(errno)); - pthread_cancel(pThreadObj->thread); - } - } - - // at this step, pThreadObj has already been released - if (taosCheckPthreadValid(thread)) { - pthread_join(thread, NULL); - } - - if (fd != -1) taosCloseSocket(fd); + pthread_join(thread, NULL); } void taosStopTcpServer(void *handle) { diff --git a/src/sync/src/taosTcpPool.c b/src/sync/src/taosTcpPool.c index eade0222be..875528e66b 100644 --- a/src/sync/src/taosTcpPool.c +++ b/src/sync/src/taosTcpPool.c @@ -301,31 +301,14 @@ static SThreadObj *taosGetTcpThread(SPoolObj *pPool) { } static void taosStopPoolThread(SThreadObj *pThread) { + pthread_t thread = pThread->thread; + if (!taosCheckPthreadValid(thread)) { + return; + } pThread->stop = true; - - if (pThread->thread == pthread_self()) { + if (taosComparePthread(thread, pthread_self())) { pthread_detach(pthread_self()); return; } - - // save thread ID into a local variable, since pThread is freed when the thread exits - pthread_t thread = pThread->thread; - - // signal the thread to stop, try graceful method 
first, - // and use pthread_cancel when failed - struct epoll_event event = {.events = EPOLLIN}; - eventfd_t fd = eventfd(1, 0); - if (fd == -1) { - // failed to create eventfd, call pthread_cancel instead, which may result in data corruption - sError("failed to create eventfd since %s", strerror(errno)); - pthread_cancel(pThread->thread); - pThread->stop = true; - } else if (epoll_ctl(pThread->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) { - // failed to call epoll_ctl, call pthread_cancel instead, which may result in data corruption - sError("failed to call epoll_ctl since %s", strerror(errno)); - pthread_cancel(pThread->thread); - } - pthread_join(thread, NULL); - taosClose(fd); } diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 650a32eede..637b02cd32 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -282,7 +282,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe _err: tfree(dataDir); - tsdbCloseHelperFile(pHelper, 1, NULL); + tsdbCloseHelperFile(pHelper, 1, pGroup); return -1; } diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index 4ae13bd7e5..a36f7f0261 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -145,6 +145,7 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) { // forward to put the rest of data for (int idata = 1; idata < ndata; idata++) { pDataKey = pSkipList->keyFn(ppData[idata]); + hasDup = false; // Compare max key pKey = SL_GET_MAX_KEY(pSkipList); @@ -153,8 +154,6 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) { for (int i = 0; i < pSkipList->maxLevel; i++) { forward[i] = SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail, i); } - - hasDup = false; } else { SSkipListNode *px = pSkipList->pHead; for (int i = pSkipList->maxLevel - 1; i >= 0; --i) { @@ -173,7 +172,7 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) { compare = pSkipList->comparFn(pKey, pDataKey); if (compare >= 0) { - if (compare == 0) hasDup = true; + if (compare == 0 && !hasDup) hasDup = true; break; } else { px = p; diff --git a/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md b/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md index 1c0e4c100b..e14a5f7b67 100644 --- a/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md +++ b/tests/examples/JDBC/JDBCDemo/README-jdbc-windows.md @@ -132,7 +132,7 @@ https://www.taosdata.com/cn/all-downloads/ 配置完成后,在命令行内使用taos shell连接server端 ```shell -C:\TDengine>taos +C:\TDengine>taos -h td01 Welcome to the TDengine shell from Linux, Client Version:2.0.1.1 Copyright (c) 2017 by TAOS Data, Inc. All rights reserved. 
diff --git a/tests/examples/JDBC/mybatisplus-demo/.gitignore b/tests/examples/JDBC/mybatisplus-demo/.gitignore new file mode 100644 index 0000000000..b56f1dd0d0 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/.gitignore @@ -0,0 +1,33 @@ +README.md +target/ +!.mvn/wrapper/maven-wrapper.jar +!**/src/main/**/target/ +!**/src/test/**/target/ + +### STS ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache + +### IntelliJ IDEA ### +.idea +*.iws +*.iml +*.ipr + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ +build/ +!**/src/main/**/build/ +!**/src/test/**/build/ + +### VS Code ### +.vscode/ diff --git a/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/MavenWrapperDownloader.java b/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/MavenWrapperDownloader.java new file mode 100644 index 0000000000..a45eb6ba26 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/MavenWrapperDownloader.java @@ -0,0 +1,118 @@ +/* + * Copyright 2007-present the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.net.*; +import java.io.*; +import java.nio.channels.*; +import java.util.Properties; + +public class MavenWrapperDownloader { + + private static final String WRAPPER_VERSION = "0.5.6"; + /** + * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. + */ + private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" + + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; + + /** + * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to + * use instead of the default one. + */ + private static final String MAVEN_WRAPPER_PROPERTIES_PATH = + ".mvn/wrapper/maven-wrapper.properties"; + + /** + * Path where the maven-wrapper.jar will be saved to. + */ + private static final String MAVEN_WRAPPER_JAR_PATH = + ".mvn/wrapper/maven-wrapper.jar"; + + /** + * Name of the property which should be used to override the default download url for the wrapper. + */ + private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; + + public static void main(String args[]) { + System.out.println("- Downloader started"); + File baseDirectory = new File(args[0]); + System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); + + // If the maven-wrapper.properties exists, read it and check if it contains a custom + // wrapperUrl parameter. 
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); + String url = DEFAULT_DOWNLOAD_URL; + if (mavenWrapperPropertyFile.exists()) { + FileInputStream mavenWrapperPropertyFileInputStream = null; + try { + mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); + Properties mavenWrapperProperties = new Properties(); + mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); + url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); + } catch (IOException e) { + System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); + } finally { + try { + if (mavenWrapperPropertyFileInputStream != null) { + mavenWrapperPropertyFileInputStream.close(); + } + } catch (IOException e) { + // Ignore ... + } + } + } + System.out.println("- Downloading from: " + url); + + File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); + if (!outputFile.getParentFile().exists()) { + if (!outputFile.getParentFile().mkdirs()) { + System.out.println( + "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'"); + } + } + System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); + try { + downloadFileFromURL(url, outputFile); + System.out.println("Done"); + System.exit(0); + } catch (Throwable e) { + System.out.println("- Error downloading"); + e.printStackTrace(); + System.exit(1); + } + } + + private static void downloadFileFromURL(String urlString, File destination) throws Exception { + if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { + String username = System.getenv("MVNW_USERNAME"); + char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); + Authenticator.setDefault(new Authenticator() { + @Override + protected PasswordAuthentication getPasswordAuthentication() { + return new PasswordAuthentication(username, password); + } + }); + } + URL website = new URL(urlString); + ReadableByteChannel rbc; + rbc = Channels.newChannel(website.openStream()); + FileOutputStream fos = new FileOutputStream(destination); + fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); + fos.close(); + rbc.close(); + } + +} diff --git a/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.jar b/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.jar new file mode 100644 index 0000000000..2cc7d4a55c Binary files /dev/null and b/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.jar differ diff --git a/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.properties b/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000000..642d572ce9 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,2 @@ +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip +wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar diff --git a/tests/examples/JDBC/mybatisplus-demo/mvnw b/tests/examples/JDBC/mybatisplus-demo/mvnw new file mode 100755 index 0000000000..3c8a553731 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/mvnw @@ -0,0 +1,322 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ]; then + + if [ -f /etc/mavenrc ]; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ]; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. +cygwin=false +darwin=false +mingw=false +case "$(uname)" in +CYGWIN*) cygwin=true ;; +MINGW*) mingw=true ;; +Darwin*) + darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="$(/usr/libexec/java_home)" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ]; then + if [ -r /etc/gentoo-release ]; then + JAVA_HOME=$(java-config --jre-home) + fi +fi + +if [ -z "$M2_HOME" ]; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ]; do + ls=$(ls -ld "$PRG") + link=$(expr "$ls" : '.*-> \(.*\)$') + if expr "$link" : '/.*' >/dev/null; then + PRG="$link" + else + PRG="$(dirname "$PRG")/$link" + fi + done + + saveddir=$(pwd) + + M2_HOME=$(dirname "$PRG")/.. + + # make it fully qualified + M2_HOME=$(cd "$M2_HOME" && pwd) + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=$(cygpath --unix "$M2_HOME") + [ -n "$JAVA_HOME" ] && + JAVA_HOME=$(cygpath --unix "$JAVA_HOME") + [ -n "$CLASSPATH" ] && + CLASSPATH=$(cygpath --path --unix "$CLASSPATH") +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw; then + [ -n "$M2_HOME" ] && + M2_HOME="$( ( + cd "$M2_HOME" + pwd + ))" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="$( ( + cd "$JAVA_HOME" + pwd + ))" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="$(which javac)" + if [ -n "$javaExecutable" ] && ! [ "$(expr \"$javaExecutable\" : '\([^ ]*\)')" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=$(which readlink) + if [ ! 
$(expr "$readLink" : '\([^ ]*\)') = "no" ]; then + if $darwin; then + javaHome="$(dirname \"$javaExecutable\")" + javaExecutable="$(cd \"$javaHome\" && pwd -P)/javac" + else + javaExecutable="$(readlink -f \"$javaExecutable\")" + fi + javaHome="$(dirname \"$javaExecutable\")" + javaHome=$(expr "$javaHome" : '\(.*\)/bin') + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ]; then + if [ -n "$JAVA_HOME" ]; then + if [ -x "$JAVA_HOME/jre/sh/java" ]; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="$(which java)" + fi +fi + +if [ ! -x "$JAVACMD" ]; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ]; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ]; then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ]; do + if [ -d "$wdir"/.mvn ]; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=$( + cd "$wdir/.." + pwd + ) + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' <"$1")" + fi +} + +BASE_DIR=$(find_maven_basedir "$(pwd)") +if [ -z "$BASE_DIR" ]; then + exit 1 +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." + fi + if [ -n "$MVNW_REPOURL" ]; then + jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + else + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + fi + while IFS="=" read key value; do + case "$key" in wrapperUrl) + jarUrl="$value" + break + ;; + esac + done <"$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + if $cygwin; then + wrapperJarPath=$(cygpath --path --windows "$wrapperJarPath") + fi + + if command -v wget >/dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget "$jarUrl" -O "$wrapperJarPath" + else + wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" + fi + elif command -v curl >/dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... 
using curl" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl -o "$wrapperJarPath" "$jarUrl" -f + else + curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f + fi + + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaClass=$(cygpath --path --windows "$javaClass") + fi + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." + fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=$(cygpath --path --windows "$M2_HOME") + [ -n "$JAVA_HOME" ] && + JAVA_HOME=$(cygpath --path --windows "$JAVA_HOME") + [ -n "$CLASSPATH" ] && + CLASSPATH=$(cygpath --path --windows "$CLASSPATH") + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=$(cygpath --path --windows "$MAVEN_PROJECTBASEDIR") +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. +MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/tests/examples/JDBC/mybatisplus-demo/mvnw.cmd b/tests/examples/JDBC/mybatisplus-demo/mvnw.cmd new file mode 100644 index 0000000000..c8d43372c9 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/mvnw.cmd @@ -0,0 +1,182 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM https://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. 
See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! 
%%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + +FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %DOWNLOAD_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/tests/examples/JDBC/mybatisplus-demo/pom.xml b/tests/examples/JDBC/mybatisplus-demo/pom.xml new file mode 100644 index 0000000000..8535f3b797 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/pom.xml @@ -0,0 +1,101 @@ + + + 4.0.0 + + org.springframework.boot + spring-boot-starter-parent + 2.4.0 + + + com.taosdata.example + mybatisplus-demo + 0.0.1-SNAPSHOT + mybatisplus-demo + Demo project for tdengine + + + 1.8 + + + + + org.springframework.boot + spring-boot-starter + + + org.projectlombok + lombok + true + + + com.baomidou + mybatis-plus-boot-starter + 3.1.2 + + + com.h2database + h2 + runtime + + + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.11 + + + + + mysql + mysql-connector-java + 5.1.47 + + + org.springframework.boot + spring-boot-starter-web + + + org.springframework.boot + spring-boot-devtools + runtime + true + + + org.springframework.boot + spring-boot-starter-test + test + + + junit + junit + 4.12 + test + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.17 + + + **/*Test.java + + + **/Abstract*.java + + + + + + + + diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/MybatisplusDemoApplication.java b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/MybatisplusDemoApplication.java new file mode 100644 index 0000000000..7aaebca084 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/MybatisplusDemoApplication.java @@ -0,0 +1,15 @@ +package com.taosdata.example.mybatisplusdemo; + +import org.mybatis.spring.annotation.MapperScan; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +@MapperScan("com.taosdata.example.mybatisplusdemo.mapper") +public class MybatisplusDemoApplication { + + public static void main(String[] args) { + SpringApplication.run(MybatisplusDemoApplication.class, args); + } + +} \ No newline at end of file diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/config/MybatisPlusConfig.java b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/config/MybatisPlusConfig.java new file mode 100644 index 0000000000..a6ac7f7fc2 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/config/MybatisPlusConfig.java @@ -0,0 +1,34 @@ +package com.taosdata.example.mybatisplusdemo.config; + +import com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor; +import org.springframework.context.annotation.Bean; +import 
org.springframework.context.annotation.Configuration; + +@Configuration +public class MybatisPlusConfig { + + + /** mybatis 3.4.1 pagination config start ***/ +// @Bean +// public MybatisPlusInterceptor mybatisPlusInterceptor() { +// MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor(); +// interceptor.addInnerInterceptor(new PaginationInnerInterceptor()); +// return interceptor; +// } + +// @Bean +// public ConfigurationCustomizer configurationCustomizer() { +// return configuration -> configuration.setUseDeprecatedExecutor(false); +// } + + @Bean + public PaginationInterceptor paginationInterceptor() { +// return new PaginationInterceptor(); + PaginationInterceptor paginationInterceptor = new PaginationInterceptor(); + //TODO: mybatis-plus do not support TDengine, use postgresql Dialect + paginationInterceptor.setDialectType("postgresql"); + + return paginationInterceptor; + } + +} diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Temperature.java b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Temperature.java new file mode 100644 index 0000000000..97e50b06f6 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Temperature.java @@ -0,0 +1,15 @@ +package com.taosdata.example.mybatisplusdemo.domain; + +import lombok.Data; + +import java.sql.Timestamp; + +@Data +public class Temperature { + + private Timestamp ts; + private float temperature; + private String location; + private int tbIndex; + +} diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Weather.java b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Weather.java new file mode 100644 index 0000000000..361757411a --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Weather.java @@ -0,0 +1,15 @@ +package com.taosdata.example.mybatisplusdemo.domain; + +import lombok.Data; + +import java.sql.Timestamp; + +@Data +public class Weather { + + private Timestamp ts; + private float temperature; + private int humidity; + private String location; + +} diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapper.java b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapper.java new file mode 100644 index 0000000000..3e122524d5 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapper.java @@ -0,0 +1,23 @@ +package com.taosdata.example.mybatisplusdemo.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.taosdata.example.mybatisplusdemo.domain.Temperature; +import org.apache.ibatis.annotations.Insert; +import org.apache.ibatis.annotations.Param; +import org.apache.ibatis.annotations.Update; + +public interface TemperatureMapper extends BaseMapper { + + @Update("CREATE TABLE if not exists temperature(ts timestamp, temperature float) tags(location nchar(64), tbIndex int)") + int createSuperTable(); + + @Update("create table #{tbName} using temperature tags( #{location}, #{tbindex})") + int createTable(@Param("tbName") String tbName, @Param("location") String location, @Param("tbindex") int tbindex); + + @Update("drop table if exists temperature") + void dropSuperTable(); + + 
@Insert("insert into t${tbIndex}(ts, temperature) values(#{ts}, #{temperature})") + int insertOne(Temperature one); + +} diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java new file mode 100644 index 0000000000..6733cbded9 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java @@ -0,0 +1,8 @@ +package com.taosdata.example.mybatisplusdemo.mapper; + +import com.baomidou.mybatisplus.core.mapper.BaseMapper; +import com.taosdata.example.mybatisplusdemo.domain.Weather; + +public interface WeatherMapper extends BaseMapper { + +} diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml b/tests/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml new file mode 100644 index 0000000000..96667f28b8 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml @@ -0,0 +1,34 @@ +spring: + datasource: + # driver-class-name: org.h2.Driver + # schema: classpath:db/schema-mysql.sql + # data: classpath:db/data-mysql.sql + # url: jdbc:h2:mem:test + # username: root + # password: test + + # driver-class-name: com.mysql.jdbc.Driver + # url: jdbc:mysql://master:3306/test?useSSL=false + # username: root + # password: 123456 + + driver-class-name: com.taosdata.jdbc.TSDBDriver + url: jdbc:TAOS://localhost:6030/mp_test + user: root + password: taosdata + charset: UTF-8 + locale: en_US.UTF-8 + timezone: UTC-8 + +mybatis-plus: + configuration: + map-underscore-to-camel-case: false + +logging: + level: + com: + taosdata: + example: + mybatisplusdemo: + mapper: debug + diff --git a/tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java b/tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java new file mode 100644 index 0000000000..4331d15d34 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java @@ -0,0 +1,140 @@ +package com.taosdata.example.mybatisplusdemo.mapper; + +import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper; +import com.baomidou.mybatisplus.core.metadata.IPage; +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.taosdata.example.mybatisplusdemo.domain.Temperature; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import java.sql.Timestamp; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; + +@RunWith(SpringJUnit4ClassRunner.class) +@SpringBootTest +public class TemperatureMapperTest { + + private static Random random = new Random(System.currentTimeMillis()); + private static String[] locations = {"北京", "上海", "深圳", "广州", "杭州"}; + + @Before + public void before() { + mapper.dropSuperTable(); + // create table temperature + mapper.createSuperTable(); + // create table t_X using temperature + for (int i = 0; i < 10; i++) { + mapper.createTable("t" + i, 
locations[random.nextInt(locations.length)], i); + } + // insert into table + int affectRows = 0; + // insert 10 tables + for (int i = 0; i < 10; i++) { + // each table insert 5 rows + for (int j = 0; j < 5; j++) { + Temperature one = new Temperature(); + one.setTs(new Timestamp(1605024000000l)); + one.setTemperature(random.nextFloat() * 50); + one.setLocation("望京"); + one.setTbIndex(i); + affectRows += mapper.insertOne(one); + } + } + Assert.assertEquals(50, affectRows); + } + + @After + public void after() { + mapper.dropSuperTable(); + } + + @Autowired + private TemperatureMapper mapper; + + /*** + * test SelectList + * **/ + @Test + public void testSelectList() { + List temperatureList = mapper.selectList(null); + temperatureList.forEach(System.out::println); + } + + /*** + * test InsertOne which is a custom metheod + * ***/ + @Test + public void testInsert() { + Temperature one = new Temperature(); + one.setTs(new Timestamp(1605024000000l)); + one.setTemperature(random.nextFloat() * 50); + one.setLocation("望京"); + int affectRows = mapper.insertOne(one); + Assert.assertEquals(1, affectRows); + } + + /*** + * test SelectOne + * **/ + @Test + public void testSelectOne() { + QueryWrapper wrapper = new QueryWrapper<>(); + wrapper.eq("location", "beijing"); + Temperature one = mapper.selectOne(wrapper); + System.out.println(one); + Assert.assertNotNull(one); + } + + /*** + * test select By map + * ***/ + @Test + public void testSelectByMap() { + Map map = new HashMap<>(); + map.put("location", "beijing"); + List temperatures = mapper.selectByMap(map); + Assert.assertEquals(1, temperatures.size()); + } + + /*** + * test selectObjs + * **/ + @Test + public void testSelectObjs() { + List ts = mapper.selectObjs(null); + System.out.println(ts); + } + + /** + * test selectC ount + * **/ + @Test + public void testSelectCount() { + int count = mapper.selectCount(null); + Assert.assertEquals(5, count); + } + + /**** + * 分页 + */ + @Test + public void testSelectPage() { + IPage page = new Page(1, 2); + IPage temperatureIPage = mapper.selectPage(page, null); + System.out.println("total : " + temperatureIPage.getTotal()); + System.out.println("pages : " + temperatureIPage.getPages()); + for (Temperature temperature : temperatureIPage.getRecords()) { + System.out.println(temperature); + } + } + +} \ No newline at end of file diff --git a/tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java b/tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java new file mode 100644 index 0000000000..1699344552 --- /dev/null +++ b/tests/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java @@ -0,0 +1,88 @@ +package com.taosdata.example.mybatisplusdemo.mapper; + +import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper; +import com.baomidou.mybatisplus.core.metadata.IPage; +import com.baomidou.mybatisplus.extension.plugins.pagination.Page; +import com.taosdata.example.mybatisplusdemo.domain.Weather; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import java.sql.Timestamp; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; + 
+@RunWith(SpringJUnit4ClassRunner.class) +@SpringBootTest +public class WeatherMapperTest { + + private static Random random = new Random(System.currentTimeMillis()); + + @Autowired + private WeatherMapper mapper; + + @Test + public void testSelectList() { + List weathers = mapper.selectList(null); + weathers.forEach(System.out::println); + } + + @Test + public void testInsert() { + Weather one = new Weather(); + one.setTs(new Timestamp(1605024000000l)); + one.setTemperature(random.nextFloat() * 50); + one.setHumidity(random.nextInt(100)); + one.setLocation("望京"); + int affectRows = mapper.insert(one); + Assert.assertEquals(1, affectRows); + } + + @Test + public void testSelectOne() { + QueryWrapper wrapper = new QueryWrapper<>(); + wrapper.eq("location", "beijing"); + Weather one = mapper.selectOne(wrapper); + System.out.println(one); + Assert.assertEquals(12.22f, one.getTemperature(), 0.00f); + Assert.assertEquals("beijing", one.getLocation()); + } + + @Test + public void testSelectByMap() { + Map map = new HashMap<>(); + map.put("location", "beijing"); + List weathers = mapper.selectByMap(map); + Assert.assertEquals(1, weathers.size()); + } + + @Test + public void testSelectObjs() { + List ts = mapper.selectObjs(null); + System.out.println(ts); + } + + @Test + public void testSelectCount() { + int count = mapper.selectCount(null); +// Assert.assertEquals(5, count); + System.out.println(count); + } + + @Test + public void testSelectPage() { + IPage page = new Page(1, 2); + IPage weatherIPage = mapper.selectPage(page, null); + System.out.println("total : " + weatherIPage.getTotal()); + System.out.println("pages : " + weatherIPage.getPages()); + for (Weather weather : weatherIPage.getRecords()) { + System.out.println(weather); + } + } + +} \ No newline at end of file diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c index 59b9c74827..74a49288e9 100644 --- a/tests/examples/c/demo.c +++ b/tests/examples/c/demo.c @@ -50,10 +50,10 @@ static void queryDB(TAOS *taos, char *command) { taos_free_result(pSql); } +void Test(char *qstr, const char *input, int i); + int main(int argc, char *argv[]) { - TAOS * taos; char qstr[1024]; - TAOS_RES *result; // connect to server if (argc < 2) { @@ -63,41 +63,26 @@ int main(int argc, char *argv[]) { // init TAOS taos_init(); - - taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); + for (int i = 0; i < 4000000; i++) { + Test(qstr, argv[1], i); + } + taos_cleanup(); +} +void Test(char *qstr, const char *input, int index) { + TAOS *taos = taos_connect(input, "root", "taosdata", NULL, 0); + printf("==================test at %d\n================================", index); + queryDB(taos, "drop database if exists demo"); + queryDB(taos, "create database demo"); + TAOS_RES *result; if (taos == NULL) { printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/); exit(1); } - printf("success to connect to server\n"); - - - //taos_query(taos, "drop database demo"); - queryDB(taos, "drop database if exists demo"); - - //result = taos_query(taos, "create database demo"); - //if (result == NULL) { - // printf("failed to create database, reason:%s\n", "null result"/*taos_errstr(taos)*/); - // exit(1); - //} - queryDB(taos, "create database demo"); - printf("success to create database\n"); - - //taos_query(taos, "use demo"); queryDB(taos, "use demo"); - // create table - //if (taos_query(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))") == 0) { - // printf("failed 
to create table, reason:%s\n", taos_errstr(result)); - // exit(1); - //} queryDB(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))"); printf("success to create table\n"); - // sleep for one second to make sure table is created on data node - // taosMsleep(1000); - - // insert 10 records int i = 0; for (i = 0; i < 10; ++i) { sprintf(qstr, "insert into m1 values (%" PRId64 ", %d, %d, %d, %d, %f, %lf, '%s')", 1546300800000 + i * 1000, i, i, i, i*10000000, i*1.0, i*2.0, "hello"); @@ -117,7 +102,6 @@ int main(int argc, char *argv[]) { } taos_free_result(result); - //sleep(1); } printf("success to insert rows, total %d rows\n", i); @@ -147,5 +131,6 @@ int main(int argc, char *argv[]) { taos_free_result(result); printf("====demo end====\n\n"); - return getchar(); + taos_close(taos); } + diff --git a/tests/examples/rust b/tests/examples/rust new file mode 160000 index 0000000000..f2ffd30521 --- /dev/null +++ b/tests/examples/rust @@ -0,0 +1 @@ +Subproject commit f2ffd30521b8e8afbc9d25c75f8eeeb6a48bd030 diff --git a/tests/examples/rust/.gitignore b/tests/examples/rust/.gitignore deleted file mode 100644 index 693699042b..0000000000 --- a/tests/examples/rust/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -/target -**/*.rs.bk -Cargo.lock diff --git a/tests/examples/rust/Cargo.toml b/tests/examples/rust/Cargo.toml deleted file mode 100644 index c9cff73bc0..0000000000 --- a/tests/examples/rust/Cargo.toml +++ /dev/null @@ -1,7 +0,0 @@ -[package] -name = "tdengine" -version = "0.1.0" -authors = ["Chunhua Jiang "] -edition = "2018" - -[dependencies] diff --git a/tests/examples/rust/README.md b/tests/examples/rust/README.md deleted file mode 100644 index 2ef8901ad6..0000000000 --- a/tests/examples/rust/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# TDengine driver connector for Rust - -It's a rust implementation for [TDengine](https://github.com/taosdata/TDengine), an open-sourced big data platform designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT Infrastructure and Application Monitoring. 
- -## Dependencies -- Rust: -``` -curl https://sh.rustup.rs -sSf | sh -``` - -## Run with Sample - -Build and run basic sample: -``` -cargo run --example demo -``` -Build and run subscribe sample: -``` -cargo run --example subscribe -``` diff --git a/tests/examples/rust/build.rs b/tests/examples/rust/build.rs deleted file mode 100644 index f7276d3ef6..0000000000 --- a/tests/examples/rust/build.rs +++ /dev/null @@ -1,10 +0,0 @@ -// build.rs - -use std::env; - -fn main() { - let project_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); - - println!("cargo:rustc-link-search={}", project_dir); // the "-L" flag - println!("cargo:rustc-link-lib=taos"); // the "-l" flag -} diff --git a/tests/examples/rust/examples/demo.rs b/tests/examples/rust/examples/demo.rs deleted file mode 100644 index 182e46c8db..0000000000 --- a/tests/examples/rust/examples/demo.rs +++ /dev/null @@ -1,19 +0,0 @@ -use std::process; -use tdengine::Tdengine; - -fn main() { - let tde = Tdengine::new("127.0.0.1", "root", "taosdata", "demo", 0) - .unwrap_or_else(|err| { - eprintln!("Can't create Tdengine: {}", err); - process::exit(1) - }); - - tde.query("drop database demo"); - tde.query("create database demo"); - tde.query("use demo"); - tde.query("create table m1 (ts timestamp, speed int)"); - - for i in 0..10 { - tde.query(format!("insert into m1 values (now+{}s, {})", i, i).as_str()); - } -} diff --git a/tests/examples/rust/examples/subscribe.rs b/tests/examples/rust/examples/subscribe.rs deleted file mode 100644 index 3255e36ee7..0000000000 --- a/tests/examples/rust/examples/subscribe.rs +++ /dev/null @@ -1,19 +0,0 @@ -use std::process; -use tdengine::Subscriber; - -fn main() { - let subscriber = Subscriber::new("127.0.0.1", "root", "taosdata", "demo", "m1", 0, 1000) - .unwrap_or_else(|err| { - eprintln!("Can't create Subscriber: {}", err); - process::exit(1) - }); - - loop { - let row = subscriber.consume().unwrap_or_else(|err| { - eprintln!("consume exit: {}", err); - process::exit(1) - }); - - subscriber.print_row(&row); - } -} diff --git a/tests/examples/rust/src/bindings.rs b/tests/examples/rust/src/bindings.rs deleted file mode 100644 index fc13647130..0000000000 --- a/tests/examples/rust/src/bindings.rs +++ /dev/null @@ -1,332 +0,0 @@ -/* automatically generated by rust-bindgen */ -#![allow(unused)] -#![allow(non_camel_case_types)] - -pub const _STDINT_H: u32 = 1; -pub const _FEATURES_H: u32 = 1; -pub const _DEFAULT_SOURCE: u32 = 1; -pub const __USE_ISOC11: u32 = 1; -pub const __USE_ISOC99: u32 = 1; -pub const __USE_ISOC95: u32 = 1; -pub const __USE_POSIX_IMPLICITLY: u32 = 1; -pub const _POSIX_SOURCE: u32 = 1; -pub const _POSIX_C_SOURCE: u32 = 200809; -pub const __USE_POSIX: u32 = 1; -pub const __USE_POSIX2: u32 = 1; -pub const __USE_POSIX199309: u32 = 1; -pub const __USE_POSIX199506: u32 = 1; -pub const __USE_XOPEN2K: u32 = 1; -pub const __USE_XOPEN2K8: u32 = 1; -pub const _ATFILE_SOURCE: u32 = 1; -pub const __USE_MISC: u32 = 1; -pub const __USE_ATFILE: u32 = 1; -pub const __USE_FORTIFY_LEVEL: u32 = 0; -pub const _STDC_PREDEF_H: u32 = 1; -pub const __STDC_IEC_559__: u32 = 1; -pub const __STDC_IEC_559_COMPLEX__: u32 = 1; -pub const __STDC_ISO_10646__: u32 = 201505; -pub const __STDC_NO_THREADS__: u32 = 1; -pub const __GNU_LIBRARY__: u32 = 6; -pub const __GLIBC__: u32 = 2; -pub const __GLIBC_MINOR__: u32 = 23; -pub const _SYS_CDEFS_H: u32 = 1; -pub const __WORDSIZE: u32 = 64; -pub const __WORDSIZE_TIME64_COMPAT32: u32 = 1; -pub const __SYSCALL_WORDSIZE: u32 = 64; -pub const _BITS_WCHAR_H: u32 = 1; -pub const INT8_MIN: 
i32 = -128; -pub const INT16_MIN: i32 = -32768; -pub const INT32_MIN: i32 = -2147483648; -pub const INT8_MAX: u32 = 127; -pub const INT16_MAX: u32 = 32767; -pub const INT32_MAX: u32 = 2147483647; -pub const UINT8_MAX: u32 = 255; -pub const UINT16_MAX: u32 = 65535; -pub const UINT32_MAX: u32 = 4294967295; -pub const INT_LEAST8_MIN: i32 = -128; -pub const INT_LEAST16_MIN: i32 = -32768; -pub const INT_LEAST32_MIN: i32 = -2147483648; -pub const INT_LEAST8_MAX: u32 = 127; -pub const INT_LEAST16_MAX: u32 = 32767; -pub const INT_LEAST32_MAX: u32 = 2147483647; -pub const UINT_LEAST8_MAX: u32 = 255; -pub const UINT_LEAST16_MAX: u32 = 65535; -pub const UINT_LEAST32_MAX: u32 = 4294967295; -pub const INT_FAST8_MIN: i32 = -128; -pub const INT_FAST16_MIN: i64 = -9223372036854775808; -pub const INT_FAST32_MIN: i64 = -9223372036854775808; -pub const INT_FAST8_MAX: u32 = 127; -pub const INT_FAST16_MAX: u64 = 9223372036854775807; -pub const INT_FAST32_MAX: u64 = 9223372036854775807; -pub const UINT_FAST8_MAX: u32 = 255; -pub const UINT_FAST16_MAX: i32 = -1; -pub const UINT_FAST32_MAX: i32 = -1; -pub const INTPTR_MIN: i64 = -9223372036854775808; -pub const INTPTR_MAX: u64 = 9223372036854775807; -pub const UINTPTR_MAX: i32 = -1; -pub const PTRDIFF_MIN: i64 = -9223372036854775808; -pub const PTRDIFF_MAX: u64 = 9223372036854775807; -pub const SIG_ATOMIC_MIN: i32 = -2147483648; -pub const SIG_ATOMIC_MAX: u32 = 2147483647; -pub const SIZE_MAX: i32 = -1; -pub const WINT_MIN: u32 = 0; -pub const WINT_MAX: u32 = 4294967295; -pub const TSDB_DATA_TYPE_NULL: u32 = 0; -pub const TSDB_DATA_TYPE_BOOL: u32 = 1; -pub const TSDB_DATA_TYPE_TINYINT: u32 = 2; -pub const TSDB_DATA_TYPE_SMALLINT: u32 = 3; -pub const TSDB_DATA_TYPE_INT: u32 = 4; -pub const TSDB_DATA_TYPE_BIGINT: u32 = 5; -pub const TSDB_DATA_TYPE_FLOAT: u32 = 6; -pub const TSDB_DATA_TYPE_DOUBLE: u32 = 7; -pub const TSDB_DATA_TYPE_BINARY: u32 = 8; -pub const TSDB_DATA_TYPE_TIMESTAMP: u32 = 9; -pub const TSDB_DATA_TYPE_NCHAR: u32 = 10; -pub type int_least8_t = ::std::os::raw::c_schar; -pub type int_least16_t = ::std::os::raw::c_short; -pub type int_least32_t = ::std::os::raw::c_int; -pub type int_least64_t = ::std::os::raw::c_long; -pub type uint_least8_t = ::std::os::raw::c_uchar; -pub type uint_least16_t = ::std::os::raw::c_ushort; -pub type uint_least32_t = ::std::os::raw::c_uint; -pub type uint_least64_t = ::std::os::raw::c_ulong; -pub type int_fast8_t = ::std::os::raw::c_schar; -pub type int_fast16_t = ::std::os::raw::c_long; -pub type int_fast32_t = ::std::os::raw::c_long; -pub type int_fast64_t = ::std::os::raw::c_long; -pub type uint_fast8_t = ::std::os::raw::c_uchar; -pub type uint_fast16_t = ::std::os::raw::c_ulong; -pub type uint_fast32_t = ::std::os::raw::c_ulong; -pub type uint_fast64_t = ::std::os::raw::c_ulong; -pub type intmax_t = ::std::os::raw::c_long; -pub type uintmax_t = ::std::os::raw::c_ulong; -pub const TSDB_OPTION_TSDB_OPTION_LOCALE: TSDB_OPTION = 0; -pub const TSDB_OPTION_TSDB_OPTION_CHARSET: TSDB_OPTION = 1; -pub const TSDB_OPTION_TSDB_OPTION_TIMEZONE: TSDB_OPTION = 2; -pub const TSDB_OPTION_TSDB_OPTION_CONFIGDIR: TSDB_OPTION = 3; -pub const TSDB_OPTION_TSDB_OPTION_SHELL_ACTIVITY_TIMER: TSDB_OPTION = 4; -pub const TSDB_OPTION_TSDB_MAX_OPTIONS: TSDB_OPTION = 5; -pub type TSDB_OPTION = u32; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct taosField { - pub name: [::std::os::raw::c_char; 64usize], - pub bytes: ::std::os::raw::c_short, - pub type_: ::std::os::raw::c_char, -} -#[test] -fn bindgen_test_layout_taosField() { - assert_eq!( - 
::std::mem::size_of::(), - 68usize, - concat!("Size of: ", stringify!(taosField)) - ); - assert_eq!( - ::std::mem::align_of::(), - 2usize, - concat!("Alignment of ", stringify!(taosField)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).name as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(taosField), - "::", - stringify!(name) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).bytes as *const _ as usize }, - 64usize, - concat!( - "Offset of field: ", - stringify!(taosField), - "::", - stringify!(bytes) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).type_ as *const _ as usize }, - 66usize, - concat!( - "Offset of field: ", - stringify!(taosField), - "::", - stringify!(type_) - ) - ); -} -pub type TAOS_FIELD = taosField; -extern "C" { - pub fn taos_init(); -} -extern "C" { - pub fn taos_options( - option: TSDB_OPTION, - arg: *const ::std::os::raw::c_void, - ... - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_connect( - ip: *mut ::std::os::raw::c_char, - user: *mut ::std::os::raw::c_char, - pass: *mut ::std::os::raw::c_char, - db: *mut ::std::os::raw::c_char, - port: ::std::os::raw::c_int, - ) -> *mut ::std::os::raw::c_void; -} -extern "C" { - pub fn taos_close(taos: *mut ::std::os::raw::c_void); -} -extern "C" { - pub fn taos_query( - taos: *mut ::std::os::raw::c_void, - sqlstr: *mut ::std::os::raw::c_char, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_use_result(taos: *mut ::std::os::raw::c_void) -> *mut ::std::os::raw::c_void; -} -extern "C" { - pub fn taos_fetch_row(res: *mut ::std::os::raw::c_void) -> *mut *mut ::std::os::raw::c_void; -} -extern "C" { - pub fn taos_result_precision(res: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_free_result(res: *mut ::std::os::raw::c_void); -} -extern "C" { - pub fn taos_field_count(taos: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_num_fields(res: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_affected_rows(taos: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_fetch_fields(res: *mut ::std::os::raw::c_void) -> *mut TAOS_FIELD; -} -extern "C" { - pub fn taos_select_db( - taos: *mut ::std::os::raw::c_void, - db: *mut ::std::os::raw::c_char, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_print_row( - str: *mut ::std::os::raw::c_char, - row: *mut *mut ::std::os::raw::c_void, - fields: *mut TAOS_FIELD, - num_fields: ::std::os::raw::c_int, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_stop_query(res: *mut ::std::os::raw::c_void); -} -extern "C" { - pub fn taos_fetch_block( - res: *mut ::std::os::raw::c_void, - rows: *mut *mut *mut ::std::os::raw::c_void, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_validate_sql( - taos: *mut ::std::os::raw::c_void, - sql: *mut ::std::os::raw::c_char, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_get_server_info(taos: *mut ::std::os::raw::c_void) -> *mut ::std::os::raw::c_char; -} -extern "C" { - pub fn taos_get_client_info() -> *mut ::std::os::raw::c_char; -} -extern "C" { - pub fn taos_errstr(taos: *mut ::std::os::raw::c_void) -> *mut ::std::os::raw::c_char; -} -extern "C" { - pub fn taos_errno(taos: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn taos_query_a( - taos: *mut ::std::os::raw::c_void, - sqlstr: *mut ::std::os::raw::c_char, - fp: ::std::option::Option< - unsafe extern "C" 
fn( - param: *mut ::std::os::raw::c_void, - arg1: *mut ::std::os::raw::c_void, - code: ::std::os::raw::c_int, - ), - >, - param: *mut ::std::os::raw::c_void, - ); -} -extern "C" { - pub fn taos_fetch_rows_a( - res: *mut ::std::os::raw::c_void, - fp: ::std::option::Option< - unsafe extern "C" fn( - param: *mut ::std::os::raw::c_void, - arg1: *mut ::std::os::raw::c_void, - numOfRows: ::std::os::raw::c_int, - ), - >, - param: *mut ::std::os::raw::c_void, - ); -} -extern "C" { - pub fn taos_fetch_row_a( - res: *mut ::std::os::raw::c_void, - fp: ::std::option::Option< - unsafe extern "C" fn( - param: *mut ::std::os::raw::c_void, - arg1: *mut ::std::os::raw::c_void, - row: *mut *mut ::std::os::raw::c_void, - ), - >, - param: *mut ::std::os::raw::c_void, - ); -} -extern "C" { - pub fn taos_subscribe( - host: *mut ::std::os::raw::c_char, - user: *mut ::std::os::raw::c_char, - pass: *mut ::std::os::raw::c_char, - db: *mut ::std::os::raw::c_char, - table: *mut ::std::os::raw::c_char, - time: i64, - mseconds: ::std::os::raw::c_int, - ) -> *mut ::std::os::raw::c_void; -} -extern "C" { - pub fn taos_consume(tsub: *mut ::std::os::raw::c_void) -> *mut *mut ::std::os::raw::c_void; -} -extern "C" { - pub fn taos_unsubscribe(tsub: *mut ::std::os::raw::c_void); -} -extern "C" { - pub fn taos_open_stream( - taos: *mut ::std::os::raw::c_void, - sqlstr: *mut ::std::os::raw::c_char, - fp: ::std::option::Option< - unsafe extern "C" fn( - param: *mut ::std::os::raw::c_void, - arg1: *mut ::std::os::raw::c_void, - row: *mut *mut ::std::os::raw::c_void, - ), - >, - stime: i64, - param: *mut ::std::os::raw::c_void, - callback: ::std::option::Option, - ) -> *mut ::std::os::raw::c_void; -} -extern "C" { - pub fn taos_close_stream(tstr: *mut ::std::os::raw::c_void); -} -extern "C" { - pub static mut configDir: [::std::os::raw::c_char; 0usize]; -} diff --git a/tests/examples/rust/src/lib.rs b/tests/examples/rust/src/lib.rs deleted file mode 100644 index fe7216dfd0..0000000000 --- a/tests/examples/rust/src/lib.rs +++ /dev/null @@ -1,10 +0,0 @@ -#![allow(unused)] -#![allow(non_camel_case_types)] - -pub mod subscriber; -pub use subscriber::*; - -pub mod tdengine; -pub use tdengine::*; - -pub mod utils; \ No newline at end of file diff --git a/tests/examples/rust/src/subscriber.rs b/tests/examples/rust/src/subscriber.rs deleted file mode 100644 index 78c6f5cd8d..0000000000 --- a/tests/examples/rust/src/subscriber.rs +++ /dev/null @@ -1,77 +0,0 @@ -#![allow(non_camel_case_types)] -#![allow(non_snake_case)] - -#[path = "utils.rs"] -mod utils; -use utils::*; -use utils::bindings::*; - -use std::os::raw::{c_void, c_char, c_int, c_long}; - -pub struct Subscriber { - tsub: *mut c_void, - fields: *mut taosField, - fcount: c_int, -} - -impl Subscriber { - pub fn new(host: &str, - username: &str, - passwd: &str, - db: &str, - table:&str, - time: i64, - mseconds: i32 - ) -> Result { - unsafe { - let mut tsub = taos_subscribe(str_into_raw(host), - str_into_raw(username), - str_into_raw(passwd), - str_into_raw(db), - str_into_raw(table), - time as c_long, - mseconds as c_int); - if tsub.is_null() { - return Err("subscribe error") - } - println!("subscribed to {} user:{}, db:{}, tb:{}, time:{}, mseconds:{}", - host, username, db, table, time, mseconds); - - let mut fields = taos_fetch_fields(tsub); - if fields.is_null() { - taos_unsubscribe(tsub); - return Err("fetch fields error") - } - - let fcount = taos_field_count(tsub); - if fcount == 0 { - taos_unsubscribe(tsub); - return Err("fields count is 0") - } - - Ok(Subscriber{tsub, fields, 
fcount}) - } - } - - pub fn consume(self: &Subscriber) -> Result { - unsafe { - let taosRow = taos_consume(self.tsub); - if taosRow.is_null() { - return Err("consume error") - } - let taosRow= std::slice::from_raw_parts(taosRow, self.fcount as usize); - let row = raw_into_row(self.fields, self.fcount, &taosRow); - Ok(row) - } - } - - pub fn print_row(self: &Subscriber, row: &Row) { - println!("{}", format_row(row)); - } -} - -impl Drop for Subscriber { - fn drop(&mut self) { - unsafe {taos_unsubscribe(self.tsub);} - } -} diff --git a/tests/examples/rust/src/tdengine.rs b/tests/examples/rust/src/tdengine.rs deleted file mode 100644 index 41225d52e0..0000000000 --- a/tests/examples/rust/src/tdengine.rs +++ /dev/null @@ -1,65 +0,0 @@ -#[path = "bindings.rs"] -mod bindings; -use bindings::*; - -#[path = "utils.rs"] -mod utils; -use utils::*; - -use std::os::raw::c_void; -use std::os::raw::c_char; -use std::os::raw::c_int; -use std::os::raw::c_long; - -pub struct Tdengine { - conn: *mut c_void, -} - -/// - **TODO**: doc -impl Tdengine { - - //! - **TODO**: implement default param. - //! - //! > refer to https://stackoverflow.com/questions/24047686/default-function-arguments-in-rust - pub fn new(ip: &str, username: &str, passwd: &str, db: &str, port: i32) -> Result { - unsafe { - taos_init(); - let mut conn = taos_connect(str_into_raw(ip), - str_into_raw(username), - str_into_raw(passwd), - str_into_raw(db), - port as c_int); - if conn.is_null() { - Err("connect error") - } else { - println!("connected to {}:{} user:{}, db:{}", ip, port, username, db); - Ok(Tdengine {conn}) - } - } - } - - // - **TODO**: check error code - pub fn query(self: &Tdengine, s: &str) { - unsafe { - if taos_query(self.conn, str_into_raw(s)) == 0 { - println!("query '{}' ok", s); - } else { - println!("query '{}' error: {}", s, raw_into_str(taos_errstr(self.conn))); - } - } - } -} - -impl Drop for Tdengine { - fn drop(&mut self) { - unsafe {taos_close(self.conn);} - } -} - -#[cfg(test)] -mod tests { - #[test] - fn it_works() { - assert_eq!(2 + 2, 4); - } -} \ No newline at end of file diff --git a/tests/examples/rust/src/utils.rs b/tests/examples/rust/src/utils.rs deleted file mode 100644 index 2875507275..0000000000 --- a/tests/examples/rust/src/utils.rs +++ /dev/null @@ -1,127 +0,0 @@ -#[path = "bindings.rs"] -pub mod bindings; -use bindings::*; - -use std::fmt; -use std::fmt::Display; -use std::os::raw::{c_void, c_char, c_int}; -use std::ffi::{CString, CStr}; - -// #[derive(Debug)] -pub enum Field { - tinyInt(i8), - smallInt(i16), - normalInt(i32), - bigInt(i64), - float(f32), - double(f64), - binary(String), - timeStamp(i64), - boolType(bool), -} - - -impl fmt::Display for Field { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match &*self { - Field::tinyInt(v) => write!(f, "{}", v), - Field::smallInt(v) => write!(f, "{}", v), - Field::normalInt(v) => write!(f, "{}", v), - Field::bigInt(v) => write!(f, "{}", v), - Field::float(v) => write!(f, "{}", v), - Field::double(v) => write!(f, "{}", v), - Field::binary(v) => write!(f, "{}", v), - Field::tinyInt(v) => write!(f, "{}", v), - Field::timeStamp(v) => write!(f, "{}", v), - Field::boolType(v) => write!(f, "{}", v), - } - } -} - -// pub type Fields = Vec; -pub type Row = Vec; - -pub fn format_row(row: &Row) -> String { - let mut s = String::new(); - for field in row { - s.push_str(format!("{} ", field).as_str()); - // println!("{}", field); - } - s -} - -pub fn str_into_raw(s: &str) -> *mut c_char { - if s.is_empty() { - 0 as *mut c_char - } else { - 
CString::new(s).unwrap().into_raw() - } -} - -pub fn raw_into_str<'a>(raw: *mut c_char) -> &'static str { - unsafe {CStr::from_ptr(raw).to_str().unwrap()} -} - - -pub fn raw_into_field(raw: *mut TAOS_FIELD, fcount: c_int) -> Vec { - let mut fields: Vec = Vec::new(); - - for i in 0..fcount as isize { - fields.push( - taosField { - name: unsafe {(*raw.offset(i as isize))}.name, - bytes: unsafe {(*raw.offset(i as isize))}.bytes, - type_: unsafe {(*raw.offset(i as isize))}.type_, - } - ); - } - - /// TODO: error[E0382]: use of moved value: `fields` - // for field in &fields { - // println!("type: {}, bytes: {}", field.type_, field.bytes); - // } - - fields -} - - pub fn raw_into_row(fields: *mut TAOS_FIELD, fcount: c_int, raw_row: &[*mut c_void]) -> Row { - let mut row: Row= Vec::new(); - let fields = raw_into_field(fields, fcount); - - for (i, field) in fields.iter().enumerate() { - // println!("index: {}, type: {}, bytes: {}", i, field.type_, field.bytes); - unsafe { - match field.type_ as u32 { - TSDB_DATA_TYPE_TINYINT => { - row.push(Field::tinyInt(*(raw_row[i] as *mut i8))); - } - TSDB_DATA_TYPE_SMALLINT => { - row.push(Field::smallInt(*(raw_row[i] as *mut i16))); - } - TSDB_DATA_TYPE_INT => { - row.push(Field::normalInt(*(raw_row[i] as *mut i32))); - } - TSDB_DATA_TYPE_BIGINT => { - row.push(Field::bigInt(*(raw_row[i] as *mut i64))); - } - TSDB_DATA_TYPE_FLOAT => { - row.push(Field::float(*(raw_row[i] as *mut f32))); - } - TSDB_DATA_TYPE_DOUBLE => { - row.push(Field::double(*(raw_row[i] as *mut f64))); - } - TSDB_DATA_TYPE_BINARY | TSDB_DATA_TYPE_NCHAR => { - // row.push(Field::binary(*(raw_row[i] as *mut f64))); - } - TSDB_DATA_TYPE_TIMESTAMP => { - row.push(Field::timeStamp(*(raw_row[i] as *mut i64))); - } - TSDB_DATA_TYPE_BOOL => { - // row.push(Field::boolType(*(raw_row[i] as *mut i8) as bool)); - } - _ => println!(""), - } - } - } - row - } \ No newline at end of file diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py index 39a4cb48fd..5d1e9a7537 100644 --- a/tests/pytest/concurrent_inquiry.py +++ b/tests/pytest/concurrent_inquiry.py @@ -16,112 +16,202 @@ import sys import json import time import random -# query sql -query_sql = [ -# first supertable -"select count(*) from test.meters ;", -"select count(*) from test.meters where t3 > 2;", -"select count(*) from test.meters where ts <> '2020-05-13 10:00:00.002';", -"select count(*) from test.meters where t7 like 'taos_1%';", -"select count(*) from test.meters where t7 like '_____2';", -"select count(*) from test.meters where t8 like '%思%';", -"select count(*) from test.meters interval(1n) order by ts desc;", -#"select max(c0) from test.meters group by tbname", -"select first(ts) from test.meters where t5 >5000 and t5<5100;", -"select last(ts) from test.meters where t5 >5000 and t5<5100;", -"select last_row(*) from test.meters;", -"select twa(c1) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c1) from test.meters where t5 >5000 and t5<5100;", -"select bottom(c1, 2) from test.t1;", -"select diff(c1) from test.t1;", -"select leastsquares(c1, 1, 1) from test.t1 ;", -"select max(c1) from test.meters where t5 >5000 and t5<5100;", -"select min(c1) from test.meters where t5 >5000 and t5<5100;", -"select c1 + c2 + c1 / c5 + c4 + c2 from test.t1;", -"select percentile(c1, 50) from test.t1;", -"select spread(c1) from test.t1 ;", -"select stddev(c1) from test.t1;", -"select sum(c1) from test.meters where t5 >5000 and t5<5100;", -"select top(c1, 2) from test.meters where t5 >5000 and 
t5<5100;" -"select twa(c4) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c4) from test.meters where t5 >5000 and t5<5100;", -"select bottom(c4, 2) from test.t1 where t5 >5000 and t5<5100;", -"select diff(c4) from test.t1 where t5 >5000 and t5<5100;", -"select leastsquares(c4, 1, 1) from test.t1 ;", -"select max(c4) from test.meters where t5 >5000 and t5<5100;", -"select min(c4) from test.meters where t5 >5000 and t5<5100;", -"select c5 + c2 + c4 / c5 + c4 + c2 from test.t1 ;", -"select percentile(c5, 50) from test.t1;", -"select spread(c5) from test.t1 ;", -"select stddev(c5) from test.t1 where t5 >5000 and t5<5100;", -"select sum(c5) from test.meters where t5 >5000 and t5<5100;", -"select top(c5, 2) from test.meters where t5 >5000 and t5<5100;", -#all vnode -"select count(*) from test.meters where t5 >5000 and t5<5100", -"select max(c0),avg(c1) from test.meters where t5 >5000 and t5<5100", -"select sum(c5),avg(c1) from test.meters where t5 >5000 and t5<5100", -"select max(c0),min(c5) from test.meters where t5 >5000 and t5<5100", -"select min(c0),avg(c5) from test.meters where t5 >5000 and t5<5100", -# second supertable -"select count(*) from test.meters1 where t3 > 2;", -"select count(*) from test.meters1 where ts <> '2020-05-13 10:00:00.002';", -"select count(*) from test.meters where t7 like 'taos_1%';", -"select count(*) from test.meters where t7 like '_____2';", -"select count(*) from test.meters where t8 like '%思%';", -"select count(*) from test.meters1 interval(1n) order by ts desc;", -#"select max(c0) from test.meters1 group by tbname", -"select first(ts) from test.meters1 where t5 >5000 and t5<5100;", -"select last(ts) from test.meters1 where t5 >5000 and t5<5100;", -"select last_row(*) from test.meters1 ;", -"select twa(c1) from test.m1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select bottom(c1, 2) from test.m1 where t5 >5000 and t5<5100;", -"select diff(c1) from test.m1 ;", -"select leastsquares(c1, 1, 1) from test.m1 ;", -"select max(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select min(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select c1 + c2 + c1 / c0 + c2 from test.m1 ;", -"select percentile(c1, 50) from test.m1;", -"select spread(c1) from test.m1 ;", -"select stddev(c1) from test.m1;", -"select sum(c1) from test.meters1 where t5 >5000 and t5<5100;", -"select top(c1, 2) from test.meters1 where t5 >5000 and t5<5100;", -"select twa(c5) from test.m1 where ts > 1500000001000 and ts < 1500000101000" , -"select avg(c5) from test.meters1 where t5 >5000 and t5<5100;", -"select bottom(c5, 2) from test.m1;", -"select diff(c5) from test.m1;", -"select leastsquares(c5, 1, 1) from test.m1 ;", -"select max(c5) from test.meters1 where t5 >5000 and t5<5100;", -"select min(c5) from test.meters1 where t5 >5000 and t5<5100;", -"select c5 + c2 + c4 / c5 + c0 from test.m1;", -"select percentile(c4, 50) from test.m1;", -"select spread(c4) from test.m1 ;", -"select stddev(c4) from test.m1;", -"select sum(c4) from test.meters1 where t5 >5100 and t5<5300;", -"select top(c4, 2) from test.meters1 where t5 >5100 and t5<5300;", -"select count(*) from test.meters1 where t5 >5100 and t5<5300", -#all vnode -"select count(*) from test.meters1 where t5 >5100 and t5<5300", -"select max(c0),avg(c1) from test.meters1 where t5 >5000 and t5<5100", -"select sum(c5),avg(c1) from test.meters1 where t5 >5000 and t5<5100", -"select max(c0),min(c5) from test.meters1 where t5 >5000 and 
t5<5100", -"select min(c0),avg(c5) from test.meters1 where t5 >5000 and t5<5100", -#join -# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t5 = meters1.t5", -# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t7 = meters1.t7", -# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8", -# "select meters.ts,meters1.c2 from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8" +import requests +from requests.auth import HTTPBasicAuth +func_list=['avg','count','twa','sum','stddev','leastsquares','min', +'max','first','last','top','bottom','percentile','apercentile', +'last_row','diff','spread'] +condition_list=[ + "where _c0 > now -10d ", + 'interval(10s)', + 'limit 10', + 'group by', + 'order by', + 'fill(null)' + ] - +where_list = ['_c0>now-10d',' <50'," like \'%a%\'"] class ConcurrentInquiry: - def initConnection(self): - self.numOfTherads = 50 + def __init__(self,n_Therads=25,r_Therads=25): + self.n_numOfTherads = n_Therads + self.r_numOfTherads = r_Therads self.ts=1500000001000 - + self.dbname='test' + self.stb_list=[] + self.subtb_list=[] + self.stb_stru_list=[] + self.subtb_stru_list=[] + self.stb_tag_list=[] + self.subtb_tag_list=[] + def SetThreadsNum(self,num): self.numOfTherads=num - def query_thread(self,threadID): - host = "10.211.55.14" + + def ret_fcol(self,cl,sql): #返回结果的第一列 + cl.execute(sql) + fcol_list=[] + for data in cl: + fcol_list.append(data[0]) + return fcol_list + + def r_stb_list(self,cl): #返回超级表列表 + sql='show '+self.dbname+'.stables' + self.stb_list=self.ret_fcol(cl,sql) + + def r_subtb_list(self,cl,stablename): #每个超级表返回2个子表 + sql='select tbname from '+self.dbname+'.'+stablename+' limit 2;' + self.subtb_list+=self.ret_fcol(cl,sql) + + def cal_struct(self,cl,tbname): #查看表结构 + tb=[] + tag=[] + sql='describe '+self.dbname+'.'+tbname+';' + cl.execute(sql) + for data in cl: + if data[3]: + tag.append(data[0]) + else: + tb.append(data[0]) + return tb,tag + + def r_stb_stru(self,cl): #获取所有超级表的表结构 + for i in self.stb_list: + tb,tag=self.cal_struct(cl,i) + self.stb_stru_list.append(tb) + self.stb_tag_list.append(tag) + + def r_subtb_stru(self,cl): #返回所有子表的表结构 + for i in self.subtb_list: + tb,tag=self.cal_struct(cl,i) + self.subtb_stru_list.append(tb) + self.subtb_tag_list.append(tag) + + def get_full(self): #获取所有的表、表结构 + host = "127.0.0.1" + user = "root" + password = "taosdata" + conn = taos.connect( + host, + user, + password, + ) + cl = conn.cursor() + self.r_stb_list(cl) + for i in self.stb_list: + self.r_subtb_list(cl,i) + self.r_stb_stru(cl) + self.r_subtb_stru(cl) + cl.close() + conn.close() + + #query condition + def con_where(self,tlist): + l=[] + for i in range(random.randint(0,len(tlist))): + c = random.choice(where_list) + if c == '_c0>now-10d': + l.append(c) + else: + l.append(random.choice(tlist)+c) + return 'where '+random.choice([' and ',' or ']).join(l) + + def con_interval(self,tlist): + return random.choice(['interval(10s)','interval(10d)','interval(1n)']) + + def con_limit(self,tlist): + return random.choice(['limit 10','limit 10 offset 10','slimit 10','slimit 10 offset 10','limit 10 slimit 10','limit 10 offset 5 slimit 5 soffset 10']) + + def con_fill(self,tlist): + return random.choice(['fill(null)','fill(prev)','fill(none)','fill(LINEAR)']) + + def con_group(self,tlist): + return 'group by '+random.choice(tlist) + + def con_order(self,tlist): + return 'order by '+random.choice(tlist) + + def gen_query_sql(self): #生成查询语句 + 
tbi=random.randint(0,len(self.subtb_list)+len(self.stb_list)) # randomly decide which table to query + tbname='' + col_list=[] + tag_list=[] + is_stb=0 + if tbi>len(self.stb_list) : + tbi=tbi-len(self.stb_list) + tbname=self.subtb_list[tbi-1] + col_list=self.subtb_stru_list[tbi-1] + tag_list=self.subtb_tag_list[tbi-1] + else: + tbname=self.stb_list[tbi-1] + col_list=self.stb_stru_list[tbi-1] + tag_list=self.stb_tag_list[tbi-1] + is_stb=1 + tlist=col_list+tag_list + con_rand=random.randint(0,len(condition_list)) + func_rand=random.randint(0,len(func_list)) + col_rand=random.randint(0,len(col_list)) + tag_rand=random.randint(0,len(tag_list)) + t_rand=random.randint(0,len(tlist)) + sql='select ' #select + random.shuffle(col_list) + random.shuffle(func_list) + sel_col_list=[] + col_rand=random.randint(0,len(col_list)) + for i,j in zip(col_list[0:col_rand],func_list): # decide the function applied to each selected column + if j == 'leastsquares': + sel_col_list.append(j+'('+i+',1,1)') + elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile': + sel_col_list.append(j+'('+i+',1)') + else: + sel_col_list.append(j+'('+i+')') + sql=sql+','.join(sel_col_list)+' from '+random.choice(self.stb_list+self.subtb_list)+' ' #select col & func + con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill] + sel_con=random.sample(con_func,random.randint(0,len(con_func))) + sel_con_list=[] + for i in sel_con: + sel_con_list.append(i(tlist)) # call the chosen condition generators + sql+=' '.join(sel_con_list) # condition + print(sql) + return sql + + def rest_query(self,sql): # REST interface + host = "127.0.0.1" + user = "root" + password = "taosdata" + port =6041 + url = "http://{}:{}/rest/sql".format(host, port ) + try: + r = requests.post(url, + data = 'use test', + auth = HTTPBasicAuth('root', 'taosdata')) + r = requests.post(url, + data = sql, + auth = HTTPBasicAuth('root', 'taosdata')) + except: + print("REST API Failure (TODO: more info here)") + raise + rj = r.json() + if ('status' not in rj): + raise RuntimeError("No status in REST response") + + if rj['status'] == 'error': # clearly reported error + if ('code' not in rj): # error without code + raise RuntimeError("REST error return without code") + errno = rj['code'] # May need to massage this in the future + # print("Raising programming error with REST return: {}".format(rj)) + raise taos.error.ProgrammingError( + rj['desc'], errno) # todo: check existence of 'desc' + + if rj['status'] != 'succ': # better be this + raise RuntimeError( + "Unexpected REST return status: {}".format( + rj['status'])) + + nRows = rj['rows'] if ('rows' in rj) else 0 + return nRows + + def query_thread_n(self,threadID): # query via the native Python connector + host = "127.0.0.1" user = "root" password = "taosdata" conn = taos.connect( @@ -135,35 +225,59 @@ class ConcurrentInquiry: print("Thread %d: starting" % threadID) while True: - ran_query_sql=query_sql - random.shuffle(ran_query_sql) - for i in ran_query_sql: - print("Thread %d : %s"% (threadID,i)) + try: + sql=self.gen_query_sql() + print("sql is ",sql) start = time.time() - cl.execute(i) - cl.fetchall + cl.execute(sql) + cl.fetchall() end = time.time() print("time cost :",end-start) except Exception as e: print( "Failure thread%d, sql: %s,exception: %s" % - (threadID, str(i),str(e))) - exit(-1) + (threadID, str(sql),str(e))) + #exit(-1) - print("Thread %d: finishing" % threadID) + print("Thread %d: finishing" % threadID) - + def query_thread_r(self,threadID): # query via the REST interface + print("Thread %d: starting" % threadID) + while True: + try: + sql=self.gen_query_sql() + print("sql is 
",sql) + start = time.time() + self.rest_query(sql) + end = time.time() + print("time cost :",end-start) + except Exception as e: + print( + "Failure thread%d, sql: %s,exception: %s" % + (threadID, str(sql),str(e))) + #exit(-1) + + + print("Thread %d: finishing" % threadID) def run(self): - + print(self.n_numOfTherads,self.r_numOfTherads) threads = [] - for i in range(self.numOfTherads): - thread = threading.Thread(target=self.query_thread, args=(i,)) + for i in range(self.n_numOfTherads): + thread = threading.Thread(target=self.query_thread_n, args=(i,)) threads.append(thread) thread.start() - -q = ConcurrentInquiry() -q.initConnection() + for i in range(self.r_numOfTherads): + # for i in range(1): + thread = threading.Thread(target=self.query_thread_r, args=(i,)) + threads.append(thread) + thread.start() +if len(sys.argv)>1: + q = ConcurrentInquiry(n_Therads=sys.argv[1],r_Therads=sys.argv[2]) +else: + q = ConcurrentInquiry() +q.get_full() +#q.gen_query_sql() q.run() diff --git a/tests/pytest/cq.py b/tests/pytest/cq.py new file mode 100644 index 0000000000..7778969619 --- /dev/null +++ b/tests/pytest/cq.py @@ -0,0 +1,169 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import threading +import taos +import sys +import json +import time +import random +# query sql +query_sql = [ +# first supertable +"select count(*) from test.meters ;", +"select count(*) from test.meters where t3 > 2;", +"select count(*) from test.meters where ts <> '2020-05-13 10:00:00.002';", +"select count(*) from test.meters where t7 like 'taos_1%';", +"select count(*) from test.meters where t7 like '_____2';", +"select count(*) from test.meters where t8 like '%思%';", +"select count(*) from test.meters interval(1n) order by ts desc;", +#"select max(c0) from test.meters group by tbname", +"select first(ts) from test.meters where t5 >5000 and t5<5100;", +"select last(ts) from test.meters where t5 >5000 and t5<5100;", +"select last_row(*) from test.meters;", +"select twa(c1) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , +"select avg(c1) from test.meters where t5 >5000 and t5<5100;", +"select bottom(c1, 2) from test.t1;", +"select diff(c1) from test.t1;", +"select leastsquares(c1, 1, 1) from test.t1 ;", +"select max(c1) from test.meters where t5 >5000 and t5<5100;", +"select min(c1) from test.meters where t5 >5000 and t5<5100;", +"select c1 + c2 + c1 / c5 + c4 + c2 from test.t1;", +"select percentile(c1, 50) from test.t1;", +"select spread(c1) from test.t1 ;", +"select stddev(c1) from test.t1;", +"select sum(c1) from test.meters where t5 >5000 and t5<5100;", +"select top(c1, 2) from test.meters where t5 >5000 and t5<5100;" +"select twa(c4) from test.t1 where ts > 1500000001000 and ts < 1500000101000" , +"select avg(c4) from test.meters where t5 >5000 and t5<5100;", +"select bottom(c4, 2) from test.t1 where t5 >5000 and t5<5100;", +"select diff(c4) from test.t1 where t5 >5000 and t5<5100;", +"select leastsquares(c4, 1, 1) from test.t1 ;", +"select max(c4) from test.meters where t5 >5000 and t5<5100;", +"select min(c4) from test.meters where t5 >5000 and 
t5<5100;", +"select c5 + c2 + c4 / c5 + c4 + c2 from test.t1 ;", +"select percentile(c5, 50) from test.t1;", +"select spread(c5) from test.t1 ;", +"select stddev(c5) from test.t1 where t5 >5000 and t5<5100;", +"select sum(c5) from test.meters where t5 >5000 and t5<5100;", +"select top(c5, 2) from test.meters where t5 >5000 and t5<5100;", +#all vnode +"select count(*) from test.meters where t5 >5000 and t5<5100", +"select max(c0),avg(c1) from test.meters where t5 >5000 and t5<5100", +"select sum(c5),avg(c1) from test.meters where t5 >5000 and t5<5100", +"select max(c0),min(c5) from test.meters where t5 >5000 and t5<5100", +"select min(c0),avg(c5) from test.meters where t5 >5000 and t5<5100", +# second supertable +"select count(*) from test.meters1 where t3 > 2;", +"select count(*) from test.meters1 where ts <> '2020-05-13 10:00:00.002';", +"select count(*) from test.meters where t7 like 'taos_1%';", +"select count(*) from test.meters where t7 like '_____2';", +"select count(*) from test.meters where t8 like '%思%';", +"select count(*) from test.meters1 interval(1n) order by ts desc;", +#"select max(c0) from test.meters1 group by tbname", +"select first(ts) from test.meters1 where t5 >5000 and t5<5100;", +"select last(ts) from test.meters1 where t5 >5000 and t5<5100;", +"select last_row(*) from test.meters1 ;", +"select twa(c1) from test.m1 where ts > 1500000001000 and ts < 1500000101000" , +"select avg(c1) from test.meters1 where t5 >5000 and t5<5100;", +"select bottom(c1, 2) from test.m1 where t5 >5000 and t5<5100;", +"select diff(c1) from test.m1 ;", +"select leastsquares(c1, 1, 1) from test.m1 ;", +"select max(c1) from test.meters1 where t5 >5000 and t5<5100;", +"select min(c1) from test.meters1 where t5 >5000 and t5<5100;", +"select c1 + c2 + c1 / c0 + c2 from test.m1 ;", +"select percentile(c1, 50) from test.m1;", +"select spread(c1) from test.m1 ;", +"select stddev(c1) from test.m1;", +"select sum(c1) from test.meters1 where t5 >5000 and t5<5100;", +"select top(c1, 2) from test.meters1 where t5 >5000 and t5<5100;", +"select twa(c5) from test.m1 where ts > 1500000001000 and ts < 1500000101000" , +"select avg(c5) from test.meters1 where t5 >5000 and t5<5100;", +"select bottom(c5, 2) from test.m1;", +"select diff(c5) from test.m1;", +"select leastsquares(c5, 1, 1) from test.m1 ;", +"select max(c5) from test.meters1 where t5 >5000 and t5<5100;", +"select min(c5) from test.meters1 where t5 >5000 and t5<5100;", +"select c5 + c2 + c4 / c5 + c0 from test.m1;", +"select percentile(c4, 50) from test.m1;", +"select spread(c4) from test.m1 ;", +"select stddev(c4) from test.m1;", +"select sum(c4) from test.meters1 where t5 >5100 and t5<5300;", +"select top(c4, 2) from test.meters1 where t5 >5100 and t5<5300;", +"select count(*) from test.meters1 where t5 >5100 and t5<5300", +#all vnode +"select count(*) from test.meters1 where t5 >5100 and t5<5300", +"select max(c0),avg(c1) from test.meters1 where t5 >5000 and t5<5100", +"select sum(c5),avg(c1) from test.meters1 where t5 >5000 and t5<5100", +"select max(c0),min(c5) from test.meters1 where t5 >5000 and t5<5100", +"select min(c0),avg(c5) from test.meters1 where t5 >5000 and t5<5100", +#join +# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t5 = meters1.t5", +# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t7 = meters1.t7", +# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8", +# "select meters.ts,meters1.c2 from meters,meters1 where meters.ts = meters1.ts and 
meters.t8 = meters1.t8" +] + +class ConcurrentInquiry: + def initConnection(self): + self.numOfTherads = 50 + self.ts=1500000001000 + + def SetThreadsNum(self,num): + self.numOfTherads=num + def query_thread(self,threadID): + host = "10.211.55.14" + user = "root" + password = "taosdata" + conn = taos.connect( + host, + user, + password, + ) + cl = conn.cursor() + cl.execute("use test;") + + print("Thread %d: starting" % threadID) + + while True: + ran_query_sql=query_sql + random.shuffle(ran_query_sql) + for i in ran_query_sql: + print("Thread %d : %s"% (threadID,i)) + try: + start = time.time() + cl.execute(i) + cl.fetchall() + end = time.time() + print("time cost :",end-start) + except Exception as e: + print( + "Failure thread%d, sql: %s,exception: %s" % + (threadID, str(i),str(e))) + exit(-1) + + + print("Thread %d: finishing" % threadID) + + + + def run(self): + + threads = [] + for i in range(self.numOfTherads): + thread = threading.Thread(target=self.query_thread, args=(i,)) + threads.append(thread) + thread.start() + +q = ConcurrentInquiry() +q.initConnection() +q.run() diff --git a/tests/pytest/functions/function_arithmetic.py b/tests/pytest/functions/function_arithmetic.py new file mode 100644 index 0000000000..a2249bab88 --- /dev/null +++ b/tests/pytest/functions/function_arithmetic.py @@ -0,0 +1,71 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + tdSql.execute('''create table test(ts timestamp, col1 int, col2 int) tags(loc nchar(20))''') + tdSql.execute("create table test1 using test tags('beijing')") + tdSql.execute("create table test2 using test tags('shanghai')") + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d)" % (self.ts + i, i + 1, i + 1)) + tdSql.execute("insert into test2 values(%d, %d, %d)" % (self.ts + i, i + 1, i + 1)) + + # arithmetic verifacation + tdSql.query("select 0.1 + 0.1 from test") + tdSql.checkRows(self.rowNum * 2) + for i in range(self.rowNum * 2): + tdSql.checkData(0, 0, 0.20000000) + + tdSql.query("select 4 * avg(col1) from test") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 22) + + tdSql.query("select 4 * sum(col1) from test") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 440) + + tdSql.query("select 4 * avg(col1) * sum(col2) from test") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 2420) + + tdSql.query("select 4 * avg(col1) * sum(col2) from test group by loc") + tdSql.checkRows(2) + tdSql.checkData(0, 0, 1210) + tdSql.checkData(1, 0, 1210) + + tdSql.error("select avg(col1 * 2)from test group by loc") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/restfulInsert.py 
b/tests/pytest/insert/restfulInsert.py index a6c9b074e1..9fa1f33a24 100644 --- a/tests/pytest/insert/restfulInsert.py +++ b/tests/pytest/insert/restfulInsert.py @@ -15,25 +15,28 @@ import requests import threading import random import time +import argparse class RestfulInsert: - def init(self): + def __init__(self, host, dbname, threads, tables, records, batchSize, tbNamePerfix, outOfOrder): self.header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='} - self.url = "http://127.0.0.1:6041/rest/sql" + self.url = "http://%s:6041/rest/sql" % host self.ts = 1500000000000 - self.numOfThreads = 20 - self.numOfTables = 10000 - self.recordsPerTable = 10000 - self.batchSize = 1000 - self.tableNamePerfix = 't' + self.dbname = dbname + self.numOfThreads = threads + self.numOfTables = tables + self.recordsPerTable = records + self.batchSize = batchSize + self.tableNamePerfix = tbNamePerfix + self.outOfOrder = outOfOrder def createTable(self, threadID): - tablesPerThread = int (self.numOfTables / self.numOfThreads) + tablesPerThread = int (self.numOfTables / self.numOfThreads) print("create table %d to %d" % (tablesPerThread * threadID, tablesPerThread * (threadID + 1) - 1)) for i in range(tablesPerThread): tableID = threadID * tablesPerThread name = 'beijing' if tableID % 2 == 0 else 'shanghai' - data = "create table test.%s%d using test.meters tags(%d, '%s')" % (self.tableNamePerfix, tableID + i, tableID + i, name) + data = "create table %s.%s%d using %s.meters tags(%d, '%s')" % (self.dbname, self.tableNamePerfix, tableID + i, self.dbname, tableID + i, name) requests.post(self.url, data, headers = self.header) def insertData(self, threadID): @@ -43,17 +46,42 @@ class RestfulInsert: tableID = i + threadID * tablesPerThread start = self.ts for j in range(int(self.recordsPerTable / self.batchSize)): - data = "insert into test.%s%d values" % (self.tableNamePerfix, tableID) + data = "insert into %s.%s%d values" % (self.dbname, self.tableNamePerfix, tableID) + values = [] for k in range(self.batchSize): - data += "(%d, %d, %d, %d)" % (start + j * self.batchSize + k, random.randint(1, 100), random.randint(1, 100), random.randint(1, 100)) + data += "(%d, %d, %d, %d)" % (start + j * self.batchSize + k, random.randint(1, 100), random.randint(1, 100), random.randint(1, 100)) + requests.post(self.url, data, headers = self.header) + + def insertUnlimitedData(self, threadID): + print("thread %d started" % threadID) + tablesPerThread = int (self.numOfTables / self.numOfThreads) + while True: + i = 0 + start = self.ts + + for i in range(tablesPerThread): + tableID = i + threadID * tablesPerThread + + data = "insert into %s.%s%d values" % (self.dbname, self.tableNamePerfix, tableID) + values = [] + for k in range(self.batchSize): + values.append("(%d, %d, %d, %d)" % (start + k, random.randint(1, 100), random.randint(1, 100), random.randint(1, 100))) + + if(self.outOfOrder == False): + for k in range(len(values)): + data += values[k] + else: + random.shuffle(values) + for k in range(len(values)): + data += values[k] requests.post(self.url, data, headers = self.header) def run(self): - data = "drop database if exists test" + data = "drop database if exists %s" % self.dbname requests.post(self.url, data, headers = self.header) - data = "create database test" + data = "create database %s" % self.dbname requests.post(self.url, data, headers = self.header) - data = "create table test.meters(ts timestamp, f1 int, f2 int, f3 int) tags(id int, loc nchar(20))" + data = "create table %s.meters(ts timestamp, 
f1 int, f2 int, f3 int) tags(id int, loc nchar(20))" % self.dbname requests.post(self.url, data, headers = self.header) threads = [] @@ -70,7 +98,10 @@ class RestfulInsert: threads = [] startTime = time.time() for i in range(self.numOfThreads): - thread = threading.Thread(target=self.insertData, args=(i,)) + if(self.recordsPerTable != -1): + thread = threading.Thread(target=self.insertData, args=(i,)) + else: + thread = threading.Thread(target=self.insertUnlimitedData, args=(i,)) thread.start() threads.append(thread) @@ -78,6 +109,62 @@ class RestfulInsert: threads[i].join() print("inserting %d records takes %d seconds" % (self.numOfTables * self.recordsPerTable, (time.time() - startTime))) -ri = RestfulInsert() -ri.init() +parser = argparse.ArgumentParser() +parser.add_argument( + '-H', + '--host-name', + action='store', + default='127.0.0.1', + type=str, + help='host name to connect to (default: 127.0.0.1)') +parser.add_argument( + '-d', + '--db-name', + action='store', + default='test', + type=str, + help='Database name to be created (default: test)') +parser.add_argument( + '-t', + '--number-of-threads', + action='store', + default=10, + type=int, + help='Number of threads to create tables and insert data (default: 10)') +parser.add_argument( + '-T', + '--number-of-tables', + action='store', + default=1000, + type=int, + help='Number of tables to be created (default: 1000)') +parser.add_argument( + '-r', + '--number-of-records', + action='store', + default=1000, + type=int, + help='Number of records to be created for each table (default: 1000, -1 for unlimited records)') +parser.add_argument( + '-s', + '--batch-size', + action='store', + default=1000, + type=int, + help='Number of records per insert request (default: 1000)') +parser.add_argument( + '-p', + '--table-name-prefix', + action='store', + default='t', + type=str, + help='Prefix of the table names to be created (default: t)') +parser.add_argument( + '-o', + '--out-of-order', + action='store_true', + help='Insert the generated records out of order (default: False)') + +args = parser.parse_args() +ri = RestfulInsert(args.host_name, args.db_name, args.number_of_threads, args.number_of_tables, args.number_of_records, args.batch_size, args.table_name_prefix, args.out_of_order) ri.run() \ No newline at end of file diff --git a/tests/script/general/parser/fill.sim b/tests/script/general/parser/fill.sim index 92aa6a922c..0547db7749 100644 --- a/tests/script/general/parser/fill.sim +++ b/tests/script/general/parser/fill.sim @@ -851,11 +851,201 @@ endi print =====================>td-1442 sql_error select count(*) from m_fl_tb0 interval(1s) fill(prev); -print =============== clear -sql drop database $db -sql show databases -if $rows != 0 then +print =====================> aggregation + arithmetic + fill +#sql select avg(cpu_taosd) - first(cpu_taosd) from dn1 where ts<'2020-11-13 11:00:00' and ts>'2020-11-13 10:50:00' interval(10s) fill(value, 99) +#sql select count(*), first(k), avg(k), avg(k)-first(k) from tm0 where ts>'2020-1-1 1:1:1' and ts<'2020-1-1 1:02:59' interval(10s) fill(value, 99); +#sql select count(*), first(k), avg(k), avg(k)-first(k) from tm0 where ts>'2020-1-1 1:1:1' and ts<'2020-1-1 1:02:59' interval(10s) fill(NULL); + +print =====================> td-2060 +sql create table m1 (ts timestamp, k int ) tags(a int); +sql create table if not exists tm0 using m1 tags(1); +sql insert into tm0 values('2020-1-1 1:1:1', 1); +sql insert into tm0 values('2020-1-1 1:1:2', 2); +sql insert into tm0 values('2020-1-1 1:1:3', 3); +sql insert into tm0 
values('2020-1-1 1:2:4', 4); +sql insert into tm0 values('2020-1-1 1:2:5', 5); +sql insert into tm0 values('2020-1-1 1:2:6', 6); +sql insert into tm0 values('2020-1-1 1:3:7', 7); +sql insert into tm0 values('2020-1-1 1:3:8', 8); +sql insert into tm0 values('2020-1-1 1:3:9', 9); +sql insert into tm0 values('2020-1-1 1:4:10', 10); + +sql select max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85); +if $rows != 8 then return -1 endi +if $data00 != @20-01-01 01:01:00.000@ then + return -1 +endi + +if $data01 != 2.000000000 then + return -1 +endi + +if $data02 != 2.000000000 then + return -1 +endi + +if $data03 != -2.000000000 then + return -1 +endi + +if $data10 != @20-01-01 01:01:10.000@ then + return -1 +endi + +if $data11 != 99.000000000 then + return -1 +endi + +if $data12 != 91.000000000 then + return -1 +endi + +if $data13 != 90.000000000 then + return -1 +endi + +if $data60 != @20-01-01 01:02:00.000@ then + return -1 +endi + +if $data61 != 2.000000000 then + return -1 +endi + +if $data62 != 2.000000000 then + return -1 +endi + +if $data63 != -2.000000000 then + return -1 +endi + +if $data70 != @20-01-01 01:02:10.000@ then + return -1 +endi + +if $data71 != 99.000000000 then + return -1 +endi + +if $data72 != 91.000000000 then + return -1 +endi + +if $data73 != 90.000000000 then + return -1 +endi + +sql select first(k)-avg(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(NULL); +if $rows != 8 then + return -1 +endi + +if $data00 != @20-01-01 01:01:00.000@ then + return -1 +endi + +if $data01 != -1.000000000 then + return -1 +endi + +if $data02 != -2.000000000 then + return -1 +endi + +if $data10 != @20-01-01 01:01:10.000@ then + return -1 +endi + +if $data11 != NULL then + return -1 +endi + +if $data12 != NULL then + return -1 +endi + +sql select max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 4:2:15' interval(500a) fill(value, 99,91,90,89,88,87,86,85) order by ts asc; +if $rows != 21749 then + return -1 +endi + +sql select max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85) order by ts asc; +if $rows != 8 then + return -1 +endi + +if $data00 != @20-01-01 01:01:00.000@ then + return -1 +endi + +if $data01 != 2.000000000 then + return -1 +endi + +if $data02 != 2.000000000 then + return -1 +endi + +if $data03 != -2.000000000 then + return -1 +endi + +if $data04 != 3 then + return -1 +endi + +if $data10 != @20-01-01 01:01:10.000@ then + return -1 +endi + +if $data11 != 99.000000000 then + return -1 +endi + +if $data12 != 91.000000000 then + return -1 +endi + +if $data13 != 90.000000000 then + return -1 +endi + +if $data14 != 89 then + return -1 +endi + +print ==================> td-2115 +sql select count(*), min(c3)-max(c3) from m_fl_mt0 group by tgcol +if $rows != 10 then + return -1 +endi + +if $data00 != 5 then + return -1 +endi + +if $data01 != -4.000000000 then + return -1 +endi + +if $data02 != 0 then + return -1 +endi + +if $data12 != 1 then + return -1 +endi + + +print =============== clear +#sql drop database $db +#sql show databases +#if $rows != 0 then +# return -1 +#endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git 
a/tests/script/general/parser/join_multivnode.sim b/tests/script/general/parser/join_multivnode.sim index 76230f79f0..a5e71260b4 100644 --- a/tests/script/general/parser/join_multivnode.sim +++ b/tests/script/general/parser/join_multivnode.sim @@ -212,4 +212,157 @@ sql select count(join_mt0.c1), first(join_mt0.c1)/count(*), first(join_mt1.c9) f sql select count(join_mt0.c1), first(join_mt0.c1)-last(join_mt0.c1), first(join_mt1.c9) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts;", NULL); sql select last(join_mt0.c1) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts;", NULL); +sql create database disorder_db; +sql use disorder_db; +sql create table m1(ts timestamp, k int) tags(a int); +sql create table tm0 using m1 tags(0); +sql create table tm1 using m1 tags(1); +sql create table tm2 using m1 tags(2); +sql create table tm3 using m1 tags(3); +sql create table tm4 using m1 tags(4); +sql create table tm5 using m1 tags(5); +sql create table tm6 using m1 tags(6); +sql create table tm7 using m1 tags(7); + +sql show vgroups +if $rows != 2 then + print maxTablesPerVnode set to 4 is not active. + return -1 +endi + +sql insert into tm0 values('2020-1-1 1:1:1', 0); +sql insert into tm1 values('2020-1-1 1:1:1', 1); +sql insert into tm2 values('2020-1-1 1:1:1', 2); +sql insert into tm3 values('2020-1-1 1:1:1', 3); +sql insert into tm4 values('2020-1-1 1:1:1', 4); +sql insert into tm5 values('2020-1-1 1:1:1', 5); +sql insert into tm6 values('2020-1-1 1:1:1', 6); +sql insert into tm7 values('2020-1-1 1:1:1', 7); + +sql create table m2(ts timestamp, k int) tags(b int); +sql create table t0 using m2 tags(0); +sql create table t1 using m2 tags(4); +sql create table t2 using m2 tags(92); +sql create table t3 using m2 tags(93); +sql create table t4 using m2 tags(1); +sql create table t5 using m2 tags(5); +sql create table t6 using m2 tags(96); +sql create table t7 using m2 tags(97); + +sql show vgroups +if $rows != 4 then + return -1 +endi + +sql insert into t0 values('2020-1-1 1:1:1', 10); +sql insert into t1 values('2020-1-1 1:1:1', 11); +sql insert into t2 values('2020-1-1 1:1:1', 12); +sql insert into t3 values('2020-1-1 1:1:1', 13); +sql insert into t4 values('2020-1-1 1:1:1', 14); +sql insert into t5 values('2020-1-1 1:1:1', 15); +sql insert into t6 values('2020-1-1 1:1:1', 16); +sql insert into t7 values('2020-1-1 1:1:1', 17); + +sql select m1.ts,m1.tbname,m1.a, m2.ts,m2.tbname,m2.b from m1,m2 where m1.a=m2.b and m1.ts=m2.ts; +if $rows != 4 then + return -1 +endi + +if $data00 != @20-01-01 01:01:01.000@ then + print expect 20-01-01 01:01:01.000, actual:$data00 + return -1 +endi + +if $data01 != @tm0@ then + return -1 +endi + +if $data02 != 0 then + return -1 +endi + +if $data03 != @20-01-01 01:01:01.000@ then + return -1 +endi + +if $data04 != @t0@ then + return -1 +endi + +if $data05 != 0 then + return -1 +endi + +if $data10 != @20-01-01 01:01:01.000@ then + return -1 +endi + +if $data11 != @tm1@ then + return -1 +endi + +if $data12 != 1 then + return -1 +endi + +if $data13 != @20-01-01 01:01:01.000@ then + return -1 +endi + +if $data14 != @t4@ then + return -1 +endi + +if $data15 != 1 then + return -1 +endi + +if $data20 != @20-01-01 01:01:01.000@ then + return -1 +endi + +if $data21 != @tm4@ then + return -1 +endi + +if $data22 != 4 then + return -1 +endi + +if $data23 != @20-01-01 01:01:01.000@ then + return -1 +endi + +if $data24 != @t1@ then + return -1 +endi + +if $data25 != 4 then + return -1 +endi + +if $data30 != @20-01-01 
01:01:01.000@ then + return -1 +endi + +if $data31 != @tm5@ then + return -1 +endi + +if $data32 != 5 then + return -1 +endi + +if $data33 != @20-01-01 01:01:01.000@ then + return -1 +endi + +if $data34 != @t5@ then + return -1 +endi + +if $data35 != 5 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file
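Both REST-based helpers touched by this patch (`rest_query` in concurrent_inquiry.py and the reworked restfulInsert.py) drive the same `/rest/sql` endpoint with HTTP basic auth. Below is a minimal standalone sketch of that access pattern for trying the endpoint by hand; it assumes a local TDengine REST service on 127.0.0.1:6041 with the default root/taosdata credentials used throughout these test scripts, and the `rest_sql` helper plus the `demo_db` database name are illustrative only, not part of the patch.
```
import requests
from requests.auth import HTTPBasicAuth

# Assumed endpoint and credentials: host 127.0.0.1, REST port 6041 and the
# default root/taosdata account, i.e. the values used by the test scripts above.
URL = "http://127.0.0.1:6041/rest/sql"
AUTH = HTTPBasicAuth("root", "taosdata")

def rest_sql(sql):
    """POST a single SQL statement to /rest/sql and return the decoded JSON body."""
    r = requests.post(URL, data=sql, auth=AUTH)
    rj = r.json()
    # The scripts above treat status == 'error' as a reported failure and anything
    # other than 'succ' as unexpected; collapse both cases into an exception here.
    if rj.get("status") != "succ":
        raise RuntimeError("REST request failed: {}".format(rj))
    return rj

if __name__ == "__main__":
    rest_sql("create database if not exists demo_db")
    rest_sql("create table if not exists demo_db.m1 (ts timestamp, k int)")
    rest_sql("insert into demo_db.m1 values (now, 1)")
    print(rest_sql("select count(*) from demo_db.m1").get("rows", 0))
```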