Merge branch 'develop' into feature/changshuaiqiang/testcase

* develop: (21 commits)
  [td-225] fix bugs in multi-vnode projection query.
  support GCC 4.8
  [td-225]
  fix failed test cases
  [TD-161]
  [TD-161]
  [TD-161]
  avoid a potential bug
  tsdb notify the vnode about it status changes
  [td-225] refactor hash function
  [td-225] fix bugs for leastsquares query.
  [td-225] fix bugs at client
  [td-225] fix bugs for 256 columns data check
  use tbname in function tbid()
  single table subscribe is done
  super table subscribe is done
  build VgroupTableInfo
  fix typo
  add taosArrayClear
  enhance exception handling
  ...
changshuaiqiang committed 2020-05-13 17:04:05 +08:00
commit 637e1ef3c9
48 changed files with 1072 additions and 858 deletions

View File

@ -203,6 +203,29 @@ matrix:
;;
esac
- os: linux
dist: trusty
language: c
git:
- depth: 1
addons:
apt:
packages:
- build-essential
- cmake
env:
- DESC="trusty/gcc-4.8 build"
before_script:
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
script:
- cmake .. > /dev/null
- make > /dev/null
- os: linux
language: c
compiler: clang

View File

@ -116,7 +116,7 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF
* create local reducer to launch the second-stage reduce process at client site
*/
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalModel, SSqlCmd *pSqlCmd, SSqlRes *pRes);
SColumnModel *finalModel, SSqlObj* pSql);
void tscDestroyLocalReducer(SSqlObj *pSql);

View File

@ -31,13 +31,13 @@ extern "C" {
#include "tscSecondaryMerge.h"
#include "tsclient.h"
#define UTIL_TABLE_IS_SUPERTABLE(metaInfo) \
#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_SUPER_TABLE))
#define UTIL_TABLE_IS_CHILD_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE))
#define UTIL_TABLE_IS_NOMRAL_TABLE(metaInfo)\
(!(UTIL_TABLE_IS_SUPERTABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
#define TSDB_COL_IS_TAG(f) (((f)&TSDB_COL_TAG) != 0)
@ -183,7 +183,7 @@ void tscSqlExprInfoDestroy(SArray* pExprInfo);
SColumn* tscColumnClone(const SColumn* src);
SColumn* tscColumnListInsert(SArray* pColList, SColumnIndex* colIndex);
void tscColumnListCopy(SArray* dst, const SArray* src, int16_t tableIndex);
SArray* tscColumnListClone(const SArray* src, int16_t tableIndex);
void tscColumnListDestroy(SArray* pColList);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
@ -265,6 +265,10 @@ void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp);
void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)());
void* malloc_throw(size_t size);
void* calloc_throw(size_t nmemb, size_t size);
char* strdup_throw(const char* str);
#ifdef __cplusplus
}
#endif

View File

@ -438,6 +438,9 @@ extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo);
typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int numOfRows);
int32_t tscCompareTidTags(const void* p1, const void* p2);
void tscBuildVgroupTableInfo(STableMetaInfo* pTableMetaInfo, SArray* tables);
#ifdef __cplusplus
}
#endif

View File

@ -145,7 +145,7 @@ static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) {
return;
}
// local reducer has handle this situation during super table non-projection query.
// local merge has handle this situation during super table non-projection query.
if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE) {
pRes->numOfTotalInCurrentClause += pRes->numOfRows;
}
@ -222,9 +222,30 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi
tscResetForNextRetrieve(pRes);
// handle the sub queries of join query
if (pCmd->command == TSDB_SQL_METRIC_JOIN_RETRIEVE) {
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscFetchDatablockFromSubquery(pSql);
} else if (pRes->completed && pCmd->command == TSDB_SQL_FETCH) {
if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
tscTryQueryNextVnode(pSql, tscAsyncQueryRowsForNextVnode);
return;
} else {
/*
* all available virtual node has been checked already, now we need to check
* for the next subclause queries
*/
if (pCmd->clauseIndex < pCmd->numOfClause - 1) {
tscTryQueryNextClause(pSql, tscAsyncQueryRowsForNextVnode);
return;
}
/*
* 1. has reach the limitation
* 2. no remain virtual nodes to be retrieved anymore
*/
(*pSql->fetchFp)(param, pSql, 0);
}
return;
} else { // current query is not completed, continue retrieve from node
if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
@ -425,6 +446,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) == TSDB_QUERY_TYPE_STABLE_SUBQUERY) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pTableMetaInfo->pTableMeta == NULL){
code = tscGetTableMeta(pSql, pTableMetaInfo);
assert(code == TSDB_CODE_SUCCESS);
}
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0) && pTableMetaInfo->vgroupIndex >= 0 && pSql->param != NULL);
SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param;
@ -435,11 +461,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
tscTrace("%p get metricMeta during super table query successfully", pSql);
code = tscGetTableMeta(pSql, pTableMetaInfo);
pRes->code = code;
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
code = tscGetSTableVgroupInfo(pSql, 0);
pRes->code = code;
@ -471,7 +492,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
if (code == TSDB_CODE_SUCCESS && UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (code == TSDB_CODE_SUCCESS && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
code = tscGetSTableVgroupInfo(pSql, pCmd->clauseIndex);
pRes->code = code;

View File

@ -2904,7 +2904,11 @@ static void leastsquares_finalizer(SQLFunctionCtx *pCtx) {
param[1][2] /= param[1][1];
sprintf(pCtx->aOutputBuf, "(%lf, %lf)", param[0][2], param[1][2]);
int32_t maxOutputSize = TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE - VARSTR_HEADER_SIZE;
size_t n = snprintf(varDataVal(pCtx->aOutputBuf), maxOutputSize, "{slop:%.6lf, intercept:%.6lf}",
param[0][2], param[1][2]);
varDataSetLen(pCtx->aOutputBuf, n);
doFinalizer(pCtx);
}
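Note: the new output path stores the leastsquares result as a var-length string: snprintf() writes the payload just past a small length header, and varDataSetLen() records the payload size in that header. A minimal sketch of that layout, assuming the header is only the int16 length field (VarDataLenT) added in the taosdef.h hunk further down; the macro bodies below are illustrative stand-ins, not copies of the repository's definitions.

#include <stdint.h>
#include <stdio.h>

typedef int16_t VarDataLenT;                        /* matches the typedef visible in taosdef.h   */
#define VARSTR_HEADER_SIZE   sizeof(VarDataLenT)    /* assumption: the header is only the length  */
#define varDataLen(v)        (*(VarDataLenT *)(v))
#define varDataVal(v)        ((char *)(v) + VARSTR_HEADER_SIZE)
#define varDataSetLen(v, n)  (varDataLen(v) = (VarDataLenT)(n))

void writeResultDemo(char *outputBuf, int32_t bufSize, double slop, double intercept) {
  int32_t maxOutputSize = bufSize - (int32_t)VARSTR_HEADER_SIZE;
  size_t n = snprintf(varDataVal(outputBuf), maxOutputSize,
                      "{slop:%.6lf, intercept:%.6lf}", slop, intercept);
  varDataSetLen(outputBuf, n);                      /* payload length, header excluded            */
}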

View File

@ -122,7 +122,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
int32_t numOfRows = tscGetNumOfColumns(pMeta);
int32_t totalNumOfRows = numOfRows + tscGetNumOfTags(pMeta);
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
numOfRows = numOfRows + tscGetNumOfTags(pMeta);
}
@ -161,7 +161,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
}
}
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
return 0;
}

View File

@ -796,7 +796,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
return code;
}
if (!UTIL_TABLE_IS_SUPERTABLE(pSTableMeterMetaInfo)) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pSTableMeterMetaInfo)) {
return tscInvalidSQLErrMsg(pCmd->payload, "create table only from super table is allowed", sToken.z);
}
@ -1097,7 +1097,7 @@ int doParseInsertSql(SSqlObj *pSql, char *str) {
goto _error_clean; // TODO: should _clean or _error_clean to async flow ????
}
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
code = tscInvalidSQLErrMsg(pCmd->payload, "insert data into super table is not supported", NULL);
goto _error_clean;
}

View File

@ -194,7 +194,7 @@ static int normalStmtPrepare(STscStmt* stmt) {
static char* normalStmtBuildSql(STscStmt* stmt) {
SNormalStmt* normal = &stmt->normal;
SStringBuilder sb = {0};
SStringBuilder sb; memset(&sb, 0, sizeof(sb));
if (taosStringBuilderSetJmp(&sb) != 0) {
taosStringBuilderDestroy(&sb);
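Note: this hunk, and several later ones (the SStringBuilder locals in the SQL parser, struct sigaction in main, SModule in the dnode code), trade "= {0}" aggregate initializers for either memset() or nested "{{0}}" braces, presumably to silence the missing-braces warning that the GCC 4.8 toolchain this merge adds CI coverage for emits when the first member is itself a struct. A hedged illustration with a made-up struct:

#include <string.h>

struct demo { struct { int n; } inner; int flags; };    /* hypothetical stand-in type              */

void initForms(void) {
  struct demo a = {0};                                  /* may trigger -Wmissing-braces on old GCC */
  struct demo b = {{0}};                                /* explicit nested braces, warning-free    */
  struct demo c; memset(&c, 0, sizeof(c));              /* runtime zero fill, also warning-free    */
  (void)a; (void)b; (void)c;
}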

View File

@ -1297,10 +1297,6 @@ SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t c
pSchema->bytes, functionId == TSDB_FUNC_TAGPRJ);
}
void addRequiredTagColumn(STableMetaInfo* pTableMetaInfo, SColumnIndex* index) {
}
static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSQLExprItem* pItem) {
SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, startPos, pIndex->columnIndex, pIndex->tableIndex);
@ -1352,7 +1348,7 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
numOfTotalColumns = tinfo.numOfColumns + tinfo.numOfTags;
} else {
numOfTotalColumns = tinfo.numOfColumns;
@ -1412,7 +1408,7 @@ int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pI
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) && UTIL_TABLE_IS_NOMRAL_TABLE(pTableMetaInfo)) {
if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) && UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
return invalidSqlErrMsg(pQueryInfo->msg, msg1);
}
@ -1866,7 +1862,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
case TK_TBID: {
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_NOMRAL_TABLE(pTableMetaInfo)) {
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
return invalidSqlErrMsg(pQueryInfo->msg, msg7);
}
@ -2283,7 +2279,7 @@ bool validateIpAddress(const char* ip, size_t size) {
int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pTableMetaInfo->pTableMeta == NULL || !UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (pTableMetaInfo->pTableMeta == NULL || !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
return TSDB_CODE_INVALID_SQL;
}
@ -2322,7 +2318,7 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
/* transfer the field-info back to original input format */
void tscRestoreSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (!UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
return;
}
@ -2546,7 +2542,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd*
}
if (groupTag) {
if (!UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
return invalidSqlErrMsg(pQueryInfo->msg, msg9);
}
@ -3258,7 +3254,7 @@ static bool validateJoinExprNode(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SColum
}
// table to table/ super table to super table are allowed
if (UTIL_TABLE_IS_SUPERTABLE(pLeftMeterMeta) != UTIL_TABLE_IS_SUPERTABLE(pRightMeterMeta)) {
if (UTIL_TABLE_IS_SUPER_TABLE(pLeftMeterMeta) != UTIL_TABLE_IS_SUPER_TABLE(pRightMeterMeta)) {
invalidSqlErrMsg(pQueryInfo->msg, msg5);
return false;
}
@ -3341,7 +3337,7 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
} else if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) ||
index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { // query on tags
// check for tag query condition
if (UTIL_TABLE_IS_NOMRAL_TABLE(pTableMetaInfo)) {
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
return invalidSqlErrMsg(pQueryInfo->msg, msg1);
}
@ -3562,7 +3558,7 @@ static int32_t setTableCondForSTableQuery(SQueryInfo* pQueryInfo, const char* ac
return TSDB_CODE_SUCCESS;
}
SStringBuilder sb1 = {0};
SStringBuilder sb1; memset(&sb1, 0, sizeof(sb1));
taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN);
char db[TSDB_TABLE_ID_LEN] = {0};
@ -3701,7 +3697,7 @@ static int32_t validateJoinExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) { // for stable join, tag columns
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // for stable join, tag columns
// must be present for join
if (pCondExpr->pJoinExpr == NULL) {
return invalidSqlErrMsg(pQueryInfo->msg, msg1);
@ -3739,7 +3735,7 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) {
static void doAddJoinTagsColumnsIntoTagList(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
SColumnIndex index = {0};
getColumnIndexByName(&pCondExpr->pJoinExpr->pLeft->colInfo, pQueryInfo, &index);
@ -3796,6 +3792,8 @@ static int32_t getTagQueryCondExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr,
tSQLExprDestroy(p1);
tExprTreeDestroy(&p, NULL);
taosArrayDestroy(colList);
}
pCondExpr->pTagCond = NULL;
@ -3815,7 +3813,7 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlObj* pSql
pQueryInfo->window.ekey = INT64_MAX;
// tags query condition may be larger than 512bytes, therefore, we need to prepare enough large space
SStringBuilder sb = {0};
SStringBuilder sb; memset(&sb, 0, sizeof(sb));
SCondExpr condExpr = {0};
if ((*pExpr)->pLeft == NULL || (*pExpr)->pRight == NULL) {
@ -4104,7 +4102,7 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) {
}
/* for super table query, set default ascending order for group output */
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
}
}
@ -4130,7 +4128,7 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
*
* for super table query, the order option must be less than 3.
*/
if (UTIL_TABLE_IS_NOMRAL_TABLE(pTableMetaInfo)) {
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
if (pSortorder->nExpr > 1) {
return invalidSqlErrMsg(pQueryInfo->msg, msg0);
}
@ -4151,7 +4149,7 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
SSQLToken columnName = {pVar->nLen, pVar->nType, pVar->pz};
SColumnIndex index = {0};
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) { // super table query
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query
if (getColumnIndexByName(&columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(pQueryInfo->msg, msg1);
}
@ -4304,10 +4302,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN ||
pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
if (UTIL_TABLE_IS_NOMRAL_TABLE(pTableMetaInfo)) {
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
return invalidSqlErrMsg(pQueryInfo->msg, msg3);
}
} else if ((pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) && (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo))) {
} else if ((pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) && (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) {
return invalidSqlErrMsg(pQueryInfo->msg, msg4);
} else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) &&
UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) {
@ -4693,7 +4691,7 @@ int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL*
}
// todo refactor
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
if (!tscQueryTags(pQueryInfo)) { // local handle the super table tag query
if (tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pQueryInfo->slimit.limit > 0 || pQueryInfo->slimit.offset > 0) {
@ -5629,7 +5627,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
return code;
}
bool isSTable = UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo);
bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
if (parseSelectClause(&pSql->cmd, 0, pQuerySql->pSelection, isSTable) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_INVALID_SQL;
}
@ -5773,7 +5771,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
assert(pQueryInfo->numOfTables == pQuerySql->from->nExpr);
bool isSTable = false;
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
isSTable = true;
code = tscGetSTableVgroupInfo(pSql, index);
if (code != TSDB_CODE_SUCCESS) {

View File

@ -55,7 +55,7 @@ int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
}
}
static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pReducer, tOrderDescriptor *pDesc) {
static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDescriptor *pDesc) {
/*
* the fields and offset attributes in pCmd and pModel may be different due to
* merge requirement. So, the final result in pRes structure is formatted in accordance with the pCmd object.
@ -132,16 +132,15 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu
}
}
/*
* todo release allocated memory process with async process
*/
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalmodel, SSqlCmd *pCmd, SSqlRes *pRes) {
// offset of cmd in SSqlObj structure
char *pSqlObjAddr = (char *)pCmd - offsetof(SSqlObj, cmd);
SColumnModel *finalmodel, SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
if (pMemBuffer == NULL) {
tscError("%p pMemBuffer", pMemBuffer);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
tscError("%p pMemBuffer is NULL", pMemBuffer);
pRes->code = TSDB_CODE_APP_ERROR;
return;
}
@ -149,7 +148,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (pDesc->pColumnModel == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
tscError("%p no local buffer or intermediate result format model", pSqlObjAddr);
tscError("%p no local buffer or intermediate result format model", pSql);
pRes->code = TSDB_CODE_APP_ERROR;
return;
}
@ -158,7 +157,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
for (int32_t i = 0; i < numOfBuffer; ++i) {
int32_t len = pMemBuffer[i]->fileMeta.flushoutData.nLength;
if (len == 0) {
tscTrace("%p no data retrieved from orderOfVnode:%d", pSqlObjAddr, i + 1);
tscTrace("%p no data retrieved from orderOfVnode:%d", pSql, i + 1);
continue;
}
@ -167,13 +166,13 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (numOfFlush == 0 || numOfBuffer == 0) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
tscTrace("%p retrieved no data", pSqlObjAddr);
tscTrace("%p retrieved no data", pSql);
return;
}
if (pDesc->pColumnModel->capacity >= pMemBuffer[0]->pageSize) {
tscError("%p Invalid value of buffer capacity %d and page size %d ", pSqlObjAddr, pDesc->pColumnModel->capacity,
tscError("%p Invalid value of buffer capacity %d and page size %d ", pSql, pDesc->pColumnModel->capacity,
pMemBuffer[0]->pageSize);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
@ -181,10 +180,11 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
return;
}
size_t nReducerSize = sizeof(SLocalReducer) + sizeof(void *) * numOfFlush;
SLocalReducer *pReducer = (SLocalReducer *)calloc(1, nReducerSize);
size_t size = sizeof(SLocalReducer) + POINTER_BYTES * numOfFlush;
SLocalReducer *pReducer = (SLocalReducer *) calloc(1, size);
if (pReducer == NULL) {
tscError("%p failed to create merge structure", pSqlObjAddr);
tscError("%p failed to create local merge structure, out of memory", pSql);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
@ -199,48 +199,52 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pReducer->numOfVnode = numOfBuffer;
pReducer->pDesc = pDesc;
tscTrace("%p the number of merged leaves is: %d", pSqlObjAddr, pReducer->numOfBuffer);
tscTrace("%p the number of merged leaves is: %d", pSql, pReducer->numOfBuffer);
int32_t idx = 0;
for (int32_t i = 0; i < numOfBuffer; ++i) {
int32_t numOfFlushoutInFile = pMemBuffer[i]->fileMeta.flushoutData.nLength;
for (int32_t j = 0; j < numOfFlushoutInFile; ++j) {
SLocalDataSource *pDS = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize);
if (pDS == NULL) {
tscError("%p failed to create merge structure", pSqlObjAddr);
SLocalDataSource *ds = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize);
if (ds == NULL) {
tscError("%p failed to create merge structure", pSql);
pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
return;
}
pReducer->pLocalDataSrc[idx] = pDS;
pDS->pMemBuffer = pMemBuffer[i];
pDS->flushoutIdx = j;
pDS->filePage.numOfElems = 0;
pDS->pageId = 0;
pDS->rowIdx = 0;
pReducer->pLocalDataSrc[idx] = ds;
tscTrace("%p load data from disk into memory, orderOfVnode:%d, total:%d", pSqlObjAddr, i + 1, idx + 1);
tExtMemBufferLoadData(pMemBuffer[i], &(pDS->filePage), j, 0);
ds->pMemBuffer = pMemBuffer[i];
ds->flushoutIdx = j;
ds->filePage.numOfElems = 0;
ds->pageId = 0;
ds->rowIdx = 0;
tscTrace("%p load data from disk into memory, orderOfVnode:%d, total:%d", pSql, i + 1, idx + 1);
tExtMemBufferLoadData(pMemBuffer[i], &(ds->filePage), j, 0);
#ifdef _DEBUG_VIEW
printf("load data page into mem for build loser tree: %" PRIu64 " rows\n", pDS->filePage.numOfElems);
printf("load data page into mem for build loser tree: %" PRIu64 " rows\n", ds->filePage.numOfElems);
SSrcColumnInfo colInfo[256] = {0};
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
tscGetSrcColumnInfo(colInfo, pQueryInfo);
tColModelDisplayEx(pDesc->pColumnModel, pDS->filePage.data, pDS->filePage.numOfElems,
tColModelDisplayEx(pDesc->pColumnModel, ds->filePage.data, ds->filePage.numOfElems,
pMemBuffer[0]->numOfElemsPerPage, colInfo);
#endif
if (pDS->filePage.numOfElems == 0) { // no data in this flush
tscTrace("%p flush data is empty, ignore %d flush record", pSqlObjAddr, idx);
tfree(pDS);
if (ds->filePage.numOfElems == 0) { // no data in this flush, the index does not increase
tscTrace("%p flush data is empty, ignore %d flush record", pSql, idx);
tfree(ds);
continue;
}
idx += 1;
}
}
assert(idx >= pReducer->numOfBuffer);
// no data actually, no need to merge result.
if (idx == 0) {
return;
}
@ -262,9 +266,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
// the input data format follows the old format, but output in a new format.
// so, all the input must be parsed as old format
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
pReducer->pCtx = (SQLFunctionCtx *)calloc(size, sizeof(SQLFunctionCtx));
pReducer->pCtx = (SQLFunctionCtx *)calloc(tscSqlExprNumOfExprs(pQueryInfo), sizeof(SQLFunctionCtx));
pReducer->rowSize = pMemBuffer[0]->nElemSize;
tscRestoreSQLFuncForSTableQuery(pQueryInfo);
@ -281,7 +283,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
// used to keep the latest input row
pReducer->pTempBuffer = (tFilePage *)calloc(1, pReducer->rowSize + sizeof(tFilePage));
pReducer->discardData = (tFilePage *)calloc(1, pReducer->rowSize + sizeof(tFilePage));
pReducer->discard = false;
@ -309,11 +310,13 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
return;
}
size = tscSqlExprNumOfExprs(pQueryInfo);
pReducer->pTempBuffer->numOfElems = 0;
pReducer->pResInfo = calloc(size, sizeof(SResultInfo));
tscCreateResPointerInfo(pRes, pQueryInfo);
tscInitSqlContext(pCmd, pRes, pReducer, pDesc);
tscInitSqlContext(pCmd, pReducer, pDesc);
// we change the capacity of schema to denote that there is only one row in temp buffer
pReducer->pDesc->pColumnModel->capacity = 1;
@ -428,8 +431,7 @@ int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePa
tColModelAppend(pModel, pPage, data, numOfRows - remain, numOfWriteElems, numOfRows);
if (pPage->numOfElems == pModel->capacity) {
int32_t ret = tscFlushTmpBuffer(pMemoryBuf, pDesc, pPage, orderType);
if (ret != 0) {
if (tscFlushTmpBuffer(pMemoryBuf, pDesc, pPage, orderType) != TSDB_CODE_SUCCESS) {
return -1;
}
} else {

View File

@ -39,7 +39,7 @@ int (*tscProcessMsgRsp[TSDB_SQL_MAX])(SSqlObj *pSql);
void tscProcessActivityTimer(void *handle, void *tmrId);
int tscKeepConn[TSDB_SQL_MAX] = {0};
TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid);
TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid, TSKEY dflt);
void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts);
void tscSaveSubscriptionProgress(void* sub);
@ -500,7 +500,7 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
// todo valid the vgroupId at the client side
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
int32_t vgIndex = pTableMetaInfo->vgroupIndex;
SVgroupsInfo* pVgroupInfo = pTableMetaInfo->vgroupList;
@ -550,8 +550,7 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
/*
* for meter query, simply return the size <= 1k
* for metric query, estimate size according to meter tags
* for table query, simply return the size <= 1k
*/
static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd, int32_t clauseIndex) {
const static int32_t MIN_QUERY_MSG_PKT_SIZE = TSDB_MAX_BYTES_PER_ROW * 5;
@ -562,25 +561,18 @@ static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd, int32_t clauseIndex) {
size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
int32_t exprSize = sizeof(SSqlFuncMsg) * numOfExprs;
//STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
// table query without tags values
//if (!UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + 4096;
//}
//int32_t size = 4096;
//return size;
}
static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
TSKEY dfltKey = htobe64(pQueryMsg->window.skey);
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
if (UTIL_TABLE_IS_NOMRAL_TABLE(pTableMetaInfo) || pTableMetaInfo->pVgroupTables == NULL) {
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || pTableMetaInfo->pVgroupTables == NULL) {
SCMVgroupInfo* pVgroupInfo = NULL;
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
int32_t index = pTableMetaInfo->vgroupIndex;
assert(index >= 0);
@ -596,7 +588,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
STableIdInfo *pTableIdInfo = (STableIdInfo *)pMsg;
pTableIdInfo->tid = htonl(pTableMeta->sid);
pTableIdInfo->uid = htobe64(pTableMeta->uid);
pTableIdInfo->key = htobe64(tscGetSubscriptionProgress(pSql->pSubscription, pTableMeta->uid));
pTableIdInfo->key = htobe64(tscGetSubscriptionProgress(pSql->pSubscription, pTableMeta->uid, dfltKey));
pQueryMsg->numOfTables = htonl(1); // set the number of tables
@ -624,7 +616,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
STableIdInfo *pTableIdInfo = (STableIdInfo *)pMsg;
pTableIdInfo->tid = htonl(pItem->tid);
pTableIdInfo->uid = htobe64(pItem->uid);
// pTableIdInfo->key = htobe64(tscGetSubscriptionProgress(pSql->pSubscription, pTableMeta->uid));
pTableIdInfo->key = htobe64(tscGetSubscriptionProgress(pSql->pSubscription, pItem->uid, dfltKey));
pMsg += sizeof(STableIdInfo);
}
}
@ -1453,8 +1445,9 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
}
pRes->row = 0;
pRes->completed = (pRes->numOfRows == 0);
uint8_t code = pRes->code;
int32_t code = pRes->code;
if (pRes->code == TSDB_CODE_SUCCESS) {
(*pSql->fp)(pSql->param, pSql, pRes->numOfRows);
} else {
@ -2292,7 +2285,7 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
taosCacheRelease(tscCacheHandle, (void **)&pTableMeta, true);
if (pTableMetaInfo->pTableMeta) {
bool isSuperTable = UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo);
bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);
// taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pMetricMeta), true);
@ -2354,6 +2347,7 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
for (int i = 0; i < numOfTables; i++) {
int64_t uid = htobe64(*(int64_t*)p);
p += sizeof(int64_t);
p += sizeof(int32_t); // skip tid
TSKEY key = htobe64(*(TSKEY*)p);
p += sizeof(TSKEY);
tscUpdateSubscriptionProgress(pSql->pSubscription, uid, key);

View File

@ -463,11 +463,11 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
return NULL;
}
// current data are exhausted, fetch more data
if (pRes->row >= pRes->numOfRows && pRes->completed != true &&
// current data set are exhausted, fetch more data from node
if (pRes->row >= pRes->numOfRows && (pRes->completed != true || hasMoreVnodesToTry(pSql)) &&
(pCmd->command == TSDB_SQL_RETRIEVE ||
pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE ||
pCmd->command == TSDB_SQL_METRIC_JOIN_RETRIEVE ||
pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SELECT ||

View File

@ -79,7 +79,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
if (code == 0 && UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (code == 0 && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
code = tscGetSTableVgroupInfo(pSql, 0);
pSql->res.code = code;

View File

@ -44,8 +44,7 @@ typedef struct SSub {
int interval;
TAOS_SUBSCRIBE_CALLBACK fp;
void * param;
int numOfTables;
SSubscriptionProgress * progress;
SArray* progress;
} SSub;
@ -57,92 +56,113 @@ static int tscCompareSubscriptionProgress(const void* a, const void* b) {
return 0;
}
TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid) {
if (sub == NULL)
return 0;
SSub* pSub = (SSub*)sub;
for (int s = 0, e = pSub->numOfTables; s < e;) {
int m = (s + e) / 2;
SSubscriptionProgress* p = pSub->progress + m;
if (p->uid > uid)
e = m;
else if (p->uid < uid)
s = m + 1;
else
return p->key;
TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid, TSKEY dflt) {
if (sub == NULL) {
return dflt;
}
SSub* pSub = (SSub*)sub;
return 0;
SSubscriptionProgress target = {.uid = uid, .key = 0};
SSubscriptionProgress* p = taosArraySearch(pSub->progress, tscCompareSubscriptionProgress, &target);
if (p == NULL) {
return dflt;
}
return p->key;
}
void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts) {
if( sub == NULL)
return;
SSub* pSub = (SSub*)sub;
for (int s = 0, e = pSub->numOfTables; s < e;) {
int m = (s + e) / 2;
SSubscriptionProgress* p = pSub->progress + m;
if (p->uid > uid)
e = m;
else if (p->uid < uid)
s = m + 1;
else {
if (ts >= p->key) p->key = ts;
break;
SSubscriptionProgress target = {.uid = uid, .key = ts};
SSubscriptionProgress* p = taosArraySearch(pSub->progress, tscCompareSubscriptionProgress, &target);
if (p != NULL) {
p->key = ts;
}
}
static void asyncCallback(void *param, TAOS_RES *tres, int code) {
assert(param != NULL);
SSqlObj *pSql = ((SSqlObj *)param);
pSql->res.code = code;
sem_post(&pSql->rspSem);
}
static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char* sql) {
SSub* pSub = calloc(1, sizeof(SSub));
if (pSub == NULL) {
terrno = TSDB_CODE_CLI_OUT_OF_MEMORY;
tscError("failed to allocate memory for subscription");
return NULL;
}
SSub* pSub = NULL;
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
if (pSql == NULL) {
terrno = TSDB_CODE_CLI_OUT_OF_MEMORY;
tscError("failed to allocate SSqlObj for subscription");
goto _pSql_failed;
TRY( 8 ) {
SSqlObj* pSql = calloc_throw(1, sizeof(SSqlObj));
CLEANUP_PUSH_FREE(true, pSql);
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
if (tsem_init(&pSql->rspSem, 0, 0) == -1) {
THROW(TAOS_SYSTEM_ERROR(errno));
}
CLEANUP_PUSH_INT_PTR(true, tsem_destroy, &pSql->rspSem);
pSql->signature = pSql;
pSql->param = pSql;
pSql->pTscObj = pObj;
pSql->maxRetry = TSDB_MAX_REPLICA_NUM;
pSql->fp = asyncCallback;
char* sqlstr = (char*)malloc(strlen(sql) + 1);
if (sqlstr == NULL) {
tscError("failed to allocate sql string for subscription");
goto failed;
int code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE);
if (code != TSDB_CODE_SUCCESS) {
THROW(code);
}
strcpy(sqlstr, sql);
strtolower(sqlstr, sqlstr);
pSql->sqlstr = sqlstr;
CLEANUP_PUSH_FREE(true, pCmd->payload);
tsem_init(&pSql->rspSem, 0, 0);
SSqlRes *pRes = &pSql->res;
pRes->qhandle = 0;
pRes->numOfRows = 1;
pRes->numOfTotal = 0;
pSql->sqlstr = strdup_throw(sql);
CLEANUP_PUSH_FREE(true, pSql->sqlstr);
strtolower(pSql->sqlstr, pSql->sqlstr);
code = tsParseSql(pSql, false);
if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
// wait for the callback function to post the semaphore
sem_wait(&pSql->rspSem);
code = pSql->res.code;
}
if (code != TSDB_CODE_SUCCESS) {
tscError("failed to parse sql statement: %s, error: %s", pSub->topic, tstrerror(code));
THROW( code );
}
if (pSql->cmd.command != TSDB_SQL_SELECT) {
tscError("only 'select' statement is allowed in subscription: %s", pSub->topic);
THROW( -1 ); // TODO
}
pSub = calloc_throw(1, sizeof(SSub));
CLEANUP_PUSH_FREE(true, pSub);
pSql->pSubscription = pSub;
pSub->pSql = pSql;
pSub->signature = pSub;
strncpy(pSub->topic, topic, sizeof(pSub->topic));
pSub->topic[sizeof(pSub->topic) - 1] = 0;
pSub->progress = taosArrayInit(32, sizeof(SSubscriptionProgress));
if (pSub->progress == NULL) {
THROW(TSDB_CODE_CLI_OUT_OF_MEMORY);
}
CLEANUP_EXECUTE();
} CATCH( code ) {
tscError("failed to create subscription object: %s", tstrerror(code));
CLEANUP_EXECUTE();
pSub = NULL;
} END_TRY
return pSub;
failed:
tfree(sqlstr);
_pSql_failed:
tfree(pSql);
tfree(pSub);
return NULL;
}
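Note: the rewritten constructor leans on the repository's TRY / THROW / CLEANUP_PUSH_FREE / CATCH / END_TRY macros together with the *_throw allocators declared in tscUtil.h; their exact semantics are not shown in this diff, so the following is only a deliberately simplified, single-level setjmp/longjmp illustration of the idea (the real macros also maintain a cleanup stack and support nesting).

#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static jmp_buf demoEnv;                       /* one non-nesting environment, demo only        */

#define DEMO_TRY      if (setjmp(demoEnv) == 0)
#define DEMO_CATCH    else
#define DEMO_THROW(c) longjmp(demoEnv, (c))

static void *mallocThrowDemo(size_t size) {   /* plays the role of malloc_throw                */
  void *p = malloc(size);
  if (p == NULL) DEMO_THROW(1);
  return p;
}

int main(void) {
  char *volatile sqlstr = NULL;
  DEMO_TRY {
    sqlstr = mallocThrowDemo(64);
    strcpy(sqlstr, "select * from demo_topic");
    printf("subscription sql: %s\n", sqlstr);
  } DEMO_CATCH {
    printf("allocation failed, nothing to clean up\n");
  }
  free(sqlstr);                               /* free(NULL) is a no-op on the failure path     */
  return 0;
}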
@ -159,61 +179,69 @@ static void tscProcessSubscriptionTimer(void *handle, void *tmrId) {
}
int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
int code = (uint8_t)tsParseSql(pSub->pSql, false);
static SArray* getTableList( SSqlObj* pSql ) {
const char* p = strstr( pSql->sqlstr, " from " );
char* sql = alloca(strlen(p) + 32);
sprintf(sql, "select tbid(tbname)%s", p);
int code = taos_query( pSql->pTscObj, sql );
if (code != TSDB_CODE_SUCCESS) {
tscError("failed to parse sql statement: %s", pSub->topic);
return 0;
tscError("failed to retrieve table id: %s", tstrerror(code));
return NULL;
}
SSqlCmd* pCmd = &pSub->pSql->cmd;
if (pCmd->command != TSDB_SQL_SELECT) {
tscError("only 'select' statement is allowed in subscription: %s", pSub->topic);
return 0;
TAOS_RES* res = taos_use_result( pSql->pTscObj );
TAOS_ROW row;
SArray* result = taosArrayInit( 128, sizeof(STidTags) );
while ((row = taos_fetch_row(res))) {
STidTags tags;
memcpy(&tags, row[0], sizeof(tags));
taosArrayPush(result, &tags);
}
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0, 0);
int numOfTables = 0;
if (!UTIL_TABLE_IS_NOMRAL_TABLE(pTableMetaInfo)) {
// SSuperTableMeta* pMetricMeta = pTableMetaInfo->pMetricMeta;
// for (int32_t i = 0; i < pMetricMeta->numOfVnodes; i++) {
// SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, i);
// numOfTables += pVnodeSidList->numOfSids;
// }
return result;
}
SSubscriptionProgress* progress = (SSubscriptionProgress*)calloc(numOfTables, sizeof(SSubscriptionProgress));
if (progress == NULL) {
tscError("failed to allocate memory for progress: %s", pSub->topic);
return 0;
}
if (UTIL_TABLE_IS_NOMRAL_TABLE(pTableMetaInfo)) {
numOfTables = 1;
int64_t uid = pTableMetaInfo->pTableMeta->uid;
progress[0].uid = uid;
progress[0].key = tscGetSubscriptionProgress(pSub, uid);
} else {
// SSuperTableMeta* pMetricMeta = pTableMetaInfo->pMetricMeta;
// numOfTables = 0;
// for (int32_t i = 0; i < pMetricMeta->numOfVnodes; i++) {
// SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, i);
// for (int32_t j = 0; j < pVnodeSidList->numOfSids; j++) {
// STableIdInfo *pTableMetaInfo = tscGetMeterSidInfo(pVnodeSidList, j);
// int64_t uid = pTableMetaInfo->uid;
// progress[numOfTables].uid = uid;
// progress[numOfTables++].key = tscGetSubscriptionProgress(pSub, uid);
// }
// }
qsort(progress, numOfTables, sizeof(SSubscriptionProgress), tscCompareSubscriptionProgress);
}
static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
SSqlObj* pSql = pSub->pSql;
free(pSub->progress);
pSub->numOfTables = numOfTables;
pSub->progress = progress;
SSqlCmd* pCmd = &pSql->cmd;
pSub->lastSyncTime = taosGetTimestampMs();
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
SSubscriptionProgress target = {.uid = pTableMeta->uid, .key = 0};
SSubscriptionProgress* p = taosArraySearch(pSub->progress, tscCompareSubscriptionProgress, &target);
if (p == NULL) {
taosArrayClear(pSub->progress);
taosArrayPush(pSub->progress, &target);
}
return 1;
}
SArray* tables = getTableList(pSql);
size_t numOfTables = taosArrayGetSize(tables);
SArray* progress = taosArrayInit(numOfTables, sizeof(SSubscriptionProgress));
for( size_t i = 0; i < numOfTables; i++ ) {
STidTags* tt = taosArrayGet( tables, i );
SSubscriptionProgress p = { .uid = tt->uid };
p.key = tscGetSubscriptionProgress(pSub, tt->uid, INT64_MIN);
taosArrayPush(progress, &p);
}
taosArraySort(progress, tscCompareSubscriptionProgress);
taosArrayDestroy(pSub->progress);
pSub->progress = progress;
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
taosArraySort( tables, tscCompareTidTags );
tscBuildVgroupTableInfo( pTableMetaInfo, tables );
}
taosArrayDestroy(tables);
return 1;
}
@ -248,32 +276,22 @@ static int tscLoadSubscriptionProgress(SSub* pSub) {
return 0;
}
if (fgets(buf, sizeof(buf), fp) == NULL || atoi(buf) < 0) {
tscTrace("invalid subscription progress file: %s", pSub->topic);
fclose(fp);
return 0;
}
int numOfTables = atoi(buf);
SSubscriptionProgress* progress = calloc(numOfTables, sizeof(SSubscriptionProgress));
for (int i = 0; i < numOfTables; i++) {
SArray* progress = pSub->progress;
taosArrayClear(progress);
while( 1 ) {
if (fgets(buf, sizeof(buf), fp) == NULL) {
fclose(fp);
free(progress);
return 0;
}
int64_t uid, key;
sscanf(buf, "%" SCNd64 ":%" SCNd64, &uid, &key);
progress[i].uid = uid;
progress[i].key = key;
SSubscriptionProgress p;
sscanf(buf, "%" SCNd64 ":%" SCNd64, &p.uid, &p.key);
taosArrayPush(progress, &p);
}
fclose(fp);
qsort(progress, numOfTables, sizeof(SSubscriptionProgress), tscCompareSubscriptionProgress);
pSub->numOfTables = numOfTables;
pSub->progress = progress;
tscTrace("subscription progress loaded, %d tables: %s", numOfTables, pSub->topic);
taosArraySort(progress, tscCompareSubscriptionProgress);
tscTrace("subscription progress loaded, %d tables: %s", taosArrayGetSize(progress), pSub->topic);
return 1;
}
@ -294,11 +312,10 @@ void tscSaveSubscriptionProgress(void* sub) {
}
fputs(pSub->pSql->sqlstr, fp);
fprintf(fp, "\n%d\n", pSub->numOfTables);
for (int i = 0; i < pSub->numOfTables; i++) {
int64_t uid = pSub->progress[i].uid;
TSKEY key = pSub->progress[i].key;
fprintf(fp, "%" PRId64 ":%" PRId64 "\n", uid, key);
fprintf(fp, "\n");
for(size_t i = 0; i < taosArrayGetSize(pSub->progress); i++) {
SSubscriptionProgress* p = taosArrayGet(pSub->progress, i);
fprintf(fp, "%" PRId64 ":%" PRId64 "\n", p->uid, p->key);
}
fclose(fp);
@ -363,35 +380,34 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
tscRemoveFromSqlList(pSql);
if (taosGetTimestampMs() - pSub->lastSyncTime > 10 * 60 * 1000) {
tscTrace("begin meter synchronization");
char* sqlstr = pSql->sqlstr;
pSql->sqlstr = NULL;
taos_free_result_imp(pSql, 0);
pSql->sqlstr = sqlstr;
taosCacheEmpty(tscCacheHandle);
tscTrace("begin table synchronization");
if (!tscUpdateSubscription(pSub->taos, pSub)) return NULL;
tscTrace("meter synchronization completed");
} else {
tscTrace("table synchronization completed");
}
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
uint32_t type = pQueryInfo->type;
taos_free_result_imp(pSql, 1);
tscFreeSqlResult(pSql);
pRes->numOfRows = 1;
pRes->numOfTotal = 0;
pRes->qhandle = 0;
pSql->cmd.command = TSDB_SQL_SELECT;
pQueryInfo->type = type;
tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0)->vgroupIndex = 0;
}
pSql->fp = asyncCallback;
pSql->param = pSql;
tscDoQuery(pSql);
if (pRes->code != TSDB_CODE_NOT_ACTIVE_TABLE) {
break;
sem_wait(&pSql->rspSem);
if (pRes->code != TSDB_CODE_SUCCESS) {
continue;
}
// meter was removed, make sync time zero, so that next retry will
// do synchronization first
pSub->lastSyncTime = 0;
break;
}
if (pRes->code != TSDB_CODE_SUCCESS) {
@ -421,7 +437,7 @@ void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress) {
}
tscFreeSqlObj(pSub->pSql);
free(pSub->progress);
taosArrayDestroy(pSub->progress);
memset(pSub, 0, sizeof(*pSub));
free(pSub);
}
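Note: after this refactor the progress file holds one line of subscription SQL followed by one "uid:key" record per table, as the save/load hunks above show. A tiny round-trip sketch of that format; the path and values below are invented for the demo.

#include <inttypes.h>
#include <stdio.h>

int main(void) {
  const char *path = "/tmp/demo_sub_progress";                 /* invented path                 */

  FILE *fp = fopen(path, "wb+");
  if (fp == NULL) return 1;

  fputs("select * from demo_stable", fp);                      /* line 1: the subscription SQL  */
  fprintf(fp, "\n");
  fprintf(fp, "%" PRId64 ":%" PRId64 "\n", (int64_t)4222, (int64_t)1589360645000LL);

  rewind(fp);
  char buf[512];
  if (fgets(buf, sizeof(buf), fp) == NULL) { fclose(fp); return 1; }   /* skip the SQL line     */
  while (fgets(buf, sizeof(buf), fp) != NULL) {
    int64_t uid, key;
    if (sscanf(buf, "%" SCNd64 ":%" SCNd64, &uid, &key) == 2) {
      printf("uid=%" PRId64 " key=%" PRId64 "\n", uid, key);
    }
  }
  fclose(fp);
  return 0;
}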

View File

@ -330,7 +330,7 @@ int32_t tscLaunchSecondPhaseSubqueries(SSqlObj* pSql) {
pNewQueryInfo->limit = pSupporter->limit;
// fetch the join tag column
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
SSqlExpr* pExpr = tscSqlExprGet(pNewQueryInfo, 0);
assert(pQueryInfo->tagCond.joinInfo.hasJoin);
@ -463,77 +463,51 @@ static void tSIntersectionAndLaunchSecQuery(SJoinSupporter* pSupporter, SSqlObj*
}
}
int32_t tagsOrderCompar(const void* p1, const void* p2) {
STidTags* t1 = (STidTags*) p1;
STidTags* t2 = (STidTags*) p2;
int32_t tscCompareTidTags(const void* p1, const void* p2) {
const STidTags* t1 = (const STidTags*) p1;
const STidTags* t2 = (const STidTags*) p2;
if (t1->vgId != t2->vgId) {
return (t1->vgId > t2->vgId) ? 1 : -1;
} else {
}
if (t1->tid != t2->tid) {
return (t1->tid > t2->tid) ? 1 : -1;
} else {
}
return 0;
}
}
}
static void doBuildVgroupTableInfo(SArray* res, STableMetaInfo* pTableMetaInfo) {
SArray* pGroup = taosArrayInit(4, sizeof(SVgroupTableInfo));
void tscBuildVgroupTableInfo(STableMetaInfo* pTableMetaInfo, SArray* tables) {
SArray* result = taosArrayInit( 4, sizeof(SVgroupTableInfo) );
SArray* vgTables = NULL;
STidTags* prev = NULL;
SArray* vgTableIdItem = taosArrayInit(4, sizeof(STableIdInfo));
int32_t size = taosArrayGetSize(res);
STidTags* prev = taosArrayGet(res, 0);
int32_t prevVgId = prev->vgId;
STableIdInfo item = {.uid = prev->uid, .tid = prev->tid, .key = INT64_MIN};
taosArrayPush(vgTableIdItem, &item);
for(int32_t k = 1; k < size; ++k) {
STidTags* t1 = taosArrayGet(res, k);
if (prevVgId != t1->vgId) {
SVgroupTableInfo info = {0};
size_t numOfTables = taosArrayGetSize( tables );
for( size_t i = 0; i < numOfTables; i++ ) {
STidTags* tt = taosArrayGet( tables, i );
if( prev == NULL || tt->vgId != prev->vgId ) {
SVgroupsInfo* pvg = pTableMetaInfo->vgroupList;
SVgroupTableInfo info = {{ 0 }};
for( int32_t m = 0; m < pvg->numOfVgroups; ++m ) {
if (prevVgId == pvg->vgroups[m].vgId) {
if( tt->vgId == pvg->vgroups[m].vgId ) {
info.vgInfo = pvg->vgroups[m];
break;
}
}
assert( info.vgInfo.numOfIps != 0 );
info.itemList = vgTableIdItem;
taosArrayPush(pGroup, &info);
vgTableIdItem = taosArrayInit(4, sizeof(STableIdInfo));
STableIdInfo item1 = {.uid = t1->uid, .tid = t1->tid, .key = INT64_MIN};
taosArrayPush(vgTableIdItem, &item1);
prevVgId = t1->vgId;
} else {
taosArrayPush(vgTableIdItem, &item);
}
vgTables = taosArrayInit( 4, sizeof(STableIdInfo) );
info.itemList = vgTables;
taosArrayPush( result, &info );
}
if (taosArrayGetSize(vgTableIdItem) > 0) {
SVgroupTableInfo info = {0};
SVgroupsInfo* pvg = pTableMetaInfo->vgroupList;
for(int32_t m = 0; m < pvg->numOfVgroups; ++m) {
if (prevVgId == pvg->vgroups[m].vgId) {
info.vgInfo = pvg->vgroups[m];
break;
}
STableIdInfo item = { .uid = tt->uid, .tid = tt->tid, .key = INT64_MIN };
taosArrayPush( vgTables, &item );
prev = tt;
}
assert(info.vgInfo.numOfIps != 0);
info.itemList = vgTableIdItem;
taosArrayPush(pGroup, &info);
}
pTableMetaInfo->pVgroupTables = pGroup;
pTableMetaInfo->pVgroupTables = result;
}
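Note: tscBuildVgroupTableInfo above expects its input already sorted by tscCompareTidTags and opens a new SVgroupTableInfo whenever the vgId changes. The same sort-then-group idea in a self-contained form; the cut-down struct and sample values are made up.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef struct { int32_t vgId; int32_t tid; } TidTag;   /* stand-in for STidTags */

static int compareTidTags(const void *p1, const void *p2) {
  const TidTag *t1 = p1, *t2 = p2;
  if (t1->vgId != t2->vgId) return (t1->vgId > t2->vgId) ? 1 : -1;
  if (t1->tid  != t2->tid)  return (t1->tid  > t2->tid)  ? 1 : -1;
  return 0;
}

int main(void) {
  TidTag tags[] = {{2, 7}, {1, 3}, {2, 5}, {1, 9}};
  size_t n = sizeof(tags) / sizeof(tags[0]);

  qsort(tags, n, sizeof(TidTag), compareTidTags);

  const TidTag *prev = NULL;
  for (size_t i = 0; i < n; i++) {
    if (prev == NULL || tags[i].vgId != prev->vgId) {
      printf("vgroup %d:\n", tags[i].vgId);              /* start a new group */
    }
    printf("  tid %d\n", tags[i].tid);
    prev = &tags[i];
  }
  return 0;
}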
static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* pParent) {
@ -627,8 +601,8 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
SJoinSupporter* p1 = pParentSql->pSubs[0]->param;
SJoinSupporter* p2 = pParentSql->pSubs[1]->param;
qsort(p1->pIdTagList, p1->num, p1->tagSize, tagsOrderCompar);
qsort(p2->pIdTagList, p2->num, p2->tagSize, tagsOrderCompar);
qsort(p1->pIdTagList, p1->num, p1->tagSize, tscCompareTidTags);
qsort(p2->pIdTagList, p2->num, p2->tagSize, tscCompareTidTags);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@ -668,11 +642,11 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
SQueryInfo* pQueryInfo1 = tscGetQueryInfoDetail(pSubCmd1, 0);
STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo1, 0);
doBuildVgroupTableInfo(s1, pTableMetaInfo1);
tscBuildVgroupTableInfo(pTableMetaInfo1, s1);
SQueryInfo* pQueryInfo2 = tscGetQueryInfoDetail(pSubCmd2, 0);
STableMetaInfo* pTableMetaInfo2 = tscGetMetaInfo(pQueryInfo2, 0);
doBuildVgroupTableInfo(s2, pTableMetaInfo2);
tscBuildVgroupTableInfo(pTableMetaInfo2, s2);
pSupporter->pState->numOfCompleted = 0;
pSupporter->pState->code = 0;
@ -1096,7 +1070,7 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
tscInitQueryInfo(pNewQueryInfo);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pNewQueryInfo, 0);
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) { // return the tableId & tag
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // return the tableId & tag
SSchema s = {0};
SColumnIndex index = {0};
@ -1203,7 +1177,7 @@ int32_t tscHandleMasterJoinQuery(SSqlObj* pSql) {
}
}
pSql->cmd.command = (pSql->numOfSubs <= 0)? TSDB_SQL_RETRIEVE_EMPTY_RESULT:TSDB_SQL_METRIC_JOIN_RETRIEVE;
pSql->cmd.command = (pSql->numOfSubs <= 0)? TSDB_SQL_RETRIEVE_EMPTY_RESULT:TSDB_SQL_TABLE_JOIN_RETRIEVE;
return TSDB_CODE_SUCCESS;
}
@ -1533,8 +1507,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pPObj->cmd, 0);
tscClearInterpInfo(pPQueryInfo);
tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfTotal, pDesc, trsupport->pFinalColModel,
&pPObj->cmd, &pPObj->res);
tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfTotal, pDesc, trsupport->pFinalColModel, pPObj);
tscTrace("%p build loser tree completed", pPObj);
pPObj->res.precision = pSql->res.precision;
@ -1950,7 +1923,7 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
assert(pRes->row >= 0 && pRes->row <= pRes->numOfRows);
if(pCmd->command == TSDB_SQL_METRIC_JOIN_RETRIEVE) {
if(pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
if (pRes->completed) {
tfree(pRes->tsrow);
}

View File

@ -157,7 +157,7 @@ bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) {
}
// for select query super table, the super table vgroup list can not be null in any cases.
if (pQueryInfo->command == TSDB_SQL_SELECT && UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (pQueryInfo->command == TSDB_SQL_SELECT && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
assert(pTableMetaInfo->vgroupList != NULL);
}
@ -172,7 +172,7 @@ bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) {
if (((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) != TSDB_QUERY_TYPE_STABLE_SUBQUERY) &&
pQueryInfo->command == TSDB_SQL_SELECT) {
return UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo);
return UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
}
return false;
@ -187,7 +187,7 @@ bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) {
* 4. show queries, instead of a select query
*/
size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
if (pTableMetaInfo == NULL || !UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo) ||
if (pTableMetaInfo == NULL || !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) ||
pQueryInfo->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT || numOfExprs == 0) {
return false;
}
@ -386,14 +386,16 @@ void tscPartiallyFreeSqlObj(SSqlObj* pSql) {
int32_t cmd = pCmd->command;
if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_LOCALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
cmd == TSDB_SQL_METRIC_JOIN_RETRIEVE) {
cmd == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscRemoveFromSqlList(pSql);
}
// pSql->sqlstr will be used by tscBuildQueryStreamDesc
if (pObj->signature == pObj) {
pthread_mutex_lock(&pObj->mutex);
tfree(pSql->sqlstr);
pthread_mutex_unlock(&pObj->mutex);
}
tscFreeSqlResult(pSql);
@ -1209,18 +1211,18 @@ void tscColumnListCopy(SArray* dst, const SArray* src, int16_t tableIndex) {
}
}
void tscColumnListDestroy(SArray* pColumnBaseInfo) {
if (pColumnBaseInfo == NULL) {
void tscColumnListDestroy(SArray* pColumnList) {
if (pColumnList == NULL) {
return;
}
size_t num = taosArrayGetSize(pColumnBaseInfo);
size_t num = taosArrayGetSize(pColumnList);
for (int32_t i = 0; i < num; ++i) {
SColumn* pCol = taosArrayGetP(pColumnBaseInfo, i);
SColumn* pCol = taosArrayGetP(pColumnList, i);
tscColumnDestroy(pCol);
}
taosArrayDestroy(pColumnBaseInfo);
taosArrayDestroy(pColumnList);
}
/*
@ -1348,7 +1350,7 @@ bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId) {
return false;
}
if (colId == -1 && UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (colId == -1 && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
return true;
}
@ -1459,10 +1461,11 @@ bool tscShouldFreeHeatBeat(SSqlObj* pHb) {
}
/*
* the following three kinds of SqlObj should not be freed
* the following four kinds of SqlObj should not be freed
* 1. SqlObj for stream computing
* 2. main SqlObj
* 3. heartbeat SqlObj
* 4. SqlObj for subscription
*
* If res code is error and SqlObj does not belong to above types, it should be
* automatically freed for async query, ignoring that connection should be kept.
@ -1475,7 +1478,7 @@ bool tscShouldBeFreed(SSqlObj* pSql) {
}
STscObj* pTscObj = pSql->pTscObj;
if (pSql->pStream != NULL || pTscObj->pHb == pSql || pTscObj->pSql == pSql) {
if (pSql->pStream != NULL || pTscObj->pHb == pSql || pTscObj->pSql == pSql || pSql->pSubscription != NULL) {
return false;
}
@ -1644,9 +1647,11 @@ void doRemoveTableMetaInfo(SQueryInfo* pQueryInfo, int32_t index, bool removeFro
void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache) {
tscTrace("%p deref the table meta in cache, numOfTables:%d", address, pQueryInfo->numOfTables);
int32_t index = pQueryInfo->numOfTables;
while (index >= 0) {
doRemoveTableMetaInfo(pQueryInfo, --index, removeFromCache);
for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
tscClearTableMetaInfo(pTableMetaInfo, removeFromCache);
free(pTableMetaInfo);
}
tfree(pQueryInfo->pTableMetaInfo);
@ -1666,8 +1671,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
assert(pTableMetaInfo != NULL);
if (name != NULL) {
assert(strlen(name) <= TSDB_TABLE_ID_LEN);
strcpy(pTableMetaInfo->name, name);
strncpy(pTableMetaInfo->name, name, TSDB_TABLE_ID_LEN);
}
pTableMetaInfo->pTableMeta = pTableMeta;
@ -1678,10 +1682,9 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
memcpy(pTableMetaInfo->vgroupList, vgroupList, size);
}
if (pTagCols == NULL) {
pTableMetaInfo->tagColList = taosArrayInit(4, POINTER_BYTES);
} else {
pTableMetaInfo->tagColList = taosArrayClone(pTagCols);
if (pTagCols != NULL) {
tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, -1);
}
pQueryInfo->numOfTables += 1;
@ -1700,11 +1703,9 @@ void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache)
taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), removeFromCache);
tfree(pTableMetaInfo->vgroupList);
if (pTableMetaInfo->tagColList != NULL) {
taosArrayDestroy(pTableMetaInfo->tagColList);
tscColumnListDestroy(pTableMetaInfo->tagColList);
pTableMetaInfo->tagColList = NULL;
}
}
void tscResetForNextRetrieve(SSqlRes* pRes) {
if (pRes == NULL) {
@ -1847,7 +1848,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
if (pPrevSql == NULL) {
STableMeta* pTableMeta = taosCacheAcquireByName(tscCacheHandle, name);
// todo handle error
assert(pTableMeta != NULL);
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList);
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
@ -1859,8 +1861,15 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList);
}
assert(pFinalInfo->pTableMeta != NULL && pNewQueryInfo->numOfTables == 1);
if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
if (pFinalInfo->pTableMeta == NULL) {
tscError("%p new subquery failed for get pMeterMeta is NULL from cache", pSql);
tscFreeSqlObj(pNew);
return NULL;
}
assert(pNewQueryInfo->numOfTables == 1);
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
assert(pFinalInfo->vgroupList != NULL);
}
@ -1986,22 +1995,28 @@ char* tscGetErrorMsgPayload(SSqlCmd* pCmd) { return pCmd->payload; }
/**
* If current vnode query does not return results anymore (pRes->numOfRows == 0), try the next vnode if exists,
* in case of multi-vnode super table projection query and the result does not reach the limitation.
* while multi-vnode super table projection query and the result does not reach the limitation.
*/
bool hasMoreVnodesToTry(SSqlObj* pSql) {
// SSqlCmd* pCmd = &pSql->cmd;
// SSqlRes* pRes = &pSql->res;
// SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
// if (!UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo) || (pTableMetaInfo->pMetricMeta == NULL)) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
if (pCmd->command != TSDB_SQL_FETCH) {
return false;
// }
}
// int32_t totalVnode = pTableMetaInfo->pMetricMeta->numOfVnodes;
// return pRes->numOfRows == 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) &&
// (!tscHasReachLimitation(pQueryInfo, pRes)) && (pTableMetaInfo->vgroupIndex < totalVnode - 1);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
assert(pRes->completed);
// for normal table, do not try any more if result are exhausted
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) || (pTableMetaInfo->vgroupList == NULL)) {
return false;
}
int32_t numOfVgroups = pTableMetaInfo->vgroupList->numOfVgroups;
return tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) &&
(!tscHasReachLimitation(pQueryInfo, pRes)) && (pTableMetaInfo->vgroupIndex < numOfVgroups - 1);
}
void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
@ -2017,12 +2032,11 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
assert(pRes->numOfRows == 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && !tscHasReachLimitation(pQueryInfo, pRes));
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
int32_t totalVnode = 0;
// int32_t totalVnode = pTableMetaInfo->pMetricMeta->numOfVnodes;
while (++pTableMetaInfo->vgroupIndex < totalVnode) {
int32_t totalVgroups = pTableMetaInfo->vgroupList->numOfVgroups;
while (++pTableMetaInfo->vgroupIndex < totalVgroups) {
tscTrace("%p current vnode:%d exhausted, try next:%d. total vnode:%d. current numOfRes:%d", pSql,
pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVnode, pRes->numOfTotalInCurrentClause);
pTableMetaInfo->vgroupIndex - 1, pTableMetaInfo->vgroupIndex, totalVgroups, pRes->numOfTotalInCurrentClause);
/*
* update the limit and offset value for the query on the next vnode,
@ -2038,10 +2052,10 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
}
pQueryInfo->limit.offset = pRes->offset;
assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0));
tscTrace("%p new query to next vnode, vnode index:%d, limit:%" PRId64 ", offset:%" PRId64 ", glimit:%" PRId64, pSql,
pTableMetaInfo->vgroupIndex, pQueryInfo->limit.limit, pQueryInfo->limit.offset, pQueryInfo->clauseLimit);
tscTrace("%p new query to next vgroup, index:%d, limit:%" PRId64 ", offset:%" PRId64 ", glimit:%" PRId64,
pSql, pTableMetaInfo->vgroupIndex, pQueryInfo->limit.limit, pQueryInfo->limit.offset, pQueryInfo->clauseLimit);
/*
* For project query with super table join, the numOfSub is equalled to the number of all subqueries.
@ -2055,43 +2069,46 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
tscResetForNextRetrieve(pRes);
// in case of async query, set the callback function
void* fp1 = pSql->fp;
// void* fp1 = pSql->fp;
pSql->fp = fp;
if (fp1 != NULL) {
assert(fp != NULL);
// if (fp1 != NULL) {
// assert(fp != NULL);
// }
int32_t ret = tscProcessSql(pSql);
if (ret == TSDB_CODE_SUCCESS) {
return;
} else {// todo check for failure
}
int32_t ret = tscProcessSql(pSql); // todo check for failure
// in case of async query, return now
if (fp != NULL) {
return;
// if (fp != NULL) {
// return;
// }
//
// if (ret != TSDB_CODE_SUCCESS) {
// pSql->res.code = ret;
// return;
// }
//
// // retrieve data
// assert(pCmd->command == TSDB_SQL_SELECT);
// pCmd->command = TSDB_SQL_FETCH;
//
// if ((ret = tscProcessSql(pSql)) != TSDB_CODE_SUCCESS) {
// pSql->res.code = ret;
// return;
// }
//
// // if the result from current virtual node are empty, try next if exists. otherwise, return the results.
// if (pRes->numOfRows > 0) {
// break;
// }
}
if (ret != TSDB_CODE_SUCCESS) {
pSql->res.code = ret;
return;
}
// retrieve data
assert(pCmd->command == TSDB_SQL_SELECT);
pCmd->command = TSDB_SQL_FETCH;
if ((ret = tscProcessSql(pSql)) != TSDB_CODE_SUCCESS) {
pSql->res.code = ret;
return;
}
// if the result from current virtual node are empty, try next if exists. otherwise, return the results.
if (pRes->numOfRows > 0) {
break;
}
}
if (pRes->numOfRows == 0) {
tscTrace("%p all vnodes exhausted, prj query completed. total res:%d", pSql, totalVnode, pRes->numOfTotal);
}
// if (pRes->numOfRows == 0) {
// tscTrace("%p all vnodes exhausted, prj query completed. total res:%d", pSql, totalVnode, pRes->numOfTotal);
// }
}
void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()) {
@ -2155,3 +2172,26 @@ void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t column
}
}
void* malloc_throw(size_t size) {
void* p = malloc(size);
if (p == NULL) {
THROW(TSDB_CODE_CLI_OUT_OF_MEMORY);
}
return p;
}
void* calloc_throw(size_t nmemb, size_t size) {
void* p = calloc(nmemb, size);
if (p == NULL) {
THROW(TSDB_CODE_CLI_OUT_OF_MEMORY);
}
return p;
}
char* strdup_throw(const char* str) {
char* p = strdup(str);
if (p == NULL) {
THROW(TSDB_CODE_CLI_OUT_OF_MEMORY);
}
return p;
}
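
These wrappers route allocation failures through the exception framework in util/exception.h (also touched by this merge) instead of returning NULL. A minimal usage sketch, assuming only the TRY/CATCH/END_TRY and THROW macros shown later in this diff; the helper below is illustrative and not part of the commit:

// illustrative only: pairing strdup_throw with TRY/CATCH
static char* cloneName(const char* src, int32_t* code) {
  char* dup = NULL;
  TRY(1) {
    dup = strdup_throw(src);   // throws TSDB_CODE_CLI_OUT_OF_MEMORY instead of returning NULL
    *code = TSDB_CODE_SUCCESS;
  } CATCH( err ) {
    *code = err;               // TSDB_CODE_CLI_OUT_OF_MEMORY when the allocation failed
  } END_TRY
  return dup;
}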

View File

@ -23,4 +23,5 @@ void extractTableName(const char *tableId, char *name);
char* extractDBName(const char *tableId, char *name);
#endif // TDENGINE_NAME_H

View File

@ -84,7 +84,7 @@ int32_t main(int32_t argc, char *argv[]) {
}
/* Set termination handler. */
struct sigaction act = {0};
struct sigaction act = {{0}};
act.sa_flags = SA_SIGINFO;
act.sa_sigaction = signal_handler;
sigaction(SIGTERM, &act, NULL);

View File

@ -33,7 +33,7 @@ typedef struct {
void (*stopFp)();
} SModule;
static SModule tsModule[TSDB_MOD_MAX] = {0};
static SModule tsModule[TSDB_MOD_MAX] = {{0}};
static uint32_t tsModuleStatus = 0;
static void dnodeSetModuleStatus(int32_t module) {

View File

@ -32,6 +32,9 @@ extern "C" {
#define TSKEY int64_t
#endif
#define TSWINDOW_INITIALIZER {INT64_MIN, INT64_MAX};
#define TSKEY_INITIAL_VAL INT64_MIN
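// editorial note: INT64_MIN serves as the "not set" sentinel because 0 is a valid
// timestamp (1970-01-01); see the prevSKey checks switched from 0 to TSKEY_INITIAL_VAL below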
// ----------------- For variable data types such as TSDB_DATA_TYPE_BINARY and TSDB_DATA_TYPE_NCHAR
typedef int32_t VarDataOffsetT;
typedef int16_t VarDataLenT;
@ -341,8 +344,6 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_MAX_DBS 100
#define TSDB_MAX_VGROUPS 1000
#define TSDB_MAX_SUPER_TABLES 100
#define TSDB_MAX_NORMAL_TABLES 1000
#define TSDB_MAX_CHILD_TABLES 100000
#define TSDB_PORT_DNODESHELL 0
#define TSDB_PORT_DNODEDNODE 5

View File

@ -627,7 +627,6 @@ typedef struct {
typedef struct STableMetaMsg {
int32_t contLen;
char tableId[TSDB_TABLE_ID_LEN + 1]; // table id
char stableId[TSDB_TABLE_ID_LEN + 1]; // stable name if it is created according to super table
uint8_t numOfTags;
uint8_t precision;
uint8_t tableType;

View File

@ -34,12 +34,14 @@ extern "C" {
#define TSDB_INVALID_SUPER_TABLE_ID -1
#define TSDB_STATUS_COMMIT_START 1
#define TSDB_STATUS_COMMIT_OVER 2
// --------- TSDB APPLICATION HANDLE DEFINITION
typedef struct {
// WAL handle
void *appH;
void *cqH;
int (*walCallBack)(void *);
int (*notifyStatus)(void *, int status);
int (*eventCallBack)(void *);
} STsdbAppH;

View File

@ -1203,8 +1203,10 @@ void mgmtDropAllSuperTables(SDbObj *pDropDb) {
static int32_t mgmtSetSchemaFromSuperTable(SSchema *pSchema, SSuperTableObj *pTable) {
int32_t numOfCols = pTable->numOfColumns + pTable->numOfTags;
assert(numOfCols <= TSDB_MAX_COLUMNS);
for (int32_t i = 0; i < numOfCols; ++i) {
strncpy(pSchema->name, pTable->schema[i].name, TSDB_TABLE_ID_LEN);
strncpy(pSchema->name, pTable->schema[i].name, TSDB_COL_NAME_LEN);
pSchema->type = pTable->schema[i].type;
pSchema->bytes = htons(pTable->schema[i].bytes);
pSchema->colId = htons(pTable->schema[i].colId);
@ -1675,7 +1677,6 @@ static int32_t mgmtDoGetChildTableMeta(SQueuedMsg *pMsg, STableMetaMsg *pMeta) {
pMeta->numOfTags = (int8_t)pTable->superTable->numOfTags;
pMeta->numOfColumns = htons((int16_t)pTable->superTable->numOfColumns);
pMeta->contLen = sizeof(STableMetaMsg) + mgmtSetSchemaFromSuperTable(pMeta->schema, pTable->superTable);
strncpy(pMeta->stableId, pTable->superTable->info.tableId, tListLen(pMeta->stableId));
} else {
pMeta->sversion = htons(pTable->sversion);
pMeta->numOfTags = 0;

View File

@ -253,6 +253,17 @@ void taosBlockSIGPIPE();
#define BUILDIN_CLZ(val) __builtin_clz(val)
#define BUILDIN_CTZ(val) __builtin_ctz(val)
#undef threadlocal
#ifdef _ISOC11_SOURCE
#define threadlocal _Thread_local
#elif defined(__APPLE__)
#define threadlocal
#elif defined(__GNUC__) && !defined(threadlocal)
#define threadlocal __thread
#else
#define threadlocal
#endif
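// editorial note: threadlocal resolves to _Thread_local under C11, __thread under GCC,
// and expands to nothing on macOS; see the expList and tsErrno declarations later in this diff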
#ifdef __cplusplus
}
#endif

View File

@ -157,7 +157,7 @@ void *taosProcessAlarmSignal(void *tharg) {
void (*callback)(int) = tharg;
static timer_t timerId;
struct sigevent sevent = {0};
struct sigevent sevent = {{0}};
#ifdef _ALPINE
sevent.sigev_notify = SIGEV_THREAD;

View File

@ -120,12 +120,6 @@ typedef struct tExtMemBuffer {
EXT_BUFFER_FLUSH_MODEL flushModel;
} tExtMemBuffer;
//typedef struct tTagSchema {
// struct SSchema *pSchema;
// int32_t numOfCols;
// int32_t colOffset[];
//} tTagSchema;
/**
*
* @param inMemSize

View File

@ -61,7 +61,7 @@ enum _sql_type {
TSDB_SQL_LOCAL, // SQL below for client local
TSDB_SQL_DESCRIBE_TABLE,
TSDB_SQL_RETRIEVE_LOCALMERGE,
TSDB_SQL_METRIC_JOIN_RETRIEVE,
TSDB_SQL_TABLE_JOIN_RETRIEVE,
/*
* build empty result instead of accessing dnode to fetch result

View File

@ -182,6 +182,7 @@ typedef struct SQInfo {
SQueryRuntimeEnv runtimeEnv;
int32_t groupIndex;
int32_t offset; // offset in group result set of subgroup, todo refactor
SArray* arrTableIdInfo;
T_REF_DECLARE()
/*

View File

@ -113,7 +113,6 @@ static void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv);
static void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols);
static void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv);
static bool hasMainOutput(SQuery *pQuery);
static void createTableQueryInfo(SQInfo *pQInfo);
static void buildTagQueryResult(SQInfo *pQInfo);
static int32_t setAdditionalInfo(SQInfo *pQInfo, STableId *pTableId, STableQueryInfo *pTableQueryInfo);
@ -507,7 +506,7 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t
w.ekey = pQuery->window.ekey;
}
assert(ts >= w.skey && ts <= w.ekey && w.skey != 0);
assert(ts >= w.skey && ts <= w.ekey);
return w;
}
@ -624,7 +623,7 @@ static void doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey,
setQueryStatus(pQuery, QUERY_COMPLETED | QUERY_RESBUF_FULL);
} else { // set the current index to be the last unclosed window
int32_t i = 0;
int64_t skey = 0;
int64_t skey = TSKEY_INITIAL_VAL;
for (i = 0; i < pWindowResInfo->size; ++i) {
SWindowResult *pResult = &pWindowResInfo->pResult[i];
@ -642,7 +641,7 @@ static void doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey,
}
// all windows are closed, set the last one to be the skey
if (skey == 0) {
if (skey == TSKEY_INITIAL_VAL) {
assert(i == pWindowResInfo->size);
pWindowResInfo->curIndex = pWindowResInfo->size - 1;
} else {
@ -660,7 +659,7 @@ static void doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey,
qTrace("QInfo:%p total window:%d, closed:%d", GET_QINFO_ADDR(pRuntimeEnv), pWindowResInfo->size, n);
}
assert(pWindowResInfo->prevSKey != 0);
assert(pWindowResInfo->prevSKey != TSKEY_INITIAL_VAL);
}
static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlockInfo, TSKEY *pPrimaryColumn,
@ -2399,7 +2398,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
// todo extract methods
if (isIntervalQuery(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == 0) {
TSKEY skey1, ekey1;
STimeWindow w = {0};
STimeWindow w = TSWINDOW_INITIALIZER;
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
if (QUERY_IS_ASC_QUERY(pQuery)) {
@ -3080,6 +3079,9 @@ void disableFuncInReverseScan(SQInfo *pQInfo) {
int32_t functId = pQuery->pSelectExpr[j].base.functionId;
SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[j];
if (pCtx->resultInfo == NULL) {
continue; // resultInfo is NULL, means no data checked in previous scan
}
if (((functId == TSDB_FUNC_FIRST || functId == TSDB_FUNC_FIRST_DST) && order == TSDB_ORDER_ASC) ||
((functId == TSDB_FUNC_LAST || functId == TSDB_FUNC_LAST_DST) && order == TSDB_ORDER_DESC)) {
@ -3460,7 +3462,11 @@ static bool hasMainOutput(SQuery *pQuery) {
return false;
}
STableQueryInfo *createTableQueryInfoImpl(SQueryRuntimeEnv *pRuntimeEnv, STableId tableId, STimeWindow win) {
static STableQueryInfo *createTableQueryInfo(
SQueryRuntimeEnv *pRuntimeEnv,
STableId tableId,
STimeWindow win
) {
STableQueryInfo *pTableQueryInfo = calloc(1, sizeof(STableQueryInfo));
pTableQueryInfo->win = win;
@ -3590,7 +3596,6 @@ void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) {
if (pTableQueryInfo->queryRangeSet) {
pTableQueryInfo->lastKey = key;
} else {
// pQuery->window.skey = key;
pTableQueryInfo->win.skey = key;
STimeWindow win = {.skey = key, .ekey = pQuery->window.ekey};
@ -3613,18 +3618,16 @@ void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) {
getAlignQueryTimeWindow(pQuery, win.skey, win.skey, win.ekey, &skey1, &ekey1, &w);
pWindowResInfo->startTime = pTableQueryInfo->win.skey; // windowSKey may be 0 in case of 1970 timestamp
if (pWindowResInfo->prevSKey == 0) {
if (QUERY_IS_ASC_QUERY(pQuery)) {
pWindowResInfo->prevSKey = w.skey;
} else {
if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) {
if (!QUERY_IS_ASC_QUERY(pQuery)) {
assert(win.ekey == pQuery->window.skey);
pWindowResInfo->prevSKey = w.skey;
}
pWindowResInfo->prevSKey = w.skey;
}
pTableQueryInfo->queryRangeSet = 1;
pTableQueryInfo->lastKey = pTableQueryInfo->win.skey;
pTableQueryInfo->win.skey = pTableQueryInfo->win.skey;
}
}
@ -3870,6 +3873,17 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data
data += bytes * numOfRows;
}
int32_t numOfTables = (int32_t)taosArrayGetSize(pQInfo->arrTableIdInfo);
*(int32_t*)data = htonl(numOfTables);
data += sizeof(int32_t);
for(int32_t i = 0; i < numOfTables; i++) {
STableIdInfo* pSrc = taosArrayGet(pQInfo->arrTableIdInfo, i);
STableIdInfo* pDst = (STableIdInfo*)data;
pDst->uid = htobe64(pSrc->uid);
pDst->tid = htonl(pSrc->tid);
pDst->key = htobe64(pSrc->key);
data += sizeof(STableIdInfo);
}
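// layout note (editorial, not in the original source): after the column data the
// response now carries an int32_t table count in network byte order, followed by
// that many STableIdInfo records whose uid/tid/key fields are byte-swapped as well;
// a reader is expected to undo htonl/htobe64 when parsing this trailer.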
// all data returned, set query over
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
@ -4057,10 +4071,11 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv) {
* pQuery->limit.offset times. Since hole exists, pQuery->intervalTime*pQuery->limit.offset value is
* not valid. otherwise, we only forward pQuery->limit.offset number of points
*/
assert(pRuntimeEnv->windowResInfo.prevSKey == 0);
assert(pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL);
TSKEY skey1, ekey1;
STimeWindow w = {0};
STimeWindow w = TSWINDOW_INITIALIZER;
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
STableQueryInfo *pTableQueryInfo = pQuery->current;
@ -4148,14 +4163,18 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv) {
return true;
}
int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool isSTableQuery) {
static void setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
int32_t code = TSDB_CODE_SUCCESS;
setScanLimitationByResultBuffer(pQuery);
changeExecuteScanOrder(pQuery, false);
if (onlyQueryTags(pQuery)) {
return;
}
if (isSTableQuery && (!isIntervalQuery(pQuery)) && (!isFixedOutputQuery(pQuery))) {
return;
}
STsdbQueryCond cond = {
.twindow = pQuery->window,
@ -4164,18 +4183,31 @@ int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool
.numOfCols = pQuery->numOfCols,
};
if (!isSTableQuery
&& (pQInfo->groupInfo.numOfTables == 1)
&& (cond.order == TSDB_ORDER_ASC)
&& (!isIntervalQuery(pQuery))
&& (!isGroupbyNormalCol(pQuery->pGroupbyExpr))
&& (!isFixedOutputQuery(pQuery))
) {
SArray* pa = taosArrayGetP(pQInfo->groupInfo.pGroupList, 0);
SGroupItem* pItem = taosArrayGet(pa, 0);
cond.twindow = pItem->info->win;
}
// normal query setup the queryhandle here
if (!onlyQueryTags(pQuery)) {
if (!isSTableQuery && isFirstLastRowQuery(pQuery)) { // in case of last_row query, invoke a different API.
pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQInfo->tableIdGroupInfo);
} else if (!isSTableQuery || isIntervalQuery(pQuery) || isFixedOutputQuery(pQuery)) {
pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQInfo->tableIdGroupInfo);
}
// create the table query support structures
createTableQueryInfo(pQInfo);
}
int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool isSTableQuery) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
int32_t code = TSDB_CODE_SUCCESS;
setScanLimitationByResultBuffer(pQuery);
changeExecuteScanOrder(pQuery, false);
setupQueryHandle(tsdb, pQInfo, isSTableQuery);
pQInfo->tsdb = tsdb;
pQInfo->vgId = vgId;
@ -4594,6 +4626,13 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
* to ensure that, we can reset the query range once query on a meter is completed.
*/
pQInfo->tableIndex++;
STableIdInfo tidInfo;
tidInfo.uid = item->id.uid;
tidInfo.tid = item->id.tid;
tidInfo.key = pQuery->current->lastKey;
taosArrayPush(pQInfo->arrTableIdInfo, &tidInfo);
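// presumably recorded so that the client (e.g. a subscriber) can resume this table from lastKey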
// if the buffer is full or group by each table, we need to jump out of the loop
if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL) /*||
isGroupbyEachTable(pQuery->pGroupbyExpr, pSupporter->pSidSet)*/) {
@ -4663,35 +4702,6 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
pQuery->limit.offset);
}
static void createTableQueryInfo(SQInfo *pQInfo) {
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
// todo make sure the table are added the reference count to gauranteed that all involved tables are valid
size_t numOfGroups = taosArrayGetSize(pQInfo->groupInfo.pGroupList);
int32_t index = 0;
for (int32_t i = 0; i < numOfGroups; ++i) { // load all meter meta info
SArray *group = *(SArray **)taosArrayGet(pQInfo->groupInfo.pGroupList, i);
size_t s = taosArrayGetSize(group);
for (int32_t j = 0; j < s; ++j) {
SGroupItem* item = (SGroupItem *)taosArrayGet(group, j);
// STableQueryInfo has been created for each table
if (item->info != NULL) {
return;
}
STableQueryInfo* pInfo = createTableQueryInfoImpl(&pQInfo->runtimeEnv, item->id, pQuery->window);
pInfo->groupIdx = i;
pInfo->tableIndex = index;
item->info = pInfo;
index += 1;
}
}
}
static void doSaveContext(SQInfo *pQInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery * pQuery = pRuntimeEnv->pQuery;
@ -4730,7 +4740,7 @@ static void doRestoreContext(SQInfo *pQInfo) {
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
if (pRuntimeEnv->pTSBuf != NULL) {
pRuntimeEnv->pTSBuf->cur.order = pRuntimeEnv->pTSBuf->cur.order ^ 1;
SWITCH_ORDER(pRuntimeEnv->pTSBuf->cur.order);
}
switchCtxOrder(pRuntimeEnv);
@ -4913,6 +4923,12 @@ static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) {
qTrace("QInfo:%p query paused due to output limitation, next qrange:%" PRId64 "-%" PRId64, pQInfo,
pQuery->current->lastKey, pQuery->window.ekey);
} else if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
STableIdInfo tidInfo;
tidInfo.uid = pQuery->current->id.uid;
tidInfo.tid = pQuery->current->id.tid;
tidInfo.key = pQuery->current->lastKey;
taosArrayPush(pQInfo->arrTableIdInfo, &tidInfo);
}
if (!isTSCompQuery(pQuery)) {
@ -5196,20 +5212,10 @@ static bool validateQuerySourceCols(SQueryTableMsg *pQueryMsg, SSqlFuncMsg** pEx
static char *createTableIdList(SQueryTableMsg *pQueryMsg, char *pMsg, SArray **pTableIdList) {
assert(pQueryMsg->numOfTables > 0);
*pTableIdList = taosArrayInit(pQueryMsg->numOfTables, sizeof(STableId));
*pTableIdList = taosArrayInit(pQueryMsg->numOfTables, sizeof(STableIdInfo));
for (int32_t j = 0; j < pQueryMsg->numOfTables; ++j) {
STableIdInfo* pTableIdInfo = (STableIdInfo *)pMsg;
pTableIdInfo->tid = htonl(pTableIdInfo->tid);
pTableIdInfo->uid = htobe64(pTableIdInfo->uid);
pTableIdInfo->key = htobe64(pTableIdInfo->key);
STableId id = {.uid = pTableIdInfo->uid, .tid = pTableIdInfo->tid};
taosArrayPush(*pTableIdList, &id);
pMsg += sizeof(STableIdInfo);
for (int32_t j = 1; j < pQueryMsg->numOfTables; ++j) {
pTableIdInfo = (STableIdInfo *)pMsg;
pTableIdInfo->tid = htonl(pTableIdInfo->tid);
pTableIdInfo->uid = htobe64(pTableIdInfo->uid);
@ -5660,7 +5666,16 @@ static void doUpdateExprColumnIndex(SQuery *pQuery) {
}
}
static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
static int compareTableIdInfo( const void* a, const void* b ) {
const STableIdInfo* x = (const STableIdInfo*)a;
const STableIdInfo* y = (const STableIdInfo*)b;
if (x->uid > y->uid) return 1;
if (x->uid < y->uid) return -1;
return 0;
}
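// used below with taosArraySort/taosArraySearch on pTableIdList; ordering is by uid only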
static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
STableGroupInfo *groupInfo, SColumnInfo* pTagCols) {
SQInfo *pQInfo = (SQInfo *)calloc(1, sizeof(SQInfo));
if (pQInfo == NULL) {
@ -5753,6 +5768,9 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou
pQInfo->groupInfo.pGroupList = taosArrayInit(numOfGroups, POINTER_BYTES);
pQInfo->groupInfo.numOfTables = groupInfo->numOfTables;
int tableIndex = 0;
STimeWindow window = pQueryMsg->window;
taosArraySort( pTableIdList, compareTableIdInfo );
for(int32_t i = 0; i < numOfGroups; ++i) {
SArray* pa = taosArrayGetP(groupInfo->pGroupList, i);
size_t s = taosArrayGetSize(pa);
@ -5760,13 +5778,26 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou
SArray* p1 = taosArrayInit(s, sizeof(SGroupItem));
for(int32_t j = 0; j < s; ++j) {
SGroupItem item = { .id = *(STableId*) taosArrayGet(pa, j), .info = NULL, };
STableId id = *(STableId*) taosArrayGet(pa, j);
SGroupItem item = { .id = id };
// NOTE: this compares an STableIdInfo against an STableId; it is safe at present
// because the comparator only reads their first field (the int64_t uid)
STableIdInfo* pTableId = taosArraySearch( pTableIdList, compareTableIdInfo, &id );
if (pTableId != NULL ) {
window.skey = pTableId->key;
} else {
window.skey = pQueryMsg->window.skey;
}
item.info = createTableQueryInfo(&pQInfo->runtimeEnv, item.id, window);
item.info->groupIdx = i;
item.info->tableIndex = tableIndex++;
taosArrayPush(p1, &item);
}
taosArrayPush(pQInfo->groupInfo.pGroupList, &p1);
}
pQInfo->arrTableIdInfo = taosArrayInit(tableIndex, sizeof(STableIdInfo));
pQuery->pos = -1;
pQuery->window = pQueryMsg->window;
@ -5918,6 +5949,7 @@ static void freeQInfo(SQInfo *pQInfo) {
}
taosArrayDestroy(pQInfo->tableIdGroupInfo.pGroupList);
taosArrayDestroy(pQInfo->arrTableIdInfo);
if (pQuery->pGroupbyExpr != NULL) {
taosArrayDestroy(pQuery->pGroupbyExpr->columnInfo);
@ -6045,13 +6077,17 @@ int32_t qCreateQueryInfo(void *tsdb, int32_t vgId, SQueryTableMsg *pQueryMsg, qi
if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY|TSDB_QUERY_TYPE_TABLE_QUERY)) {
isSTableQuery = TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY);
STableId *id = taosArrayGet(pTableIdList, 0);
STableIdInfo *id = taosArrayGet(pTableIdList, 0);
if ((code = tsdbGetOneTableGroup(tsdb, id->uid, &groupInfo)) != TSDB_CODE_SUCCESS) {
goto _over;
}
} else if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_STABLE_QUERY)) {
isSTableQuery = true;
STableId *id = taosArrayGet(pTableIdList, 0);
// TODO: need a macro from TSDB to check whether a table is a super table;
// also note there is a possibility that the super table contains only one table
if (taosArrayGetSize(pTableIdList) == 1) {
STableIdInfo *id = taosArrayGet(pTableIdList, 0);
// if array size is 1 and assert super table
// group by normal column, do not pass the group by condition to tsdb to group table into different group
int32_t numOfGroupByCols = pQueryMsg->numOfGroupCols;
@ -6066,11 +6102,23 @@ int32_t qCreateQueryInfo(void *tsdb, int32_t vgId, SQueryTableMsg *pQueryMsg, qi
code = TSDB_CODE_SUCCESS;
goto _over;
}
} else {
groupInfo.numOfTables = taosArrayGetSize(pTableIdList);
SArray* pTableGroup = taosArrayInit(1, POINTER_BYTES);
SArray* sa = taosArrayInit(groupInfo.numOfTables, sizeof(STableId));
for(int32_t i = 0; i < groupInfo.numOfTables; ++i) {
STableIdInfo* tableId = taosArrayGet(pTableIdList, i);
taosArrayPush(sa, tableId);
}
taosArrayPush(pTableGroup, &sa);
groupInfo.pGroupList = pTableGroup;
}
} else {
assert(0);
}
(*pQInfo) = createQInfoImpl(pQueryMsg, pGroupbyExpr, pExprs, &groupInfo, pTagColumnInfo);
(*pQInfo) = createQInfoImpl(pQueryMsg, pTableIdList, pGroupbyExpr, pExprs, &groupInfo, pTagColumnInfo);
if ((*pQInfo) == NULL) {
code = TSDB_CODE_SERV_OUT_OF_MEMORY;
goto _over;
@ -6168,6 +6216,8 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
size_t size = getResultSize(pQInfo, &pQuery->rec.rows);
size += sizeof(int32_t);
size += sizeof(STableIdInfo) * taosArrayGetSize(pQInfo->arrTableIdInfo);
*contLen = size + sizeof(SRetrieveTableRsp);
// todo handle failed to allocate memory

View File

@ -161,7 +161,7 @@ static int tsdbRestoreInfo(STsdbRepo *pRepo) {
SFileGroup *pFGroup = NULL;
SFileGroupIter iter;
SRWHelper rhelper = {0};
SRWHelper rhelper = {{0}};
if (tsdbInitReadHelper(&rhelper, pRepo) < 0) goto _err;
tsdbInitFileGroupIter(pFileH, &iter, TSDB_ORDER_ASC);
@ -284,6 +284,7 @@ int32_t tsdbCloseRepo(TsdbRepoT *repo) {
pRepo->tsdbCache->curBlock = NULL;
tsdbUnLockRepo(repo);
if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START);
tsdbCommitData((void *)repo);
tsdbCloseFileH(pRepo->tsdbFileH);
@ -330,7 +331,7 @@ int32_t tsdbConfigRepo(TsdbRepoT *repo, STsdbCfg *pCfg) {
int32_t tsdbTriggerCommit(TsdbRepoT *repo) {
STsdbRepo *pRepo = (STsdbRepo *)repo;
if (pRepo->appH.walCallBack) pRepo->appH.walCallBack(pRepo->appH.appH);
if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START);
tsdbLockRepo(repo);
if (pRepo->commit) {
@ -942,15 +943,16 @@ static void tsdbFreeMemTable(SMemTable *pMemTable) {
// Commit to file
static void *tsdbCommitData(void *arg) {
printf("Starting to commit....\n");
STsdbRepo * pRepo = (STsdbRepo *)arg;
STsdbMeta * pMeta = pRepo->tsdbMeta;
STsdbCache *pCache = pRepo->tsdbCache;
STsdbCfg * pCfg = &(pRepo->config);
SDataCols * pDataCols = NULL;
SRWHelper whelper = {0};
SRWHelper whelper = {{0}};
if (pCache->imem == NULL) return NULL;
tsdbPrint("vgId: %d, starting to commit....", pRepo->config.tsdbId);
// Create the iterator to read from cache
SSkipListIterator **iters = tsdbCreateTableIters(pMeta, pCfg->maxTables);
if (iters == NULL) {
@ -974,6 +976,7 @@ static void *tsdbCommitData(void *arg) {
// Do retention actions
tsdbFitRetention(pRepo);
if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_OVER);
_exit:
tdFreeDataCols(pDataCols);

View File

@ -54,12 +54,12 @@ typedef struct SQueryFilePos {
typedef struct SDataBlockLoadInfo {
SFileGroup* fileGroup;
int32_t slot;
int32_t sid;
int32_t tid;
SArray* pLoadedCols;
} SDataBlockLoadInfo;
typedef struct SLoadCompBlockInfo {
int32_t sid; /* table sid */
int32_t tid; /* table tid */
int32_t fileId;
int32_t fileListIndex;
} SLoadCompBlockInfo;
@ -127,12 +127,12 @@ static void changeQueryHandleForLastrowQuery(TsdbQueryHandleT pqHandle);
static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) {
pBlockLoadInfo->slot = -1;
pBlockLoadInfo->sid = -1;
pBlockLoadInfo->tid = -1;
pBlockLoadInfo->fileGroup = NULL;
}
static void tsdbInitCompBlockLoadInfo(SLoadCompBlockInfo* pCompBlockLoadInfo) {
pCompBlockLoadInfo->sid = -1;
pCompBlockLoadInfo->tid = -1;
pCompBlockLoadInfo->fileId = -1;
pCompBlockLoadInfo->fileListIndex = -1;
}
@ -423,7 +423,7 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
bool blockLoaded = false;
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
if (pCheckInfo->pDataCols == NULL) {
if (pCheckInfo->pDataCols == NULL) { // todo: why not the real data?
pCheckInfo->pDataCols = tdNewDataCols(pRepo->tsdbMeta->maxRowBytes, pRepo->tsdbMeta->maxCols, pRepo->config.maxRowsPerFileBlock);
}
@ -434,7 +434,7 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
pBlockLoadInfo->fileGroup = pQueryHandle->pFileGroup;
pBlockLoadInfo->slot = pQueryHandle->cur.slot;
pBlockLoadInfo->sid = pCheckInfo->pTableObj->tableId.tid;
pBlockLoadInfo->tid = pCheckInfo->pTableObj->tableId.tid;
blockLoaded = true;
}
@ -614,7 +614,7 @@ static void filterDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInf
SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i);
int32_t bytes = pCol->info.bytes;
for (int32_t j = 0; j < numOfCols; ++j) {
for (int32_t j = 0; j < numOfCols; ++j) { //todo opt performance
SDataCol* src = &pQueryHandle->rhelper.pDataCols[0]->cols[j];
if (pCol->info.colId == src->colId) {
@ -1197,7 +1197,9 @@ SArray* tsdbRetrieveDataBlock(TsdbQueryHandleT* pQueryHandle, SArray* pIdList) {
} else {
// data block has been loaded, todo extract method
SDataBlockLoadInfo* pBlockLoadInfo = &pHandle->dataBlockLoadInfo;
if (pBlockLoadInfo->slot == pHandle->cur.slot && pBlockLoadInfo->sid == pCheckInfo->pTableObj->tableId.tid) {
if (pBlockLoadInfo->slot == pHandle->cur.slot && pBlockLoadInfo->fileGroup->fileId == pHandle->cur.fid &&
pBlockLoadInfo->tid == pCheckInfo->pTableObj->tableId.tid) {
return pHandle->pColumns;
} else {
SCompBlock* pBlock = pBlockInfoEx->pBlock.compBlock;
@ -1347,7 +1349,7 @@ int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) {
int32_t type = 0;
int32_t bytes = 0;
if (colIndex == TSDB_TBNAME_COLUMN_INDEX) {
if (colIndex == TSDB_TBNAME_COLUMN_INDEX) { // todo refactor extract method , to queryExecutor to generate tags values
f1 = (char*) pTable1->name;
f2 = (char*) pTable2->name;
type = TSDB_DATA_TYPE_BINARY;
@ -1355,6 +1357,7 @@ int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) {
} else {
STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex);
bytes = pCol->bytes;
type = pCol->type;
f1 = tdGetRowDataOfCol(pTable1->tagVal, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset);
f2 = tdGetRowDataOfCol(pTable2->tagVal, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset);
@ -1522,7 +1525,7 @@ int32_t tsdbQuerySTableByTagCond(TsdbRepoT *tsdb, int64_t uid, const char *pTagC
}
if (pTable->type != TSDB_SUPER_TABLE) {
uError("%p query normal tag not allowed, uid:%, tid:%d, name:%s" PRIu64,
uError("%p query normal tag not allowed, uid:%" PRIu64 ", tid:%d, name:%s",
tsdb, uid, pTable->tableId.tid, pTable->name);
return TSDB_CODE_OPS_NOT_SUPPORT; //basically, this error is caused by invalid sql issued by client

View File

@ -73,6 +73,7 @@ void cleanupPush_void_ptr_bool ( bool failOnly, void* func, void* arg1, bool ar
void cleanupPush_void_ptr ( bool failOnly, void* func, void* arg );
void cleanupPush_int_int ( bool failOnly, void* func, int arg );
void cleanupPush_void ( bool failOnly, void* func );
void cleanupPush_int_ptr ( bool failOnly, void* func, void* arg );
int32_t cleanupGetActionCount();
void cleanupExecuteTo( int32_t anchor, bool failed );
@ -83,8 +84,10 @@ void cleanupExecute( SExceptionNode* node, bool failed );
#define CLEANUP_PUSH_VOID_PTR( failOnly, func, arg ) cleanupPush_void_ptr( (failOnly), (void*)(func), (void*)(arg) )
#define CLEANUP_PUSH_INT_INT( failOnly, func, arg ) cleanupPush_void_ptr( (failOnly), (void*)(func), (int)(arg) )
#define CLEANUP_PUSH_VOID( failOnly, func ) cleanupPush_void( (failOnly), (void*)(func) )
#define CLEANUP_PUSH_INT_PTR( failOnly, func, arg ) cleanupPush_int_ptr( (failOnly), (void*)(func), (void*)(arg) )
#define CLEANUP_PUSH_FREE( failOnly, arg ) cleanupPush_void_ptr( (failOnly), free, (void*)(arg) )
#define CLEANUP_PUSH_CLOSE( failOnly, arg ) cleanupPush_int_int( (failOnly), close, (int)(arg) )
#define CLEANUP_PUSH_FCLOSE( failOnly, arg ) cleanupPush_int_ptr( (failOnly), fclose, (void*)(arg) )
#define CLEANUP_GET_ANCHOR() cleanupGetActionCount()
#define CLEANUP_EXECUTE_TO( anchor, failed ) cleanupExecuteTo( (anchor), (failed) )
@ -95,7 +98,7 @@ void cleanupExecute( SExceptionNode* node, bool failed );
void exceptionPushNode( SExceptionNode* node );
int32_t exceptionPopNode();
void exceptionThrow( int code );
void exceptionThrow( int32_t code );
#define TRY(maxCleanupActions) do { \
SExceptionNode exceptionNode = { 0 }; \
@ -106,10 +109,10 @@ void exceptionThrow( int code );
int caughtException = setjmp( exceptionNode.jb ); \
if( caughtException == 0 )
#define CATCH( code ) int code = exceptionPopNode(); \
#define CATCH( code ) int32_t code = exceptionPopNode(); \
if( caughtException == 1 )
#define FINALLY( code ) int code = exceptionPopNode();
#define FINALLY( code ) int32_t code = exceptionPopNode();
#define END_TRY } while( 0 );
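
The new CLEANUP_PUSH_INT_PTR/CLEANUP_PUSH_FCLOSE entries extend the cleanup stack to fclose-shaped functions. A hedged usage sketch built only from the declarations above; the function, its placeholder error code, and the file handling are illustrative assumptions, not part of this commit (assumes <stdio.h>, "os.h" and "exception.h" are included):

// illustrative only: register fclose() as a cleanup action inside TRY/CATCH
static int32_t readConfig(const char* path) {
  int32_t code = 0;
  TRY(4) {
    FILE* fp = fopen(path, "r");
    if (fp == NULL) {
      THROW( -1 );                            // placeholder error code for illustration
    }
    CLEANUP_PUSH_FCLOSE(false, fp);           // goes through the new cleanupPush_int_ptr
    // ... parse the file, possibly calling THROW() on error ...
    cleanupExecute(&exceptionNode, false);    // success path: run the registered actions
  } CATCH( err ) {
    cleanupExecute(&exceptionNode, true);     // failure path: unwind everything pushed above
    code = err;
  } END_TRY
  return code;
}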

View File

@ -94,7 +94,7 @@ size_t taosHashGetSize(const SHashObj *pHashObj);
* @param size
* @return
*/
int32_t taosHashPut(SHashObj *pHashObj, const char *key, size_t keyLen, void *data, size_t size);
int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size);
/**
* return the payload data with the specified key
@ -104,7 +104,7 @@ int32_t taosHashPut(SHashObj *pHashObj, const char *key, size_t keyLen, void *da
* @param keyLen
* @return
*/
void *taosHashGet(SHashObj *pHashObj, const char *key, size_t keyLen);
void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen);
/**
* remove item with the specified key
@ -112,7 +112,7 @@ void *taosHashGet(SHashObj *pHashObj, const char *key, size_t keyLen);
* @param key
* @param keyLen
*/
void taosHashRemove(SHashObj *pHashObj, const char *key, size_t keyLen);
void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen);
/**
* clean up hash table

View File

@ -106,6 +106,12 @@ void taosArrayCopy(SArray* pDst, const SArray* pSrc);
*/
SArray* taosArrayClone(const SArray* pSrc);
/**
* clear the array (remove all element)
* @param pArray
*/
void taosArrayClear(SArray* pArray);
/**
* destroy array list
* @param pArray

View File

@ -1,7 +1,8 @@
#include "os.h"
#include "exception.h"
static _Thread_local SExceptionNode* expList;
static threadlocal SExceptionNode* expList;
void exceptionPushNode( SExceptionNode* node ) {
node->prev = expList;
@ -14,7 +15,7 @@ int32_t exceptionPopNode() {
return node->code;
}
void exceptionThrow( int code ) {
void exceptionThrow( int32_t code ) {
expList->code = code;
longjmp( expList->jb, 1 );
}
@ -38,21 +39,27 @@ static void cleanupWrapper_void_ptr( SCleanupAction* ca ) {
static void cleanupWrapper_int_int( SCleanupAction* ca ) {
int (*func)( int ) = ca->func;
func( (int)(intptr_t)(ca->arg1.Int) );
func( ca->arg1.Int );
}
static void cleanupWrapper_void_void( SCleanupAction* ca ) {
static void cleanupWrapper_void( SCleanupAction* ca ) {
void (*func)() = ca->func;
func();
}
static void cleanupWrapper_int_ptr( SCleanupAction* ca ) {
int (*func)( void* ) = ca->func;
func( ca->arg1.Ptr );
}
typedef void (*wrapper)(SCleanupAction*);
static wrapper wrappers[] = {
cleanupWrapper_void_ptr_ptr,
cleanupWrapper_void_ptr_bool,
cleanupWrapper_void_ptr,
cleanupWrapper_int_int,
cleanupWrapper_void_void,
cleanupWrapper_void,
cleanupWrapper_int_ptr,
};
@ -107,6 +114,15 @@ void cleanupPush_void( bool failOnly, void* func ) {
ca->func = func;
}
void cleanupPush_int_ptr( bool failOnly, void* func, void* arg ) {
assert( expList->numCleanupAction < expList->maxCleanupAction );
SCleanupAction *ca = expList->cleanupActions + expList->numCleanupAction++;
ca->wrapper = 5;
ca->failOnly = failOnly;
ca->func = func;
ca->arg1.Ptr = arg;
}
int32_t cleanupGetActionCount() {
@ -118,10 +134,11 @@ static void doExecuteCleanup( SExceptionNode* node, int32_t anchor, bool failed
while( node->numCleanupAction > anchor ) {
--node->numCleanupAction;
SCleanupAction *ca = node->cleanupActions + node->numCleanupAction;
if( failed || !(ca->failOnly) )
if( failed || !(ca->failOnly) ) {
wrappers[ca->wrapper]( ca );
}
}
}
void cleanupExecuteTo( int32_t anchor, bool failed ) {
doExecuteCleanup( expList, anchor, failed );

View File

@ -17,7 +17,6 @@
#include "hash.h"
#include "tulog.h"
#include "ttime.h"
#include "tutil.h"
static FORCE_INLINE void __wr_lock(void *lock) {
@ -91,153 +90,64 @@ static FORCE_INLINE int32_t taosHashCapacity(int32_t length) {
/**
* inplace update node in hash table
* @param pHashObj hash table object
* @param pNode data node
* @param pNode hash data node
*/
static void doUpdateHashTable(SHashObj *pHashObj, SHashNode *pNode) {
if (pNode->prev1) {
pNode->prev1->next = pNode;
}
if (pNode->next) {
(pNode->next)->prev = pNode;
}
}
static void doUpdateHashTable(SHashObj *pHashObj, SHashNode *pNode);
/**
* get SHashNode from hashlist, nodes from trash are not included.
* Get SHashNode from hashlist, nodes from trash are not included.
* @param pHashObj hash table object
* @param key key for hash
* @param keyLen key length
* @param hashVal hash value by hash function
* @return
*/
static SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const char *key, uint32_t keyLen, uint32_t *hashVal) {
uint32_t hash = (*pHashObj->hashFp)(key, keyLen);
int32_t slot = HASH_INDEX(hash, pHashObj->capacity);
SHashEntry *pEntry = pHashObj->hashList[slot];
SHashNode *pNode = pEntry->next;
while (pNode) {
if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) {
break;
}
pNode = pNode->next;
}
if (pNode) {
assert(HASH_INDEX(pNode->hashVal, pHashObj->capacity) == slot);
}
// return the calculated hash value, to avoid calculating it again in other functions
if (hashVal != NULL) {
*hashVal = hash;
}
return pNode;
}
static SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t *hashVal);
/**
* resize the hash list if the threshold is reached
* Resize the hash list if the threshold is reached
*
* @param pHashObj
*/
static void taosHashTableResize(SHashObj *pHashObj) {
if (pHashObj->size < pHashObj->capacity * HASH_DEFAULT_LOAD_FACTOR) {
return;
}
// double the original capacity
SHashNode *pNode = NULL;
SHashNode *pNext = NULL;
int32_t newSize = pHashObj->capacity << 1u;
if (newSize > HASH_MAX_CAPACITY) {
// uTrace("current capacity:%d, maximum capacity:%d, no resize applied due to limitation is reached",
// pHashObj->capacity, HASH_MAX_CAPACITY);
return;
}
// int64_t st = taosGetTimestampUs();
SHashEntry **pNewEntry = realloc(pHashObj->hashList, sizeof(SHashEntry *) * newSize);
if (pNewEntry == NULL) {
// uTrace("cache resize failed due to out of memory, capacity remain:%d", pHashObj->capacity);
return;
}
pHashObj->hashList = pNewEntry;
for (int32_t i = pHashObj->capacity; i < newSize; ++i) {
pHashObj->hashList[i] = calloc(1, sizeof(SHashEntry));
}
pHashObj->capacity = newSize;
for (int32_t i = 0; i < pHashObj->capacity; ++i) {
SHashEntry *pEntry = pHashObj->hashList[i];
pNode = pEntry->next;
if (pNode != NULL) {
assert(pNode->prev1 == pEntry && pEntry->num > 0);
}
while (pNode) {
int32_t j = HASH_INDEX(pNode->hashVal, pHashObj->capacity);
if (j == i) { // this key resides in the same slot, no need to relocate it
pNode = pNode->next;
} else {
pNext = pNode->next;
// remove from current slot
assert(pNode->prev1 != NULL);
if (pNode->prev1 == pEntry) { // first node of the overflow linked list
pEntry->next = pNode->next;
} else {
pNode->prev->next = pNode->next;
}
pEntry->num--;
assert(pEntry->num >= 0);
if (pNode->next != NULL) {
(pNode->next)->prev = pNode->prev;
}
// added into new slot
pNode->next = NULL;
pNode->prev1 = NULL;
SHashEntry *pNewIndexEntry = pHashObj->hashList[j];
if (pNewIndexEntry->next != NULL) {
assert(pNewIndexEntry->next->prev1 == pNewIndexEntry);
pNewIndexEntry->next->prev = pNode;
}
pNode->next = pNewIndexEntry->next;
pNode->prev1 = pNewIndexEntry;
pNewIndexEntry->next = pNode;
pNewIndexEntry->num++;
// continue
pNode = pNext;
}
}
}
// int64_t et = taosGetTimestampUs();
// uTrace("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", pHashObj->capacity,
// ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0);
}
static void taosHashTableResize(SHashObj *pHashObj);
/**
* @param capacity maximum slots available for hash elements
* @param fn hash function
* @param key key of object for hash, usually a null-terminated string
* @param keyLen length of key
* @param pData actual data. Requires a consecutive memory block; no pointer is allowed in pData.
* Pointer copy causes memory access error.
* @param dsize size of data
* @return SHashNode
*/
static SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal);
/**
* Update the hash node
*
* @param pNode hash node
* @param key key used to generate the hash value
* @param keyLen key length
* @param pData actual data
* @param dsize size of actual data
* @return hash node
*/
static SHashNode *doUpdateHashNode(SHashNode *pNode, const void *key, size_t keyLen, const void *pData, size_t dsize);
/**
* insert the hash node at the front of the linked list
*
* @param pHashObj
* @param pNode
*/
static void doAddToHashTable(SHashObj *pHashObj, SHashNode *pNode);
/**
* Get the next element in hash table for iterator
* @param pIter
* @return
*/
static SHashNode *getNextHashNode(SHashMutableIterator *pIter);
SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool threadsafe) {
if (capacity == 0 || fn == NULL) {
return NULL;
@ -285,79 +195,6 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool threadsafe) {
return pHashObj;
}
/**
* @param key key of object for hash, usually a null-terminated string
* @param keyLen length of key
* @param pData actually data. required a consecutive memory block, no pointer is allowed
* in pData. Pointer copy causes memory access error.
* @param size size of block
* @return SHashNode
*/
static SHashNode *doCreateHashNode(const char *key, size_t keyLen, const char *pData, size_t dataSize,
uint32_t hashVal) {
size_t totalSize = dataSize + sizeof(SHashNode) + keyLen + 1; // one extra byte for null
SHashNode *pNewNode = calloc(1, totalSize);
if (pNewNode == NULL) {
uError("failed to allocate memory, reason:%s", strerror(errno));
return NULL;
}
memcpy(pNewNode->data, pData, dataSize);
pNewNode->key = pNewNode->data + dataSize;
memcpy(pNewNode->key, key, keyLen);
pNewNode->keyLen = keyLen;
pNewNode->hashVal = hashVal;
return pNewNode;
}
static SHashNode *doUpdateHashNode(SHashNode *pNode, const char *key, size_t keyLen, const char *pData,
size_t dataSize) {
size_t size = dataSize + sizeof(SHashNode) + keyLen;
SHashNode *pNewNode = (SHashNode *)realloc(pNode, size);
if (pNewNode == NULL) {
return NULL;
}
memcpy(pNewNode->data, pData, dataSize);
pNewNode->key = pNewNode->data + dataSize;
assert(memcmp(pNewNode->key, key, keyLen) == 0 && keyLen == pNewNode->keyLen);
memcpy(pNewNode->key, key, keyLen);
return pNewNode;
}
/**
* insert the hash node at the front of the linked list
*
* @param pHashObj
* @param pNode
*/
static void doAddToHashTable(SHashObj *pHashObj, SHashNode *pNode) {
assert(pNode != NULL);
int32_t index = HASH_INDEX(pNode->hashVal, pHashObj->capacity);
SHashEntry *pEntry = pHashObj->hashList[index];
pNode->next = pEntry->next;
if (pEntry->next) {
pEntry->next->prev = pNode;
}
pEntry->next = pNode;
pNode->prev1 = pEntry;
pEntry->num++;
pHashObj->size++;
}
size_t taosHashGetSize(const SHashObj *pHashObj) {
if (pHashObj == NULL) {
return 0;
@ -366,12 +203,7 @@ size_t taosHashGetSize(const SHashObj *pHashObj) {
return pHashObj->size;
}
/**
* add data node into hash table
* @param pHashObj hash object
* @param pNode hash node
*/
int32_t taosHashPut(SHashObj *pHashObj, const char *key, size_t keyLen, void *data, size_t size) {
int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size) {
__wr_lock(pHashObj->lock);
uint32_t hashVal = 0;
@ -402,7 +234,7 @@ int32_t taosHashPut(SHashObj *pHashObj, const char *key, size_t keyLen, void *da
return 0;
}
void *taosHashGet(SHashObj *pHashObj, const char *key, size_t keyLen) {
void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) {
__rd_lock(pHashObj->lock);
uint32_t hashVal = 0;
@ -419,12 +251,7 @@ void *taosHashGet(SHashObj *pHashObj, const char *key, size_t keyLen) {
}
}
/**
* remove node in hash list
* @param pHashObj
* @param pNode
*/
void taosHashRemove(SHashObj *pHashObj, const char *key, size_t keyLen) {
void taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) {
__wr_lock(pHashObj->lock);
uint32_t val = 0;
@ -518,23 +345,6 @@ SHashMutableIterator *taosHashCreateIter(SHashObj *pHashObj) {
return pIter;
}
static SHashNode *getNextHashNode(SHashMutableIterator *pIter) {
assert(pIter != NULL);
pIter->entryIndex++;
while (pIter->entryIndex < pIter->pHashObj->capacity) {
SHashEntry *pEntry = pIter->pHashObj->hashList[pIter->entryIndex];
if (pEntry->next == NULL) {
pIter->entryIndex++;
continue;
}
return pEntry->next;
}
return NULL;
}
bool taosHashIterNext(SHashMutableIterator *pIter) {
if (pIter == NULL) {
return false;
@ -617,3 +427,205 @@ int32_t taosHashGetMaxOverflowLinkLength(const SHashObj *pHashObj) {
return num;
}
void doUpdateHashTable(SHashObj *pHashObj, SHashNode *pNode) {
if (pNode->prev1) {
pNode->prev1->next = pNode;
}
if (pNode->next) {
(pNode->next)->prev = pNode;
}
}
SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t *hashVal) {
uint32_t hash = (*pHashObj->hashFp)(key, keyLen);
int32_t slot = HASH_INDEX(hash, pHashObj->capacity);
SHashEntry *pEntry = pHashObj->hashList[slot];
SHashNode *pNode = pEntry->next;
while (pNode) {
if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) {
break;
}
pNode = pNode->next;
}
if (pNode) {
assert(HASH_INDEX(pNode->hashVal, pHashObj->capacity) == slot);
}
// return the calculated hash value, to avoid calculating it again in other functions
if (hashVal != NULL) {
*hashVal = hash;
}
return pNode;
}
void taosHashTableResize(SHashObj *pHashObj) {
if (pHashObj->size < pHashObj->capacity * HASH_DEFAULT_LOAD_FACTOR) {
return;
}
// double the original capacity
SHashNode *pNode = NULL;
SHashNode *pNext = NULL;
int32_t newSize = pHashObj->capacity << 1u;
if (newSize > HASH_MAX_CAPACITY) {
// uTrace("current capacity:%d, maximum capacity:%d, no resize applied due to limitation is reached",
// pHashObj->capacity, HASH_MAX_CAPACITY);
return;
}
// int64_t st = taosGetTimestampUs();
SHashEntry **pNewEntry = realloc(pHashObj->hashList, sizeof(SHashEntry *) * newSize);
if (pNewEntry == NULL) {
// uTrace("cache resize failed due to out of memory, capacity remain:%d", pHashObj->capacity);
return;
}
pHashObj->hashList = pNewEntry;
for (int32_t i = pHashObj->capacity; i < newSize; ++i) {
pHashObj->hashList[i] = calloc(1, sizeof(SHashEntry));
}
pHashObj->capacity = newSize;
for (int32_t i = 0; i < pHashObj->capacity; ++i) {
SHashEntry *pEntry = pHashObj->hashList[i];
pNode = pEntry->next;
if (pNode != NULL) {
assert(pNode->prev1 == pEntry && pEntry->num > 0);
}
while (pNode) {
int32_t j = HASH_INDEX(pNode->hashVal, pHashObj->capacity);
if (j == i) { // this key resides in the same slot, no need to relocate it
pNode = pNode->next;
} else {
pNext = pNode->next;
// remove from current slot
assert(pNode->prev1 != NULL);
if (pNode->prev1 == pEntry) { // first node of the overflow linked list
pEntry->next = pNode->next;
} else {
pNode->prev->next = pNode->next;
}
pEntry->num--;
assert(pEntry->num >= 0);
if (pNode->next != NULL) {
(pNode->next)->prev = pNode->prev;
}
// added into new slot
pNode->next = NULL;
pNode->prev1 = NULL;
SHashEntry *pNewIndexEntry = pHashObj->hashList[j];
if (pNewIndexEntry->next != NULL) {
assert(pNewIndexEntry->next->prev1 == pNewIndexEntry);
pNewIndexEntry->next->prev = pNode;
}
pNode->next = pNewIndexEntry->next;
pNode->prev1 = pNewIndexEntry;
pNewIndexEntry->next = pNode;
pNewIndexEntry->num++;
// continue
pNode = pNext;
}
}
}
// int64_t et = taosGetTimestampUs();
// uTrace("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", pHashObj->capacity,
// ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0);
}
SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) {
size_t totalSize = dsize + sizeof(SHashNode) + keyLen + 1; // one extra byte for null
SHashNode *pNewNode = calloc(1, totalSize);
if (pNewNode == NULL) {
uError("failed to allocate memory, reason:%s", strerror(errno));
return NULL;
}
memcpy(pNewNode->data, pData, dsize);
pNewNode->key = pNewNode->data + dsize;
memcpy(pNewNode->key, key, keyLen);
pNewNode->keyLen = keyLen;
pNewNode->hashVal = hashVal;
return pNewNode;
}
SHashNode *doUpdateHashNode(SHashNode *pNode, const void *key, size_t keyLen, const void *pData, size_t dsize) {
size_t size = dsize + sizeof(SHashNode) + keyLen;
SHashNode *pNewNode = (SHashNode *)realloc(pNode, size);
if (pNewNode == NULL) {
return NULL;
}
memcpy(pNewNode->data, pData, dsize);
pNewNode->key = pNewNode->data + dsize;
assert(memcmp(pNewNode->key, key, keyLen) == 0 && keyLen == pNewNode->keyLen);
memcpy(pNewNode->key, key, keyLen);
return pNewNode;
}
void doAddToHashTable(SHashObj *pHashObj, SHashNode *pNode) {
assert(pNode != NULL);
int32_t index = HASH_INDEX(pNode->hashVal, pHashObj->capacity);
SHashEntry *pEntry = pHashObj->hashList[index];
pNode->next = pEntry->next;
if (pEntry->next) {
pEntry->next->prev = pNode;
}
pEntry->next = pNode;
pNode->prev1 = pEntry;
pEntry->num++;
pHashObj->size++;
}
SHashNode *getNextHashNode(SHashMutableIterator *pIter) {
assert(pIter != NULL);
pIter->entryIndex++;
while (pIter->entryIndex < pIter->pHashObj->capacity) {
SHashEntry *pEntry = pIter->pHashObj->hashList[pIter->entryIndex];
if (pEntry->next == NULL) {
pIter->entryIndex++;
continue;
}
return pEntry->next;
}
return NULL;
}
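
With the key parameter widened from const char* to const void*, binary keys such as table uids can be passed without casts. A small sketch under the assumption that pHash was created earlier via taosHashInit with a suitable _hash_fn_t; the function below is illustrative, not part of this commit:

// illustrative only: store and look up a value keyed by a 64-bit uid
static void putAndGetByUid(SHashObj* pHash, uint64_t uid) {
  int32_t payload = 7;
  taosHashPut(pHash, &uid, sizeof(uid), &payload, sizeof(payload));   // the value is copied into the node
  int32_t* p = taosHashGet(pHash, &uid, sizeof(uid));                 // returns a pointer to the stored copy
  if (p != NULL && *p == 7) {
    taosHashRemove(pHash, &uid, sizeof(uid));
  }
}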

View File

@ -176,6 +176,11 @@ SArray* taosArrayClone(const SArray* pSrc) {
return dst;
}
void taosArrayClear(SArray* pArray) {
assert( pArray != NULL );
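// note (editorial): this only resets the element count; the backing buffer is kept
// and any heap memory owned by the elements is not released here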
pArray->size = 0;
}
void taosArrayDestroy(SArray* pArray) {
if (pArray == NULL) {
return;

View File

@ -439,6 +439,8 @@ void *taosCachePut(SCacheObj *pCacheObj, const char *key, const void *pData, siz
uTrace("key:%s %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", total:%d, size:%" PRId64 " bytes",
key, pNode, pNode->addedTime, pNode->expiredTime, pCacheObj->totalSize, dataSize);
} else {
uError("key:%s failed to added into cache, out of memory", key);
}
} else { // old data exists, update the node
pNode = taosUpdateCacheImpl(pCacheObj, pOld, key, keyLen, pData, dataSize, duration * 1000L);

View File

@ -25,7 +25,7 @@
#include "tsystem.h"
#include "tutil.h"
SGlobalCfg tsGlobalConfig[TSDB_CFG_MAX_NUM] = {0};
SGlobalCfg tsGlobalConfig[TSDB_CFG_MAX_NUM] = {{0}};
int32_t tsGlobalConfigNum = 0;
static char *tsGlobalUnit[] = {

View File

@ -32,7 +32,7 @@ char* taosDesImp(unsigned char* key, char* src, unsigned int len, int process_mo
unsigned int number_of_blocks = len / 8;
unsigned char data_block[9] = {0};
unsigned char processed_block[9] = {0};
key_set key_sets[17] = {0};
key_set key_sets[17]; memset(key_sets, 0, sizeof(key_sets));
char* dest = calloc(len + 1, 1);
generate_sub_keys(key, key_sets);

View File

@ -27,10 +27,11 @@ typedef struct {
} STaosError;
#include "os.h"
#include "taoserror.h"
static _Thread_local int32_t tsErrno;
static threadlocal int32_t tsErrno;
int32_t* taosGetErrno() {
return &tsErrno;
}

View File

@ -38,6 +38,7 @@ typedef struct {
int status;
int8_t role;
int64_t version;
int64_t savedVersion;
void *wqueue;
void *rqueue;
void *wal;

View File

@ -33,12 +33,11 @@ static int32_t tsOpennedVnodes;
static void *tsDnodeVnodesHash;
static void vnodeCleanUp(SVnodeObj *pVnode);
static void vnodeBuildVloadMsg(char *pNode, void * param);
static int vnodeWalCallback(void *arg);
static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg);
static int32_t vnodeReadCfg(SVnodeObj *pVnode);
static int32_t vnodeSaveVersion(SVnodeObj *pVnode);
static bool vnodeReadVersion(SVnodeObj *pVnode);
static int vnodeWalCallback(void *arg);
static int vnodeProcessTsdbStatus(void *arg, int status);
static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size);
static int vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index);
static void vnodeNotifyRole(void *ahandle, int8_t role);
@ -206,7 +205,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
STsdbAppH appH = {0};
appH.appH = (void *)pVnode;
appH.walCallBack = vnodeWalCallback;
appH.notifyStatus = vnodeProcessTsdbStatus;
appH.cqH = pVnode->cq;
sprintf(temp, "%s/tsdb", rootDir);
@ -374,16 +373,24 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
walClose(pVnode->wal);
pVnode->wal = NULL;
vnodeSaveVersion(pVnode);
vnodeRelease(pVnode);
}
// TODO: this is a simple implementation
static int vnodeWalCallback(void *arg) {
static int vnodeProcessTsdbStatus(void *arg, int status) {
SVnodeObj *pVnode = arg;
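// COMMIT_START: remember the WAL version being committed and roll to a new WAL file;
// COMMIT_OVER: persist that remembered version to disk via vnodeSaveVersion (editorial note)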
if (status == TSDB_STATUS_COMMIT_START) {
pVnode->savedVersion = pVnode->version;
return walRenew(pVnode->wal);
}
if (status == TSDB_STATUS_COMMIT_OVER)
return vnodeSaveVersion(pVnode);
return 0;
}
static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size) {
SVnodeObj *pVnode = ahandle;
return tsdbGetFileInfo(pVnode->tsdb, name, index, size);
@ -414,7 +421,7 @@ static void vnodeNotifyFileSynced(void *ahandle) {
tsdbCloseRepo(pVnode->tsdb);
STsdbAppH appH = {0};
appH.appH = (void *)pVnode;
appH.walCallBack = vnodeWalCallback;
appH.notifyStatus = vnodeProcessTsdbStatus;
appH.cqH = pVnode->cq;
pVnode->tsdb = tsdbOpenRepo(rootDir, &appH);
}
@ -685,14 +692,14 @@ static int32_t vnodeSaveVersion(SVnodeObj *pVnode) {
char * content = calloc(1, maxLen + 1);
len += snprintf(content + len, maxLen - len, "{\n");
len += snprintf(content + len, maxLen - len, " \"version\": %" PRId64 "\n", pVnode->version);
len += snprintf(content + len, maxLen - len, " \"version\": %" PRId64 "\n", pVnode->savedVersion);
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
fclose(fp);
free(content);
vPrint("vgId:%d, save vnode version:%" PRId64 " successed", pVnode->vgId, pVnode->version);
vPrint("vgId:%d, save vnode version:%" PRId64 " succeed", pVnode->vgId, pVnode->savedVersion);
return 0;
}
@ -734,7 +741,7 @@ static bool vnodeReadVersion(SVnodeObj *pVnode) {
ret = true;
vPrint("vgId:%d, read vnode version successed, version:%%" PRId64, pVnode->vgId, pVnode->version);
vPrint("vgId:%d, read vnode version succeed, version:%" PRId64, pVnode->vgId, pVnode->version);
PARSE_OVER:
free(content);

View File

@ -56,32 +56,46 @@ void run_test(TAOS* taos) {
taos_query(taos, "drop database if exists test;");
usleep(100000);
taos_query(taos, "create database test tables 5;");
//taos_query(taos, "create database test tables 5;");
taos_query(taos, "create database test;");
usleep(100000);
taos_query(taos, "use test;");
usleep(100000);
taos_query(taos, "create table meters(ts timestamp, a int, b binary(20)) tags(loc binary(20), area int);");
taos_query(taos, "insert into t0 using meters tags('beijing', 0) values('2020-01-01 00:00:00.000', 0, 'china');");
taos_query(taos, "insert into t0 using meters tags('beijing', 0) values('2020-01-01 00:01:00.000', 0, 'china');");
taos_query(taos, "insert into t0 using meters tags('beijing', 0) values('2020-01-01 00:02:00.000', 0, 'china');");
taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:00:00.000', 0, 'china');");
taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:01:00.000', 0, 'china');");
taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:02:00.000', 0, 'china');");
taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:03:00.000', 0, 'china');");
taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:00:00.000', 0, 'UK');");
taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:01:00.000', 0, 'UK');");
taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:01:01.000', 0, 'UK');");
taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:01:02.000', 0, 'UK');");
taos_query(taos, "insert into t3 using meters tags('tianjin', 0) values('2020-01-01 00:01:02.000', 0, 'china');");
taos_query(taos, "insert into t4 using meters tags('wuhan', 0) values('2020-01-01 00:01:02.000', 0, 'china');");
taos_query(taos, "insert into t5 using meters tags('jinan', 0) values('2020-01-01 00:01:02.000', 0, 'china');");
taos_query(taos, "insert into t6 using meters tags('haikou', 0) values('2020-01-01 00:01:02.000', 0, 'china');");
taos_query(taos, "insert into t7 using meters tags('nanjing', 0) values('2020-01-01 00:01:02.000', 0, 'china');");
taos_query(taos, "insert into t8 using meters tags('lanzhou', 0) values('2020-01-01 00:01:02.000', 0, 'china');");
taos_query(taos, "insert into t9 using meters tags('tokyo', 0) values('2020-01-01 00:01:02.000', 0, 'japan');");
usleep(100000);
taos_query(taos, "create table meters(ts timestamp, a int) tags(area int);");
taos_query(taos, "create table t0 using meters tags(0);");
taos_query(taos, "create table t1 using meters tags(1);");
taos_query(taos, "create table t2 using meters tags(2);");
taos_query(taos, "create table t3 using meters tags(3);");
taos_query(taos, "create table t4 using meters tags(4);");
taos_query(taos, "create table t5 using meters tags(5);");
taos_query(taos, "create table t6 using meters tags(6);");
taos_query(taos, "create table t7 using meters tags(7);");
taos_query(taos, "create table t8 using meters tags(8);");
taos_query(taos, "create table t9 using meters tags(9);");
taos_query(taos, "insert into t0 values('2020-01-01 00:00:00.000', 0);");
taos_query(taos, "insert into t0 values('2020-01-01 00:01:00.000', 0);");
taos_query(taos, "insert into t0 values('2020-01-01 00:02:00.000', 0);");
taos_query(taos, "insert into t1 values('2020-01-01 00:00:00.000', 0);");
taos_query(taos, "insert into t1 values('2020-01-01 00:01:00.000', 0);");
taos_query(taos, "insert into t1 values('2020-01-01 00:02:00.000', 0);");
taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.000', 0);");
taos_query(taos, "insert into t2 values('2020-01-01 00:00:00.000', 0);");
taos_query(taos, "insert into t2 values('2020-01-01 00:01:00.000', 0);");
taos_query(taos, "insert into t2 values('2020-01-01 00:01:01.000', 0);");
taos_query(taos, "insert into t2 values('2020-01-01 00:01:02.000', 0);");
taos_query(taos, "insert into t3 values('2020-01-01 00:01:02.000', 0);");
taos_query(taos, "insert into t4 values('2020-01-01 00:01:02.000', 0);");
taos_query(taos, "insert into t5 values('2020-01-01 00:01:02.000', 0);");
taos_query(taos, "insert into t6 values('2020-01-01 00:01:02.000', 0);");
taos_query(taos, "insert into t7 values('2020-01-01 00:01:02.000', 0);");
taos_query(taos, "insert into t8 values('2020-01-01 00:01:02.000', 0);");
taos_query(taos, "insert into t9 values('2020-01-01 00:01:02.000', 0);");
// super tables subscription
usleep(1000000);
TAOS_SUB* tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0);
TAOS_RES* res = taos_consume(tsub);
@ -90,23 +104,23 @@ void run_test(TAOS* taos) {
res = taos_consume(tsub);
check_row_count(__LINE__, res, 0);
taos_query(taos, "insert into t0 values('2020-01-01 00:03:00.000', 0, 'china');");
taos_query(taos, "insert into t8 values('2020-01-01 00:01:03.000', 0, 'china');");
taos_query(taos, "insert into t0 values('2020-01-01 00:02:00.001', 0);");
taos_query(taos, "insert into t8 values('2020-01-01 00:01:03.000', 0);");
res = taos_consume(tsub);
check_row_count(__LINE__, res, 2);
taos_query(taos, "insert into t2 values('2020-01-01 00:01:02.001', 0, 'UK');");
taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.001', 0, 'UK');");
taos_query(taos, "insert into t2 values('2020-01-01 00:01:02.001', 0);");
taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.001', 0);");
res = taos_consume(tsub);
check_row_count(__LINE__, res, 2);
taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.002', 0, 'china');");
taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.002', 0);");
res = taos_consume(tsub);
check_row_count(__LINE__, res, 1);
// keep progress information and restart subscription
taos_unsubscribe(tsub, 1);
taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.000', 0, 'china');");
taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.000', 0);");
tsub = taos_subscribe(taos, 1, "test", "select * from meters;", NULL, NULL, 0);
res = taos_consume(tsub);
check_row_count(__LINE__, res, 24);
@ -133,7 +147,7 @@ void run_test(TAOS* taos) {
res = taos_consume(tsub);
check_row_count(__LINE__, res, 0);
taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.001', 0, 'china');");
taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.001', 0);");
res = taos_consume(tsub);
check_row_count(__LINE__, res, 1);
@ -197,7 +211,7 @@ int main(int argc, char *argv[]) {
// init TAOS
taos_init();
TAOS* taos = taos_connect(host, user, passwd, "test", 0);
TAOS* taos = taos_connect(host, user, passwd, "", 0);
if (taos == NULL) {
printf("failed to connect to db, reason:%s\n", taos_errstr(taos));
exit(1);
@ -209,6 +223,7 @@ int main(int argc, char *argv[]) {
exit(0);
}
taos_query(taos, "use test;");
TAOS_SUB* tsub = NULL;
if (async) {
// create an asynchronized subscription, the callback function will be called every 1s

View File

@ -27,7 +27,7 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool)
$i = 0
while $i < 5
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( 0 )
sql create table $tb using $mt tags( $i )
$x = 0
while $x < $rowNum
$val = $x * 60000

View File

@ -48,41 +48,41 @@ $tb = $tbPrefix . $i
sql select leastsquares(tbcol, 1, 1) from $tb
print ===> $data00
if $data00 != @(1.000000, 1.000000)@ then
if $data00 != @{slop:1.000000, intercept:1.000000}@ then
return -1
endi
print =============== step3
sql select leastsquares(tbcol, 1, 1) from $tb where ts < now + 4m
print ===> $data00
if $data00 != @(1.000000, 1.000000)@ then
if $data00 != @{slop:1.000000, intercept:1.000000}@ then
return -1
endi
print =============== step4
sql select leastsquares(tbcol, 1, 1) as b from $tb
print ===> $data00
if $data00 != @(1.000000, 1.000000)@ then
if $data00 != @{slop:1.000000, intercept:1.000000}@ then
return -1
endi
print =============== step5
sql select leastsquares(tbcol, 1, 1) as b from $tb interval(1m)
print ===> $data01
if $data01 != @(1.000000, 1.000000)@ then
if $data01 != @{slop:1.000000, intercept:1.000000}@ then
return -1
endi
sql select leastsquares(tbcol, 1, 1) as b from $tb interval(1d)
print ===> $data01
if $data01 != @(1.000000, 1.000000)@ then
if $data01 != @{slop:1.000000, intercept:1.000000}@ then
return -1
endi
print =============== step6
sql select leastsquares(tbcol, 1, 1) as b from $tb where ts < now + 4m interval(1m)
print ===> $data01
if $data01 != @(1.000000, 1.000000)@ then
if $data01 != @{slop:1.000000, intercept:1.000000}@ then
return -1
endi
print ===> $rows

View File

@ -112,6 +112,7 @@ sql import into tb values(1520000050001, 50001)
sql select * from tb;
print $rows
if $rows != 19 then
print expect 19, actual: $rows
return -1
endi