Merge branch 'develop' into hotfix/TD-2518

This commit is contained in:
dapan1121 2021-02-04 14:25:02 +08:00
commit fbfe4a3b47
89 changed files with 3080 additions and 1133 deletions

View File

@ -89,11 +89,12 @@ pid_t getppid(void); /* Get parent PID */
/* Path management */
#if defined(_WIN32)
#if defined(_UTF8_SOURCE) || defined(_BSD_SOURCE) || defined(_GNU_SOURCE)
#define realpath realpathU
#if defined(_UTF8_SOURCE) || defined(_BSD_SOURCE) || defined(_GNU_SOURCE)
// #define realpath realpathU
#define CompactPath CompactPathU
#else /* _ANSI_SOURCE */
#define realpath realpathA
// #define realpath realpathA
#define CompactPath CompactPathA
#endif
#endif /* defined(_WIN32) */

View File

@ -84,11 +84,11 @@ static void monotonicInit_x86linux() {
regfree(&constTscRegex);
if (mono_ticksPerMicrosecond == 0) {
fprintf(stderr, "monotonic: x86 linux, unable to determine clock rate");
//fprintf(stderr, "monotonic: x86 linux, unable to determine clock rate");
return;
}
if (!constantTsc) {
fprintf(stderr, "monotonic: x86 linux, 'constant_tsc' flag not present");
//fprintf(stderr, "monotonic: x86 linux, 'constant_tsc' flag not present");
return;
}

View File

@ -852,7 +852,7 @@ npm install td2.0-connector
### Linux
- `python` (`v2.7` is recommended; `v3.x.x` is not supported yet)
- `node` must be v10.x; other versions have package compatibility issues.
- `node`: connector 2.0.6 supports v12.x and v10.x; 2.0.5 and earlier support only v10.x. Other versions may have package compatibility issues.
- `make`
- a C compiler, such as <a href="https://gcc.gnu.org">GCC</a>
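
With these prerequisites installed, a minimal connection check looks roughly like the following — a sketch based on the connector's documented `connect`/`cursor` API; the host and credentials are placeholders, not values taken from this change:

```js
// Minimal usage sketch (connection parameters are placeholders; adjust for your setup).
const taos = require('td2.0-connector');

// Connect to a locally running TDengine server with the default account.
const conn = taos.connect({ host: '127.0.0.1', user: 'root', password: 'taosdata', port: 0 });
const cursor = conn.cursor();

// Run a simple statement to verify the connection works.
cursor.execute('SHOW DATABASES;');

conn.close();
```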

View File

@ -33,6 +33,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index);
void tscHandleMasterJoinQuery(SSqlObj* pSql);
int32_t tscHandleMasterSTableQuery(SSqlObj *pSql);
int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql);
int32_t tscHandleMultivnodeInsert(SSqlObj *pSql);

View File

@ -132,8 +132,9 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo);
bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscQueryTags(SQueryInfo* pQueryInfo);
bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t tableIndex);
SSqlExpr* tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
SSqlExpr* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
SColumnIndex* pIndex, SSchema* pColSchema, int16_t colType);
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql);
@ -174,6 +175,7 @@ SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIn
SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type,
int16_t size);
size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo);
void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index);
int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy);

View File

@ -224,7 +224,9 @@ typedef struct SQueryInfo {
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
int16_t resColumnId; // result column id
bool distinctTag; // distinct tag or not
int32_t round; // 0/1/....
int32_t bufLen;
char* buf;
} SQueryInfo;
typedef struct {
@ -412,10 +414,9 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code);
int tscProcessLocalCmd(SSqlObj *pSql);
int tscCfgDynamicOptions(char *msg);
int taos_retrieve(TAOS_RES *res);
int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscTansformFuncForSTableQuery(SQueryInfo *pQueryInfo);
void tscRestoreFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);

View File

@ -68,7 +68,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescr
SQLFunctionCtx *pCtx = &pReducer->pCtx[i];
SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, i);
pCtx->aOutputBuf = pReducer->pResultBuf->data + pExpr->offset * pReducer->resColModel->capacity;
pCtx->pOutput = pReducer->pResultBuf->data + pExpr->offset * pReducer->resColModel->capacity;
pCtx->order = pQueryInfo->order.order;
pCtx->functionId = pExpr->functionId;
@ -76,7 +76,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescr
int16_t offset = getColumnModelOffset(pDesc->pColumnModel, i);
SSchema *pSchema = getColumnModelSchema(pDesc->pColumnModel, i);
pCtx->aInputElemBuf = pReducer->pTempBuffer->data + offset;
pCtx->pInput = pReducer->pTempBuffer->data + offset;
// input data format comes from pModel
pCtx->inputType = pSchema->type;
@ -94,7 +94,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescr
// for top/bottom function, the output of timestamp is the first column
int32_t functionId = pExpr->functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
pCtx->ptsOutputBuf = pReducer->pCtx[0].aOutputBuf;
pCtx->ptsOutputBuf = pReducer->pCtx[0].pOutput;
pCtx->param[2].i64 = pQueryInfo->order.order;
pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT;
pCtx->param[1].i64 = pQueryInfo->order.orderColId;
@ -118,7 +118,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescr
if (pExpr->functionId == TSDB_FUNC_TAG_DUMMY || pExpr->functionId == TSDB_FUNC_TS_DUMMY) {
tagLen += pExpr->resBytes;
pTagCtx[n++] = &pReducer->pCtx[i];
} else if ((aAggs[pExpr->functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
} else if ((aAggs[pExpr->functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
pCtx = &pReducer->pCtx[i];
}
}
@ -311,7 +311,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
pReducer->pCtx = (SQLFunctionCtx *)calloc(tscSqlExprNumOfExprs(pQueryInfo), sizeof(SQLFunctionCtx));
pReducer->rowSize = pMemBuffer[0]->nElemSize;
tscRestoreSQLFuncForSTableQuery(pQueryInfo);
tscRestoreFuncForSTableQuery(pQueryInfo);
tscFieldInfoUpdateOffset(pQueryInfo);
if (pReducer->rowSize > pMemBuffer[0]->pageSize) {
@ -383,7 +383,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
pReducer->pFillInfo = taosCreateFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
4096, (int32_t)pQueryInfo->fieldsInfo.numOfOutput, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
tinfo.precision, pQueryInfo->fillType, pFillCol, pSql);
}
@ -720,7 +720,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
SSchema p1 = {0};
if (pExpr->colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) {
p1 = tGetTableNameColumnSchema();
p1 = *tGetTbnameColumnSchema();
} else if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) {
p1.bytes = pExpr->resBytes;
p1.type = (uint8_t) pExpr->resType;
@ -744,6 +744,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
functionId = TSDB_FUNC_FIRST;
} else if (functionId == TSDB_FUNC_LAST_DST) {
functionId = TSDB_FUNC_LAST;
} else if (functionId == TSDB_FUNC_STDDEV_DST) {
functionId = TSDB_FUNC_STDDEV;
}
int32_t ret = getResultDataInfo(p1.type, p1.bytes, functionId, 0, &type, &bytes, &inter, 0, false);
@ -1041,7 +1043,7 @@ static void savePreviousRow(SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
pLocalMerge->hasPrevRow = true;
}
static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bool needInit) {
static void doExecuteFinalMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bool needInit) {
// the tag columns need to be set before all functions execution
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
@ -1053,7 +1055,7 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bo
int32_t functionId = pCtx->functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS_DUMMY) {
tVariantDestroy(&pCtx->tag);
char* input = pCtx->aInputElemBuf;
char* input = pCtx->pInput;
if (pCtx->inputType == TSDB_DATA_TYPE_BINARY || pCtx->inputType == TSDB_DATA_TYPE_NCHAR) {
assert(varDataLen(input) <= pCtx->inputBytes);
@ -1061,6 +1063,7 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bo
} else {
tVariantCreateFromBinary(&pCtx->tag, input, pCtx->inputBytes, pCtx->inputType);
}
} else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, j);
pCtx->param[0].i64 = pExpr->param[0].i64;
@ -1086,7 +1089,7 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bo
static void handleUnprocessedRow(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
if (pLocalMerge->hasUnprocessedRow) {
pLocalMerge->hasUnprocessedRow = false;
doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
doExecuteFinalMerge(pCmd, pLocalMerge, true);
savePreviousRow(pLocalMerge, tmpBuffer);
}
}
@ -1142,11 +1145,11 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo
int32_t inc = numOfRes - 1; // tsdb_func_tag function only produce one row of result
memset(buf, 0, (size_t)maxBufSize);
memcpy(buf, pCtx->aOutputBuf, (size_t)pCtx->outputBytes);
memcpy(buf, pCtx->pOutput, (size_t)pCtx->outputBytes);
for (int32_t i = 0; i < inc; ++i) {
pCtx->aOutputBuf += pCtx->outputBytes;
memcpy(pCtx->aOutputBuf, buf, (size_t)pCtx->outputBytes);
pCtx->pOutput += pCtx->outputBytes;
memcpy(pCtx->pOutput, buf, (size_t)pCtx->outputBytes);
}
}
@ -1289,10 +1292,10 @@ void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge) {// reset
size_t t = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < t; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
pLocalMerge->pCtx[i].aOutputBuf = pLocalMerge->pResultBuf->data + pExpr->offset * pLocalMerge->resColModel->capacity;
pLocalMerge->pCtx[i].pOutput = pLocalMerge->pResultBuf->data + pExpr->offset * pLocalMerge->resColModel->capacity;
if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM || pExpr->functionId == TSDB_FUNC_DIFF) {
pLocalMerge->pCtx[i].ptsOutputBuf = pLocalMerge->pCtx[0].aOutputBuf;
pLocalMerge->pCtx[i].ptsOutputBuf = pLocalMerge->pCtx[0].pOutput;
}
}
@ -1404,7 +1407,7 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) {
for (int32_t k = 0; k < size; ++k) {
SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[k];
pCtx->aOutputBuf += pCtx->outputBytes * numOfRes;
pCtx->pOutput += pCtx->outputBytes * numOfRes;
// set the correct output timestamp column position
if (pCtx->functionId == TSDB_FUNC_TOP || pCtx->functionId == TSDB_FUNC_BOTTOM) {
@ -1412,7 +1415,7 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) {
}
}
doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
doExecuteFinalMerge(pCmd, pLocalMerge, true);
}
int32_t tscDoLocalMerge(SSqlObj *pSql) {
@ -1504,7 +1507,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
if (pLocalMerge->hasPrevRow) {
if (needToMerge(pQueryInfo, pLocalMerge, tmpBuffer)) {
// belong to the group of the previous row, continue process it
doExecuteSecondaryMerge(pCmd, pLocalMerge, false);
doExecuteFinalMerge(pCmd, pLocalMerge, false);
// copy to buffer
savePreviousRow(pLocalMerge, tmpBuffer);
@ -1576,7 +1579,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
}
}
} else {
doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
doExecuteFinalMerge(pCmd, pLocalMerge, true);
savePreviousRow(pLocalMerge, tmpBuffer); // copy the processed row to buffer
}

View File

@ -787,10 +787,10 @@ int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQ
}
SSchema s = {.bytes = TSDB_KEYSIZE, .type = TSDB_DATA_TYPE_TIMESTAMP, .colId = PRIMARYKEY_TIMESTAMP_COL_INDEX};
tstrncpy(s.name, aAggs[TSDB_FUNC_TS].aName, sizeof(s.name));
tstrncpy(s.name, aAggs[TSDB_FUNC_TS].name, sizeof(s.name));
SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscAddSpecialColumnForSelect(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL);
tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL);
if (parseOffsetClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
@ -1319,7 +1319,7 @@ int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStr
return (totalLen < TSDB_TABLE_FNAME_LEN) ? TSDB_CODE_SUCCESS : TSDB_CODE_TSC_INVALID_SQL;
}
static void tscInsertPrimaryTSSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
SColumnIndex tsCol = {.tableIndex = pIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscColumnListInsert(pQueryInfo->colList, &tsCol);
}
@ -1401,7 +1401,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr);
// add ts column
tscInsertPrimaryTSSourceColumn(pQueryInfo, &index);
tscInsertPrimaryTsSourceColumn(pQueryInfo, &index);
tbufCloseWriter(&bw);
taosArrayDestroy(colList);
@ -1506,7 +1506,7 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo) {
// add the timestamp column into the output columns
SColumnIndex index = {0}; // primary timestamp column info
int32_t numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
tscAddSpecialColumnForSelect(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL);
tscAddFuncInSelectClause(pQueryInfo, numOfCols, TSDB_FUNC_PRJ, &index, pSchema, TSDB_COL_NORMAL);
SInternalField* pSupInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfCols);
pSupInfo->visible = false;
@ -1602,7 +1602,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
* in dealing with super table queries such as: count/first/last
*/
if (isSTable) {
tscTansformSQLFuncForSTableQuery(pQueryInfo);
tscTansformFuncForSTableQuery(pQueryInfo);
if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
return TSDB_CODE_TSC_INVALID_SQL;
@ -1656,7 +1656,7 @@ SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tabl
(functionId == TSDB_FUNC_TAGPRJ));
}
SSqlExpr* tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
SSqlExpr* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
SColumnIndex* pIndex, SSchema* pColSchema, int16_t flag) {
int16_t colId = getNewResColId(pQueryInfo);
@ -1738,7 +1738,7 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
}
// add the primary timestamp column even though it is not required by user
tscInsertPrimaryTSSourceColumn(pQueryInfo, &index);
tscInsertPrimaryTsSourceColumn(pQueryInfo, &index);
} else if (optr == TK_STRING || optr == TK_INTEGER || optr == TK_FLOAT) { // simple column projection query
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
@ -1748,7 +1748,7 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
SSchema colSchema = tGetUserSpecifiedColumnSchema(&pItem->pNode->val, &pItem->pNode->token, pItem->aliasName);
SSqlExpr* pExpr =
tscAddSpecialColumnForSelect(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_UDC);
tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_UDC);
// NOTE: the first parameter is reserved for the tag column id during join query process.
pExpr->numOfParams = 2;
@ -1761,11 +1761,11 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
}
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
SSchema colSchema = tGetTableNameColumnSchema();
tscAddSpecialColumnForSelect(pQueryInfo, startPos, TSDB_FUNC_TAGPRJ, &index, &colSchema, TSDB_COL_TAG);
SSchema* colSchema = tGetTbnameColumnSchema();
tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_TAGPRJ, &index, colSchema, TSDB_COL_TAG);
} else if (index.columnIndex == TSDB_BLOCK_DIST_COLUMN_INDEX) {
SSchema colSchema = tGetBlockDistColumnSchema();
tscAddSpecialColumnForSelect(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_TAG);
tscAddFuncInSelectClause(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_TAG);
} else {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
@ -1779,7 +1779,7 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
}
// add the primary timestamp column even though it is not required by user
tscInsertPrimaryTSSourceColumn(pQueryInfo, &index);
tscInsertPrimaryTsSourceColumn(pQueryInfo, &index);
} else {
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -1849,9 +1849,9 @@ void setResultColName(char* name, tSqlExprItem* pItem, int32_t functionId, SStrT
if (tsKeepOriginalColumnName) { // keep the original column name
tstrncpy(name, uname, TSDB_COL_NAME_LEN);
} else {
int32_t size = TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].aName) + 2 + 1;
char tmp[TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].aName) + 2 + 1] = {0};
snprintf(tmp, size, "%s(%s)", aAggs[functionId].aName, uname);
int32_t size = TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].name) + 2 + 1;
char tmp[TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].name) + 2 + 1] = {0};
snprintf(tmp, size, "%s(%s)", aAggs[functionId].name, uname);
tstrncpy(name, tmp, TSDB_COL_NAME_LEN);
}
@ -1966,7 +1966,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
// the time stamp may be always needed
if (index.tableIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
tscInsertPrimaryTSSourceColumn(pQueryInfo, &index);
tscInsertPrimaryTsSourceColumn(pQueryInfo, &index);
}
return TSDB_CODE_SUCCESS;
@ -2036,7 +2036,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
getNewResColId(pQueryInfo), TSDB_KEYSIZE, false);
SColumnList ids = getColumnList(1, 0, 0);
insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].aName, pExpr);
insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr);
}
// functions can not be applied to tags
@ -2079,7 +2079,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
}
tscInsertPrimaryTSSourceColumn(pQueryInfo, &index);
tscInsertPrimaryTsSourceColumn(pQueryInfo, &index);
return TSDB_CODE_SUCCESS;
}
case TK_FIRST:
@ -2285,7 +2285,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
if (convertFunctionId(optr, &functionId) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
tscInsertPrimaryTSSourceColumn(pQueryInfo, &index);
tscInsertPrimaryTsSourceColumn(pQueryInfo, &index);
colIndex += 1; // the first column is ts
pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false);
@ -2308,12 +2308,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
SColumnIndex index1 = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pQueryInfo),
TSDB_KEYSIZE, false);
tstrncpy(pExpr->aliasName, aAggs[TSDB_FUNC_TS].aName, sizeof(pExpr->aliasName));
tstrncpy(pExpr->aliasName, aAggs[TSDB_FUNC_TS].name, sizeof(pExpr->aliasName));
const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX;
SColumnList ids = getColumnList(1, 0, TS_COLUMN_INDEX);
insertResultField(pQueryInfo, TS_COLUMN_INDEX, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP,
aAggs[TSDB_FUNC_TS].aName, pExpr);
aAggs[TSDB_FUNC_TS].name, pExpr);
colIndex += 1; // the first column is ts
@ -2384,7 +2384,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
SSchema s = {0};
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
s = tGetTableNameColumnSchema();
s = *tGetTbnameColumnSchema();
} else {
s = pTagSchema[index.columnIndex];
}
@ -2400,7 +2400,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
s.bytes = bytes;
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY);
tscAddSpecialColumnForSelect(pQueryInfo, 0, TSDB_FUNC_TID_TAG, &index, &s, TSDB_COL_TAG);
tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TID_TAG, &index, &s, TSDB_COL_TAG);
return TSDB_CODE_SUCCESS;
}
@ -2778,7 +2778,7 @@ bool validateIpAddress(const char* ip, size_t size) {
return epAddr != INADDR_NONE;
}
int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
int32_t tscTansformFuncForSTableQuery(SQueryInfo* pQueryInfo) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pTableMetaInfo->pTableMeta == NULL || !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
@ -2800,7 +2800,7 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
SSchema* pSrcSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, colIndex);
if ((functionId >= TSDB_FUNC_SUM && functionId <= TSDB_FUNC_TWA) ||
(functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_LAST_DST) ||
(functionId >= TSDB_FUNC_FIRST_DST && functionId <= TSDB_FUNC_STDDEV_DST) ||
(functionId >= TSDB_FUNC_RATE && functionId <= TSDB_FUNC_AVG_IRATE)) {
if (getResultDataInfo(pSrcSchema->type, pSrcSchema->bytes, functionId, (int32_t)pExpr->param[0].i64, &type, &bytes,
&interBytes, 0, true) != TSDB_CODE_SUCCESS) {
@ -2818,7 +2818,7 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
}
/* transfer the field-info back to original input format */
void tscRestoreSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
void tscRestoreFuncForSTableQuery(SQueryInfo* pQueryInfo) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
return;
@ -2842,6 +2842,8 @@ void tscRestoreSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
functionId = TSDB_FUNC_FIRST;
} else if (functionId == TSDB_FUNC_LAST_DST) {
functionId = TSDB_FUNC_LAST;
} else if (functionId == TSDB_FUNC_STDDEV_DST) {
functionId = TSDB_FUNC_STDDEV;
}
getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &pExpr->resType, &pExpr->resBytes,
@ -2858,7 +2860,7 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId;
if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_STABLE) == 0) {
if ((aAggs[functionId].status & TSDB_FUNCSTATE_STABLE) == 0) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
return true;
}
@ -2968,7 +2970,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd)
STableMeta* pTableMeta = NULL;
SSchema* pSchema = NULL;
SSchema s = tGetTbnameColumnSchema();
// SSchema s = tGetTbnameColumnSchema();
int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL;
@ -2995,7 +2997,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd)
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
pSchema = &s;
pSchema = tGetTbnameColumnSchema();
} else {
pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex);
}
@ -3552,38 +3554,6 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr*
return TSDB_CODE_SUCCESS;
}
// todo error handle / such as and /or mixed with +/-/*/
int32_t doArithmeticExprToString(tSQLExpr* pExpr, char** exprString) {
tSQLExpr* pLeft = pExpr->pLeft;
tSQLExpr* pRight = pExpr->pRight;
*(*exprString)++ = '(';
if (pLeft->nSQLOptr >= TK_PLUS && pLeft->nSQLOptr <= TK_REM) {
doArithmeticExprToString(pLeft, exprString);
} else {
int32_t ret = tSQLExprNodeToString(pLeft, exprString);
if (ret != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
optrToString(pExpr, exprString);
if (pRight->nSQLOptr >= TK_PLUS && pRight->nSQLOptr <= TK_REM) {
doArithmeticExprToString(pRight, exprString);
} else {
int32_t ret = tSQLExprNodeToString(pRight, exprString);
if (ret != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
*(*exprString)++ = ')';
return TSDB_CODE_SUCCESS;
}
static int32_t validateSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList,
int32_t* type, uint64_t* uid) {
if (pExpr->nSQLOptr == TK_ID) {
@ -5233,7 +5203,7 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
size_t size = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t i = 0; i < size; ++i) {
int32_t functId = tscSqlExprGet(pQueryInfo, i)->functionId;
if (!IS_STREAM_QUERY_VALID(aAggs[functId].nStatus)) {
if (!IS_STREAM_QUERY_VALID(aAggs[functId].status)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@ -5256,7 +5226,7 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu
bool hasSelectivity = false;
for (int32_t j = 0; j < size; ++j) {
SSqlExpr* pEx = tscSqlExprGet(pQueryInfo, j);
if ((aAggs[pEx->functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) == TSDB_FUNCSTATE_SELECTIVITY) {
if ((aAggs[pEx->functionId].status & TSDB_FUNCSTATE_SELECTIVITY) == TSDB_FUNCSTATE_SELECTIVITY) {
hasSelectivity = true;
break;
}
@ -5710,7 +5680,7 @@ void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex) {
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pColIndex->colIndex);
SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pColIndex->colIndex};
tscAddSpecialColumnForSelect(pQueryInfo, (int32_t)size, TSDB_FUNC_PRJ, &colIndex, pSchema, TSDB_COL_NORMAL);
tscAddFuncInSelectClause(pQueryInfo, (int32_t)size, TSDB_FUNC_PRJ, &colIndex, pSchema, TSDB_COL_NORMAL);
int32_t numOfFields = tscNumOfFields(pQueryInfo);
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, numOfFields - 1);
@ -5874,7 +5844,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
continue;
}
if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
numOfSelectivity++;
} else {
numOfAggregation++;
@ -5906,7 +5876,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
for (int32_t i = 0; i < numOfExprs; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
int16_t functionId = pExpr->functionId;
if (functionId == TSDB_FUNC_TAGPRJ || (aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) == 0) {
if (functionId == TSDB_FUNC_TAGPRJ || (aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) == 0) {
continue;
}
@ -5949,7 +5919,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SSchema s = tGetTableNameColumnSchema();
SSchema s = *tGetTbnameColumnSchema();
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
int16_t bytes = 0;
int16_t type = 0;
@ -6093,7 +6063,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
}
}
if (IS_MULTIOUTPUT(aAggs[functId].nStatus) && functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM &&
if (IS_MULTIOUTPUT(aAggs[functId].status) && functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM &&
functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_PRJ) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@ -6283,7 +6253,7 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex) {
char tmpBuf[1024] = {0};
int32_t tmpLen = 0;
tmpLen =
sprintf(tmpBuf, "%s(uid:%" PRId64 ", %d)", aAggs[pExpr->functionId].aName, pExpr->uid, pExpr->colInfo.colId);
sprintf(tmpBuf, "%s(uid:%" PRId64 ", %d)", aAggs[pExpr->functionId].name, pExpr->uid, pExpr->colInfo.colId);
if (tmpLen + offset >= totalBufSize - 1) break;
@ -6984,3 +6954,4 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) {
return false;
}

View File

@ -752,6 +752,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->queryType = htonl(pQueryInfo->type);
pQueryMsg->vgroupLimit = htobe64(pQueryInfo->vgroupLimit);
pQueryMsg->sqlstrLen = htonl(sqlLen);
pQueryMsg->prevResultLen = htonl(pQueryInfo->bufLen);
size_t numOfOutput = tscSqlExprNumOfExprs(pQueryInfo);
pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); // this is the stage one output column number
@ -989,6 +990,11 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
}
if (pQueryInfo->bufLen > 0) {
memcpy(pMsg, pQueryInfo->buf, pQueryInfo->bufLen);
pMsg += pQueryInfo->bufLen;
}
SCond* pCond = &pQueryInfo->tagCond.tbnameCond;
if (pCond->len > 0) {
strncpy(pMsg, pCond->cond, pCond->len);

View File

@ -443,24 +443,6 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
return pFieldInfo->final;
}
int taos_retrieve(TAOS_RES *res) {
if (res == NULL) return 0;
SSqlObj *pSql = (SSqlObj *)res;
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
if (pSql == NULL || pSql->signature != pSql) return 0;
if (pRes->qhandle == 0) return 0;
tscResetForNextRetrieve(pRes);
if (pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
tscProcessSql(pSql);
return pRes->numOfRows;
}
static bool needToFetchNewBlock(SSqlObj* pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;

View File

@ -103,7 +103,7 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
// failed to get table Meta or vgroup list, retry in 10sec.
if (code == TSDB_CODE_SUCCESS) {
tscTansformSQLFuncForSTableQuery(pQueryInfo);
tscTansformFuncForSTableQuery(pQueryInfo);
tscDebug("%p stream:%p, start stream query on:%s", pSql, pStream, tNameGetTableName(&pTableMetaInfo->name));
pSql->fp = tscProcessStreamQueryCallback;

View File

@ -13,7 +13,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _GNU_SOURCE
#include "os.h"
#include "texpr.h"
@ -23,6 +23,7 @@
#include "tscSubquery.h"
#include "tschemautil.h"
#include "tsclient.h"
#include "qUtil.h"
typedef struct SInsertSupporter {
SSqlObj* pSql;
@ -501,7 +502,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
int16_t functionId = tscIsProjectionQuery(pQueryInfo)? TSDB_FUNC_PRJ : TSDB_FUNC_TS;
tscAddSpecialColumnForSelect(pQueryInfo, 0, functionId, &index, s, TSDB_COL_NORMAL);
tscAddFuncInSelectClause(pQueryInfo, 0, functionId, &index, s, TSDB_COL_NORMAL);
tscPrintSelectClause(pNew, 0);
tscFieldInfoUpdateOffset(pQueryInfo);
@ -681,7 +682,7 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr
}
}
static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* pParent) {
static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj* pParent) {
SSqlCmd* pCmd = &pSql->cmd;
tscClearSubqueryInfo(pCmd);
tscFreeSqlResult(pSql);
@ -701,7 +702,7 @@ static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1};
SColumnIndex index = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscAddSpecialColumnForSelect(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL);
tscAddFuncInSelectClause(pQueryInfo, 0, TSDB_FUNC_TS_COMP, &index, &colSchema, TSDB_COL_NORMAL);
// set the tags value for ts_comp function
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
@ -970,7 +971,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
for (int32_t m = 0; m < pParentSql->subState.numOfSub; ++m) {
SSqlObj* sub = pParentSql->pSubs[m];
issueTSCompQuery(sub, sub->param, pParentSql);
issueTsCompQuery(sub, sub->param, pParentSql);
}
}
@ -1470,7 +1471,7 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) {
}
// restore the offset value for super table query in case of final result.
tscRestoreSQLFuncForSTableQuery(pQueryInfo);
tscRestoreFuncForSTableQuery(pQueryInfo);
tscFieldInfoUpdateOffset(pQueryInfo);
}
@ -1651,7 +1652,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
// set get tags query type
TSDB_QUERY_SET_TYPE(pNewQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY);
tscAddSpecialColumnForSelect(pNewQueryInfo, 0, TSDB_FUNC_TID_TAG, &colIndex, &s1, TSDB_COL_TAG);
tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TID_TAG, &colIndex, &s1, TSDB_COL_TAG);
size_t numOfCols = taosArrayGetSize(pNewQueryInfo->colList);
tscDebug(
@ -1662,7 +1663,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
} else {
SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1};
SColumnIndex colIndex = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscAddSpecialColumnForSelect(pNewQueryInfo, 0, TSDB_FUNC_TS_COMP, &colIndex, &colSchema, TSDB_COL_NORMAL);
tscAddFuncInSelectClause(pNewQueryInfo, 0, TSDB_FUNC_TS_COMP, &colIndex, &colSchema, TSDB_COL_NORMAL);
// set the tags value for ts_comp function
SSqlExpr *pExpr = tscSqlExprGet(pNewQueryInfo, 0);
@ -1821,7 +1822,262 @@ void tscUnlockByThread(int64_t *lockedBy) {
}
}
typedef struct SFirstRoundQuerySup {
SSqlObj *pParent;
int32_t numOfRows;
SArray *pColsInfo;
int32_t tagLen;
STColumn *pTagCols;
SArray *pResult; // SArray<SInterResult>
int64_t interval;
char* buf;
int32_t bufLen;
} SFirstRoundQuerySup;
void doAppendData(SInterResult* pInterResult, TAOS_ROW row, int32_t numOfCols, SQueryInfo* pQueryInfo) {
TSKEY key = INT64_MIN;
for(int32_t i = 0; i < numOfCols; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) {
continue;
}
if (pExpr->colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
key = *(TSKEY*) row[i];
continue;
}
double v = 0;
if (row[i] != NULL) {
v = *(double*) row[i];
} else {
SET_DOUBLE_NULL(&v);
}
int32_t id = pExpr->colInfo.colId;
int32_t numOfQueriedCols = (int32_t) taosArrayGetSize(pInterResult->pResult);
SArray* p = NULL;
for(int32_t j = 0; j < numOfQueriedCols; ++j) {
SStddevInterResult* pColRes = taosArrayGet(pInterResult->pResult, j);
if (pColRes->colId == id) {
p = pColRes->pResult;
break;
}
}
//append a new column
if (p == NULL) {
SStddevInterResult t = {.colId = id, .pResult = taosArrayInit(10, sizeof(SResPair)),};
taosArrayPush(pInterResult->pResult, &t);
p = t.pResult;
}
SResPair pair = {.avg = v, .key = key};
taosArrayPush(p, &pair);
}
}
void tscFirstRoundRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
SSqlObj* pSql = (SSqlObj*)tres;
SSqlRes* pRes = &pSql->res;
SFirstRoundQuerySup* pSup = param;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (numOfRows > 0) {
TAOS_ROW row = NULL;
int32_t numOfCols = taos_field_count(tres);
if (pSup->tagLen == 0) { // no tags, all rows belong to one group
SInterResult interResult = {.tags = NULL, .pResult = taosArrayInit(4, sizeof(SStddevInterResult))};
taosArrayPush(pSup->pResult, &interResult);
while ((row = taos_fetch_row(tres)) != NULL) {
doAppendData(&interResult, row, numOfCols, pQueryInfo);
}
} else { // tagLen > 0
char* p = calloc(1, pSup->tagLen);
while ((row = taos_fetch_row(tres)) != NULL) {
int32_t* length = taos_fetch_lengths(tres);
memset(p, 0, pSup->tagLen);
int32_t offset = 0;
for (int32_t i = 0; i < numOfCols && offset < pSup->tagLen; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) {
memcpy(p + offset, row[i], length[i]);
offset += pExpr->resBytes;
}
}
assert(offset == pSup->tagLen);
size_t size = taosArrayGetSize(pSup->pResult);
if (size > 0) {
SInterResult* pInterResult = taosArrayGetLast(pSup->pResult);
if (memcmp(pInterResult->tags, p, pSup->tagLen) == 0) { // belongs to the same group
doAppendData(pInterResult, row, numOfCols, pQueryInfo);
} else {
char* tags = malloc( pSup->tagLen);
memcpy(tags, p, pSup->tagLen);
SInterResult interResult = {.tags = tags, .pResult = taosArrayInit(4, sizeof(SStddevInterResult))};
taosArrayPush(pSup->pResult, &interResult);
doAppendData(&interResult, row, numOfCols, pQueryInfo);
}
} else {
char* tags = malloc(pSup->tagLen);
memcpy(tags, p, pSup->tagLen);
SInterResult interResult = {.tags = tags, .pResult = taosArrayInit(4, sizeof(SStddevInterResult))};
taosArrayPush(pSup->pResult, &interResult);
doAppendData(&interResult, row, numOfCols, pQueryInfo);
}
}
tfree(p);
}
}
pSup->numOfRows += numOfRows;
if (!pRes->completed) {
taos_fetch_rows_a(tres, tscFirstRoundRetrieveCallback, param);
return;
}
// set the parameters for the second round query process
SSqlObj *pParent = pSup->pParent;
SSqlCmd *pPCmd = &pParent->cmd;
SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(pPCmd, 0);
if (pSup->numOfRows > 0) {
SBufferWriter bw = tbufInitWriter(NULL, false);
interResToBinary(&bw, pSup->pResult, pSup->tagLen);
pQueryInfo1->bufLen = (int32_t) tbufTell(&bw);
pQueryInfo1->buf = tbufGetData(&bw, true);
// set the serialized binary string as the parameter of arithmetic expression
tbufCloseWriter(&bw);
}
taosArrayDestroyEx(pSup->pResult, freeInterResult);
taosArrayDestroy(pSup->pColsInfo);
tfree(pSup);
taos_free_result(pSql);
pQueryInfo1->round = 1;
tscDoQuery(pParent);
}
void tscFirstRoundCallback(void* param, TAOS_RES* tres, int code) {
int32_t c = taos_errno(tres);
if (c != TSDB_CODE_SUCCESS) {
// TODO HANDLE ERROR
}
taos_fetch_rows_a(tres, tscFirstRoundRetrieveCallback, param);
}
int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo* pTableMetaInfo1 = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
SFirstRoundQuerySup *pSup = calloc(1, sizeof(SFirstRoundQuerySup));
pSup->pParent = pSql;
pSup->interval = pQueryInfo->interval.interval;
pSup->pResult = taosArrayInit(6, sizeof(SStddevInterResult));
pSup->pColsInfo = taosArrayInit(6, sizeof(int16_t)); // result column id
SSqlObj *pNew = createSubqueryObj(pSql, 0, tscFirstRoundCallback, pSup, TSDB_SQL_SELECT, NULL);
SSqlCmd *pCmd = &pNew->cmd;
tscClearSubqueryInfo(pCmd);
tscFreeSqlResult(pSql);
SQueryInfo* pNewQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
assert(pQueryInfo->numOfTables == 1);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pNewQueryInfo, 0);
tscInitQueryInfo(pNewQueryInfo);
pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr;
if (pQueryInfo->groupbyExpr.columnInfo != NULL) {
pNewQueryInfo->groupbyExpr.columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo);
if (pNewQueryInfo->groupbyExpr.columnInfo == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
// goto _error;
}
}
if (tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond) != 0) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
// goto _error;
}
pNewQueryInfo->interval = pQueryInfo->interval;
pCmd->command = TSDB_SQL_SELECT;
pNew->fp = tscFirstRoundCallback;
int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
int32_t index = 0;
int32_t numOfTags = 0;
for(int32_t i = 0; i < numOfExprs; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
if (pExpr->functionId == TSDB_FUNC_TS && pQueryInfo->interval.interval > 0) {
taosArrayPush(pSup->pColsInfo, &pExpr->resColId);
SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
SSchema* schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->colInfo.colId);
SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TS, &colIndex, schema, TSDB_COL_NORMAL);
p->resColId = pExpr->resColId; // update the result column id
} else if (pExpr->functionId == TSDB_FUNC_STDDEV_DST) {
taosArrayPush(pSup->pColsInfo, &pExpr->resColId);
SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pExpr->colInfo.colIndex};
SSchema schema = {.type = TSDB_DATA_TYPE_DOUBLE, .bytes = sizeof(double)};
tstrncpy(schema.name, pExpr->aliasName, tListLen(schema.name));
SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_AVG, &colIndex, &schema, TSDB_COL_NORMAL);
p->resColId = pExpr->resColId; // update the result column id
} else if (pExpr->functionId == TSDB_FUNC_TAG) {
pSup->tagLen += pExpr->resBytes;
SColumnIndex colIndex = {.tableIndex = 0, .columnIndex = pExpr->colInfo.colIndex};
SSchema* schema = NULL;
if (pExpr->colInfo.colId != TSDB_TBNAME_COLUMN_INDEX) {
schema = tscGetColumnSchemaById(pTableMetaInfo1->pTableMeta, pExpr->colInfo.colId);
} else {
schema = tGetTbnameColumnSchema();
}
SSqlExpr* p = tscAddFuncInSelectClause(pNewQueryInfo, index++, TSDB_FUNC_TAG, &colIndex, schema, TSDB_COL_TAG);
p->resColId = pExpr->resColId;
numOfTags += 1;
}
}
SColumnIndex columnIndex = {.tableIndex = 0, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscInsertPrimaryTsSourceColumn(pNewQueryInfo, &columnIndex);
tscTansformFuncForSTableQuery(pNewQueryInfo);
tscDebug(
"%p first round subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, "
"numOfExpr:%" PRIzu ", colList:%d, numOfOutputFields:%d, name:%s",
pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type,
tscSqlExprNumOfExprs(pNewQueryInfo), index+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
tscHandleMasterSTableQuery(pNew);
return TSDB_CODE_SUCCESS;
}
int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
@ -1833,7 +2089,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
return pRes->code;
}
tExtMemBuffer ** pMemoryBuf = NULL;
tExtMemBuffer **pMemoryBuf = NULL;
tOrderDescriptor *pDesc = NULL;
SColumnModel *pModel = NULL;
SColumnModel *pFinalModel = NULL;
@ -1863,10 +2119,8 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
return ret;
}
pSql->pSubs = calloc(pState->numOfSub, POINTER_BYTES);
tscDebug("%p retrieved query data from %d vnode(s)", pSql, pState->numOfSub);
pSql->pSubs = calloc(pState->numOfSub, POINTER_BYTES);
if (pSql->pSubs == NULL) {
tfree(pSql->pSubs);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -2739,7 +2993,7 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
return;
}
tscRestoreSQLFuncForSTableQuery(pQueryInfo);
tscRestoreFuncForSTableQuery(pQueryInfo);
}
assert (pRes->row >= pRes->numOfRows);

View File

@ -107,11 +107,6 @@ bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) {
return false;
}
// for select query super table, the super table vgroup list can not be null in any cases.
// if (pQueryInfo->command == TSDB_SQL_SELECT && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
// assert(pTableMetaInfo->vgroupList != NULL);
// }
if ((pQueryInfo->type & TSDB_QUERY_TYPE_FREE_RESOURCE) == TSDB_QUERY_TYPE_FREE_RESOURCE) {
return false;
}
@ -1074,7 +1069,7 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
memset(pFieldInfo, 0, sizeof(SFieldInfo));
}
static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
static SSqlExpr* doCreateSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t resColId, int16_t interSize, int32_t colType) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex);
@ -1127,14 +1122,14 @@ SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functi
return tscSqlExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
}
SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
SSqlExpr* pExpr = doCreateSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
taosArrayInsert(pQueryInfo->exprList, index, &pExpr);
return pExpr;
}
SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) {
SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
SSqlExpr* pExpr = doCreateSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
taosArrayPush(pQueryInfo->exprList, &pExpr);
return pExpr;
}
@ -1158,6 +1153,22 @@ SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functi
return pExpr;
}
bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t index) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pQueryInfo->pTableMetaInfo[index])) {
return false;
}
int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
for(int32_t i = 0; i < numOfExprs; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
if (pExpr->functionId == TSDB_FUNC_STDDEV_DST) {
return true;
}
}
return false;
}
size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo) {
return taosArrayGetSize(pQueryInfo->exprList);
}
@ -1422,9 +1433,9 @@ int32_t tscValidateName(SStrToken* pToken) {
if (sep == NULL) { // single part
if (pToken->type == TK_STRING) {
strdequote(pToken->z);
tscDequoteAndTrimToken(pToken);
tscStrToLower(pToken->z, pToken->n);
pToken->n = (uint32_t)strtrim(pToken->z);
//pToken->n = (uint32_t)strtrim(pToken->z);
int len = tSQLGetToken(pToken->z, &pToken->type);
@ -1762,6 +1773,7 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) {
pQueryInfo->tsBuf = tsBufDestroy(pQueryInfo->tsBuf);
tfree(pQueryInfo->fillVal);
tfree(pQueryInfo->buf);
}
void tscClearSubqueryInfo(SSqlCmd* pCmd) {
@ -2029,7 +2041,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNew->signature = pNew;
pNew->sqlstr = strdup(pSql->sqlstr);
SSqlCmd* pnCmd = &pNew->cmd;
SSqlCmd* pnCmd = &pNew->cmd;
memcpy(pnCmd, pCmd, sizeof(SSqlCmd));
pnCmd->command = cmd;
@ -2068,7 +2080,18 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNewQueryInfo->clauseLimit = pQueryInfo->clauseLimit;
pNewQueryInfo->numOfTables = 0;
pNewQueryInfo->pTableMetaInfo = NULL;
pNewQueryInfo->bufLen = pQueryInfo->bufLen;
pNewQueryInfo->buf = malloc(pQueryInfo->bufLen);
if (pNewQueryInfo->buf == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
if (pQueryInfo->bufLen > 0) {
memcpy(pNewQueryInfo->buf, pQueryInfo->buf, pQueryInfo->bufLen);
}
pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr;
if (pQueryInfo->groupbyExpr.columnInfo != NULL) {
pNewQueryInfo->groupbyExpr.columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo);
@ -2234,6 +2257,9 @@ void tscDoQuery(SSqlObj* pSql) {
}
}
return;
} else if (tscMultiRoundQuery(pQueryInfo, 0) && pQueryInfo->round == 0) {
tscHandleFirstRoundStableQuery(pSql); // todo lock?
return;
} else if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) { // super table query
tscLockByThread(&pSql->squeryLock);

View File

@ -36,6 +36,11 @@ typedef struct SColumnInfoData {
void* pData; // the corresponding block data in memory
} SColumnInfoData;
typedef struct SResPair {
TSKEY key;
double avg;
} SResPair;
#define TSDB_DB_NAME_T 1
#define TSDB_TABLE_NAME_T 2
@ -58,7 +63,7 @@ size_t tableIdPrefix(const char* name, char* prefix, int32_t len);
void extractTableNameFromToken(SStrToken *pToken, SStrToken* pTable);
SSchema tGetTableNameColumnSchema();
//SSchema tGetTbnameColumnSchema();
SSchema tGetBlockDistColumnSchema();
@ -68,7 +73,7 @@ bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters);
SSchema tGetTbnameColumnSchema();
SSchema* tGetTbnameColumnSchema();
/**
* check if the schema is valid or not, including following aspects:

View File

@ -407,7 +407,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
SSchema* pSchema = exception_calloc(1, sizeof(SSchema));
left->pSchema = pSchema;
*pSchema = tGetTbnameColumnSchema();
*pSchema = *tGetTbnameColumnSchema();
tExprNode* right = exception_calloc(1, sizeof(tExprNode));
expr->_node.pRight = right;

View File

@ -59,7 +59,6 @@ char tsLocale[TSDB_LOCALE_LEN] = {0};
char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string
int8_t tsEnableCoreFile = 0;
int32_t tsMaxBinaryDisplayWidth = 30;
char tsTempDir[TSDB_FILENAME_LEN] = "/tmp/";
/*
* denote if the server needs to compress response message at the application layer to client, including query rsp,
@ -182,6 +181,7 @@ char tsDnodeDir[TSDB_FILENAME_LEN] = {0};
char tsMnodeDir[TSDB_FILENAME_LEN] = {0};
char tsDataDir[TSDB_FILENAME_LEN] = {0};
char tsScriptDir[TSDB_FILENAME_LEN] = {0};
char tsTempDir[TSDB_FILENAME_LEN] = "/tmp/";
int32_t tsDiskCfgNum = 0;

View File

@ -10,6 +10,7 @@
#define VALID_NAME_TYPE(x) ((x) == TSDB_DB_NAME_T || (x) == TSDB_TABLE_NAME_T)
//TODO remove it
void extractTableName(const char* tableId, char* name) {
size_t s1 = strcspn(tableId, &TS_PATH_DELIMITER[0]);
size_t s2 = strcspn(&tableId[s1 + 1], &TS_PATH_DELIMITER[0]);
@ -24,6 +25,7 @@ char* extractDBName(const char* tableId, char* name) {
return strncpy(name, &tableId[offset1 + 1], len);
}
// todo remove it
size_t tableIdPrefix(const char* name, char* prefix, int32_t len) {
tstrncpy(prefix, name, len);
strcat(prefix, TS_PATH_DELIMITER);
@ -31,14 +33,6 @@ size_t tableIdPrefix(const char* name, char* prefix, int32_t len) {
return strlen(prefix);
}
SSchema tGetTableNameColumnSchema() {
SSchema s = {0};
s.bytes = TSDB_TABLE_NAME_LEN - 1 + VARSTR_HEADER_SIZE;
s.type = TSDB_DATA_TYPE_BINARY;
s.colId = TSDB_TBNAME_COLUMN_INDEX;
tstrncpy(s.name, TSQL_TBNAME_L, TSDB_COL_NAME_LEN);
return s;
}
SSchema tGetBlockDistColumnSchema() {
SSchema s = {0};
s.bytes = TSDB_MAX_BINARY_LEN;;
@ -189,15 +183,15 @@ void extractTableNameFromToken(SStrToken* pToken, SStrToken* pTable) {
}
}
SSchema tGetTbnameColumnSchema() {
struct SSchema s = {
.colId = TSDB_TBNAME_COLUMN_INDEX,
.type = TSDB_DATA_TYPE_BINARY,
.bytes = TSDB_TABLE_NAME_LEN
};
static struct SSchema _s = {
.colId = TSDB_TBNAME_COLUMN_INDEX,
.type = TSDB_DATA_TYPE_BINARY,
.bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE,
.name = TSQL_TBNAME_L,
};
strcpy(s.name, TSQL_TBNAME_L);
return s;
SSchema* tGetTbnameColumnSchema() {
return &_s;
}
static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen) {

View File

@ -86,43 +86,53 @@ void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32
switch (type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: {
pVar->nLen = tDataTypes[type].bytes;
pVar->i64 = GET_INT8_VAL(pz);
break;
}
case TSDB_DATA_TYPE_UTINYINT: {
pVar->nLen = tDataTypes[type].bytes;
pVar->u64 = GET_UINT8_VAL(pz);
break;
}
case TSDB_DATA_TYPE_SMALLINT: {
pVar->nLen = tDataTypes[type].bytes;
pVar->i64 = GET_INT16_VAL(pz);
break;
}
case TSDB_DATA_TYPE_USMALLINT: {
pVar->nLen = tDataTypes[type].bytes;
pVar->u64 = GET_UINT16_VAL(pz);
break;
}
case TSDB_DATA_TYPE_INT: {
pVar->nLen = tDataTypes[type].bytes;
pVar->i64 = GET_INT32_VAL(pz);
break;
}
case TSDB_DATA_TYPE_UINT: {
pVar->nLen = tDataTypes[type].bytes;
pVar->u64 = GET_UINT32_VAL(pz);
break;
}
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP: {
pVar->nLen = tDataTypes[type].bytes;
pVar->i64 = GET_INT64_VAL(pz);
break;
}
case TSDB_DATA_TYPE_UBIGINT: {
pVar->nLen = tDataTypes[type].bytes;
pVar->u64 = GET_UINT64_VAL(pz);
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
pVar->nLen = tDataTypes[type].bytes;
pVar->dKey = GET_DOUBLE_VAL(pz);
break;
}
case TSDB_DATA_TYPE_FLOAT: {
pVar->nLen = tDataTypes[type].bytes;
pVar->dKey = GET_FLOAT_VAL(pz);
break;
}
@ -144,6 +154,7 @@ void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32
default:
pVar->i64 = GET_INT32_VAL(pz);
pVar->nLen = tDataTypes[TSDB_DATA_TYPE_INT].bytes;
}
pVar->nType = type;

View File

@ -34,44 +34,37 @@ import java.util.*;
import java.util.concurrent.Executor;
public class TSDBConnection implements Connection {
protected Properties props = null;
private TSDBJNIConnector connector = null;
private String catalog = null;
private TSDBDatabaseMetaData dbMetaData = null;
private TSDBDatabaseMetaData dbMetaData;
private Properties clientInfoProps = new Properties();
private int timeoutMilliseconds = 0;
private boolean batchFetch = false;
public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
this.dbMetaData = meta;
connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME),
info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME),
info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
String batchLoad = info.getProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD);
if (batchLoad != null) {
this.batchFetch = Boolean.parseBoolean(batchLoad);
this.batchFetch = Boolean.parseBoolean(batchLoad);
}
}
private void connect(String host, int port, String dbName, String user, String password) throws SQLException {
this.connector = new TSDBJNIConnector();
this.connector.connect(host, port, dbName, user, password);
try {
this.setCatalog(dbName);
} catch (SQLException e) {
e.printStackTrace();
}
this.setCatalog(dbName);
this.dbMetaData.setConnection(this);
}
@ -80,68 +73,86 @@ public class TSDBConnection implements Connection {
}
public Statement createStatement() throws SQLException {
if (!this.connector.isClosed()) {
TSDBStatement statement = new TSDBStatement(this, this.connector);
statement.setConnection(this);
return statement;
} else {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
TSDBStatement statement = new TSDBStatement(this, this.connector);
statement.setConnection(this);
return statement;
}
public TSDBSubscribe subscribe(String topic, String sql, boolean restart) throws SQLException {
if (this.connector.isClosed()) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
long id = this.connector.subscribe(topic, sql, restart, 0);
if (id == 0) {
throw new SQLException(TSDBConstants.WrapErrMsg("failed to create subscription"));
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED);
}
return new TSDBSubscribe(this.connector, id);
}
public PreparedStatement prepareStatement(String sql) throws SQLException {
if (!this.connector.isClosed()) {
return new TSDBPreparedStatement(this, this.connector, sql);
} else {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
return new TSDBPreparedStatement(this, this.connector, sql);
}
public CallableStatement prepareCall(String sql) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public String nativeSQL(String sql) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public void setAutoCommit(boolean autoCommit) throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
}
public boolean getAutoCommit() throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
return true;
}
public void commit() throws SQLException {
}
public void rollback() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void close() throws SQLException {
if (this.connector != null && !this.connector.isClosed()) {
this.connector.closeConnection();
} else {
throw new SQLException(TSDBConstants.WrapErrMsg("connection is already closed!"));
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
}
public void rollback() throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public void close() throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
this.connector.closeConnection();
}
public boolean isClosed() throws SQLException {
return this.connector.isClosed();
return this.connector != null && this.connector.isClosed();
}
/**
@ -154,6 +165,9 @@ public class TSDBConnection implements Connection {
* @throws SQLException if a database access error occurs
*/
public DatabaseMetaData getMetaData() throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
return this.dbMetaData;
}
@ -165,17 +179,29 @@ public class TSDBConnection implements Connection {
* @throws SQLException
*/
public void setReadOnly(boolean readOnly) throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
}
public boolean isReadOnly() throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
return true;
}
public void setCatalog(String catalog) throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
this.catalog = catalog;
}
public String getCatalog() throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
return this.catalog;
}
@ -187,6 +213,19 @@ public class TSDBConnection implements Connection {
* @throws SQLException
*/
public void setTransactionIsolation(int level) throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
switch (level) {
case Connection.TRANSACTION_NONE:
case Connection.TRANSACTION_READ_COMMITTED:
case Connection.TRANSACTION_READ_UNCOMMITTED:
case Connection.TRANSACTION_REPEATABLE_READ:
case Connection.TRANSACTION_SERIALIZABLE:
break;
default:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
}
}
/**
@ -196,60 +235,81 @@ public class TSDBConnection implements Connection {
* @throws SQLException
*/
public int getTransactionIsolation() throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
return Connection.TRANSACTION_NONE;
}
public SQLWarning getWarnings() throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
//todo: implement getWarnings according to the warning messages returned from TDengine
return null;
// throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void clearWarnings() throws SQLException {
// left blank to support HikariCP connection
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
//todo: implement clearWarnings according to the warning messages returned from TDengine
}
public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency)
throws SQLException {
// This method is implemented in the current way to support Spark
if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) {
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
}
if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY) {
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
}
return this.prepareStatement(sql);
}
public Boolean getBatchFetch() {
return this.batchFetch;
return this.batchFetch;
}
public void setBatchFetch(Boolean batchFetch) {
this.batchFetch = batchFetch;
this.batchFetch = batchFetch;
}
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public Map<String, Class<?>> getTypeMap() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public void setHoldability(int holdability) throws SQLException {
// intentionally left empty to support druid connection pool.
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
}
/**
@ -259,67 +319,111 @@ public class TSDBConnection implements Connection {
* @throws SQLException
*/
public int getHoldability() throws SQLException {
//intentionally left empty to support HikariCP connection.
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
return ResultSet.HOLD_CURSORS_OVER_COMMIT;
}
public Savepoint setSavepoint() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public Savepoint setSavepoint(String name) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public void rollback(Savepoint savepoint) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public void releaseSavepoint(Savepoint savepoint) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability)
throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency,
int resultSetHoldability) throws SQLException {
return this.prepareStatement(sql, resultSetType, resultSetConcurrency);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency,
int resultSetHoldability) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public Clob createClob() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public Blob createBlob() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public NClob createNClob() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public SQLXML createSQLXML() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public boolean isValid(int timeout) throws SQLException {
@ -338,31 +442,52 @@ public class TSDBConnection implements Connection {
}
public String getClientInfo(String name) throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
return clientInfoProps.getProperty(name);
}
public Properties getClientInfo() throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
return clientInfoProps;
}
public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public void setSchema(String schema) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public String getSchema() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public void abort(Executor executor) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
@ -370,14 +495,21 @@ public class TSDBConnection implements Connection {
}
public int getNetworkTimeout() throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
}
return this.timeoutMilliseconds;
}
public <T> T unwrap(Class<T> iface) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
try {
return iface.cast(this);
} catch (ClassCastException cce) {
throw new SQLException("Unable to unwrap to " + iface.toString());
}
}
public boolean isWrapperFor(Class<?> iface) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
return iface.isInstance(this);
}
}
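
Note on the unwrap/isWrapperFor change above: connection pools (the HikariCP/Druid support mentioned in earlier comments) call these methods to reach the vendor connection class, so they now succeed instead of throwing. A minimal usage sketch, not part of this commit and using only methods shown in this file; the URL and properties are hypothetical placeholders:

    Connection conn = DriverManager.getConnection(jdbcUrl, props);  // jdbcUrl/props are placeholders
    if (conn.isWrapperFor(TSDBConnection.class)) {
        TSDBConnection tsdbConn = conn.unwrap(TSDBConnection.class);
        tsdbConn.setBatchFetch(true);  // vendor-specific setter shown earlier in this file
    }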

View File

@ -19,12 +19,12 @@ import java.util.Map;
public abstract class TSDBConstants {
public static final String STATEMENT_CLOSED = "Statement already closed.";
public static final String DEFAULT_PORT = "6200";
public static final String STATEMENT_CLOSED = "statement is closed";
public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!";
public static final String INVALID_VARIABLES = "invalid variables";
public static final String RESULT_SET_IS_CLOSED = "resultSet is closed.";
public static final String RESULT_SET_IS_CLOSED = "resultSet is closed";
public static final String DEFAULT_PORT = "6200";
public static Map<Integer, String> DATATYPE_MAP = null;
public static final long JNI_NULL_POINTER = 0L;

View File

@ -0,0 +1,31 @@
package com.taosdata.jdbc;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
public class TSDBError {
private static Map<Integer, String> TSDBErrorMap = new HashMap<>();
static {
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED, "connection already closed");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD, "this operation is NOT supported currently!");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "invalid variables");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED, "statement is closed");
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED, "resultSet is closed");
/**************************************************/
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED, "failed to create subscription");
}
public static String wrapErrMsg(String msg) {
return "TDengine Error: " + msg;
}
public static SQLException createSQLException(int errorNumber) {
// error numbers up to 0x2350 are JDBC-level errors and are returned as plain messages
if (errorNumber <= 0x2350)
return new SQLException(TSDBErrorMap.get(errorNumber));
// error numbers above 0x2350 are JNI-level errors and get the "TDengine Error: " prefix
return new SQLException(wrapErrMsg(TSDBErrorMap.get(errorNumber)));
}
}

View File

@ -0,0 +1,15 @@
package com.taosdata.jdbc;
public class TSDBErrorNumbers {
public static final int ERROR_CONNECTION_CLOSED = 0x2301; // connection already closed
public static final int ERROR_UNSUPPORTED_METHOD = 0x2302; //this operation is NOT supported currently!
public static final int ERROR_INVALID_VARIABLE = 0x2303; //invalid variables
public static final int ERROR_STATEMENT_CLOSED = 0x2304; //statement already closed
public static final int ERROR_RESULTSET_CLOSED = 0x2305; //resultSet is closed
public static final int ERROR_SUBSCRIBE_FAILED = 0x2350; //failed to create subscription
private TSDBErrorNumbers() {
}
}
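
To make the error-number split concrete, here is a short sketch (not part of this commit) of how TSDBError.createSQLException behaves with the constants defined above; note that ERROR_SUBSCRIBE_FAILED sits exactly on the 0x2350 boundary:

    SQLException e1 = TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED);
    // e1.getMessage() -> "connection already closed" (plain JDBC-level message)

    SQLException e2 = TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED);
    // 0x2350 still satisfies the `<= 0x2350` check, so it is also returned as a plain
    // message; only codes above 0x2350 would be wrapped with the "TDengine Error: " prefix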

View File

@ -22,7 +22,6 @@ import java.util.List;
public class TSDBStatement implements Statement {
private TSDBJNIConnector connector;
private TaosInfo taosInfo = TaosInfo.getInstance();
/**
* To store batched commands
@ -69,13 +68,12 @@ public class TSDBStatement implements Statement {
}
public ResultSet executeQuery(String sql) throws SQLException {
if (isClosed) {
throw new SQLException("Invalid method call on a closed statement.");
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
}
// TODO make sure it is not an update query
pSql = this.connector.executeQuery(sql);
long resultSetPointer = this.connector.getResultSet();
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connector.freeResultSet(pSql);
@ -100,8 +98,8 @@ public class TSDBStatement implements Statement {
}
public int executeUpdate(String sql) throws SQLException {
if (isClosed) {
throw new SQLException("Invalid method call on a closed statement.");
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
}
// TODO check if current query is update query
@ -133,25 +131,33 @@ public class TSDBStatement implements Statement {
}
public int getMaxFieldSize() throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
}
return 0;
// throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void setMaxFieldSize(int max) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
}
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public int getMaxRows() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
}
// always set maxRows to zero, meaning unlimited rows in a resultSet
return 0;
}
public void setMaxRows(int max) throws SQLException {
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
}
// always set maxRows to zero, meaning unlimited rows in a resultSet
}

View File

@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
version="2.0.4",
version="2.0.5",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",

View File

@ -1,7 +1,6 @@
from .cinterface import CTaosInterface
from .error import *
from .constants import FieldType
import threading
class TDengineCursor(object):
@ -36,7 +35,6 @@ class TDengineCursor(object):
self._block_iter = 0
self._affected_rows = 0
self._logfile = ""
self._threadId = threading.get_ident()
if connection is not None:
self._connection = connection

View File

@ -91,6 +91,20 @@ void cqRmFromList(SCqObj *pObj) {
}
static void freeSCqContext(void *handle) {
if (handle == NULL) {
return;
}
SCqContext *pContext = handle;
pthread_mutex_destroy(&pContext->mutex);
taosTmrCleanUp(pContext->tmrCtrl);
pContext->tmrCtrl = NULL;
cDebug("vgId:%d, CQ is closed", pContext->vgId);
free(pContext);
}
void cqFree(void *handle) {
if (tsEnableStream == 0) {
return;
@ -125,13 +139,7 @@ void cqFree(void *handle) {
pthread_mutex_unlock(&pContext->mutex);
if (delete) {
pthread_mutex_destroy(&pContext->mutex);
taosTmrCleanUp(pContext->tmrCtrl);
pContext->tmrCtrl = NULL;
cDebug("vgId:%d, CQ is closed", pContext->vgId);
free(pContext);
freeSCqContext(pContext);
}
}
@ -184,18 +192,7 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) {
return pContext;
}
static void freeSCqContext(void *handle) {
if (handle == NULL) {
return;
}
SCqContext *pContext = handle;
pthread_mutex_destroy(&pContext->mutex);
taosTmrCleanUp(pContext->tmrCtrl);
pContext->tmrCtrl = NULL;
cDebug("vgId:%d, CQ is closed", pContext->vgId);
free(pContext);
}
void cqClose(void *handle) {
if (tsEnableStream == 0) {
return;
@ -204,6 +201,8 @@ void cqClose(void *handle) {
if (handle == NULL) return;
pContext->delete = 1;
int32_t hasCq = 0;
int32_t existLoop = 0;
// stop all CQs
cqStop(pContext);
@ -218,6 +217,12 @@ void cqClose(void *handle) {
cqRmFromList(pObj);
rid = pObj->rid;
hasCq = 1;
if (pContext->pHead == NULL) {
existLoop = 1;
}
} else {
pthread_mutex_unlock(&pContext->mutex);
break;
@ -226,9 +231,15 @@ void cqClose(void *handle) {
pthread_mutex_unlock(&pContext->mutex);
taosRemoveRef(cqObjRef, rid);
if (existLoop) {
break;
}
}
freeSCqContext(pContext);
if (hasCq == 0) {
freeSCqContext(pContext);
}
}
void cqStart(void *handle) {

View File

@ -36,12 +36,12 @@ int32_t dnodeInitShell() {
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_QUERY] = dnodeDispatchToVReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_FETCH] = dnodeDispatchToVReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = dnodeDispatchToVWriteQueue;
// the following message shall be treated as mnode write
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_ACCT] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_ACCT] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_ACCT] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_USER] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_ACCT] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_USER] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_USER] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_USER] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_DNODE]= dnodeDispatchToMWriteQueue;
@ -57,13 +57,13 @@ int32_t dnodeInitShell() {
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_KILL_STREAM] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_KILL_CONN] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CONFIG_DNODE]= dnodeDispatchToMWriteQueue;
// the following message shall be treated as mnode query
// the following message shall be treated as mnode query
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_HEARTBEAT] = dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CONNECT] = dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_USE_DB] = dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_TABLE_META] = dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_STABLE_VGROUP]= dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_TABLE_META] = dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_STABLE_VGROUP]= dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_TABLES_META] = dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SHOW] = dnodeDispatchToMReadQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE] = dnodeDispatchToMReadQueue;
@ -153,7 +153,7 @@ static int32_t dnodeAuthNettestUser(char *user, char *spi, char *encrypt, char *
}
static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char *secret, char *ckey) {
if (dnodeAuthNettestUser(user, spi, encrypt, secret, ckey) == 0) return 0;
if (dnodeAuthNettestUser(user, spi, encrypt, secret, ckey) == 0) return 0;
int code = mnodeRetriveAuth(user, spi, encrypt, secret, ckey);
if (code != TSDB_CODE_APP_NOT_READY) return code;
@ -164,7 +164,7 @@ static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char
rpcMsg.pCont = pMsg;
rpcMsg.contLen = sizeof(SAuthMsg);
rpcMsg.msgType = TSDB_MSG_TYPE_DM_AUTH;
dDebug("user:%s, send auth msg to mnodes", user);
SRpcMsg rpcRsp = {0};
dnodeSendMsgToMnodeRecv(&rpcMsg, &rpcRsp);
@ -202,14 +202,14 @@ void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid) {
SRpcMsg rpcRsp = {0};
dnodeSendMsgToMnodeRecv(&rpcMsg, &rpcRsp);
terrno = rpcRsp.code;
if (rpcRsp.code != 0) {
rpcFreeCont(rpcRsp.pCont);
dError("vgId:%d, tid:%d failed to config table from mnode", vgId, tid);
return NULL;
} else {
dInfo("vgId:%d, tid:%d config table msg is received", vgId, tid);
// delete this after debugging is finished
SMDCreateTableMsg *pTable = rpcRsp.pCont;
int16_t numOfColumns = htons(pTable->numOfColumns);
@ -231,4 +231,4 @@ SStatisInfo dnodeGetStatisInfo() {
}
return info;
}
}

View File

@ -69,6 +69,6 @@ int32_t dnodeStepInit(SStep *pSteps, int32_t stepSize) {
return 0;
}
void dnodeStepCleanup(SStep *pSteps, int32_t stepSize) {
void dnodeStepCleanup(SStep *pSteps, int32_t stepSize) {
taosStepCleanupImp(pSteps, stepSize - 1);
}
}

View File

@ -181,4 +181,4 @@ static void sigintHandler(int32_t signum, void *sigInfo, void *context) {
#ifdef WINDOWS
tsem_wait(&exitSem);
#endif
}
}

View File

@ -53,7 +53,7 @@ void dnodeCleanupVWrite() {
for (int32_t i = 0; i < tsVWriteWP.max; ++i) {
SVWriteWorker *pWorker = tsVWriteWP.worker + i;
if (taosCheckPthreadValid(pWorker->thread)) {
taosQsetThreadResume(pWorker->qset);
if (pWorker->qset) taosQsetThreadResume(pWorker->qset);
}
}

View File

@ -496,6 +496,7 @@ typedef struct {
int32_t tsOrder; // ts comp block order
int32_t numOfTags; // number of tags columns involved
int32_t sqlstrLen; // sql query string
int32_t prevResultLen; // previous result length
SColumnInfo colList[];
} SQueryTableMsg;

View File

@ -908,6 +908,7 @@ int main(int argc, char *argv[]) {
}
pthread_join(read_id, NULL);
taos_close(rInfo->taos);
free(rInfo);
}
taos_cleanup();

View File

@ -93,6 +93,8 @@ extern char configDir[];
#define MAX_QUERY_SQL_COUNT 10
#define MAX_QUERY_SQL_LENGTH 256
#define MAX_DATABASE_COUNT 256
typedef enum CREATE_SUB_TALBE_MOD_EN {
PRE_CREATE_SUBTBL,
AUTO_CREATE_SUBTBL,
@ -116,7 +118,41 @@ enum QUERY_TYPE {
INSERT_TYPE,
QUERY_TYPE_BUT
} ;
enum _show_db_index {
TSDB_SHOW_DB_NAME_INDEX,
TSDB_SHOW_DB_CREATED_TIME_INDEX,
TSDB_SHOW_DB_NTABLES_INDEX,
TSDB_SHOW_DB_VGROUPS_INDEX,
TSDB_SHOW_DB_REPLICA_INDEX,
TSDB_SHOW_DB_QUORUM_INDEX,
TSDB_SHOW_DB_DAYS_INDEX,
TSDB_SHOW_DB_KEEP_INDEX,
TSDB_SHOW_DB_CACHE_INDEX,
TSDB_SHOW_DB_BLOCKS_INDEX,
TSDB_SHOW_DB_MINROWS_INDEX,
TSDB_SHOW_DB_MAXROWS_INDEX,
TSDB_SHOW_DB_WALLEVEL_INDEX,
TSDB_SHOW_DB_FSYNC_INDEX,
TSDB_SHOW_DB_COMP_INDEX,
TSDB_SHOW_DB_CACHELAST_INDEX,
TSDB_SHOW_DB_PRECISION_INDEX,
TSDB_SHOW_DB_UPDATE_INDEX,
TSDB_SHOW_DB_STATUS_INDEX,
TSDB_MAX_SHOW_DB
};
// -----------------------------------------SHOW TABLES CONFIGURE -------------------------------------
enum _show_stables_index {
TSDB_SHOW_STABLES_NAME_INDEX,
TSDB_SHOW_STABLES_CREATED_TIME_INDEX,
TSDB_SHOW_STABLES_COLUMNS_INDEX,
TSDB_SHOW_STABLES_METRIC_INDEX,
TSDB_SHOW_STABLES_UID_INDEX,
TSDB_SHOW_STABLES_TID_INDEX,
TSDB_SHOW_STABLES_VGID_INDEX,
TSDB_MAX_SHOW_STABLES
};
enum _describe_table_index {
TSDB_DESCRIBE_METRIC_FIELD_INDEX,
TSDB_DESCRIBE_METRIC_TYPE_INDEX,
@ -219,6 +255,28 @@ typedef struct SSuperTable_S {
int64_t totalAffectedRows;
} SSuperTable;
typedef struct {
char name[TSDB_DB_NAME_LEN + 1];
char create_time[32];
int32_t ntables;
int32_t vgroups;
int16_t replica;
int16_t quorum;
int16_t days;
char keeplist[32];
int32_t cache; //MB
int32_t blocks;
int32_t minrows;
int32_t maxrows;
int8_t wallevel;
int32_t fsync;
int8_t comp;
int8_t cachelast;
char precision[8]; // time resolution
int8_t update;
char status[16];
} SDbInfo;
typedef struct SDbCfg_S {
// int maxtablesPerVnode;
int minRows;
@ -1126,6 +1184,272 @@ static void printfQueryMeta() {
printf("\033[1m\033[40;32m================ query.json parse result ================\033[0m\n");
}
static char* xFormatTimestamp(char* buf, int64_t val, int precision) {
time_t tt;
if (precision == TSDB_TIME_PRECISION_MICRO) {
tt = (time_t)(val / 1000000);
} else {
tt = (time_t)(val / 1000);
}
/* commented out as it makes test cases like select_with_tags.sim fail,
but on Windows this may cause the call to localtime to crash if tt < 0;
a better solution is needed.
if (tt < 0) {
tt = 0;
}
*/
#ifdef WINDOWS
if (tt < 0) tt = 0;
#endif
struct tm* ptm = localtime(&tt);
size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);
if (precision == TSDB_TIME_PRECISION_MICRO) {
sprintf(buf + pos, ".%06d", (int)(val % 1000000));
} else {
sprintf(buf + pos, ".%03d", (int)(val % 1000));
}
return buf;
}
static void xDumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_t length, int precision) {
if (val == NULL) {
fprintf(fp, "%s", TSDB_DATA_NULL_STR);
return;
}
char buf[TSDB_MAX_BYTES_PER_ROW];
switch (field->type) {
case TSDB_DATA_TYPE_BOOL:
fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0));
break;
case TSDB_DATA_TYPE_TINYINT:
fprintf(fp, "%d", *((int8_t *)val));
break;
case TSDB_DATA_TYPE_SMALLINT:
fprintf(fp, "%d", *((int16_t *)val));
break;
case TSDB_DATA_TYPE_INT:
fprintf(fp, "%d", *((int32_t *)val));
break;
case TSDB_DATA_TYPE_BIGINT:
fprintf(fp, "%" PRId64, *((int64_t *)val));
break;
case TSDB_DATA_TYPE_FLOAT:
fprintf(fp, "%.5f", GET_FLOAT_VAL(val));
break;
case TSDB_DATA_TYPE_DOUBLE:
fprintf(fp, "%.9f", GET_DOUBLE_VAL(val));
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
memcpy(buf, val, length);
buf[length] = 0;
fprintf(fp, "\'%s\'", buf);
break;
case TSDB_DATA_TYPE_TIMESTAMP:
xFormatTimestamp(buf, *(int64_t*)val, precision);
fprintf(fp, "'%s'", buf);
break;
default:
break;
}
}
static int xDumpResultToFile(const char* fname, TAOS_RES* tres) {
TAOS_ROW row = taos_fetch_row(tres);
if (row == NULL) {
return 0;
}
FILE* fp = fopen(fname, "at");
if (fp == NULL) {
fprintf(stderr, "ERROR: failed to open file: %s\n", fname);
return -1;
}
int num_fields = taos_num_fields(tres);
TAOS_FIELD *fields = taos_fetch_fields(tres);
int precision = taos_result_precision(tres);
for (int col = 0; col < num_fields; col++) {
if (col > 0) {
fprintf(fp, ",");
}
fprintf(fp, "%s", fields[col].name);
}
fputc('\n', fp);
int numOfRows = 0;
do {
int32_t* length = taos_fetch_lengths(tres);
for (int i = 0; i < num_fields; i++) {
if (i > 0) {
fputc(',', fp);
}
xDumpFieldToFile(fp, (const char*)row[i], fields +i, length[i], precision);
}
fputc('\n', fp);
numOfRows++;
row = taos_fetch_row(tres);
} while( row != NULL);
fclose(fp);
return numOfRows;
}
static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
TAOS_RES * res;
TAOS_ROW row = NULL;
int count = 0;
res = taos_query(taos, "show databases;");
int32_t code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run <show databases>, reason: %s\n", taos_errstr(res));
return -1;
}
TAOS_FIELD *fields = taos_fetch_fields(res);
while ((row = taos_fetch_row(res)) != NULL) {
// sys database name : 'log'
if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) continue;
dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
if (dbInfos[count] == NULL) {
fprintf(stderr, "failed to allocate memory for some dbInfo[%d]\n", count);
return -1;
}
strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
xFormatTimestamp(dbInfos[count]->create_time, *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], TSDB_TIME_PRECISION_MILLI);
dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
strncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], fields[TSDB_SHOW_DB_STATUS_INDEX].bytes);
count++;
if (count > MAX_DATABASE_COUNT) {
fprintf(stderr, "The database count overflow than %d\n", MAX_DATABASE_COUNT);
break;
}
}
return count;
}
static void printfDbInfoForQueryToFile(char* filename, SDbInfo* dbInfos, int index) {
FILE *fp = NULL;
if (filename[0] != 0) {
fp = fopen(filename, "at");
if (fp == NULL) {
fprintf(stderr, "failed to open file: %s\n", filename);
return;
}
}
fprintf(fp, "================ database[%d] ================\n", index);
fprintf(fp, "name: %s\n", dbInfos->name);
fprintf(fp, "created_time: %s\n", dbInfos->create_time);
fprintf(fp, "ntables: %d\n", dbInfos->ntables);
fprintf(fp, "vgroups: %d\n", dbInfos->vgroups);
fprintf(fp, "replica: %d\n", dbInfos->replica);
fprintf(fp, "quorum: %d\n", dbInfos->quorum);
fprintf(fp, "days: %d\n", dbInfos->days);
fprintf(fp, "keep1,keep2,keep(D): %s\n", dbInfos->keeplist);
fprintf(fp, "cache(MB): %d\n", dbInfos->cache);
fprintf(fp, "blocks: %d\n", dbInfos->blocks);
fprintf(fp, "minrows: %d\n", dbInfos->minrows);
fprintf(fp, "maxrows: %d\n", dbInfos->maxrows);
fprintf(fp, "wallevel: %d\n", dbInfos->wallevel);
fprintf(fp, "fsync: %d\n", dbInfos->fsync);
fprintf(fp, "comp: %d\n", dbInfos->comp);
fprintf(fp, "cachelast: %d\n", dbInfos->cachelast);
fprintf(fp, "precision: %s\n", dbInfos->precision);
fprintf(fp, "update: %d\n", dbInfos->update);
fprintf(fp, "status: %s\n", dbInfos->status);
fprintf(fp, "\n");
fclose(fp);
}
static void printfQuerySystemInfo(TAOS * taos) {
char filename[MAX_QUERY_SQL_LENGTH+1] = {0};
char buffer[MAX_QUERY_SQL_LENGTH+1] = {0};
TAOS_RES* res;
time_t t;
struct tm* lt;
time(&t);
lt = localtime(&t);
snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec);
// show variables
res = taos_query(taos, "show variables;");
//getResult(res, filename);
xDumpResultToFile(filename, res);
// show dnodes
res = taos_query(taos, "show dnodes;");
xDumpResultToFile(filename, res);
//getResult(res, filename);
// show databases
res = taos_query(taos, "show databases;");
SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *));
if (dbInfos == NULL) {
fprintf(stderr, "failed to allocate memory\n");
return;
}
int dbCount = getDbFromServer(taos, dbInfos);
if (dbCount <= 0) return;
for (int i = 0; i < dbCount; i++) {
// printf database info
printfDbInfoForQueryToFile(filename, dbInfos[i], i);
// show db.vgroups
snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name);
res = taos_query(taos, buffer);
xDumpResultToFile(filename, res);
// show db.stables
snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name);
res = taos_query(taos, buffer);
xDumpResultToFile(filename, res);
free(dbInfos[i]);
}
free(dbInfos);
}
#ifdef TD_LOWA_CURL
static size_t responseCallback(void *contents, size_t size, size_t nmemb, void *userp)
{
@ -4134,7 +4458,7 @@ void *subQueryProcess(void *sarg) {
int queryTestProcess() {
TAOS * taos = NULL;
taos_init();
taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, g_queryInfo.dbName, g_queryInfo.port);
taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, NULL, g_queryInfo.port);
if (taos == NULL) {
fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
exit(-1);
@ -4147,6 +4471,8 @@ int queryTestProcess() {
printfQueryMeta();
printf("Press enter key to continue\n\n");
(void)getchar();
printfQuerySystemInfo(taos);
pthread_t *pids = NULL;
threadInfo *infos = NULL;

View File

@ -679,7 +679,7 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
pShow->pIter = mnodeGetNextDb(pShow->pIter, &pDb);
if (pDb == NULL) break;
if (pDb->pAcct != pUser->pAcct) {
if (pDb->pAcct != pUser->pAcct || pDb->status != TSDB_DB_STATUS_READY) {
mnodeDecDbRef(pDb);
continue;
}

View File

@ -1081,20 +1081,13 @@ static int32_t mnodeDropSuperTableCb(SMnodeMsg *pMsg, int32_t code) {
SSTableObj *pTable = (SSTableObj *)pMsg->pTable;
if (code != TSDB_CODE_SUCCESS) {
mError("msg:%p, app:%p stable:%s, failed to drop, sdb error", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId);
} else {
mLInfo("msg:%p, app:%p stable:%s, is dropped from sdb", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId);
return code;
}
return code;
}
static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) {
if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR;
mLInfo("msg:%p, app:%p stable:%s, is dropped from sdb", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId);
SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mInfo("msg:%p, app:%p stable:%s will be dropped, hash:%p sizeOfVgList:%d", pMsg, pMsg->rpcMsg.ahandle,
pStable->info.tableId, pStable->vgHash, taosHashGetSize(pStable->vgHash));
if (pStable->vgHash != NULL /*pStable->numOfTables != 0*/) {
int32_t *pVgId = taosHashIterate(pStable->vgHash, NULL);
while (pVgId) {
@ -1122,6 +1115,16 @@ static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) {
mnodeDropAllChildTablesInStable(pStable);
}
return TSDB_CODE_SUCCESS;
}
static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) {
if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR;
SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
mInfo("msg:%p, app:%p stable:%s will be dropped, hash:%p sizeOfVgList:%d", pMsg, pMsg->rpcMsg.ahandle,
pStable->info.tableId, pStable->vgHash, taosHashGetSize(pStable->vgHash));
SSdbRow row = {
.type = SDB_OPER_GLOBAL,
.pTable = tsSuperTableSdb,
@ -1461,9 +1464,9 @@ static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow,
int32_t cols = 0;
SSchema *pSchema = pMeta->schema;
SSchema tbnameSchema = tGetTableNameColumnSchema();
pShow->bytes[cols] = tbnameSchema.bytes;
pSchema[cols].type = tbnameSchema.type;
SSchema* tbnameSchema = tGetTbnameColumnSchema();
pShow->bytes[cols] = tbnameSchema->bytes;
pSchema[cols].type = tbnameSchema->type;
strcpy(pSchema[cols].name, "name");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
@ -2821,9 +2824,9 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void
int32_t cols = 0;
SSchema *pSchema = pMeta->schema;
SSchema s = tGetTableNameColumnSchema();
pShow->bytes[cols] = s.bytes;
pSchema[cols].type = s.type;
SSchema* s = tGetTbnameColumnSchema();
pShow->bytes[cols] = s->bytes;
pSchema[cols].type = s->type;
strcpy(pSchema[cols].name, "table_name");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
@ -2840,9 +2843,9 @@ static int32_t mnodeGetShowTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
SSchema tbCol = tGetTableNameColumnSchema();
pShow->bytes[cols] = tbCol.bytes + VARSTR_HEADER_SIZE;
pSchema[cols].type = tbCol.type;
SSchema* tbCol = tGetTbnameColumnSchema();
pShow->bytes[cols] = tbCol->bytes + VARSTR_HEADER_SIZE;
pSchema[cols].type = tbCol->type;
strcpy(pSchema[cols].name, "stable_name");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
@ -3076,9 +3079,9 @@ static int32_t mnodeGetStreamTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, vo
int32_t cols = 0;
SSchema *pSchema = pMeta->schema;
SSchema tbnameColSchema = tGetTableNameColumnSchema();
pShow->bytes[cols] = tbnameColSchema.bytes;
pSchema[cols].type = tbnameColSchema.type;
SSchema* tbnameColSchema = tGetTbnameColumnSchema();
pShow->bytes[cols] = tbnameColSchema->bytes;
pSchema[cols].type = tbnameColSchema->type;
strcpy(pSchema[cols].name, "table_name");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;

View File

@ -85,6 +85,7 @@ extern "C" {
#define TAOS_OS_FUNC_STRING_STR2INT64
#define TAOS_OS_FUNC_SYSINFO
#define TAOS_OS_FUNC_TIMER
#define TAOS_OS_FUNC_SEMPHONE_PTHREAD
// specific
#define htobe64 htonll
@ -114,7 +115,7 @@ int64_t tsosStr2int64(char *str);
void taos_block_sigalrm(void);
#define TAOS_OS_DEF_EPOLL
#define TAOS_EPOLL_WAIT_TIME 500
#define TAOS_EPOLL_WAIT_TIME 500
typedef int32_t SOCKET;
typedef SOCKET EpollFd;
#define EpollClose(pollFd) epoll_close(pollFd)

View File

@ -26,10 +26,6 @@ extern "C" {
#endif
#endif
#ifndef STDERR_FILENO
#define STDERR_FILENO (2)
#endif
#define FD_VALID(x) ((x) > STDERR_FILENO)
#define FD_INITIALIZER ((int32_t)-1)

View File

@ -46,6 +46,8 @@
#include "msvcFcntl.h"
#include "msvcLibgen.h"
#include "msvcStdio.h"
#include "msvcUnistd.h"
#include "msvcLibgen.h"
#include "sys/msvcStat.h"
#include "sys/msvcTypes.h"
@ -144,7 +146,6 @@ typedef int (*__compar_fn_t)(const void *, const void *);
#define in_addr_t unsigned long
#define socklen_t int
#define htobe64 htonll
#define getpid _getpid
struct tm *localtime_r(const time_t *timep, struct tm *result);
char * strptime(const char *buf, const char *fmt, struct tm *tm);
@ -153,15 +154,8 @@ char * getpass(const char *prefix);
int flock(int fd, int option);
int fsync(int filedes);
char * strndup(const char *s, size_t n);
char * dirname(char *pszPathname);
int gettimeofday(struct timeval *ptv, void *pTimeZone);
// for access function in io.h
#define F_OK 00 //Existence only
#define W_OK 02 //Write - only
#define R_OK 04 //Read - only
#define X_OK 06 //Read and write
// for send function in tsocket.c
#define MSG_NOSIGNAL 0
#define SO_NO_CHECK 0x1234
@ -208,8 +202,6 @@ typedef struct {
int wordexp(char *words, wordexp_t *pwordexp, int flags);
void wordfree(wordexp_t *pwordexp);
char *realpath(char *path, char *resolved_path);
#define openlog(a, b, c)
#define closelog()
#define LOG_ERR 0

View File

@ -17,17 +17,29 @@
#include "os.h"
#include "tglobal.h"
static const char* expand_like_shell(const char *path) {
static __thread char buf[TSDB_FILENAME_LEN];
buf[0] = '\0';
wordexp_t we;
if (wordexp(path, &we, 0)) return "/tmp/taosd";
if (sizeof(buf)<=snprintf(buf, sizeof(buf), "%s", we.we_wordv[0])) return "/tmp/taosd";
wordfree(&we);
return buf;
}
void osInit() {
if (configDir[0] == 0) {
strcpy(configDir, "~/TDengine/cfg");
strcpy(configDir, expand_like_shell("~/TDengine/cfg"));
}
strcpy(tsVnodeDir, "");
strcpy(tsDnodeDir, "");
strcpy(tsMnodeDir, "");
strcpy(tsDataDir, "~/TDengine/data");
strcpy(tsLogDir, "~/TDengine/log");
strcpy(tsScriptDir, "~/TDengine/cfg");
strcpy(tsDataDir, expand_like_shell("~/TDengine/data"));
strcpy(tsLogDir, expand_like_shell("~/TDengine/log"));
strcpy(tsScriptDir, expand_like_shell("~/TDengine/cfg"));
strcpy(tsOsName, "Darwin");
}

View File

@ -17,71 +17,49 @@
#include "os.h"
#include "tulog.h"
#define _SEND_FILE_STEP_ 1000
int64_t taosFSendFile(FILE *out_file, FILE *in_file, int64_t *offset, int64_t count) {
fseek(in_file, (int32_t)(*offset), 0);
int writeLen = 0;
uint8_t buffer[_SEND_FILE_STEP_] = {0};
for (int len = 0; len < (count - _SEND_FILE_STEP_); len += _SEND_FILE_STEP_) {
size_t rlen = fread(buffer, 1, _SEND_FILE_STEP_, in_file);
if (rlen <= 0) {
return writeLen;
} else if (rlen < _SEND_FILE_STEP_) {
fwrite(buffer, 1, rlen, out_file);
return (int)(writeLen + rlen);
} else {
fwrite(buffer, 1, _SEND_FILE_STEP_, in_file);
writeLen += _SEND_FILE_STEP_;
}
int r = 0;
if (offset) {
r = fseek(in_file, *offset, SEEK_SET);
if (r==-1) return -1;
}
int remain = count - writeLen;
if (remain > 0) {
size_t rlen = fread(buffer, 1, remain, in_file);
if (rlen <= 0) {
return writeLen;
} else {
fwrite(buffer, 1, remain, out_file);
writeLen += remain;
off_t len = count;
while (len>0) {
char buf[1024*16];
off_t n = sizeof(buf);
if (len<n) n = len;
size_t m = fread(buf, 1, n, in_file);
if (m<n) {
int e = ferror(in_file);
if (e) return -1;
}
if (m==0) break;
if (m!=fwrite(buf, 1, m, out_file)) {
return -1;
}
len -= m;
}
return writeLen;
return count - len;
}
int64_t taosSendFile(SOCKET dfd, int32_t sfd, int64_t* offset, int64_t count) {
lseek(sfd, (int32_t)(*offset), 0);
int64_t writeLen = 0;
uint8_t buffer[_SEND_FILE_STEP_] = { 0 };
for (int64_t len = 0; len < (count - _SEND_FILE_STEP_); len += _SEND_FILE_STEP_) {
int32_t rlen = (int32_t)read(sfd, buffer, _SEND_FILE_STEP_);
if (rlen <= 0) {
return writeLen;
}
else if (rlen < _SEND_FILE_STEP_) {
taosWriteSocket(dfd, buffer, rlen);
return (int64_t)(writeLen + rlen);
}
else {
taosWriteSocket(dfd, buffer, _SEND_FILE_STEP_);
writeLen += _SEND_FILE_STEP_;
}
int r = 0;
if (offset) {
r = lseek(sfd, *offset, SEEK_SET);
if (r==-1) return -1;
}
int64_t remain = count - writeLen;
if (remain > 0) {
int32_t rlen = read(sfd, buffer, (int32_t)remain);
if (rlen <= 0) {
return writeLen;
}
else {
taosWriteSocket(sfd, buffer, (int32_t)remain);
writeLen += remain;
}
off_t len = count;
while (len>0) {
char buf[1024*16];
off_t n = sizeof(buf);
if (len<n) n = len;
size_t m = read(sfd, buf, n);
if (m==-1) return -1;
if (m==0) break;
size_t l = write(dfd, buf, m);
if (l==-1) return -1;
len -= l;
}
return writeLen;
return count - len;
}

View File

@ -21,6 +21,8 @@
#define _DEFAULT_SOURCE
#include "os.h"
#include <libproc.h>
// #define SEM_USE_PTHREAD
// #define SEM_USE_POSIX
#define SEM_USE_SEM
@ -279,3 +281,41 @@ int tsem_destroy(tsem_t *sem) {
return 0;
}
bool taosCheckPthreadValid(pthread_t thread) {
uint64_t id = 0;
int r = pthread_threadid_np(thread, &id);
return r ? false : true;
}
int64_t taosGetSelfPthreadId() {
return (int64_t)pthread_self();
}
int64_t taosGetPthreadId(pthread_t thread) {
return (int64_t)thread;
}
void taosResetPthread(pthread_t* thread) {
*thread = NULL;
}
bool taosComparePthread(pthread_t first, pthread_t second) {
return pthread_equal(first, second) ? true : false;
}
int32_t taosGetPId() {
return (int32_t)getpid();
}
int32_t taosGetCurrentAPPName(char* name, int32_t* len) {
char buf[PATH_MAX+1];
buf[0] = '\0';
proc_name(getpid(), buf, sizeof(buf)-1);
buf[PATH_MAX] = '\0';
size_t n = strlen(buf);
if (len) *len = n;
if (name) strcpy(name, buf);
return 0;
}

View File

@ -24,42 +24,134 @@
static void taosGetSystemTimezone() {
// get and set default timezone
SGlobalCfg *cfg_timezone = taosGetConfigOption("timezone");
if (cfg_timezone && cfg_timezone->cfgStatus < TAOS_CFG_CSTATUS_DEFAULT) {
char *tz = getenv("TZ");
if (tz == NULL || strlen(tz) == 0) {
strcpy(tsTimezone, "not configured");
}
else {
strcpy(tsTimezone, tz);
}
cfg_timezone->cfgStatus = TAOS_CFG_CSTATUS_DEFAULT;
uInfo("timezone not configured, use default");
if (cfg_timezone == NULL) return;
if (cfg_timezone->cfgStatus >= TAOS_CFG_CSTATUS_DEFAULT) {
return;
}
/* load time zone string from /etc/localtime */
char buf[4096];
char *tz = NULL; {
int n = readlink("/etc/localtime", buf, sizeof(buf));
if (n<0) {
uError("read /etc/localtime error, reason:%s", strerror(errno));
return;
}
buf[n] = '\0';
for(int i=n-1; i>=0; --i) {
if (buf[i]=='/') {
if (tz) {
tz = buf + i + 1;
break;
}
tz = buf + i + 1;
}
}
if (!tz || 0==strchr(tz, '/')) {
uError("parsing /etc/localtime failed");
return;
}
setenv("TZ", tz, 1);
tzset();
}
/*
* NOTE: do not remove this.
* It enforces setting the correct daylight saving time (DST) flag according
* to the current time.
*/
time_t tx1 = time(NULL);
struct tm tm1;
localtime_r(&tx1, &tm1);
/*
* format example:
*
* Asia/Shanghai (CST, +0800)
* Europe/London (BST, +0100)
*/
snprintf(tsTimezone, TSDB_TIMEZONE_LEN, "%s (%s, %+03ld00)",
tz, tm1.tm_isdst ? tzname[daylight] : tzname[0], -timezone/3600);
// cfg_timezone->cfgStatus = TAOS_CFG_CSTATUS_DEFAULT;
uWarn("timezone not configured, set to system default:%s", tsTimezone);
}
static void taosGetSystemLocale() {
// get and set default locale
/*
* originally from src/os/src/detail/osSysinfo.c
* POSIX format locale string:
* (Language Strings)_(Country/Region Strings).(code_page)
*
* example: en_US.UTF-8, zh_CN.GB18030, zh_CN.UTF-8,
*
* if the user does not specify the locale in taos.cfg, the program uses the default LC_CTYPE as the system locale.
*
* On some CentOS systems, the default locale is "en_US.utf8", which is not a valid code_page
* for libiconv, the library employed to convert strings in this system. The program will automatically use
* UTF-8 as the charset instead.
*
* On a Windows client, the locale string is not in valid POSIX format; the user needs to set the
* correct code_page for libiconv. Usually, the code_page of a Windows system with Simplified Chinese is
* CP936, and CP437 for an English charset.
*
*/
static void taosGetSystemLocale() { // get and set default locale
char sep = '.';
char *locale = NULL;
SGlobalCfg *cfg_locale = taosGetConfigOption("locale");
if (cfg_locale && cfg_locale->cfgStatus < TAOS_CFG_CSTATUS_DEFAULT) {
char *locale = setlocale(LC_CTYPE, "chs");
if (locale != NULL) {
locale = setlocale(LC_CTYPE, "");
if (locale == NULL) {
uError("can't get locale from system, set it to en_US.UTF-8 since error:%d:%s", errno, strerror(errno));
strcpy(tsLocale, "en_US.UTF-8");
} else {
tstrncpy(tsLocale, locale, TSDB_LOCALE_LEN);
cfg_locale->cfgStatus = TAOS_CFG_CSTATUS_DEFAULT;
uInfo("locale not configured, set to default:%s", tsLocale);
uWarn("locale not configured, set to system default:%s", tsLocale);
}
}
/* if user does not specify the charset, extract it from locale */
SGlobalCfg *cfg_charset = taosGetConfigOption("charset");
if (cfg_charset && cfg_charset->cfgStatus < TAOS_CFG_CSTATUS_DEFAULT) {
strcpy(tsCharset, "cp936");
cfg_charset->cfgStatus = TAOS_CFG_CSTATUS_DEFAULT;
uInfo("charset not configured, set to default:%s", tsCharset);
char *str = strrchr(tsLocale, sep);
if (str != NULL) {
str++;
char *revisedCharset = taosCharsetReplace(str);
tstrncpy(tsCharset, revisedCharset, TSDB_LOCALE_LEN);
free(revisedCharset);
uWarn("charset not configured, set to system default:%s", tsCharset);
} else {
strcpy(tsCharset, "UTF-8");
uWarn("can't get locale and charset from system, set it to UTF-8");
}
}
}
void taosPrintOsInfo() {}
void taosPrintOsInfo() {
uInfo(" os pageSize: %" PRId64 "(KB)", tsPageSize / 1024);
// uInfo(" os openMax: %" PRId64, tsOpenMax);
// uInfo(" os streamMax: %" PRId64, tsStreamMax);
uInfo(" os numOfCores: %d", tsNumOfCores);
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
struct utsname buf;
if (uname(&buf)) {
uInfo(" can't fetch os info");
return;
}
uInfo(" os sysname: %s", buf.sysname);
uInfo(" os nodename: %s", buf.nodename);
uInfo(" os release: %s", buf.release);
uInfo(" os version: %s", buf.version);
uInfo(" os machine: %s", buf.machine);
uInfo("==================================");
}
void taosKillSystem() {
uError("function taosKillSystem, exit!");
@ -67,6 +159,22 @@ void taosKillSystem() {
}
void taosGetSystemInfo() {
// taosGetProcInfos();
tsNumOfCores = sysconf(_SC_NPROCESSORS_ONLN);
long physical_pages = sysconf(_SC_PHYS_PAGES);
long page_size = sysconf(_SC_PAGESIZE);
tsTotalMemoryMB = physical_pages * page_size / (1024 * 1024);
tsPageSize = page_size;
// float tmp1, tmp2;
// taosGetSysMemory(&tmp1);
// taosGetProcMemory(&tmp2);
// taosGetDisk();
// taosGetBandSpeed(&tmp1);
// taosGetCpuUsage(&tmp1, &tmp2);
// taosGetProcIO(&tmp1, &tmp2);
taosGetSystemTimezone();
taosGetSystemLocale();
}
@ -121,7 +229,6 @@ int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize) {
char cmdline[1024];
char *taosGetCmdlineByPID(int pid) {
errno = 0;
if (proc_pidpath(pid, cmdline, sizeof(cmdline)) <= 0) {
@ -136,6 +243,7 @@ bool taosGetSystemUid(char *uid) {
uuid_t uuid = {0};
uuid_generate(uuid);
// it is the caller's responsibility to provide enough space for `uid`: 36 chars plus 1 null terminator
uuid_unparse(uuid, uid);
uuid_unparse_lower(uuid, uid);
return true;
}

View File

@ -142,6 +142,8 @@ int64_t taosCopy(char *from, char *to) {
if (bytes < sizeof(buffer)) break;
}
fsync(fidto);
close(fidfrom);
close(fidto);
return size;

View File

@ -62,4 +62,4 @@ int32_t taosGetCurrentAPPName(char *name, int32_t* len) {
return 0;
}
#endif
#endif

View File

@ -75,18 +75,6 @@ char *getpass(const char *prefix) {
return passwd;
}
char *strndup(const char *s, size_t n) {
size_t len = strlen(s);
if (len >= n) {
len = n;
}
char *r = calloc(len + 1, 1);
memcpy(r, s, len);
r[len] = 0;
return r;
}
int twcslen(const wchar_t *wcs) {
int *wstr = (int *)wcs;
if (NULL == wstr) {

View File

@ -38,7 +38,3 @@ int wordexp(char *words, wordexp_t *pwordexp, int flags) {
}
void wordfree(wordexp_t *pwordexp) {}
char *realpath(char *path, char *resolved_path) {
return _fullpath(path, resolved_path, TSDB_FILENAME_LEN - 1);
}

View File

@ -92,6 +92,11 @@ void httpStopSystem() {
tsHttpServer.stop = 1;
#ifdef WINDOWS
closesocket(tsHttpServer.fd);
#elif __APPLE__
if (tsHttpServer.fd!=-1) {
close(tsHttpServer.fd);
tsHttpServer.fd = -1;
}
#else
shutdown(tsHttpServer.fd, SHUT_RD);
#endif

View File

@ -59,25 +59,27 @@ extern "C" {
#define TSDB_FUNC_FIRST_DST 25
#define TSDB_FUNC_LAST_DST 26
#define TSDB_FUNC_INTERP 27
#define TSDB_FUNC_STDDEV_DST 27
#define TSDB_FUNC_INTERP 28
#define TSDB_FUNC_RATE 28
#define TSDB_FUNC_IRATE 29
#define TSDB_FUNC_SUM_RATE 30
#define TSDB_FUNC_SUM_IRATE 31
#define TSDB_FUNC_AVG_RATE 32
#define TSDB_FUNC_AVG_IRATE 33
#define TSDB_FUNC_RATE 29
#define TSDB_FUNC_IRATE 30
#define TSDB_FUNC_SUM_RATE 31
#define TSDB_FUNC_SUM_IRATE 32
#define TSDB_FUNC_AVG_RATE 33
#define TSDB_FUNC_AVG_IRATE 34
#define TSDB_FUNC_TID_TAG 35
#define TSDB_FUNC_HISTOGRAM 36
#define TSDB_FUNC_HLL 37
#define TSDB_FUNC_MODE 38
#define TSDB_FUNC_SAMPLE 39
#define TSDB_FUNC_CEIL 40
#define TSDB_FUNC_FLOOR 41
#define TSDB_FUNC_ROUND 42
#define TSDB_FUNC_MAVG 43
#define TSDB_FUNC_CSUM 44
#define TSDB_FUNC_TID_TAG 34
#define TSDB_FUNC_HISTOGRAM 35
#define TSDB_FUNC_HLL 36
#define TSDB_FUNC_MODE 37
#define TSDB_FUNC_SAMPLE 38
#define TSDB_FUNC_CEIL 39
#define TSDB_FUNC_FLOOR 40
#define TSDB_FUNC_ROUND 41
#define TSDB_FUNC_MAVG 42
#define TSDB_FUNC_CSUM 43
#define TSDB_FUNCSTATE_SO 0x1u // single output
#define TSDB_FUNCSTATE_MO 0x2u // dynamic number of output, not multinumber of output e.g., TOP/BOTTOM
@ -90,15 +92,12 @@ extern "C" {
#define TSDB_BASE_FUNC_SO TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_OF
#define TSDB_BASE_FUNC_MO TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_OF
#define TSDB_FUNCTIONS_NAME_MAX_LENGTH 16
#define TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE 50
#define DATA_SET_FLAG ',' // to denote the output area has data, not null value
#define DATA_SET_FLAG_SIZE sizeof(DATA_SET_FLAG)
#define QUERY_ASC_FORWARD_STEP 1
#define QUERY_DESC_FORWARD_STEP -1
@ -167,8 +166,9 @@ typedef struct SExtTagsInfo {
// sql function runtime context
typedef struct SQLFunctionCtx {
int32_t startOffset;
int32_t startOffset; // todo remove it
int32_t size; // number of rows
void * pInput; //
uint32_t order; // asc|desc
int16_t inputType;
int16_t inputBytes;
@ -177,13 +177,12 @@ typedef struct SQLFunctionCtx {
int16_t outputBytes; // size of results, determined by function and input column data type
int32_t interBufBytes; // internal buffer size
bool hasNull; // null value exist in current block
bool requireNull; // require null in some function
bool requireNull; // require null in some function
bool stableQuery;
int16_t functionId; // function id
void * aInputElemBuf;
char * aOutputBuf; // final result output buffer, point to sdata->data
uint8_t currentStage; // record current running step, default: 0
int64_t nStartQueryTimestamp; // timestamp range of current query when function is executed on a specific data block
int16_t functionId; // function id
char * pOutput; // final result output buffer, point to sdata->data
uint8_t currentStage; // record current running step, default: 0
int64_t startTs; // timestamp range of current query when function is executed on a specific data block
int32_t numOfParams;
tVariant param[4]; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param */
int64_t *ptsList; // corresponding timestamp array list
@ -198,17 +197,16 @@ typedef struct SQLFunctionCtx {
SPoint1 end;
} SQLFunctionCtx;
typedef struct SQLAggFuncElem {
char aName[TSDB_FUNCTIONS_NAME_MAX_LENGTH];
uint8_t nAggIdx; // index of function in aAggs
typedef struct SAggFunctionInfo {
char name[TSDB_FUNCTIONS_NAME_MAX_LENGTH];
uint8_t index; // index of function in aAggs
int8_t stableFuncId; // transfer function for super table query
uint16_t nStatus;
uint16_t status;
bool (*init)(SQLFunctionCtx *pCtx); // setup the execute environment
void (*xFunction)(SQLFunctionCtx *pCtx); // blocks version function
void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); // single-row function version
void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); // single-row function version, todo merge with blockwise function
// some sql function require scan data twice or more, e.g.,stddev, percentile
void (*xNextStep)(SQLFunctionCtx *pCtx);
@ -218,7 +216,7 @@ typedef struct SQLAggFuncElem {
void (*mergeFunc)(SQLFunctionCtx *pCtx);
int32_t (*dataReqFunc)(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId);
} SQLAggFuncElem;
} SAggFunctionInfo;
#define GET_RES_INFO(ctx) ((ctx)->resultInfo)
@ -246,7 +244,7 @@ typedef struct STwaInfo {
} STwaInfo;
/* global sql function array */
extern struct SQLAggFuncElem aAggs[];
extern struct SAggFunctionInfo aAggs[];
extern int32_t functionCompatList[]; // compatible check array list

View File

@ -143,6 +143,11 @@ typedef struct {
int64_t ts;
} SOrderedPrjQueryInfo;
typedef struct {
char* tags;
SArray* pResult; // SArray<SStddevInterResult>
} SInterResult;
typedef struct SQuery {
int16_t numOfCols;
int16_t numOfTags;
@ -152,9 +157,14 @@ typedef struct SQuery {
int16_t precision;
int16_t numOfOutput;
int16_t fillType;
int16_t checkResultBuf; // check if the buffer is full during scan each block
int16_t checkResultBuf; // check if the buffer is full during scan each block
SLimitVal limit;
int32_t rowSize;
int32_t srcRowSize; // todo extract struct
int32_t resultRowSize;
int32_t maxSrcColumnSize;
int32_t tagLen; // tag value length of current query
SSqlGroupbyExpr* pGroupbyExpr;
SExprInfo* pExpr1;
SExprInfo* pExpr2;
@ -184,14 +194,13 @@ typedef struct SQueryRuntimeEnv {
uint16_t scanFlag; // denotes reversed scan of data or not
SFillInfo* pFillInfo;
SResultRowInfo windowResInfo;
STSBuf* pTsBuf;
STSCursor cur;
SQueryCostInfo summary;
void* pQueryHandle;
void* pSecQueryHandle; // another thread for
bool stableQuery; // super table query or not
bool topBotQuery; // TODO used bitwise flag
bool groupbyColumn; // denote if this is a groupby normal column query
bool groupbyColumn; // denote if this is a groupby normal column query
bool hasTagResults; // if there are tag values in final result or not
bool timeWindowInterpo;// if the time window start/end required interpolation
bool queryWindowIdentical; // all query time windows are identical for all tables in one group
@ -205,8 +214,12 @@ typedef struct SQueryRuntimeEnv {
int32_t* rowCellInfoOffset;// offset value for each row result cell info
char** prevRow;
char** nextRow;
SArray* prevResult; // intermediate result, SArray<SInterResult>
STSBuf* pTsBuf; // timestamp filter list
STSCursor cur;
char* tagVal; // tag value of current data block
SArithmeticSupport *sasArray;
} SQueryRuntimeEnv;

View File

@ -68,7 +68,7 @@ typedef struct SPoint {
void * val;
} SPoint;
SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType,
SFillColInfo* pFillCol, void* handle);
@ -78,7 +78,7 @@ void* taosDestroyFillInfo(SFillInfo *pFillInfo);
void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey);
void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput);
void taosFillSetDataBlockFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput);
void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage* pInput);

View File

@ -15,6 +15,8 @@
#ifndef TDENGINE_QUERYUTIL_H
#define TDENGINE_QUERYUTIL_H
#include "tbuffer.h"
#define SET_RES_WINDOW_KEY(_k, _ori, _len, _uid) \
do { \
assert(sizeof(_uid) == sizeof(uint64_t)); \
@ -74,5 +76,13 @@ int32_t getNumOfUsedResultRows(SResultRowPool* p);
bool isPointInterpoQuery(SQuery *pQuery);
typedef struct {
SArray* pResult; // SArray<SResPair>
int32_t colId;
} SStddevInterResult;
void interResToBinary(SBufferWriter* bw, SArray* pRes, int32_t tagLen);
SArray* interResFromBinary(const char* data, int32_t len);
void freeInterResult(void* param);
#endif // TDENGINE_QUERYUTIL_H

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -321,7 +321,7 @@ static int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) {
return pFillInfo->numOfRows - pFillInfo->index;
}
SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType,
SFillColInfo* pCol, void* handle) {
if (fillType == TSDB_FILL_NONE) {
@ -414,7 +414,7 @@ void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey)
}
// copy the data into source data buffer
void taosFillCopyInputDataFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput) {
void taosFillSetDataBlockFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput) {
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
memcpy(pFillInfo->pData[i], pInput[i]->data, pFillInfo->numOfRows * pFillInfo->pFillCol[i].col.bytes);
}

View File

@ -279,7 +279,7 @@ tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) {
pExpr->nSQLOptr = optrType;
pExpr->pLeft = pLeft;
if (pRight == NULL) {
if (pLeft != NULL && pRight == NULL) {
pRight = calloc(1, sizeof(tSQLExpr));
}

View File

@ -19,6 +19,7 @@
#include "qExecutor.h"
#include "qUtil.h"
#include "tbuffer.h"
int32_t getOutputInterResultBufSize(SQuery* pQuery) {
int32_t size = 0;
@ -228,4 +229,97 @@ void* destroyResultRowPool(SResultRowPool* p) {
tfree(p);
return NULL;
}
void interResToBinary(SBufferWriter* bw, SArray* pRes, int32_t tagLen) {
uint32_t numOfGroup = (uint32_t) taosArrayGetSize(pRes);
tbufWriteUint32(bw, numOfGroup);
tbufWriteUint16(bw, tagLen);
for(int32_t i = 0; i < numOfGroup; ++i) {
SInterResult* pOne = taosArrayGet(pRes, i);
if (tagLen > 0) {
tbufWriteBinary(bw, pOne->tags, tagLen);
}
uint32_t numOfCols = (uint32_t) taosArrayGetSize(pOne->pResult);
tbufWriteUint32(bw, numOfCols);
for(int32_t j = 0; j < numOfCols; ++j) {
SStddevInterResult* p = taosArrayGet(pOne->pResult, j);
uint32_t numOfRows = (uint32_t) taosArrayGetSize(p->pResult);
tbufWriteUint16(bw, p->colId);
tbufWriteUint32(bw, numOfRows);
for(int32_t k = 0; k < numOfRows; ++k) {
SResPair v = *(SResPair*) taosArrayGet(p->pResult, k);
tbufWriteDouble(bw, v.avg);
tbufWriteInt64(bw, v.key);
}
}
}
}
SArray* interResFromBinary(const char* data, int32_t len) {
SBufferReader br = tbufInitReader(data, len, false);
uint32_t numOfGroup = tbufReadUint32(&br);
uint16_t tagLen = tbufReadUint16(&br);
char* tag = NULL;
if (tagLen > 0) {
tag = calloc(1, tagLen);
}
SArray* pResult = taosArrayInit(4, sizeof(SInterResult));
for(int32_t i = 0; i < numOfGroup; ++i) {
if (tagLen > 0) {
memset(tag, 0, tagLen);
tbufReadToBinary(&br, tag, tagLen);
}
uint32_t numOfCols = tbufReadUint32(&br);
SArray* p = taosArrayInit(numOfCols, sizeof(SStddevInterResult));
for(int32_t j = 0; j < numOfCols; ++j) {
int16_t colId = tbufReadUint16(&br);
int32_t numOfRows = tbufReadUint32(&br);
SStddevInterResult interRes = {.colId = colId, .pResult = taosArrayInit(4, sizeof(struct SResPair)),};
for(int32_t k = 0; k < numOfRows; ++k) {
SResPair px = {0};
px.avg = tbufReadDouble(&br);
px.key = tbufReadInt64(&br);
taosArrayPush(interRes.pResult, &px);
}
taosArrayPush(p, &interRes);
}
char* p1 = NULL;
if (tagLen > 0) {
p1 = malloc(tagLen);
memcpy(p1, tag, tagLen);
}
SInterResult d = {.pResult = p, .tags = p1,};
taosArrayPush(pResult, &d);
}
tfree(tag);
return pResult;
}
void freeInterResult(void* param) {
SInterResult* pResult = (SInterResult*) param;
tfree(pResult->tags);
int32_t numOfCols = (int32_t) taosArrayGetSize(pResult->pResult);
for(int32_t i = 0; i < numOfCols; ++i) {
SStddevInterResult *p = taosArrayGet(pResult->pResult, i);
taosArrayDestroy(p->pResult);
}
taosArrayDestroy(pResult->pResult);
}
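
For reference, the byte layout that interResToBinary emits (and interResFromBinary parses back) can be sketched as below; the field names are illustrative and simply mirror the writer loop above. freeInterResult has the void(*)(void*) shape expected by taosArrayDestroyEx, so it is presumably meant to serve as the destructor of the outer SArray of SInterResult.

/*
 * Intermediate stddev result blob (sketch, inferred from interResToBinary):
 *
 *   uint32  numOfGroup                    -- number of SInterResult entries
 *   uint16  tagLen                        -- per-group tag value length
 *   repeated numOfGroup times:
 *     char[tagLen] tags                   -- present only when tagLen > 0
 *     uint32       numOfCols              -- SStddevInterResult entries in the group
 *     repeated numOfCols times:
 *       uint16  colId
 *       uint32  numOfRows                 -- SResPair entries for this column
 *       repeated numOfRows times:
 *         double  avg
 *         int64   key
 */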

View File

@ -17,7 +17,7 @@
#include "tsocket.h"
#include "tutil.h"
#include "taosdef.h"
#include "taoserror.h"
#include "taoserror.h"
#include "rpcLog.h"
#include "rpcHead.h"
#include "rpcTcp.h"
@ -81,7 +81,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
pServerObj = (SServerObj *)calloc(sizeof(SServerObj), 1);
if (pServerObj == NULL) {
tError("TCP:%s no enough memory", label);
terrno = TAOS_SYSTEM_ERROR(errno);
terrno = TAOS_SYSTEM_ERROR(errno);
return NULL;
}
@ -95,7 +95,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
pServerObj->pThreadObj = (SThreadObj **)calloc(sizeof(SThreadObj *), numOfThreads);
if (pServerObj->pThreadObj == NULL) {
tError("TCP:%s no enough memory", label);
terrno = TAOS_SYSTEM_ERROR(errno);
terrno = TAOS_SYSTEM_ERROR(errno);
free(pServerObj);
return NULL;
}
@ -105,18 +105,18 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
// initialize parameters in case it may encounter error later
// initialize parameters in case it may encounter error later
for (int i = 0; i < numOfThreads; ++i) {
pThreadObj = (SThreadObj *)calloc(sizeof(SThreadObj), 1);
if (pThreadObj == NULL) {
tError("TCP:%s no enough memory", label);
terrno = TAOS_SYSTEM_ERROR(errno);
terrno = TAOS_SYSTEM_ERROR(errno);
for (int j=0; j<i; ++j) free(pServerObj->pThreadObj[j]);
free(pServerObj->pThreadObj);
free(pServerObj);
return NULL;
}
pServerObj->pThreadObj[i] = pThreadObj;
pThreadObj->pollFd = -1;
taosResetPthread(&pThreadObj->thread);
@ -152,9 +152,9 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
}
pServerObj->fd = taosOpenTcpServerSocket(pServerObj->ip, pServerObj->port);
if (pServerObj->fd < 0) code = -1;
if (pServerObj->fd < 0) code = -1;
if (code == 0) {
if (code == 0) {
code = pthread_create(&pServerObj->thread, &thattr, taosAcceptTcpConnection, (void *)pServerObj);
if (code != 0) {
tError("%s failed to create TCP accept thread(%s)", label, strerror(code));
@ -162,7 +162,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
}
if (code != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
terrno = TAOS_SYSTEM_ERROR(errno);
taosCleanUpTcpServer(pServerObj);
pServerObj = NULL;
} else {
@ -175,8 +175,8 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
static void taosStopTcpThread(SThreadObj* pThreadObj) {
if (pThreadObj == NULL) { return;}
// save thread into local variable and signal thread to stop
pthread_t thread = pThreadObj->thread;
// save thread into local variable and signal thread to stop
pthread_t thread = pThreadObj->thread;
if (!taosCheckPthreadValid(thread)) {
return;
}
@ -197,6 +197,11 @@ void taosStopTcpServer(void *handle) {
if (pServerObj->fd >= 0) {
#ifdef WINDOWS
closesocket(pServerObj->fd);
#elif defined(__APPLE__)
if (pServerObj->fd!=-1) {
close(pServerObj->fd);
pServerObj->fd = -1;
}
#else
shutdown(pServerObj->fd, SHUT_RD);
#endif
@ -265,7 +270,7 @@ static void *taosAcceptTcpConnection(void *arg) {
taosInetNtoa(caddr.sin_addr), htons(caddr.sin_port));
continue;
}
// pick up the thread to handle this connection
pThreadObj = pServerObj->pThreadObj[threadId];
@ -274,13 +279,13 @@ static void *taosAcceptTcpConnection(void *arg) {
if (pFdObj) {
pFdObj->ip = caddr.sin_addr.s_addr;
pFdObj->port = htons(caddr.sin_port);
tDebug("%s new TCP connection from %s:%hu, fd:%d FD:%p numOfFds:%d", pServerObj->label,
tDebug("%s new TCP connection from %s:%hu, fd:%d FD:%p numOfFds:%d", pServerObj->label,
taosInetNtoa(caddr.sin_addr), pFdObj->port, connFd, pFdObj, pThreadObj->numOfFds);
} else {
taosCloseSocket(connFd);
tError("%s failed to malloc FdObj(%s) for connection from:%s:%hu", pServerObj->label, strerror(errno),
taosInetNtoa(caddr.sin_addr), htons(caddr.sin_port));
}
}
// pick up next thread for next connection
threadId++;
@ -295,17 +300,17 @@ void *taosInitTcpClient(uint32_t ip, uint16_t port, char *label, int numOfThread
SClientObj *pClientObj = (SClientObj *)calloc(1, sizeof(SClientObj));
if (pClientObj == NULL) {
tError("TCP:%s no enough memory", label);
terrno = TAOS_SYSTEM_ERROR(errno);
terrno = TAOS_SYSTEM_ERROR(errno);
return NULL;
}
}
tstrncpy(pClientObj->label, label, sizeof(pClientObj->label));
pClientObj->numOfThreads = numOfThreads;
pClientObj->pThreadObj = (SThreadObj **)calloc(numOfThreads, sizeof(SThreadObj*));
pClientObj->pThreadObj = (SThreadObj **)calloc(numOfThreads, sizeof(SThreadObj*));
if (pClientObj->pThreadObj == NULL) {
tError("TCP:%s no enough memory", label);
tfree(pClientObj);
terrno = TAOS_SYSTEM_ERROR(errno);
terrno = TAOS_SYSTEM_ERROR(errno);
}
int code = 0;
@ -317,7 +322,7 @@ void *taosInitTcpClient(uint32_t ip, uint16_t port, char *label, int numOfThread
SThreadObj *pThreadObj = (SThreadObj *)calloc(1, sizeof(SThreadObj));
if (pThreadObj == NULL) {
tError("TCP:%s no enough memory", label);
terrno = TAOS_SYSTEM_ERROR(errno);
terrno = TAOS_SYSTEM_ERROR(errno);
for (int j=0; j<i; ++j) free(pClientObj->pThreadObj[j]);
free(pClientObj);
pthread_attr_destroy(&thattr);
@ -356,11 +361,11 @@ void *taosInitTcpClient(uint32_t ip, uint16_t port, char *label, int numOfThread
pThreadObj->threadId = i;
}
if (code != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
terrno = TAOS_SYSTEM_ERROR(errno);
taosCleanUpTcpClient(pClientObj);
pClientObj = NULL;
}
return pClientObj;
}
return pClientObj;
}
void taosStopTcpClient(void *chandle) {
@ -374,20 +379,20 @@ void taosStopTcpClient(void *chandle) {
void taosCleanUpTcpClient(void *chandle) {
SClientObj *pClientObj = chandle;
if (pClientObj == NULL) return;
for (int i = 0; i < pClientObj->numOfThreads; ++i) {
for (int i = 0; i < pClientObj->numOfThreads; ++i) {
SThreadObj *pThreadObj= pClientObj->pThreadObj[i];
taosStopTcpThread(pThreadObj);
taosStopTcpThread(pThreadObj);
}
tDebug("%s TCP client is cleaned up", pClientObj->label);
tfree(pClientObj->pThreadObj);
tfree(pClientObj);
tfree(pClientObj);
}
void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uint16_t port) {
SClientObj * pClientObj = shandle;
int32_t index = atomic_load_32(&pClientObj->index) % pClientObj->numOfThreads;
atomic_store_32(&pClientObj->index, index + 1);
atomic_store_32(&pClientObj->index, index + 1);
SThreadObj *pThreadObj = pClientObj->pThreadObj[index];
SOCKET fd = taosOpenTcpClientSocket(ip, port, pThreadObj->ip);
@ -402,12 +407,12 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin
}
SFdObj *pFdObj = taosMallocFdObj(pThreadObj, fd);
if (pFdObj) {
pFdObj->thandle = thandle;
pFdObj->port = port;
pFdObj->ip = ip;
tDebug("%s %p TCP connection to 0x%x:%hu is created, localPort:%hu FD:%p numOfFds:%d",
tDebug("%s %p TCP connection to 0x%x:%hu is created, localPort:%hu FD:%p numOfFds:%d",
pThreadObj->label, thandle, ip, port, localPort, pFdObj, pThreadObj->numOfFds);
} else {
tError("%s failed to malloc client FdObj(%s)", pThreadObj->label, strerror(errno));
@ -422,7 +427,7 @@ void taosCloseTcpConnection(void *chandle) {
if (pFdObj == NULL || pFdObj->signature != pFdObj) return;
SThreadObj *pThreadObj = pFdObj->pThreadObj;
tDebug("%s %p TCP connection will be closed, FD:%p", pThreadObj->label, pFdObj->thandle, pFdObj);
tDebug("%s %p TCP connection will be closed, FD:%p", pThreadObj->label, pFdObj->thandle, pFdObj);
// pFdObj->thandle = NULL;
pFdObj->closedByApp = 1;
@ -435,7 +440,7 @@ int taosSendTcpData(uint32_t ip, uint16_t port, void *data, int len, void *chand
SThreadObj *pThreadObj = pFdObj->pThreadObj;
int ret = taosWriteMsg(pFdObj->fd, data, len);
tTrace("%s %p TCP data is sent, FD:%p fd:%d bytes:%d", pThreadObj->label, pFdObj->thandle, pFdObj, pFdObj->fd, ret);
tTrace("%s %p TCP data is sent, FD:%p fd:%d bytes:%d", pThreadObj->label, pFdObj->thandle, pFdObj, pFdObj->fd, ret);
return ret;
}
@ -458,7 +463,7 @@ static void taosReportBrokenLink(SFdObj *pFdObj) {
recvInfo.chandle = NULL;
recvInfo.connType = RPC_CONN_TCP;
(*(pThreadObj->processData))(&recvInfo);
}
}
taosFreeFdObj(pFdObj);
}
@ -473,7 +478,7 @@ static int taosReadTcpData(SFdObj *pFdObj, SRecvInfo *pInfo) {
headLen = taosReadMsg(pFdObj->fd, &rpcHead, sizeof(SRpcHead));
if (headLen != sizeof(SRpcHead)) {
tDebug("%s %p read error, FD:%p headLen:%d", pThreadObj->label, pFdObj->thandle, pFdObj, headLen);
return -1;
return -1;
}
msgLen = (int32_t)htonl((uint32_t)rpcHead.msgLen);
@ -491,14 +496,14 @@ static int taosReadTcpData(SFdObj *pFdObj, SRecvInfo *pInfo) {
retLen = taosReadMsg(pFdObj->fd, msg + headLen, leftLen);
if (leftLen != retLen) {
tError("%s %p read error, leftLen:%d retLen:%d FD:%p",
tError("%s %p read error, leftLen:%d retLen:%d FD:%p",
pThreadObj->label, pFdObj->thandle, leftLen, retLen, pFdObj);
free(buffer);
return -1;
}
memcpy(msg, &rpcHead, sizeof(SRpcHead));
pInfo->msg = msg;
pInfo->msgLen = msgLen;
pInfo->ip = pFdObj->ip;
@ -509,7 +514,7 @@ static int taosReadTcpData(SFdObj *pFdObj, SRecvInfo *pInfo) {
pInfo->connType = RPC_CONN_TCP;
if (pFdObj->closedByApp) {
free(buffer);
free(buffer);
return -1;
}
@ -557,7 +562,7 @@ static void *taosProcessTcpData(void *param) {
}
if (taosReadTcpData(pFdObj, &recvInfo) < 0) {
shutdown(pFdObj->fd, SHUT_WR);
shutdown(pFdObj->fd, SHUT_WR);
continue;
}
@ -565,7 +570,7 @@ static void *taosProcessTcpData(void *param) {
if (pFdObj->thandle == NULL) taosFreeFdObj(pFdObj);
}
if (pThreadObj->stop) break;
if (pThreadObj->stop) break;
}
if (pThreadObj->pollFd >=0) {
@ -603,7 +608,7 @@ static SFdObj *taosMallocFdObj(SThreadObj *pThreadObj, SOCKET fd) {
event.data.ptr = pFdObj;
if (epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) {
tfree(pFdObj);
terrno = TAOS_SYSTEM_ERROR(errno);
terrno = TAOS_SYSTEM_ERROR(errno);
return NULL;
}
@ -637,7 +642,7 @@ static void taosFreeFdObj(SFdObj *pFdObj) {
pThreadObj->numOfFds--;
if (pThreadObj->numOfFds < 0)
tError("%s %p TCP thread:%d, number of FDs is negative!!!",
tError("%s %p TCP thread:%d, number of FDs is negative!!!",
pThreadObj->label, pFdObj->thandle, pThreadObj->threadId);
if (pFdObj->prev) {
@ -652,7 +657,7 @@ static void taosFreeFdObj(SFdObj *pFdObj) {
pthread_mutex_unlock(&pThreadObj->mutex);
tDebug("%s %p TCP connection is closed, FD:%p fd:%d numOfFds:%d",
tDebug("%s %p TCP connection is closed, FD:%p fd:%d numOfFds:%d",
pThreadObj->label, pFdObj->thandle, pFdObj, pFdObj->fd, pThreadObj->numOfFds);
tfree(pFdObj);

View File

@ -103,6 +103,11 @@ void syncCloseTcpThreadPool(void *param) {
#ifdef WINDOWS
closesocket(pPool->acceptFd);
#elif defined(__APPLE__)
if (pPool->acceptFd!=-1) {
close(pPool->acceptFd);
pPool->acceptFd = -1;
}
#else
shutdown(pPool->acceptFd, SHUT_RD);
#endif

View File

@ -593,7 +593,7 @@ void taosGetDisk() {
tsAvailLogDirGB = (float)(diskSize.avail / unit);
}
if (taosGetDiskSize("/tmp", &diskSize) == 0) {
if (taosGetDiskSize(tsTempDir, &diskSize) == 0) {
tsTotalTmpDirGB = (float)(diskSize.tsize / unit);
tsAvailTmpDirectorySpace = (float)(diskSize.avail / unit);
}

View File

@ -20,6 +20,8 @@
#define TSDB_FILE_DELIMITER 0xF00AFA0F
#define TSDB_FILE_INIT_MAGIC 0xFFFFFFFF
#define TSDB_IVLD_FID INT_MIN
#define TSDB_FILE_STATE_OK 0
#define TSDB_FILE_STATE_BAD 1
#define TSDB_FILE_INFO(tf) (&((tf)->info))
#define TSDB_FILE_F(tf) (&((tf)->f))
@ -31,6 +33,10 @@
#define TSDB_FILE_LEVEL(tf) TFILE_LEVEL(TSDB_FILE_F(tf))
#define TSDB_FILE_ID(tf) TFILE_ID(TSDB_FILE_F(tf))
#define TSDB_FILE_FSYNC(tf) fsync(TSDB_FILE_FD(tf))
#define TSDB_FILE_STATE(tf) ((tf)->state)
#define TSDB_FILE_SET_STATE(tf, s) ((tf)->state = (s))
#define TSDB_FILE_IS_OK(tf) (TSDB_FILE_STATE(tf) == TSDB_FILE_STATE_OK)
#define TSDB_FILE_IS_BAD(tf) (TSDB_FILE_STATE(tf) == TSDB_FILE_STATE_BAD)
typedef enum { TSDB_FILE_HEAD = 0, TSDB_FILE_DATA, TSDB_FILE_LAST, TSDB_FILE_MAX, TSDB_FILE_META } TSDB_FILE_T;
@ -47,10 +53,11 @@ typedef struct {
SMFInfo info;
TFILE f;
int fd;
uint8_t state;
} SMFile;
void tsdbInitMFile(SMFile* pMFile, SDiskID did, int vid, uint32_t ver);
void tsdbInitMFileEx(SMFile* pMFile, SMFile* pOMFile);
void tsdbInitMFileEx(SMFile* pMFile, const SMFile* pOMFile);
int tsdbEncodeSMFile(void** buf, SMFile* pMFile);
void* tsdbDecodeSMFile(void* buf, SMFile* pMFile);
int tsdbEncodeSMFileEx(void** buf, SMFile* pMFile);
@ -165,6 +172,7 @@ typedef struct {
SDFInfo info;
TFILE f;
int fd;
uint8_t state;
} SDFile;
void tsdbInitDFile(SDFile* pDFile, SDiskID did, int vid, int fid, uint32_t ver, TSDB_FILE_T ftype);
@ -346,4 +354,14 @@ static FORCE_INLINE void tsdbGetFidKeyRange(int days, int8_t precision, int fid,
*maxKey = *minKey + days * tsMsPerDay[precision] - 1;
}
static FORCE_INLINE bool tsdbFSetIsOk(SDFileSet* pSet) {
for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) {
if (TSDB_FILE_IS_BAD(TSDB_DFILE_IN_SET(pSet, ftype))) {
return false;
}
}
return true;
}
#endif /* _TS_TSDB_FILE_H_ */

View File

@ -52,7 +52,7 @@ static int tsdbCommitMeta(STsdbRepo *pRepo);
static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen);
static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid);
static int tsdbCommitTSData(STsdbRepo *pRepo);
static int tsdbStartCommit(STsdbRepo *pRepo);
static void tsdbStartCommit(STsdbRepo *pRepo);
static void tsdbEndCommit(STsdbRepo *pRepo, int eno);
static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid);
static int tsdbCreateCommitIters(SCommitH *pCommith);
@ -84,10 +84,7 @@ static int tsdbApplyRtn(STsdbRepo *pRepo);
static int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn);
void *tsdbCommitData(STsdbRepo *pRepo) {
if (tsdbStartCommit(pRepo) < 0) {
tsdbError("vgId:%d failed to commit data while startting to commit since %s", REPO_ID(pRepo), tstrerror(terrno));
goto _err;
}
tsdbStartCommit(pRepo);
// Commit to update meta file
if (tsdbCommitMeta(pRepo) < 0) {
@ -138,11 +135,15 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) {
tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)));
if (tsdbCreateMFile(&mf, true) < 0) {
tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
tsdbInfo("vgId:%d meta file %s is created to commit", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(&mf));
} else {
tsdbInitMFileEx(&mf, pOMFile);
if (tsdbOpenMFile(&mf, O_WRONLY) < 0) {
tsdbError("vgId:%d failed to open META file since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
}
@ -154,12 +155,20 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) {
if (pAct->act == TSDB_UPDATE_META) {
pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj));
if (tsdbUpdateMetaRecord(pfs, &mf, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) {
tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
tstrerror(terrno));
tsdbCloseMFile(&mf);
tsdbApplyMFileChange(&mf, pOMFile);
// TODO: need to reload metaCache
return -1;
}
} else if (pAct->act == TSDB_DROP_META) {
if (tsdbDropMetaRecord(pfs, &mf, pAct->uid) < 0) {
tsdbError("vgId:%d failed to drop META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
tstrerror(terrno));
tsdbCloseMFile(&mf);
tsdbApplyMFileChange(&mf, pOMFile);
// TODO: need to reload metaCache
return -1;
}
} else {
@ -168,6 +177,9 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) {
}
if (tsdbUpdateMFileHeader(&mf) < 0) {
tsdbError("vgId:%d failed to update META file header since %s, revert it", REPO_ID(pRepo), tstrerror(terrno));
tsdbApplyMFileChange(&mf, pOMFile);
// TODO: need to reload metaCache
return -1;
}
@ -208,6 +220,8 @@ void tsdbGetRtnSnap(STsdbRepo *pRepo, SRtn *pRtn) {
pRtn->minFid = (int)(TSDB_KEY_FID(minKey, pCfg->daysPerFile, pCfg->precision));
pRtn->midFid = (int)(TSDB_KEY_FID(midKey, pCfg->daysPerFile, pCfg->precision));
pRtn->maxFid = (int)(TSDB_KEY_FID(maxKey, pCfg->daysPerFile, pCfg->precision));
tsdbDebug("vgId:%d now:%" PRId64 " minKey:%" PRId64 " minFid:%d, midFid:%d, maxFid:%d", REPO_ID(pRepo), now, minKey,
pRtn->minFid, pRtn->midFid, pRtn->maxFid);
}
static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen) {
@ -238,7 +252,7 @@ static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void
tsdbUpdateMFileMagic(pMFile, POINTER_SHIFT(cont, contLen - sizeof(TSCKSUM)));
SKVRecord *pRecord = taosHashGet(pfs->metaCache, (void *)&uid, sizeof(uid));
if (pRecord != NULL) {
pMFile->info.tombSize += pRecord->size;
pMFile->info.tombSize += (pRecord->size + sizeof(SKVRecord));
} else {
pMFile->info.nRecords++;
}
@ -253,7 +267,7 @@ static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid) {
SKVRecord *pRecord = taosHashGet(pfs->metaCache, (void *)(&uid), sizeof(uid));
if (pRecord == NULL) {
tsdbError("failed to drop KV store record with key %" PRIu64 " since not find", uid);
tsdbError("failed to drop META record with key %" PRIu64 " since not find", uid);
return -1;
}
@ -264,11 +278,11 @@ static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid) {
void *pBuf = buf;
tsdbEncodeKVRecord(&pBuf, &rInfo);
if (tsdbAppendMFile(pMFile, buf, POINTER_DISTANCE(pBuf, buf), NULL) < 0) {
if (tsdbAppendMFile(pMFile, buf, sizeof(SKVRecord), NULL) < 0) {
return -1;
}
pMFile->info.magic = taosCalcChecksum(pMFile->info.magic, (uint8_t *)buf, (uint32_t)POINTER_DISTANCE(pBuf, buf));
pMFile->info.magic = taosCalcChecksum(pMFile->info.magic, (uint8_t *)buf, sizeof(SKVRecord));
pMFile->info.nDels++;
pMFile->info.nRecords--;
pMFile->info.tombSize += (rInfo.size + sizeof(SKVRecord) * 2);
@ -302,7 +316,12 @@ static int tsdbCommitTSData(STsdbRepo *pRepo) {
// Skip expired memory data and expired FSET
tsdbSeekCommitIter(&commith, commith.rtn.minKey);
while ((pSet = tsdbFSIterNext(&(commith.fsIter)))) {
if (pSet->fid >= commith.rtn.minFid) break;
if (pSet->fid < commith.rtn.minFid) {
tsdbInfo("vgId:%d FSET %d on level %d disk id %d expires, remove it", REPO_ID(pRepo), pSet->fid,
TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet));
} else {
break;
}
}
// Loop to commit to each file
@ -349,7 +368,7 @@ static int tsdbCommitTSData(STsdbRepo *pRepo) {
return 0;
}
static int tsdbStartCommit(STsdbRepo *pRepo) {
static void tsdbStartCommit(STsdbRepo *pRepo) {
SMemTable *pMem = pRepo->imem;
ASSERT(pMem->numOfRows > 0 || listNEles(pMem->actList) > 0);
@ -360,7 +379,6 @@ static int tsdbStartCommit(STsdbRepo *pRepo) {
tsdbStartFSTxn(pRepo, pMem->pointsAdd, pMem->storageAdd);
pRepo->code = TSDB_CODE_SUCCESS;
return 0;
}
static void tsdbEndCommit(STsdbRepo *pRepo, int eno) {
@ -413,14 +431,18 @@ static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) {
if (pIter->pTable == NULL) continue;
if (tsdbCommitToTable(pCommith, tid) < 0) {
// TODO: revert the file change
tsdbCloseCommitFile(pCommith, true);
// revert the file change
tsdbApplyDFileSetChange(TSDB_COMMIT_WRITE_FSET(pCommith), pSet);
return -1;
}
}
if (tsdbWriteBlockIdx(pCommith) < 0) {
tsdbError("vgId:%d failed to write SBlockIdx part to FSET %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
tsdbCloseCommitFile(pCommith, true);
// revert the file change
tsdbApplyDFileSetChange(TSDB_COMMIT_WRITE_FSET(pCommith), pSet);
return -1;
}
@ -674,7 +696,11 @@ static int tsdbCommitToTable(SCommitH *pCommith, int tid) {
TSDB_RUNLOCK_TABLE(pIter->pTable);
if (tsdbWriteBlockInfo(pCommith) < 0) return -1;
if (tsdbWriteBlockInfo(pCommith) < 0) {
tsdbError("vgId:%d failed to write SBlockInfo part into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith),
TSDB_FILE_FULL_NAME(TSDB_COMMIT_HEAD_FILE(pCommith)), tstrerror(terrno));
return -1;
}
return 0;
}
@ -926,6 +952,8 @@ static int tsdbWriteBlockIdx(SCommitH *pCommih) {
if (nidx <= 0) {
// All data are deleted
pHeadf->info.offset = 0;
pHeadf->info.len = 0;
return 0;
}
@ -1227,7 +1255,6 @@ static void tsdbResetCommitFile(SCommitH *pCommith) {
}
static void tsdbResetCommitTable(SCommitH *pCommith) {
tdResetDataCols(pCommith->pDataCols);
taosArrayClear(pCommith->aSubBlk);
taosArrayClear(pCommith->aSupBlk);
pCommith->pTable = NULL;
@ -1256,6 +1283,9 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
tsdbCloseAndUnsetFSet(&(pCommith->readh));
return -1;
}
tsdbDebug("vgId:%d FSET %d at level %d disk id %d is opened to read to commit", REPO_ID(pRepo), TSDB_FSET_FID(pSet),
TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet));
} else {
pCommith->isRFileSet = false;
}
@ -1266,6 +1296,8 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
tsdbInitDFileSet(pWSet, did, REPO_ID(pRepo), fid, FS_TXN_VERSION(REPO_FS(pRepo)));
if (tsdbCreateDFileSet(pWSet, true) < 0) {
tsdbError("vgId:%d failed to create FSET %d at level %d disk id %d since %s", REPO_ID(pRepo),
TSDB_FSET_FID(pWSet), TSDB_FSET_LEVEL(pWSet), TSDB_FSET_ID(pWSet), tstrerror(terrno));
if (pCommith->isRFileSet) {
tsdbCloseAndUnsetFSet(&(pCommith->readh));
}
@ -1274,6 +1306,9 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
pCommith->isDFileSame = false;
pCommith->isLFileSame = false;
tsdbDebug("vgId:%d FSET %d at level %d disk id %d is created to commit", REPO_ID(pRepo), TSDB_FSET_FID(pWSet),
TSDB_FSET_LEVEL(pWSet), TSDB_FSET_ID(pWSet));
} else {
did.level = TSDB_FSET_LEVEL(pSet);
did.id = TSDB_FSET_ID(pSet);
@ -1285,6 +1320,9 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
SDFile *pWHeadf = TSDB_COMMIT_HEAD_FILE(pCommith);
tsdbInitDFile(pWHeadf, did, REPO_ID(pRepo), fid, FS_TXN_VERSION(REPO_FS(pRepo)), TSDB_FILE_HEAD);
if (tsdbCreateDFile(pWHeadf, true) < 0) {
tsdbError("vgId:%d failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWHeadf),
tstrerror(terrno));
if (pCommith->isRFileSet) {
tsdbCloseAndUnsetFSet(&(pCommith->readh));
return -1;
@ -1296,7 +1334,10 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
SDFile *pWDataf = TSDB_COMMIT_DATA_FILE(pCommith);
tsdbInitDFileEx(pWDataf, pRDataf);
if (tsdbOpenDFile(pWDataf, O_WRONLY) < 0) {
tsdbCloseDFile(pWHeadf);
tsdbError("vgId:%d failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWDataf),
tstrerror(terrno));
tsdbCloseDFileSet(pWSet);
tsdbRemoveDFile(pWHeadf);
if (pCommith->isRFileSet) {
tsdbCloseAndUnsetFSet(&(pCommith->readh));
@ -1313,6 +1354,9 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
pCommith->isLFileSame = true;
if (tsdbOpenDFile(pWLastf, O_WRONLY) < 0) {
tsdbError("vgId:%d failed to open file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWLastf),
tstrerror(terrno));
tsdbCloseDFileSet(pWSet);
tsdbRemoveDFile(pWHeadf);
if (pCommith->isRFileSet) {
@ -1325,6 +1369,9 @@ static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid
pCommith->isLFileSame = false;
if (tsdbCreateDFile(pWLastf, true) < 0) {
tsdbError("vgId:%d failed to create file %s to commit since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pWLastf),
tstrerror(terrno));
tsdbCloseDFileSet(pWSet);
tsdbRemoveDFile(pWHeadf);
if (pCommith->isRFileSet) {
@ -1360,7 +1407,7 @@ static bool tsdbCanAddSubBlock(SCommitH *pCommith, SBlock *pBlock, SMergeInfo *p
if (pBlock->last) {
if (pCommith->isLFileSame && mergeRows < pCfg->minRowsPerFileBlock) return true;
} else {
if (mergeRows < pCfg->maxRowsPerFileBlock) return true;
if (pCommith->isDFileSame && mergeRows <= pCfg->maxRowsPerFileBlock) return true;
}
}
@ -1373,12 +1420,16 @@ static int tsdbApplyRtn(STsdbRepo *pRepo) {
STsdbFS * pfs = REPO_FS(pRepo);
SDFileSet *pSet;
// Get retentioni snapshot
// Get retention snapshot
tsdbGetRtnSnap(pRepo, &rtn);
tsdbFSIterInit(&fsiter, pfs, TSDB_FS_ITER_FORWARD);
while ((pSet = tsdbFSIterNext(&fsiter))) {
if (pSet->fid < rtn.minFid) continue;
if (pSet->fid < rtn.minFid) {
tsdbInfo("vgId:%d FSET %d at level %d disk id %d expires, remove it", REPO_ID(pRepo), pSet->fid,
TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet));
continue;
}
if (tsdbApplyRtnOnFSet(pRepo, pSet, &rtn) < 0) {
return -1;
@ -1392,10 +1443,13 @@ static int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn) {
SDiskID did;
SDFileSet nSet;
STsdbFS * pfs = REPO_FS(pRepo);
int level;
ASSERT(pSet->fid >= pRtn->minFid);
tfsAllocDisk(tsdbGetFidLevel(pSet->fid, pRtn), &(did.level), &(did.id));
level = tsdbGetFidLevel(pSet->fid, pRtn);
tfsAllocDisk(level, &(did.level), &(did.id));
if (did.level == TFS_UNDECIDED_LEVEL) {
terrno = TSDB_CODE_TDB_NO_AVAIL_DISK;
return -1;
@ -1406,12 +1460,17 @@ static int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn) {
tsdbInitDFileSet(&nSet, did, REPO_ID(pRepo), pSet->fid, FS_TXN_VERSION(pfs));
if (tsdbCopyDFileSet(pSet, &nSet) < 0) {
tsdbError("vgId:%d failed to copy FSET %d from level %d to level %d since %s", REPO_ID(pRepo), pSet->fid,
TSDB_FSET_LEVEL(pSet), did.level, tstrerror(terrno));
return -1;
}
if (tsdbUpdateDFileSet(pfs, &nSet) < 0) {
return -1;
}
tsdbInfo("vgId:%d FSET %d is copied from level %d disk id %d to level %d disk id %d", REPO_ID(pRepo), pSet->fid,
TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet), did.level, did.id);
} else {
// On a correct level
if (tsdbUpdateDFileSet(pfs, pSet) < 0) {

View File

@ -701,6 +701,8 @@ int tsdbLoadMetaCache(STsdbRepo *pRepo, bool recoverMeta) {
int64_t maxBufSize = 0;
SMFInfo minfo;
taosHashEmpty(pfs->metaCache);
// No meta file, just return
if (pfs->cstatus->pmf == NULL) return 0;
@ -718,6 +720,12 @@ int tsdbLoadMetaCache(STsdbRepo *pRepo, bool recoverMeta) {
while (true) {
int64_t tsize = tsdbReadMFile(pMFile, tbuf, sizeof(SKVRecord));
if (tsize == 0) break;
if (tsize < 0) {
tsdbError("vgId:%d failed to read META file since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
if (tsize < sizeof(SKVRecord)) {
tsdbError("vgId:%d failed to read %" PRIzu " bytes from file %s", REPO_ID(pRepo), sizeof(SKVRecord),
TSDB_FILE_FULL_NAME(pMFile));
@ -840,7 +848,7 @@ static int tsdbScanRootDir(STsdbRepo *pRepo) {
continue;
}
if (tfsIsSameFile(pf, &(pfs->cstatus->pmf->f))) {
if (pfs->cstatus->pmf && tfsIsSameFile(pf, &(pfs->cstatus->pmf->f))) {
continue;
}

View File

@ -33,7 +33,7 @@ static int tsdbRollBackDFile(SDFile *pDFile);
void tsdbInitMFile(SMFile *pMFile, SDiskID did, int vid, uint32_t ver) {
char fname[TSDB_FILENAME_LEN];
TSDB_FILE_SET_CLOSED(pMFile);
TSDB_FILE_SET_STATE(pMFile, TSDB_FILE_STATE_OK);
memset(&(pMFile->info), 0, sizeof(pMFile->info));
pMFile->info.magic = TSDB_FILE_INIT_MAGIC;
@ -42,7 +42,7 @@ void tsdbInitMFile(SMFile *pMFile, SDiskID did, int vid, uint32_t ver) {
tfsInitFile(TSDB_FILE_F(pMFile), did.level, did.id, fname);
}
void tsdbInitMFileEx(SMFile *pMFile, SMFile *pOMFile) {
void tsdbInitMFileEx(SMFile *pMFile, const SMFile *pOMFile) {
*pMFile = *pOMFile;
TSDB_FILE_SET_CLOSED(pMFile);
}
@ -201,6 +201,7 @@ int tsdbScanAndTryFixMFile(STsdbRepo *pRepo) {
tsdbError("vgId:%d meta file %s not exit, report to upper layer to fix it", REPO_ID(pRepo),
TSDB_FILE_FULL_NAME(pMFile));
pRepo->state |= TSDB_STATE_BAD_META;
TSDB_FILE_SET_STATE(pMFile, TSDB_FILE_STATE_BAD);
return 0;
}
@ -232,6 +233,7 @@ int tsdbScanAndTryFixMFile(STsdbRepo *pRepo) {
tsdbError("vgId:%d meta file %s has wrong size %" PRId64 " expected %" PRId64 ", report to upper layer to fix it",
REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), mfstat.st_size, pMFile->info.size);
pRepo->state |= TSDB_STATE_BAD_META;
TSDB_FILE_SET_STATE(pMFile, TSDB_FILE_STATE_BAD);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return 0;
} else {
@ -293,6 +295,8 @@ static int tsdbRollBackMFile(SMFile *pMFile) {
void tsdbInitDFile(SDFile *pDFile, SDiskID did, int vid, int fid, uint32_t ver, TSDB_FILE_T ftype) {
char fname[TSDB_FILENAME_LEN];
TSDB_FILE_SET_STATE(pDFile, TSDB_FILE_STATE_OK);
TSDB_FILE_SET_CLOSED(pDFile);
memset(&(pDFile->info), 0, sizeof(pDFile->info));
@ -439,6 +443,7 @@ static int tsdbScanAndTryFixDFile(STsdbRepo *pRepo, SDFile *pDFile) {
tsdbError("vgId:%d data file %s not exit, report to upper layer to fix it", REPO_ID(pRepo),
TSDB_FILE_FULL_NAME(pDFile));
pRepo->state |= TSDB_STATE_BAD_DATA;
TSDB_FILE_SET_STATE(pDFile, TSDB_FILE_STATE_BAD);
return 0;
}
@ -470,6 +475,7 @@ static int tsdbScanAndTryFixDFile(STsdbRepo *pRepo, SDFile *pDFile) {
tsdbError("vgId:%d data file %s has wrong size %" PRId64 " expected %" PRId64 ", report to upper layer to fix it",
REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile), dfstat.st_size, pDFile->info.size);
pRepo->state |= TSDB_STATE_BAD_DATA;
TSDB_FILE_SET_STATE(pDFile, TSDB_FILE_STATE_BAD);
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
return 0;
} else {

View File

@ -216,11 +216,13 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {
}
int tsdbAsyncCommit(STsdbRepo *pRepo) {
if (pRepo->mem == NULL) return 0;
tsem_wait(&(pRepo->readyToCommit));
ASSERT(pRepo->imem == NULL);
if (pRepo->mem == NULL) {
tsem_post(&(pRepo->readyToCommit));
return 0;
}
if (pRepo->code != TSDB_CODE_SUCCESS) {
tsdbWarn("vgId:%d try to commit when TSDB not in good state: %s", REPO_ID(pRepo), tstrerror(terrno));

View File

@ -2681,7 +2681,7 @@ static int32_t tableGroupComparFn(const void *p1, const void *p2, const void *pa
f1 = (char*) TABLE_NAME(pTable1);
f2 = (char*) TABLE_NAME(pTable2);
type = TSDB_DATA_TYPE_BINARY;
bytes = tGetTableNameColumnSchema().bytes;
bytes = tGetTbnameColumnSchema()->bytes;
} else {
STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex);
bytes = pCol->bytes;

View File

@ -85,6 +85,7 @@ int32_t tsdbSyncRecv(void *tsdb, SOCKET socketFd) {
pRepo->state = TSDB_STATE_OK;
tsdbInitSyncH(&synch, pRepo, socketFd);
tsem_wait(&(pRepo->readyToCommit));
tsdbStartFSTxn(pRepo, 0, 0);
if (tsdbSyncRecvMeta(&synch) < 0) {
@ -98,6 +99,7 @@ int32_t tsdbSyncRecv(void *tsdb, SOCKET socketFd) {
}
tsdbEndFSTxn(pRepo);
tsem_post(&(pRepo->readyToCommit));
tsdbDestroySyncH(&synch);
// Reload file change
@ -107,6 +109,7 @@ int32_t tsdbSyncRecv(void *tsdb, SOCKET socketFd) {
_err:
tsdbEndFSTxnWithError(REPO_FS(pRepo));
tsem_post(&(pRepo->readyToCommit));
tsdbDestroySyncH(&synch);
return -1;
}
@ -191,7 +194,8 @@ static int32_t tsdbSyncRecvMeta(SSyncH *pSynch) {
return 0;
}
if (pLMFile == NULL || memcmp(&(pSynch->pmf->info), &(pLMFile->info), sizeof(SMFInfo)) != 0) {
if (pLMFile == NULL || memcmp(&(pSynch->pmf->info), &(pLMFile->info), sizeof(SMFInfo)) != 0 ||
TSDB_FILE_IS_BAD(pLMFile)) {
// Local has no meta file or has a different meta file, need to copy from remote
pSynch->mfChanged = true;
@ -409,7 +413,8 @@ static int32_t tsdbSyncRecvDFileSetArray(SSyncH *pSynch) {
pSynch->pdf != NULL ? pSynch->pdf->fid : -1);
pLSet = tsdbFSIterNext(&fsiter);
} else {
if (pLSet && pSynch->pdf && pLSet->fid == pSynch->pdf->fid && tsdbIsTowFSetSame(pLSet, pSynch->pdf)) {
if (pLSet && pSynch->pdf && pLSet->fid == pSynch->pdf->fid && tsdbIsTowFSetSame(pLSet, pSynch->pdf) &&
tsdbFSetIsOk(pLSet)) {
// Just keep local files and notify remote not to send
tsdbInfo("vgId:%d, fileset:%d is same and no need to recv", REPO_ID(pRepo), pLSet->fid);

View File

@ -157,7 +157,7 @@ void taosArrayDestroyEx(SArray* pArray, void (*fp)(void*));
* @param pArray
* @param compar
*/
void taosArraySort(SArray* pArray, int (*compar)(const void*, const void*));
void taosArraySort(SArray* pArray, __compar_fn_t comparFn);
/**
* sort string array

View File

@ -220,7 +220,7 @@ void taosArrayDestroyEx(SArray* pArray, void (*fp)(void*)) {
taosArrayDestroy(pArray);
}
void taosArraySort(SArray* pArray, int (*compar)(const void*, const void*)) {
void taosArraySort(SArray* pArray, __compar_fn_t compar) {
assert(pArray != NULL);
assert(compar != NULL);
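
A minimal usage sketch for the new qsort-style signature (assumptions: the SArray API is declared in tarray.h as elsewhere in this tree, and __compar_fn_t is the usual int (*)(const void *, const void *)):

#include "tarray.h"  /* assumed header for SArray / taosArrayInit / taosArraySort */

static int int32Compar(const void *a, const void *b) {
  int32_t x = *(const int32_t *)a;
  int32_t y = *(const int32_t *)b;
  return (x < y) ? -1 : (x > y);
}

static void sortExample(void) {
  SArray *pArray = taosArrayInit(4, sizeof(int32_t));
  int32_t vals[] = {3, 1, 2};
  for (int32_t i = 0; i < 3; ++i) {
    taosArrayPush(pArray, &vals[i]);
  }
  taosArraySort(pArray, int32Compar);        /* elements are now 1, 2, 3 */
  int32_t *first = taosArrayGet(pArray, 0);  /* *first == 1 */
  (void)first;
  taosArrayDestroy(pArray);
}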

View File

@ -191,7 +191,8 @@ double tbufReadDouble(SBufferReader* buf) {
// writer functions
void tbufCloseWriter( SBufferWriter* buf ) {
(*buf->allocator)( buf->data, 0 );
tfree(buf->data);
// (*buf->allocator)( buf->data, 0 ); // potential memory leak.
buf->data = NULL;
buf->pos = 0;
buf->size = 0;

View File

@ -134,6 +134,11 @@ static bool taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) {
wordfree(&full_path);
char tmp[1025] = {0};
if (realpath(option, tmp) != NULL) {
strcpy(option, tmp);
}
int code = taosMkDir(option, 0755);
if (code != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);

View File

@ -204,7 +204,7 @@ void *taosAcquireRef(int rsetId, int64_t rid)
void *p = NULL;
if (rsetId < 0 || rsetId >= TSDB_REF_OBJECTS) {
uTrace("rsetId:%d rid:%" PRId64 " failed to acquire, rsetId not valid", rsetId, rid);
//uTrace("rsetId:%d rid:%" PRId64 " failed to acquire, rsetId not valid", rsetId, rid);
terrno = TSDB_CODE_REF_INVALID_ID;
return NULL;
}

View File

@ -32,14 +32,27 @@ int32_t taosGetFqdn(char *fqdn) {
struct addrinfo hints = {0};
struct addrinfo *result = NULL;
#ifdef __APPLE__
// on macosx, hostname -f has the form of xxx.local
// which will block getaddrinfo for a few seconds if AI_CANONNAME is set
// thus, we choose AF_INET (ipv4 for the moment) to make getaddrinfo return
// immediately
hints.ai_family = AF_INET;
#else // __APPLE__
hints.ai_flags = AI_CANONNAME;
#endif // __APPLE__
int32_t ret = getaddrinfo(hostname, NULL, &hints, &result);
if (!result) {
uError("failed to get fqdn, code:%d, reason:%s", ret, gai_strerror(ret));
return -1;
}
#ifdef __APPLE__
// refer to comments above
strcpy(fqdn, hostname);
#else // __APPLE__
strcpy(fqdn, result->ai_canonname);
#endif // __APPLE__
freeaddrinfo(result);
return 0;
}
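
A small caller-side sketch of the function above (the buffer size here is an assumption, not a project constant; use whatever FQDN length the tree defines). Note that on macOS the result is now the bare hostname rather than a canonical name, per the comments in the #ifdef block:

#include <stdio.h>

static void printLocalFqdn(void) {
  char fqdn[128] = {0};  /* size is an assumption for illustration only */
  if (taosGetFqdn(fqdn) == 0) {
    printf("local fqdn: %s\n", fqdn);
  } else {
    fprintf(stderr, "failed to resolve local fqdn\n");
  }
}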
@ -286,7 +299,7 @@ SOCKET taosOpenTcpClientSocket(uint32_t destIp, uint16_t destPort, uint32_t clie
int32_t bufSize = 1024 * 1024;
sockFd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
if (sockFd <= 2) {
uError("failed to open the socket: %d (%s)", errno, strerror(errno));
taosCloseSocketNoCheck(sockFd);

View File

@ -188,11 +188,7 @@ static void removeTimer(uintptr_t id) {
}
static int64_t getMonotonicMs(void) {
#ifdef WINDOWS
return (int64_t) getMonotonicUs() / 1000;
#else
return taosGetTimestampMs();
#endif
}
static void addToWheel(tmr_obj_t* timer, uint32_t delay) {

View File

@ -34,6 +34,7 @@ static void vnodeLoadCfg(SVnodeObj *pVnode, SCreateVnodeMsg* vnodeMsg) {
pVnode->tsdbCfg.maxRowsPerFileBlock = vnodeMsg->cfg.maxRowsPerFileBlock;
pVnode->tsdbCfg.precision = vnodeMsg->cfg.precision;
pVnode->tsdbCfg.compression = vnodeMsg->cfg.compression;
pVnode->tsdbCfg.update = vnodeMsg->cfg.update;
pVnode->tsdbCfg.cacheLastRow = vnodeMsg->cfg.cacheLastRow;
pVnode->walCfg.walLevel = vnodeMsg->cfg.walLevel;
pVnode->walCfg.fsyncPeriod = vnodeMsg->cfg.fsyncPeriod;
@ -227,6 +228,15 @@ int32_t vnodeReadCfg(SVnodeObj *pVnode) {
}
vnodeMsg.cfg.quorum = (int8_t)quorum->valueint;
cJSON *update = cJSON_GetObjectItem(root, "update");
if (!update || update->type != cJSON_Number) {
vError("vgId: %d, failed to read %s, update not found", pVnode->vgId, file);
vnodeMsg.cfg.update = 0;
vnodeMsg.cfg.vgCfgVersion = 0;
} else {
vnodeMsg.cfg.update = (int8_t)update->valueint;
}
cJSON *cacheLastRow = cJSON_GetObjectItem(root, "cacheLastRow");
if (!cacheLastRow || cacheLastRow->type != cJSON_Number) {
vError("vgId: %d, failed to read %s, cacheLastRow not found", pVnode->vgId, file);
@ -325,6 +335,7 @@ int32_t vnodeWriteCfg(SCreateVnodeMsg *pMsg) {
len += snprintf(content + len, maxLen - len, " \"dbReplica\": %d,\n", pMsg->cfg.dbReplica);
len += snprintf(content + len, maxLen - len, " \"wals\": %d,\n", pMsg->cfg.wals);
len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pMsg->cfg.quorum);
len += snprintf(content + len, maxLen - len, " \"update\": %d,\n", pMsg->cfg.update);
len += snprintf(content + len, maxLen - len, " \"cacheLastRow\": %d,\n", pMsg->cfg.cacheLastRow);
len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n");
for (int32_t i = 0; i < pMsg->cfg.vgReplica; i++) {
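
With the new field, the fragment these snprintf calls write into the vnode config file would look roughly like the lines below (values are illustrative). When an older file without the "update" key is read back, vnodeReadCfg above falls back to update = 0 and resets vgCfgVersion to 0, presumably so that a fresh config is fetched later:

  "wals": 3,
  "quorum": 1,
  "update": 1,
  "cacheLastRow": 0,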

View File

@ -194,12 +194,14 @@ int32_t vnodeOpen(int32_t vgId) {
int32_t code = vnodeReadCfg(pVnode);
if (code != TSDB_CODE_SUCCESS) {
vError("vgId:%d, failed to read config file, set cfgVersion to 0", pVnode->vgId);
vnodeCleanUp(pVnode);
return code;
return 0;
}
code = vnodeReadVersion(pVnode);
if (code != TSDB_CODE_SUCCESS) {
pVnode->version = 0;
vError("vgId:%d, failed to read file version, generate it from data file", pVnode->vgId);
// Allow vnode start even when read file version fails, set file version as wal version or zero
// vnodeCleanUp(pVnode);

View File

@ -38,6 +38,8 @@
#include <string.h>
#include <arpa/inet.h>
#include <libgen.h>
#include <locale.h>
#include <netdb.h>
#define D(fmt, ...) fprintf(stderr, "%s[%d]%s(): " fmt "\n", basename(__FILE__), __LINE__, __func__, ##__VA_ARGS__)
#define A(statement, fmt, ...) do { \
@ -56,6 +58,8 @@
##__VA_ARGS__); \
} while (0)
#include "os.h"
typedef struct ep_s ep_t;
struct ep_s {
int ep;
@ -214,7 +218,6 @@ static void* routine(void* arg) {
A(0==pthread_mutex_lock(&ep->lock), "");
ep->wakenup = 0;
A(0==pthread_mutex_unlock(&ep->lock), "");
D("........");
continue;
}
A(ev->data.ptr, "internal logic error");

View File

@ -44,12 +44,14 @@ class TDTestCase:
# stddev verifacation
tdSql.error("select stddev(ts) from test1")
tdSql.error("select stddev(col1) from test")
tdSql.error("select stddev(col2) from test")
tdSql.error("select stddev(col3) from test")
tdSql.error("select stddev(col4) from test")
tdSql.error("select stddev(col5) from test")
tdSql.error("select stddev(col6) from test")
# stddev support super table now
# tdSql.error("select stddev(col1) from test")
# tdSql.error("select stddev(col2) from test")
# tdSql.error("select stddev(col3) from test")
# tdSql.error("select stddev(col4) from test")
# tdSql.error("select stddev(col5) from test")
# tdSql.error("select stddev(col6) from test")
tdSql.error("select stddev(col7) from test1")
tdSql.error("select stddev(col8) from test1")
tdSql.error("select stddev(col9) from test1")

View File

@ -39,12 +39,6 @@ class TDTestCase:
# stddev verifacation
tdSql.error("select stddev(ts) from test1")
tdSql.error("select stddev(col1) from test")
tdSql.error("select stddev(col2) from test")
tdSql.error("select stddev(col3) from test")
tdSql.error("select stddev(col4) from test")
tdSql.error("select stddev(col5) from test")
tdSql.error("select stddev(col6) from test")
tdSql.error("select stddev(col7) from test1")
tdSql.error("select stddev(col8) from test1")
tdSql.error("select stddev(col9) from test1")

View File

@ -0,0 +1,91 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.rowNum = 100
self.ts = 1537146000000
self.clist1 = []
self.clist2 = []
self.clist3 = []
self.clist4 = []
self.clist5 = []
self.clist6 = []
def getData(self):
for i in range(tdSql.queryRows):
for j in range(6):
exec('self.clist{}.append(tdSql.queryResult[i][j+1])'.format(j+1))
def run(self):
tdSql.prepare()
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20)) tags(cid int,gbid binary(20),loc nchar(20))''')
tdSql.execute("create table test1 using test tags(1,'beijing','北京')")
tdSql.execute("create table test2 using test tags(2,'shanghai','深圳')")
tdSql.execute("create table test3 using test tags(2,'shenzhen','深圳')")
tdSql.execute("create table test4 using test tags(1,'shanghai','上海')")
for j in range(4):
for i in range(self.rowNum):
tdSql.execute("insert into test%d values(now-%dh, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
% (j+1,i, i + 1, i + 1, i + 1, i + 1, i + i * 0.1, i * 1.5, i % 2, i + 1, i + 1))
# stddev verification

tdSql.error("select stddev(ts) from test")
tdSql.error("select stddev(col7) from test")
tdSql.error("select stddev(col8) from test")
tdSql.error("select stddev(col9) from test")
con_list = [
' where cid = 1 and ts >=now - 1d and ts <now',
" where gbid = 'beijing' and ts >=now - 1d and ts <now",
' '
]
for condition in con_list:
tdSql.query("select * from test %s"%(condition))
self.getData()
for i in range(6):
exec('tdSql.query("select stddev(col{}) from test {}")'.format(i+1,condition))
exec('tdSql.checkData(0, 0, np.std(self.clist{}))'.format(i+1))
exec('self.clist{}.clear()'.format(i+1))
print('step 2')
con_group_list = {
' cid = 2 and ts >=now - 1d and ts <now group by tbname':2,
" loc = '深圳' and ts >=now - 1d and ts <now group by tbname " :2 ,
" gbid = 'shanghai' and ts >=now - 1d and ts <now group by cid " :2
}
result = [6.922186552,6.922186552,6.922186552,6.922186552,7.614405212,10.383279829]
for key,value in con_group_list.items():
for i in range(6):
exec('tdSql.query("select stddev(col{}) from test where {}")'.format(i+1,key))
for j in range(value):
tdSql.checkData(j, 0, result[i])
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -44,7 +44,8 @@ class TDTestRetetion:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, self.queryRows, expectRows)
os.system("sudo timedatectl set-ntp true")
time.sleep(40)
os.system("date -s '%s'"%(datetime.datetime.now()+datetime.timedelta(hours=1)))
time.sleep(5)
tdLog.exit("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
def run(self):
@ -63,7 +64,7 @@ class TDTestRetetion:
tdLog.info("=============== step2")
tdDnodes.stop(1)
os.system("sudo timedatectl set-ntp false")
os.system("sudo date -s $(date -d \"${DATE} 2 days\" \"+%Y%m%d\")")
os.system("date -s '%s'"%(datetime.datetime.now()+datetime.timedelta(hours=48)))
tdDnodes.start(1)
cmd = 'insert into test values(now,5);'
tdDnodes.stop(1)
@ -79,7 +80,7 @@ class TDTestRetetion:
self.checkRows(5,cmd)
tdLog.info("=============== step3")
tdDnodes.stop(1)
os.system("sudo date -s $(date -d \"${DATE} 2 days\" \"+%Y%m%d\")")
os.system("date -s '%s'"%(datetime.datetime.now()+datetime.timedelta(hours=48)))
tdDnodes.start(1)
tdLog.info(cmd)
tdSql.execute(cmd)
@ -99,18 +100,19 @@ class TDTestRetetion:
tdLog.info(cmd)
tdSql.execute(cmd)
self.queryRows=tdSql.query('select * from test')
self.checkRows(7,cmd)
self.checkRows(5,cmd)
tdLog.info("=============== step5")
tdDnodes.stop(1)
tdDnodes.start(1)
cmd='select * from test where ts > now-1d'
self.queryRows=tdSql.query('select * from test where ts > now-1d')
self.checkRows(1,cmd)
self.checkRows(2,cmd)
def stop(self):
os.system("sudo timedatectl set-ntp true")
time.sleep(40)
os.system("date -s '%s'"%(datetime.datetime.now()+datetime.timedelta(hours=1)))
time.sleep(5)
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -1,18 +1,18 @@
# update
#python3 ./test.py -f update/allow_update.py
python3 ./test.py -f update/allow_update.py
python3 ./test.py -f update/allow_update-0.py
python3 ./test.py -f update/append_commit_data.py
python3 ./test.py -f update/append_commit_last-0.py
python3 ./test.py -f update/append_commit_last.py
#python3 ./test.py -f update/merge_commit_data.py
#python3 ./test.py -f update/merge_commit_data-0.py
#python3 ./test.py -f update/merge_commit_data2.py
#python3 ./test.py -f update/merge_commit_data2_update0.py
#python3 ./test.py -f update/merge_commit_last-0.py
#python3 ./test.py -f update/merge_commit_last.py
#python3 ./test.py -f update/bug_td2279.py
python3 ./test.py -f update/merge_commit_data.py
python3 ./test.py -f update/merge_commit_data-0.py
python3 ./test.py -f update/merge_commit_data2.py
python3 ./test.py -f update/merge_commit_data2_update0.py
python3 ./test.py -f update/merge_commit_last-0.py
python3 ./test.py -f update/merge_commit_last.py
python3 ./test.py -f update/bug_td2279.py
# wal
python3 ./test.py -f wal/addOldWalTest.py

View File

@ -111,6 +111,18 @@ class TDTestCase:
tdSql.query("select * from tb where c5 = 'true' ")
tdSql.checkRows(5)
# For jira: https://jira.taosdata.com:18080/browse/TD-2850
tdSql.execute("create database 'Test' ")
tdSql.execute("use 'Test' ")
tdSql.execute("create table 'TB'(ts timestamp, 'Col1' int) tags('Tag1' int)")
tdSql.execute("insert into 'Tb0' using tb tags(1) values(now, 1)")
tdSql.query("select * from tb")
tdSql.checkRows(1)
tdSql.query("select * from tb0")
tdSql.checkRows(1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -47,6 +47,40 @@ class TDTestCase:
tdSql.query("select * from db.st where dev='dev_02'")
tdSql.checkRows(1)
#For: https://jira.taosdata.com:18080/browse/TD-2671
print("==============step3")
tdSql.execute(
"create stable if not exists stb (ts timestamp, tagtype int) tags(dev nchar(50))")
tdSql.execute(
'CREATE TABLE if not exists dev_01 using stb tags("dev_01")')
tdSql.execute(
'CREATE TABLE if not exists dev_02 using stb tags("dev_02")')
print("==============step4")
tdSql.execute(
"""INSERT INTO dev_01(ts, tagtype) VALUES('2020-05-13 10:00:00.000', 1),
('2020-05-13 10:00:00.001', 1)
dev_02 VALUES('2020-05-13 10:00:00.001', 1)""")
tdSql.query("select * from db.stb where dev='dev_01'")
tdSql.checkRows(2)
tdSql.query("select * from db.stb where dev='dev_02'")
tdSql.checkRows(1)
tdSql.query("describe db.stb")
tdSql.checkRows(3)
tdSql.execute("alter stable db.stb add tag t1 int")
tdSql.query("describe db.stb")
tdSql.checkRows(4)
tdSql.execute("drop stable db.stb")
tdSql.query("show stables")
tdSql.checkRows(1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -31,10 +31,18 @@ class TDTestCase:
def insertDataAndAlterTable(self, threadID):
if(threadID == 0):
os.system("yes | taosdemo -t %d -n %d" % (self.numberOfTables, self.numberOfRecords))
os.system("yes | taosdemo -t %d -n %d -x" % (self.numberOfTables, self.numberOfRecords))
if(threadID == 1):
print("use test")
tdSql.execute("use test")
while True:
print("query started")
tdSql.query("select * from test.t9")
rows = tdSql.queryRows
print("rows %d" % rows)
if(rows > 0):
break
time.sleep(1)
print("alter table test.meters add column f4 int")
tdSql.execute("alter table test.meters add column f4 int")
print("insert into test.t0 values (now, 1, 2, 3, 4)")
@ -46,8 +54,8 @@ class TDTestCase:
t1 = threading.Thread(target=self.insertDataAndAlterTable, args=(0, ))
t2 = threading.Thread(target=self.insertDataAndAlterTable, args=(1, ))
t1.start()
time.sleep(2)
t1.start()
time.sleep(2)
t2.start()
t1.join()
t2.join()

View File

@ -12,24 +12,24 @@ sql create database 'abc123'
sql create database '_ab1234'
sql create database 'ABC123'
sql create database '_ABC123'
sql create database 'aABb123 '
sql create database ' xyz '
sql create database ' XYZ '
sql_error create database 'aABb123 '
sql_error create database ' xyz '
sql_error create database ' XYZ '
sql use 'abc123'
sql use '_ab1234'
sql use 'ABC123'
sql use '_ABC123'
sql use 'aABb123'
sql use ' xyz '
sql use ' XYZ '
sql_error use 'aABb123'
sql_error use ' xyz '
sql_error use ' XYZ '
sql drop database 'abc123'
sql drop database '_ab1234'
sql_error drop database 'ABC123'
sql drop database '_ABC123'
sql drop database 'aABb123'
sql drop database ' xyz '
sql_error drop database 'aABb123'
sql_error drop database ' xyz '
sql_error drop database ' XYZ '

View File

@ -407,4 +407,359 @@ sql insert into tm13 values('2020-1-1 1:1:1', 0);
sql select count(*) from m1 where ts='2020-1-1 1:1:1' interval(1h) group by tbname;
if $rows != 2 then
return -1
endi
endi
sql drop table m1;
sql drop table if exists tm1;
sql drop table if exists tm2;
sql create table m1(ts timestamp, k double, b double, c int, d smallint, e int unsigned) tags(a int);
sql create table tm1 using m1 tags(1);
sql create table tm2 using m1 tags(2);
sql insert into tm1 values('2021-01-27 22:22:39.294', 1, 10, NULL, 110, 123) ('2021-01-27 22:22:40.294', 2, 20, NULL, 120, 124) ('2021-01-27 22:22:41.294', 3, 30, NULL, 130, 125)('2021-01-27 22:22:43.294', 4, 40, NULL, 140, 126)('2021-01-27 22:22:44.294', 5, 50, NULL, 150, 127);
sql insert into tm2 values('2021-01-27 22:22:40.688', 5, 101, NULL, 210, 321) ('2021-01-27 22:22:41.688', 5, 102, NULL, 220, 322) ('2021-01-27 22:22:42.688', 5, 103, NULL, 230, 323)('2021-01-27 22:22:43.688', 5, 104, NULL, 240, 324)('2021-01-27 22:22:44.688', 5, 105, NULL, 250, 325)('2021-01-27 22:22:45.688', 5, 106, NULL, 260, 326);
sql select stddev(k) from m1
if $rows != 1 then
return -1
endi
if $data00 != 1.378704626 then
return -1
endi
sql select stddev(c) from m1
if $rows != 0 then
return -1
endi
sql select stddev(k), stddev(c) from m1
if $rows != 1 then
return -1
endi
if $data00 != 1.378704626 then
return -1
endi
if $data01 != NULL then
return -1;
endi
sql select stddev(b),stddev(b),stddev(k) from m1;
if $rows != 1 then
return -1
endi
if $data00 != 37.840465463 then
return -1
endi
if $data01 != 37.840465463 then
return -1
endi
if $data02 != 1.378704626 then
return -1
endi
sql select stddev(k), stddev(b) from m1 group by a
if $rows != 2 then
return -1
endi
if $data00 != 1.414213562 then
return -1
endi
if $data01 != 14.142135624 then
return -1
endi
if $data02 != 1 then
return -1
endi
if $data10 != 0.000000000 then
return -1
endi
if $data11 != 1.707825128 then
return -1
endi
if $data12 != 2 then
return -1
endi
sql select stddev(k), stddev(b) from m1 where a= 1 group by a
if $rows != 1 then
return -1
endi
if $data00 != 1.414213562 then
return -1
endi
if $data01 != 14.142135624 then
return -1
endi
if $data02 != 1 then
return -1
endi
sql select stddev(k), stddev(b) from m1 group by tbname
if $rows != 2 then
return -1
endi
if $data00 != 1.414213562 then
return -1
endi
if $data01 != 14.142135624 then
return -1
endi
if $data02 != @tm1@ then
return -1
endi
if $data10 != 0.000000000 then
return -1
endi
if $data11 != 1.707825128 then
return -1
endi
if $data12 != @tm2@ then
return -1
endi
sql select stddev(k), stddev(b) from m1 group by tbname,a
if $rows != 2 then
return -1
endi
sql select stddev(k), stddev(b), stddev(c) from m1 group by tbname,a
if $rows != 2 then
return -1
endi
if $data00 != 1.414213562 then
return -1
endi
if $data01 != 14.142135624 then
return -1
endi
if $data02 != NULL then
return -1
endi
if $data03 != @tm1@ then
return -1
endi
if $data04 != 1 then
return -1
endi
if $data10 != 0.000000000 then
return -1
endi
if $data11 != 1.707825128 then
return -1
endi
if $data12 != NULL then
return -1
endi
if $data13 != @tm2@ then
return -1
endi
if $data14 != 2 then
return -1
endi
sql select stddev(k), stddev(b), stddev(c) from m1 interval(10s) group by tbname,a
if $rows != 3 then
return -1
endi
if $data01 != 0.000000000 then
return -1
endi
if $data02 != 0.000000000 then
return -1
endi
if $data03 != NULL then
return -1
endi
if $data04 != @tm1@ then
return -1
endi
if $data05 != 1 then
return -1
endi
if $data11 != 1.118033989 then
return -1
endi
if $data12 != 11.180339887 then
return -1
endi
if $data13 != NULL then
return -1
endi
if $data14 != @tm1@ then
return -1
endi
if $data22 != 1.707825128 then
return -1
endi
if $data23 != NULL then
return -1
endi
if $data24 != @tm2@ then
return -1
endi
if $data25 != 2 then
return -1
endi
sql select count(*), first(b), stddev(b), stddev(c) from m1 interval(10s) group by a
if $rows != 3 then
return -1
endi
if $data00 != @21-01-27 22:22:30.000@ then
return -1
endi
if $data01 != 1 then
return -1
endi
if $data02 != 10.000000000 then
return -1
endi
if $data03 != 0.000000000 then
return -1
endi
if $data04 != NULL then
return -1
endi
if $data05 != 1 then
return -1
endi
if $data12 != 20.000000000 then
return -1
endi
if $data13 != 11.180339887 then
return -1
endi
if $data14 != NULL then
return -1
endi
if $data23 != 1.707825128 then
return -1
endi
sql select count(*), first(b), stddev(b), stddev(c) from m1 interval(10s) group by tbname,a
if $rows != 3 then
return -1
endi
if $data23 != 1.707825128 then
return -1
endi
if $data25 != @tm2@ then
return -1
endi
sql select count(*), stddev(b), stddev(b)+20, stddev(c) from m1 interval(10s) group by tbname,a
if $rows != 3 then
return -1
endi
if $data02 != 0.000000000 then
return -1
endi
if $data03 != 20.000000000 then
return -1
endi
if $data13 != 31.180339887 then
return -1
endi
if $data14 != NULL then
return -1
endi
sql select count(*), first(b), stddev(b)+first(b), stddev(c) from m1 interval(10s) group by tbname,a
if $rows != 3 then
return -1
endi
if $data02 != 10.000000000 then
return -1
endi
if $data03 != 10.000000000 then
return -1
endi
if $data12 != 20.000000000 then
return -1
endi
if $data13 != 31.180339887 then
return -1
endi
if $data22 != 101.000000000 then
return -1
endi
if $data23 != 102.707825128 then
return -1
endi
sql select stddev(e),stddev(k) from m1 where a=1
if $rows != 1 then
return -1
endi
if $data00 != 1.414213562 then
return -1
endi
if $data01 != 1.414213562 then
return -1
endi
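
The numeric expectations in this stddev test follow from a population (divide-by-n) standard deviation over the rows inserted into tm1 and tm2 at the top of the file. A quick cross-check, written as a stand-alone Python sketch (the pstddev helper is ours, not part of the test framework):

import math

def pstddev(xs):
    # population standard deviation: divide by n, not by n - 1
    m = sum(xs) / len(xs)
    return math.sqrt(sum((x - m) ** 2 for x in xs) / len(xs))

k_tm1 = [1, 2, 3, 4, 5]
k_tm2 = [5, 5, 5, 5, 5, 5]
b_tm1 = [10, 20, 30, 40, 50]
b_tm2 = [101, 102, 103, 104, 105, 106]

print(round(pstddev(k_tm1 + k_tm2), 9))  # 1.378704626   stddev(k) over m1
print(round(pstddev(b_tm1 + b_tm2), 9))  # 37.840465463  stddev(b) over m1
print(round(pstddev(k_tm1), 9))          # 1.414213562   group a=1 (tm1), stddev(k)
print(round(pstddev(b_tm1), 9))          # 14.142135624  group a=1 (tm1), stddev(b)
print(round(pstddev(b_tm2), 9))          # 1.707825128   group a=2 (tm2), stddev(b)
print(round(pstddev([2, 3, 4, 5]), 9))   # 1.118033989   tm1, 10s window starting 22:22:40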

View File

@ -1,84 +1,84 @@
run general/parser/alter.sim
sleep 100
run general/parser/alter1.sim
sleep 100
run general/parser/alter_stable.sim
sleep 100
run general/parser/auto_create_tb.sim
sleep 100
run general/parser/auto_create_tb_drop_tb.sim
sleep 100
run general/parser/col_arithmetic_operation.sim
sleep 100
run general/parser/columnValue.sim
sleep 100
run general/parser/commit.sim
sleep 100
run general/parser/create_db.sim
sleep 100
run general/parser/create_mt.sim
sleep 100
run general/parser/create_tb.sim
sleep 100
run general/parser/dbtbnameValidate.sim
sleep 100
run general/parser/fill.sim
sleep 100
run general/parser/fill_stb.sim
sleep 100
#run general/parser/fill_us.sim #
sleep 100
run general/parser/first_last.sim
sleep 100
run general/parser/import_commit1.sim
sleep 100
run general/parser/import_commit2.sim
sleep 100
run general/parser/import_commit3.sim
sleep 100
#run general/parser/import_file.sim
sleep 100
run general/parser/insert_tb.sim
sleep 100
run general/parser/tags_dynamically_specifiy.sim
sleep 100
run general/parser/interp.sim
sleep 100
run general/parser/lastrow.sim
sleep 100
run general/parser/limit.sim
sleep 100
run general/parser/limit1.sim
sleep 100
run general/parser/limit1_tblocks100.sim
sleep 100
run general/parser/limit2.sim
sleep 100
run general/parser/mixed_blocks.sim
sleep 100
run general/parser/nchar.sim
sleep 100
run general/parser/null_char.sim
sleep 100
run general/parser/selectResNum.sim
sleep 100
run general/parser/select_across_vnodes.sim
sleep 100
run general/parser/select_from_cache_disk.sim
sleep 100
run general/parser/set_tag_vals.sim
sleep 100
run general/parser/single_row_in_tb.sim
sleep 100
run general/parser/slimit.sim
sleep 100
run general/parser/slimit1.sim
sleep 100
run general/parser/slimit_alter_tags.sim
sleep 100
run general/parser/tbnameIn.sim
sleep 100
run general/parser/slimit_alter_tags.sim # persistent failed
#run general/parser/alter.sim
#sleep 100
#run general/parser/alter1.sim
#sleep 100
#run general/parser/alter_stable.sim
#sleep 100
#run general/parser/auto_create_tb.sim
#sleep 100
#run general/parser/auto_create_tb_drop_tb.sim
#sleep 100
#run general/parser/col_arithmetic_operation.sim
#sleep 100
#run general/parser/columnValue.sim
#sleep 100
#run general/parser/commit.sim
#sleep 100
#run general/parser/create_db.sim
#sleep 100
#run general/parser/create_mt.sim
#sleep 100
#run general/parser/create_tb.sim
#sleep 100
#run general/parser/dbtbnameValidate.sim
#sleep 100
#run general/parser/fill.sim
#sleep 100
#run general/parser/fill_stb.sim
#sleep 100
##run general/parser/fill_us.sim #
#sleep 100
#run general/parser/first_last.sim
#sleep 100
#run general/parser/import_commit1.sim
#sleep 100
#run general/parser/import_commit2.sim
#sleep 100
#run general/parser/import_commit3.sim
#sleep 100
##run general/parser/import_file.sim
#sleep 100
#run general/parser/insert_tb.sim
#sleep 100
#run general/parser/tags_dynamically_specifiy.sim
#sleep 100
#run general/parser/interp.sim
#sleep 100
#run general/parser/lastrow.sim
#sleep 100
#run general/parser/limit.sim
#sleep 100
#run general/parser/limit1.sim
#sleep 100
#run general/parser/limit1_tblocks100.sim
#sleep 100
#run general/parser/limit2.sim
#sleep 100
#run general/parser/mixed_blocks.sim
#sleep 100
#run general/parser/nchar.sim
#sleep 100
#run general/parser/null_char.sim
#sleep 100
#run general/parser/selectResNum.sim
#sleep 100
#run general/parser/select_across_vnodes.sim
#sleep 100
#run general/parser/select_from_cache_disk.sim
#sleep 100
#run general/parser/set_tag_vals.sim
#sleep 100
#run general/parser/single_row_in_tb.sim
#sleep 100
#run general/parser/slimit.sim
#sleep 100
#run general/parser/slimit1.sim
#sleep 100
#run general/parser/slimit_alter_tags.sim
#sleep 100
#run general/parser/tbnameIn.sim
#sleep 100
#run general/parser/slimit_alter_tags.sim # persistent failed
sleep 100
run general/parser/join.sim
sleep 100
@ -106,4 +106,4 @@ run general/parser/sliding.sim
sleep 100
run general/parser/function.sim
sleep 100
run general/parse/stableOp.sim
run general/parser/stableOp.sim

View File

@ -44,10 +44,10 @@ echo serverPort 7100 >> %TAOS_CFG%
echo logDir %LOG_DIR% >> %TAOS_CFG%
echo scriptDir %SCRIPT_DIR% >> %TAOS_CFG%
echo numOfLogLines 100000000 >> %TAOS_CFG%
echo rpcDebugFlag 143 >> %TAOS_CFG%
echo rpcDebugFlag 135 >> %TAOS_CFG%
echo tmrDebugFlag 131 >> %TAOS_CFG%
echo cDebugFlag 143 >> %TAOS_CFG%
echo udebugFlag 143 >> %TAOS_CFG%
echo cDebugFlag 135 >> %TAOS_CFG%
echo udebugFlag 135 >> %TAOS_CFG%
echo wal 0 >> %TAOS_CFG%
echo asyncLog 0 >> %TAOS_CFG%
echo locale en_US.UTF-8 >> %TAOS_CFG%
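
For reference, TDengine debug flags are usually documented as levels rather than arbitrary numbers: 131 keeps error and warning logs, 135 adds debug logs, and 143 adds trace logs. Lowering rpcDebugFlag, cDebugFlag and udebugFlag from 143 to 135 therefore trims the Windows test logs from trace to debug verbosity; the small lookup table below is included only as a reminder of that commonly documented mapping.

# Debug flag levels as usually documented for TDengine; the change above
# moves three modules from 143 down to 135.
DEBUG_FLAG_LEVELS = {
    131: "error + warning",
    135: "error + warning + debug",
    143: "error + warning + debug + trace",
}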

View File

@ -6,15 +6,15 @@ GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
function git_branch {
branch="`git branch 2>/dev/null | grep "^\*" | sed -e "s/^\*\ //"`"
if [ "${branch}" != "" ];then
if [ "${branch}" = "(no branch)" ];then
branch="(`git rev-parse --short HEAD`...)"
fi
branch=(${branch////_})
echo "$branch"
fi
function dohavecore(){
corefile=`find $corepath -mmin 1`
if [ -n "$corefile" ];then
echo 'taosd or taos has generated core'
if [[ $1 == 1 ]];then
exit 8
fi
fi
}
function runSimCaseOneByOne {
while read -r line; do
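
The dohavecore helper added above looks under $corepath for anything find reports as one minute old and prints a warning; when its argument is 1 it also aborts the whole run with exit code 8. Note that find's -mmin 1 matches files whose age rounds to exactly one minute, whereas -mmin -1 would mean "modified within the last minute", so the window is a little unusual. A rough Python sketch of what the helper appears to intend (the 60-second window and exit code 8 are carried over as assumptions):

import os, sys, time

def do_have_core(corepath, fatal, max_age_sec=60):
    # Report any file under corepath modified within the last minute.
    now = time.time()
    for root, _, files in os.walk(corepath):
        for name in files:
            path = os.path.join(root, name)
            try:
                recent = now - os.path.getmtime(path) <= max_age_sec
            except OSError:
                continue  # file disappeared while walking; skip it
            if recent:
                print("taosd or taos has generated core")
                if fatal:
                    sys.exit(8)
                return True
    return False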
@ -42,6 +42,7 @@ function runSimCaseOneByOne {
# fi
end_time=`date +%s`
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a out.log
dohavecore 0
fi
done < $1
}
@ -69,14 +70,15 @@ function runSimCaseOneByOnefq {
out_log=`tail -1 out.log `
if [[ $out_log =~ 'failed' ]];then
if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
cp -r ../../../sim ~/sim_$(git_branch)_`date "+%Y_%m_%d_%H:%M:%S"`
cp -r ../../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S"`
else
cp -r ../../sim ~/sim_$(git_branch)_`date "+%Y_%m_%d_%H:%M:%S" `
cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
fi
exit 8
fi
end_time=`date +%s`
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a out.log
dohavecore 1
fi
done < $1
}
@ -105,6 +107,7 @@ function runPyCaseOneByOne {
else
$line > /dev/null 2>&1
fi
dohavecore 0
fi
done < $1
}
@ -126,13 +129,14 @@ function runPyCaseOneByOnefq {
end_time=`date +%s`
out_log=`tail -1 pytest-out.log `
if [[ $out_log =~ 'failed' ]];then
cp -r ../../sim ~/sim_$(git_branch)_`date "+%Y_%m_%d_%H:%M:%S" `
cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
exit 8
fi
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log
else
$line > /dev/null 2>&1
fi
dohavecore 1
fi
done < $1
}
@ -140,7 +144,7 @@ totalFailed=0
totalPyFailed=0
tests_dir=`pwd`
corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern`
if [ "$2" != "python" ]; then
echo "### run TSIM test case ###"
cd $tests_dir/script