Merge remote-tracking branch 'origin/3.0' into feature/dnode

Shengliang Guan 2022-05-06 22:20:39 +08:00
commit 3009df865b
56 changed files with 2078 additions and 360 deletions

View File

@ -73,7 +73,8 @@ typedef uint16_t tmsg_t;
enum { CONN_TYPE__QUERY = 1, CONN_TYPE__TMQ, CONN_TYPE__MAX };
enum {
HEARTBEAT_KEY_DBINFO = 1,
HEARTBEAT_KEY_USER_AUTHINFO = 1,
HEARTBEAT_KEY_DBINFO,
HEARTBEAT_KEY_STBINFO,
HEARTBEAT_KEY_MQ_TMP,
};
@ -426,7 +427,9 @@ int32_t tDeserializeSGetUserAuthReq(void* buf, int32_t bufLen, SGetUserAuthReq*
typedef struct {
char user[TSDB_USER_LEN];
int32_t version;
int8_t superAuth;
SHashObj* createdDbs;
SHashObj* readDbs;
SHashObj* writeDbs;
} SGetUserAuthRsp;
@ -669,10 +672,20 @@ typedef struct {
SArray* pArray; // Array of SUseDbRsp
} SUseDbBatchRsp;
int32_t tSerializeSUseDbBatchRsp(void* buf, int32_t bufLen, SUseDbBatchRsp* pRsp);
int32_t tDeserializeSUseDbBatchRsp(void* buf, int32_t bufLen, SUseDbBatchRsp* pRsp);
void tFreeSUseDbBatchRsp(SUseDbBatchRsp* pRsp);
typedef struct {
SArray* pArray; // Array of SGetUserAuthRsp
} SUserAuthBatchRsp;
int32_t tSerializeSUserAuthBatchRsp(void* buf, int32_t bufLen, SUserAuthBatchRsp* pRsp);
int32_t tDeserializeSUserAuthBatchRsp(void* buf, int32_t bufLen, SUserAuthBatchRsp* pRsp);
void tFreeSUserAuthBatchRsp(SUserAuthBatchRsp* pRsp);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
} SCompactDbReq;

View File

@ -141,7 +141,7 @@ typedef struct {
/// row total length
uint32_t len;
/// row version
uint64_t ver;
// uint64_t ver;
/// the inline data, maybe a tuple or a k-v tuple
char data[];
} STSRow;
@ -176,7 +176,7 @@ typedef struct {
#define TD_ROW_DATA(r) ((r)->data)
#define TD_ROW_LEN(r) ((r)->len)
#define TD_ROW_KEY(r) ((r)->ts)
#define TD_ROW_VER(r) ((r)->ver)
// #define TD_ROW_VER(r) ((r)->ver)
#define TD_ROW_KEY_ADDR(r) (r)
// N.B. If without STSchema, getExtendedRowSize() is used to get the rowMaxBytes and

View File

@ -40,6 +40,11 @@ enum {
CTG_DBG_STB_RENT_NUM,
};
typedef enum {
AUTH_TYPE_READ = 1,
AUTH_TYPE_WRITE,
AUTH_TYPE_OTHER,
} AUTH_TYPE;
typedef struct SCatalogReq {
SArray *pTableName; // element is SNAME
@ -57,6 +62,7 @@ typedef struct SMetaData {
typedef struct SCatalogCfg {
uint32_t maxTblCacheNum;
uint32_t maxDBCacheNum;
uint32_t maxUserCacheNum;
uint32_t dbRentSec;
uint32_t stbRentSec;
} SCatalogCfg;
@ -77,6 +83,11 @@ typedef struct SDbVgVersion {
int32_t numOfTable; // unit is TSDB_TABLE_NUM_UNIT
} SDbVgVersion;
typedef struct SUserAuthVersion {
char user[TSDB_USER_LEN];
int32_t version;
} SUserAuthVersion;
typedef SDbCfgRsp SDbCfgInfo;
typedef SUserIndexRsp SIndexInfo;
@ -219,12 +230,18 @@ int32_t catalogGetExpiredSTables(SCatalog* pCatalog, SSTableMetaVersion **stable
int32_t catalogGetExpiredDBs(SCatalog* pCatalog, SDbVgVersion **dbs, uint32_t *num);
int32_t catalogGetExpiredUsers(SCatalog* pCtg, SUserAuthVersion **users, uint32_t *num);
int32_t catalogGetDBCfg(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* dbFName, SDbCfgInfo* pDbCfg);
int32_t catalogGetIndexInfo(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* indexName, SIndexInfo* pInfo);
int32_t catalogGetUdfInfo(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* funcName, SFuncInfo** pInfo);
int32_t catalogChkAuth(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* user, const char* dbFName, AUTH_TYPE type, bool *pass);
int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth);
/**
* Destroy catalog and release all resources
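A caller-side sketch of the new auth entry point declared above (not part of this commit; the wrapper name and error handling are illustrative — only catalogChkAuth(), AUTH_TYPE_WRITE and TSDB_CODE_SUCCESS are taken from this header):

static int32_t checkWriteAuth(SCatalog *pCtg, void *pRpc, const SEpSet *pMgmtEps,
                              const char *user, const char *dbFName, bool *pass) {
  *pass = false;
  /* answered from the user cache when possible, otherwise fetched from the mnode */
  int32_t code = catalogChkAuth(pCtg, pRpc, pMgmtEps, user, dbFName, AUTH_TYPE_WRITE, pass);
  if (code != TSDB_CODE_SUCCESS) {
    return code;
  }
  return TSDB_CODE_SUCCESS;  /* *pass now tells the caller whether the write is allowed */
}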

View File

@ -29,7 +29,11 @@ extern "C" {
#endif
#define UDF_LISTEN_PIPE_NAME_LEN 32
#define UDF_LISTEN_PIPE_NAME_PREFIX "udfd.sock."
#ifdef _WIN32
#define UDF_LISTEN_PIPE_NAME_PREFIX "\\\\?\\pipe\\udfd.sock"
#else
#define UDF_LISTEN_PIPE_NAME_PREFIX ".udfd.sock."
#endif
#define UDF_DNODE_ID_ENV_NAME "DNODE_ID"
//======================================================================================
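A sketch of how the platform-specific prefix above is presumably combined with the dnode id taken from the DNODE_ID environment variable to form the actual listen pipe path — the composing code is not part of this diff, so treat this as an assumption for illustration:

#include <stdio.h>
#include <stdlib.h>

static void buildUdfdListenPipeName(char *buf, size_t cap) {
  const char *dnodeId = getenv(UDF_DNODE_ID_ENV_NAME);  /* "DNODE_ID", assumed to hold the dnode id */
  /* yields something like "\\?\pipe\udfd.sock<ID>" on Windows or ".udfd.sock.<ID>" elsewhere */
  snprintf(buf, cap, "%s%s", UDF_LISTEN_PIPE_NAME_PREFIX, dnodeId != NULL ? dnodeId : "0");
}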
@ -129,8 +133,8 @@ int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock);
// begin API to UDF writer.
// dynamic lib init and destroy
typedef int32_t (*TUdfSetupFunc)();
typedef int32_t (*TUdfTeardownFunc)();
typedef int32_t (*TUdfInitFunc)();
typedef int32_t (*TUdfDestroyFunc)();
//TODO: add API to check function arguments type, number etc.
@ -242,7 +246,6 @@ static FORCE_INLINE int32_t udfColSetRow(SUdfColumn* pColumn, uint32_t currentRo
return 0;
}
typedef int32_t (*TUdfFreeUdfColumnFunc)(SUdfColumn* column);
typedef int32_t (*TUdfScalarProcFunc)(SUdfDataBlock* block, SUdfColumn *resultCol);
typedef int32_t (*TUdfAggStartFunc)(SUdfInterBuf *buf);

View File

@ -91,6 +91,8 @@ int32_t winDurFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu
int32_t qStartTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qTbnameFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
#ifdef __cplusplus
}
#endif

View File

@ -68,6 +68,7 @@ typedef int (*RpcAfp)(void *parent, char *tableId, char *spi, char *encrypt, cha
typedef bool (*RpcRfp)(int32_t code);
typedef struct SRpcInit {
char localFqdn[TSDB_FQDN_LEN];
uint16_t localPort; // local port
char * label; // for debug purpose
int numOfThreads; // number of threads to handle connections

View File

@ -161,6 +161,7 @@ int taosCreateSocketWithTimeOutOpt(uint32_t conn_timeout_sec);
TdSocketPtr taosOpenUdpSocket(uint32_t localIp, uint16_t localPort);
TdSocketPtr taosOpenTcpClientSocket(uint32_t ip, uint16_t port, uint32_t localIp);
bool taosValidIpAndPort(uint32_t ip, uint16_t port);
TdSocketServerPtr taosOpenTcpServerSocket(uint32_t ip, uint16_t port);
int32_t taosKeepTcpAlive(TdSocketPtr pSocket);
TdSocketPtr taosAcceptTcpConnectSocket(TdSocketServerPtr pServerSocket, struct sockaddr *destAddr, int *addrLen);

View File

@ -621,6 +621,14 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_TOPIC_QUERY TAOS_DEF_ERROR_CODE(0, 0x2639)
#define TSDB_CODE_PAR_INVALID_DROP_STABLE TAOS_DEF_ERROR_CODE(0, 0x263A)
#define TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE TAOS_DEF_ERROR_CODE(0, 0x263B)
#define TSDB_CODE_PAR_DUPLICATED_COLUMN TAOS_DEF_ERROR_CODE(0, 0x263C)
#define TSDB_CODE_PAR_INVALID_TAGS_LENGTH TAOS_DEF_ERROR_CODE(0, 0x263D)
#define TSDB_CODE_PAR_INVALID_ROW_LENGTH TAOS_DEF_ERROR_CODE(0, 0x263E)
#define TSDB_CODE_PAR_INVALID_COLUMNS_NUM TAOS_DEF_ERROR_CODE(0, 0x263F)
#define TSDB_CODE_PAR_TOO_MANY_COLUMNS TAOS_DEF_ERROR_CODE(0, 0x2640)
#define TSDB_CODE_PAR_INVALID_FIRST_COLUMN TAOS_DEF_ERROR_CODE(0, 0x2641)
#define TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN TAOS_DEF_ERROR_CODE(0, 0x2642)
#define TSDB_CODE_PAR_INVALID_TAGS_NUM TAOS_DEF_ERROR_CODE(0, 0x2643)
//planner
#define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700)

View File

@ -28,6 +28,27 @@ static int32_t hbMqHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq
static int32_t hbMqHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) { return 0; }
static int32_t hbProcessUserAuthInfoRsp(void *value, int32_t valueLen, struct SCatalog *pCatalog) {
int32_t code = 0;
SUserAuthBatchRsp batchRsp = {0};
if (tDeserializeSUserAuthBatchRsp(value, valueLen, &batchRsp) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
return -1;
}
int32_t numOfBatchs = taosArrayGetSize(batchRsp.pArray);
for (int32_t i = 0; i < numOfBatchs; ++i) {
SGetUserAuthRsp *rsp = taosArrayGet(batchRsp.pArray, i);
tscDebug("hb user auth rsp, user:%s, version:%d", rsp->user, rsp->version);
catalogUpdateUserAuthInfo(pCatalog, rsp);
}
tFreeSUserAuthBatchRsp(&batchRsp);
return TSDB_CODE_SUCCESS;
}
static int32_t hbProcessDBInfoRsp(void *value, int32_t valueLen, struct SCatalog *pCatalog) {
int32_t code = 0;
@ -148,6 +169,24 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
for (int32_t i = 0; i < kvNum; ++i) {
SKv *kv = taosArrayGet(pRsp->info, i);
switch (kv->key) {
case HEARTBEAT_KEY_USER_AUTHINFO: {
if (kv->valueLen <= 0 || NULL == kv->value) {
tscError("invalid hb user auth info, len:%d, value:%p", kv->valueLen, kv->value);
break;
}
int64_t *clusterId = (int64_t *)info->param;
struct SCatalog *pCatalog = NULL;
int32_t code = catalogGetHandle(*clusterId, &pCatalog);
if (code != TSDB_CODE_SUCCESS) {
tscWarn("catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", *clusterId, tstrerror(code));
break;
}
hbProcessUserAuthInfoRsp(kv->value, kv->valueLen, pCatalog);
break;
}
case HEARTBEAT_KEY_DBINFO: {
if (kv->valueLen <= 0 || NULL == kv->value) {
tscError("invalid hb db info, len:%d, value:%p", kv->valueLen, kv->value);
@ -327,6 +366,39 @@ int32_t hbGetQueryBasicInfo(SClientHbKey *connKey, SClientHbReq *req) {
return TSDB_CODE_SUCCESS;
}
int32_t hbGetExpiredUserInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, SClientHbReq *req) {
SUserAuthVersion *users = NULL;
uint32_t userNum = 0;
int32_t code = 0;
code = catalogGetExpiredUsers(pCatalog, &users, &userNum);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
if (userNum <= 0) {
return TSDB_CODE_SUCCESS;
}
for (int32_t i = 0; i < userNum; ++i) {
SUserAuthVersion *user = &users[i];
user->version = htonl(user->version);
}
SKv kv = {
.key = HEARTBEAT_KEY_USER_AUTHINFO,
.valueLen = sizeof(SUserAuthVersion) * userNum,
.value = users,
};
tscDebug("hb got %d expired users, valueLen:%d", userNum, kv.valueLen);
taosHashPut(req->info, &kv.key, sizeof(kv.key), &kv, sizeof(kv));
return TSDB_CODE_SUCCESS;
}
int32_t hbGetExpiredDBInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, SClientHbReq *req) {
SDbVgVersion *dbs = NULL;
uint32_t dbNum = 0;
@ -407,6 +479,11 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req
hbGetQueryBasicInfo(connKey, req);
code = hbGetExpiredUserInfo(connKey, pCatalog, req);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
code = hbGetExpiredDBInfo(connKey, pCatalog, req);
if (TSDB_CODE_SUCCESS != code) {
return code;
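A companion sketch (not in this commit) for the HEARTBEAT_KEY_USER_AUTHINFO request packed by hbGetExpiredUserInfo() above: the KV value is a raw SUserAuthVersion array, so a receiver recovers the entry count from valueLen alone — the same arithmetic mndProcessQueryHeartBeat() uses on the mnode side (kv->valueLen / sizeof(SUserAuthVersion)). The helper name is illustrative:

static int32_t splitUserAuthKv(const SKv *kv, SUserAuthVersion **ppUsers, int32_t *pNum) {
  if (kv->value == NULL || kv->valueLen <= 0 ||
      kv->valueLen % (int32_t)sizeof(SUserAuthVersion) != 0) {
    return -1;  /* malformed payload */
  }
  *ppUsers = (SUserAuthVersion *)kv->value;  /* versions were htonl()'d by hbGetExpiredUserInfo */
  *pNum = kv->valueLen / (int32_t)sizeof(SUserAuthVersion);
  return 0;
}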

View File

@ -587,15 +587,34 @@ TEST(testCase, projection_query_tables) {
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)");
if (taos_errno(pRes) != 0) {
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table tu using st1 tags(1)");
if (taos_errno(pRes) != 0) {
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
}
taos_free_result(pRes);
for(int32_t i = 0; i < 10000; ++i) {
char sql[512] = {0};
sprintf(sql, "insert into tu values(now+%da, %d)", i, i);
pRes = taos_query(pConn, "create table tu2 using st2 tags(1)");
if (taos_errno(pRes) != 0) {
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
}
taos_free_result(pRes);
for(int32_t i = 0; i < 10000000; i += 20) {
char sql[1024] = {0};
sprintf(sql,
"insert into tu values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
"(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
"(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
"(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)",
i, i, i + 1, i + 1, i + 2, i + 2, i + 3, i + 3, i + 4, i + 4, i + 5, i + 5, i + 6, i + 6, i + 7, i + 7,
i + 8, i + 8, i + 9, i + 9, i + 10, i + 10, i + 11, i + 11, i + 12, i + 12, i + 13, i + 13, i + 14, i + 14,
i + 15, i + 15, i + 16, i + 16, i + 17, i + 17, i + 18, i + 18, i + 19, i + 19);
TAOS_RES* p = taos_query(pConn, sql);
if (taos_errno(p) != 0) {
printf("failed to insert data, reason:%s\n", taos_errstr(p));
@ -604,24 +623,44 @@ TEST(testCase, projection_query_tables) {
taos_free_result(p);
}
pRes = taos_query(pConn, "select * from tu");
if (taos_errno(pRes) != 0) {
printf("failed to select from table, reason:%s\n", taos_errstr(pRes));
taos_free_result(pRes);
ASSERT_TRUE(false);
printf("start to insert next table\n");
for(int32_t i = 0; i < 10000000; i += 20) {
char sql[1024] = {0};
sprintf(sql,
"insert into tu2 values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
"(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
"(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
"(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)",
i, i, i + 1, i + 1, i + 2, i + 2, i + 3, i + 3, i + 4, i + 4, i + 5, i + 5, i + 6, i + 6, i + 7, i + 7,
i + 8, i + 8, i + 9, i + 9, i + 10, i + 10, i + 11, i + 11, i + 12, i + 12, i + 13, i + 13, i + 14, i + 14,
i + 15, i + 15, i + 16, i + 16, i + 17, i + 17, i + 18, i + 18, i + 19, i + 19);
TAOS_RES* p = taos_query(pConn, sql);
if (taos_errno(p) != 0) {
printf("failed to insert data, reason:%s\n", taos_errstr(p));
}
taos_free_result(p);
}
TAOS_ROW pRow = NULL;
TAOS_FIELD* pFields = taos_fetch_fields(pRes);
int32_t numOfFields = taos_num_fields(pRes);
// pRes = taos_query(pConn, "select * from tu");
// if (taos_errno(pRes) != 0) {
// printf("failed to select from table, reason:%s\n", taos_errstr(pRes));
// taos_free_result(pRes);
// ASSERT_TRUE(false);
// }
char str[512] = {0};
while ((pRow = taos_fetch_row(pRes)) != NULL) {
int32_t code = taos_print_row(str, pRow, pFields, numOfFields);
printf("%s\n", str);
}
// TAOS_ROW pRow = NULL;
// TAOS_FIELD* pFields = taos_fetch_fields(pRes);
// int32_t numOfFields = taos_num_fields(pRes);
//
// char str[512] = {0};
// while ((pRow = taos_fetch_row(pRes)) != NULL) {
// int32_t code = taos_print_row(str, pRow, pFields, numOfFields);
// printf("%s\n", str);
// }
taos_free_result(pRes);
// taos_free_result(pRes);
taos_close(pConn);
}
@ -659,7 +698,7 @@ TEST(testCase, agg_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
TAOS_RES* pRes = taos_query(pConn, "use db");
TAOS_RES* pRes = taos_query(pConn, "use abc1");
if (taos_errno(pRes) != 0) {
printf("failed to use db, reason:%s\n", taos_errstr(pRes));
taos_free_result(pRes);

View File

@ -363,9 +363,9 @@ int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc, SArray* pInd
for (int32_t i = 0; i < pDest->info.numOfCols; ++i) {
int32_t mapIndex = i;
if (pIndexMap) {
mapIndex = *(int32_t*)taosArrayGet(pIndexMap, i);
}
// if (pIndexMap) {
// mapIndex = *(int32_t*)taosArrayGet(pIndexMap, i);
// }
SColumnInfoData* pCol2 = taosArrayGet(pDest->pDataBlock, i);
SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, mapIndex);
@ -493,12 +493,12 @@ SSDataBlock* blockDataExtractBlock(SSDataBlock* pBlock, int32_t startIndex, int3
for (int32_t j = startIndex; j < (startIndex + rowCount); ++j) {
bool isNull = false;
if (pBlock->pBlockAgg == NULL) {
isNull = colDataIsNull(pColData, pBlock->info.rows, j, NULL);
isNull = colDataIsNull_s(pColData, pBlock->info.rows);
} else {
isNull = colDataIsNull(pColData, pBlock->info.rows, j, pBlock->pBlockAgg[i]);
}
char* p = colDataGetData(pColData, j);
char* p = colDataGetData(pColData, j);
colDataAppend(pDstCol, j - startIndex, p, isNull);
}
}

View File

@ -1164,31 +1164,47 @@ int32_t tDeserializeSGetUserAuthReq(void *buf, int32_t bufLen, SGetUserAuthReq *
return 0;
}
int32_t tSerializeSGetUserAuthRsp(void *buf, int32_t bufLen, SGetUserAuthRsp *pRsp) {
SCoder encoder = {0};
tCoderInit(&encoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_ENCODER);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pRsp->user) < 0) return -1;
if (tEncodeI8(&encoder, pRsp->superAuth) < 0) return -1;
int32_t tSerializeSGetUserAuthRspImpl(SCoder *pEncoder, SGetUserAuthRsp *pRsp) {
if (tEncodeCStr(pEncoder, pRsp->user) < 0) return -1;
if (tEncodeI8(pEncoder, pRsp->superAuth) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->version) < 0) return -1;
int32_t numOfCreatedDbs = taosHashGetSize(pRsp->createdDbs);
int32_t numOfReadDbs = taosHashGetSize(pRsp->readDbs);
int32_t numOfWriteDbs = taosHashGetSize(pRsp->writeDbs);
if (tEncodeI32(&encoder, numOfReadDbs) < 0) return -1;
if (tEncodeI32(&encoder, numOfWriteDbs) < 0) return -1;
if (tEncodeI32(pEncoder, numOfCreatedDbs) < 0) return -1;
if (tEncodeI32(pEncoder, numOfReadDbs) < 0) return -1;
if (tEncodeI32(pEncoder, numOfWriteDbs) < 0) return -1;
char *db = taosHashIterate(pRsp->readDbs, NULL);
char *db = taosHashIterate(pRsp->createdDbs, NULL);
while (db != NULL) {
if (tEncodeCStr(&encoder, db) < 0) return -1;
if (tEncodeCStr(pEncoder, db) < 0) return -1;
db = taosHashIterate(pRsp->createdDbs, db);
}
db = taosHashIterate(pRsp->readDbs, NULL);
while (db != NULL) {
if (tEncodeCStr(pEncoder, db) < 0) return -1;
db = taosHashIterate(pRsp->readDbs, db);
}
db = taosHashIterate(pRsp->writeDbs, NULL);
while (db != NULL) {
if (tEncodeCStr(&encoder, db) < 0) return -1;
if (tEncodeCStr(pEncoder, db) < 0) return -1;
db = taosHashIterate(pRsp->writeDbs, db);
}
return 0;
}
int32_t tSerializeSGetUserAuthRsp(void *buf, int32_t bufLen, SGetUserAuthRsp *pRsp) {
SCoder encoder = {0};
tCoderInit(&encoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_ENCODER);
if (tStartEncode(&encoder) < 0) return -1;
if (tSerializeSGetUserAuthRspImpl(&encoder, pRsp) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -1196,39 +1212,58 @@ int32_t tSerializeSGetUserAuthRsp(void *buf, int32_t bufLen, SGetUserAuthRsp *pR
return tlen;
}
int32_t tDeserializeSGetUserAuthRsp(void *buf, int32_t bufLen, SGetUserAuthRsp *pRsp) {
pRsp->readDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
pRsp->writeDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
int32_t tDeserializeSGetUserAuthRspImpl(SCoder *pDecoder, SGetUserAuthRsp *pRsp) {
pRsp->createdDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
pRsp->readDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
pRsp->writeDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
if (pRsp->readDbs == NULL || pRsp->writeDbs == NULL) {
return -1;
}
SCoder decoder = {0};
tCoderInit(&decoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_DECODER);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pRsp->user) < 0) return -1;
if (tDecodeI8(&decoder, &pRsp->superAuth) < 0) return -1;
if (tDecodeCStrTo(pDecoder, pRsp->user) < 0) return -1;
if (tDecodeI8(pDecoder, &pRsp->superAuth) < 0) return -1;
if (tDecodeI32(pDecoder, &pRsp->version) < 0) return -1;
int32_t numOfCreatedDbs = 0;
int32_t numOfReadDbs = 0;
int32_t numOfWriteDbs = 0;
if (tDecodeI32(&decoder, &numOfReadDbs) < 0) return -1;
if (tDecodeI32(&decoder, &numOfWriteDbs) < 0) return -1;
if (tDecodeI32(pDecoder, &numOfCreatedDbs) < 0) return -1;
if (tDecodeI32(pDecoder, &numOfReadDbs) < 0) return -1;
if (tDecodeI32(pDecoder, &numOfWriteDbs) < 0) return -1;
for (int32_t i = 0; i < numOfCreatedDbs; ++i) {
char db[TSDB_DB_FNAME_LEN] = {0};
if (tDecodeCStrTo(pDecoder, db) < 0) return -1;
int32_t len = strlen(db) + 1;
taosHashPut(pRsp->createdDbs, db, len, db, len);
}
for (int32_t i = 0; i < numOfReadDbs; ++i) {
char db[TSDB_DB_FNAME_LEN] = {0};
if (tDecodeCStrTo(&decoder, db) < 0) return -1;
if (tDecodeCStrTo(pDecoder, db) < 0) return -1;
int32_t len = strlen(db) + 1;
taosHashPut(pRsp->readDbs, db, len, db, len);
}
for (int32_t i = 0; i < numOfWriteDbs; ++i) {
char db[TSDB_DB_FNAME_LEN] = {0};
if (tDecodeCStrTo(&decoder, db) < 0) return -1;
if (tDecodeCStrTo(pDecoder, db) < 0) return -1;
int32_t len = strlen(db) + 1;
taosHashPut(pRsp->writeDbs, db, len, db, len);
}
return 0;
}
int32_t tDeserializeSGetUserAuthRsp(void *buf, int32_t bufLen, SGetUserAuthRsp *pRsp) {
SCoder decoder = {0};
tCoderInit(&decoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_DECODER);
if (tStartDecode(&decoder) < 0) return -1;
if (tDeserializeSGetUserAuthRspImpl(&decoder, pRsp) < 0) return -1;
tEndDecode(&decoder);
tCoderClear(&decoder);
@ -1236,6 +1271,7 @@ int32_t tDeserializeSGetUserAuthRsp(void *buf, int32_t bufLen, SGetUserAuthRsp *
}
void tFreeSGetUserAuthRsp(SGetUserAuthRsp *pRsp) {
taosHashCleanup(pRsp->createdDbs);
taosHashCleanup(pRsp->readDbs);
taosHashCleanup(pRsp->writeDbs);
}
@ -2055,6 +2091,62 @@ void tFreeSUseDbBatchRsp(SUseDbBatchRsp *pRsp) {
taosArrayDestroy(pRsp->pArray);
}
int32_t tSerializeSUserAuthBatchRsp(void* buf, int32_t bufLen, SUserAuthBatchRsp* pRsp){
SCoder encoder = {0};
tCoderInit(&encoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_ENCODER);
if (tStartEncode(&encoder) < 0) return -1;
int32_t numOfBatch = taosArrayGetSize(pRsp->pArray);
if (tEncodeI32(&encoder, numOfBatch) < 0) return -1;
for (int32_t i = 0; i < numOfBatch; ++i) {
SGetUserAuthRsp *pUserAuthRsp = taosArrayGet(pRsp->pArray, i);
if (tSerializeSGetUserAuthRspImpl(&encoder, pUserAuthRsp) < 0) return -1;
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tCoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSUserAuthBatchRsp(void* buf, int32_t bufLen, SUserAuthBatchRsp* pRsp){
SCoder decoder = {0};
tCoderInit(&decoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_DECODER);
if (tStartDecode(&decoder) < 0) return -1;
int32_t numOfBatch = taosArrayGetSize(pRsp->pArray);
if (tDecodeI32(&decoder, &numOfBatch) < 0) return -1;
pRsp->pArray = taosArrayInit(numOfBatch, sizeof(SGetUserAuthRsp));
if (pRsp->pArray == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
for (int32_t i = 0; i < numOfBatch; ++i) {
SGetUserAuthRsp rsp = {0};
if (tDeserializeSGetUserAuthRspImpl(&decoder, &rsp) < 0) return -1;
taosArrayPush(pRsp->pArray, &rsp);
}
tEndDecode(&decoder);
tCoderClear(&decoder);
return 0;
}
void tFreeSUserAuthBatchRsp(SUserAuthBatchRsp* pRsp){
int32_t numOfBatch = taosArrayGetSize(pRsp->pArray);
for (int32_t i = 0; i < numOfBatch; ++i) {
SGetUserAuthRsp *pUserAuthRsp = taosArrayGet(pRsp->pArray, i);
tFreeSGetUserAuthRsp(pUserAuthRsp);
}
taosArrayDestroy(pRsp->pArray);
}
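A minimal round-trip sketch for the new batch message, following the two-pass pattern used throughout this file (size with a NULL buffer, then encode into an allocated one). The helper and its memory handling are illustrative; a real caller would also release pOut with tFreeSUserAuthBatchRsp() when done:

static int32_t roundTripUserAuthBatch(SUserAuthBatchRsp *pIn, SUserAuthBatchRsp *pOut) {
  int32_t len = tSerializeSUserAuthBatchRsp(NULL, 0, pIn);  /* pass 1: compute length only */
  if (len < 0) return -1;
  void *buf = taosMemoryMalloc(len);
  if (buf == NULL) return -1;
  if (tSerializeSUserAuthBatchRsp(buf, len, pIn) < 0 ||     /* pass 2: encode for real */
      tDeserializeSUserAuthBatchRsp(buf, len, pOut) != 0) {
    taosMemoryFree(buf);
    return -1;
  }
  taosMemoryFree(buf);
  return 0;
}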
int32_t tSerializeSDbCfgReq(void *buf, int32_t bufLen, SDbCfgReq *pReq) {
SCoder encoder = {0};
tCoderInit(&encoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_ENCODER);

View File

@ -16,8 +16,8 @@
#define _DEFAULT_SOURCE
#include "dmImp.h"
#define INTERNAL_USER "_dnd"
#define INTERNAL_CKEY "_key"
#define INTERNAL_USER "_dnd"
#define INTERNAL_CKEY "_key"
#define INTERNAL_SECRET "_pwd"
static void dmGetMnodeEpSet(SDnode *pDnode, SEpSet *pEpSet) {
@ -130,10 +130,10 @@ _OVER:
}
static void dmProcessMsg(SDnode *pDnode, SRpcMsg *pMsg, SEpSet *pEpSet) {
SDnodeTrans *pTrans = &pDnode->trans;
SDnodeTrans * pTrans = &pDnode->trans;
tmsg_t msgType = pMsg->msgType;
bool isReq = msgType & 1u;
SMsgHandle *pHandle = &pTrans->msgHandles[TMSG_INDEX(msgType)];
SMsgHandle * pHandle = &pTrans->msgHandles[TMSG_INDEX(msgType)];
SMgmtWrapper *pWrapper = pHandle->pNdWrapper;
if (msgType == TDMT_DND_SERVER_STATUS) {
@ -517,7 +517,7 @@ static inline int32_t dmRetrieveUserAuthInfo(SDnode *pDnode, char *user, char *s
SAuthReq authReq = {0};
tstrncpy(authReq.user, user, TSDB_USER_LEN);
int32_t contLen = tSerializeSAuthReq(NULL, 0, &authReq);
void *pReq = rpcMallocCont(contLen);
void * pReq = rpcMallocCont(contLen);
tSerializeSAuthReq(pReq, contLen, &authReq);
SRpcMsg rpcMsg = {.pCont = pReq, .contLen = contLen, .msgType = TDMT_MND_AUTH, .ahandle = (void *)9528};
@ -547,6 +547,8 @@ static int32_t dmInitServer(SDnode *pDnode) {
SDnodeTrans *pTrans = &pDnode->trans;
SRpcInit rpcInit = {0};
strncpy(rpcInit.localFqdn, pDnode->data.localFqdn, strlen(pDnode->data.localFqdn));
rpcInit.localPort = pDnode->data.serverPort;
rpcInit.label = "DND";
rpcInit.numOfThreads = tsNumOfRpcThreads;

View File

@ -255,6 +255,7 @@ typedef struct {
int64_t updateTime;
int8_t superUser;
int32_t acctId;
int32_t authVersion;
SHashObj* readDbs;
SHashObj* writeDbs;
SRWLatch lock;

View File

@ -29,6 +29,7 @@ void mndReleaseUser(SMnode *pMnode, SUserObj *pUser);
// for trans test
SSdbRaw *mndUserActionEncode(SUserObj *pUser);
int32_t mndValidateUserAuthInfo(SMnode *pMnode, SUserAuthVersion *pUsers, int32_t numOfUses, void **ppRsp, int32_t *pRspLen);
#ifdef __cplusplus
}

View File

@ -314,7 +314,7 @@ static int32_t mndProcessCreateFuncReq(SNodeMsg *pReq) {
goto _OVER;
}
if (createReq.bufSize <= 0 || createReq.bufSize > TSDB_FUNC_BUF_SIZE) {
if (createReq.bufSize < 0 || createReq.bufSize > TSDB_FUNC_BUF_SIZE) {
terrno = TSDB_CODE_MND_INVALID_FUNC_BUFSIZE;
goto _OVER;
}

View File

@ -403,6 +403,16 @@ static int32_t mndProcessQueryHeartBeat(SMnode *pMnode, SRpcMsg *pMsg, SClientHb
SKv *kv = pIter;
switch (kv->key) {
case HEARTBEAT_KEY_USER_AUTHINFO: {
void * rspMsg = NULL;
int32_t rspLen = 0;
mndValidateUserAuthInfo(pMnode, kv->value, kv->valueLen / sizeof(SUserAuthVersion), &rspMsg, &rspLen);
if (rspMsg && rspLen > 0) {
SKv kv1 = {.key = HEARTBEAT_KEY_USER_AUTHINFO, .valueLen = rspLen, .value = rspMsg};
taosArrayPush(hbRsp.info, &kv1);
}
break;
}
case HEARTBEAT_KEY_DBINFO: {
void * rspMsg = NULL;
int32_t rspLen = 0;

View File

@ -457,13 +457,16 @@ static int32_t mndProcessAlterUserReq(SNodeMsg *pReq) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _OVER;
}
newUser.authVersion++;
} else if (alterReq.alterType == TSDB_ALTER_USER_REMOVE_READ_DB) {
if (taosHashRemove(newUser.readDbs, alterReq.dbname, len) != 0) {
terrno = TSDB_CODE_MND_DB_NOT_EXIST;
goto _OVER;
}
newUser.authVersion++;
} else if (alterReq.alterType == TSDB_ALTER_USER_CLEAR_READ_DB) {
taosHashClear(newUser.readDbs);
newUser.authVersion++;
} else if (alterReq.alterType == TSDB_ALTER_USER_ADD_WRITE_DB) {
if (pDb == NULL) {
terrno = TSDB_CODE_MND_DB_NOT_EXIST;
@ -473,13 +476,16 @@ static int32_t mndProcessAlterUserReq(SNodeMsg *pReq) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _OVER;
}
newUser.authVersion++;
} else if (alterReq.alterType == TSDB_ALTER_USER_REMOVE_WRITE_DB) {
if (taosHashRemove(newUser.writeDbs, alterReq.dbname, len) != 0) {
terrno = TSDB_CODE_MND_DB_NOT_EXIST;
goto _OVER;
}
newUser.authVersion++;
} else if (alterReq.alterType == TSDB_ALTER_USER_CLEAR_WRITE_DB) {
taosHashClear(newUser.writeDbs);
newUser.authVersion++;
} else {
terrno = TSDB_CODE_MND_INVALID_ALTER_OPER;
goto _OVER;
@ -582,6 +588,38 @@ _OVER:
return code;
}
static int32_t mndSetUserAuthRsp(SMnode *pMnode, SUserObj *pUser, SGetUserAuthRsp *pRsp) {
memcpy(pRsp->user, pUser->user, TSDB_USER_LEN);
pRsp->superAuth = pUser->superUser;
pRsp->version = pUser->authVersion;
taosRLockLatch(&pUser->lock);
pRsp->readDbs = mndDupDbHash(pUser->readDbs);
pRsp->writeDbs = mndDupDbHash(pUser->writeDbs);
taosRUnLockLatch(&pUser->lock);
pRsp->createdDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == pRsp->createdDbs) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
while (1) {
SDbObj *pDb = NULL;
pIter = sdbFetch(pSdb, SDB_DB, pIter, (void **)&pDb);
if (pIter == NULL) break;
if (strcmp(pDb->createUser, pUser->user) == 0) {
int32_t len = strlen(pDb->name) + 1;
taosHashPut(pRsp->createdDbs, pDb->name, len, pDb->name, len);
}
sdbRelease(pSdb, pDb);
}
return 0;
}
static int32_t mndProcessGetUserAuthReq(SNodeMsg *pReq) {
SMnode *pMnode = pReq->pNode;
int32_t code = -1;
@ -602,28 +640,9 @@ static int32_t mndProcessGetUserAuthReq(SNodeMsg *pReq) {
goto _OVER;
}
memcpy(authRsp.user, pUser->user, TSDB_USER_LEN);
authRsp.superAuth = pUser->superUser;
taosRLockLatch(&pUser->lock);
authRsp.readDbs = mndDupDbHash(pUser->readDbs);
authRsp.writeDbs = mndDupDbHash(pUser->writeDbs);
taosRUnLockLatch(&pUser->lock);
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
while (1) {
SDbObj *pDb = NULL;
pIter = sdbFetch(pSdb, SDB_DB, pIter, (void **)&pDb);
if (pIter == NULL) break;
if (strcmp(pDb->createUser, pUser->user) == 0) {
int32_t len = strlen(pDb->name) + 1;
taosHashPut(authRsp.readDbs, pDb->name, len, pDb->name, len);
taosHashPut(authRsp.writeDbs, pDb->name, len, pDb->name, len);
}
sdbRelease(pSdb, pDb);
code = mndSetUserAuthRsp(pMnode, pUser, &authRsp);
if (code) {
goto _OVER;
}
int32_t contLen = tSerializeSGetUserAuthRsp(NULL, 0, &authRsp);
@ -640,6 +659,7 @@ static int32_t mndProcessGetUserAuthReq(SNodeMsg *pReq) {
code = 0;
_OVER:
mndReleaseUser(pMnode, pUser);
tFreeSGetUserAuthRsp(&authRsp);
@ -690,3 +710,72 @@ static void mndCancelGetNextUser(SMnode *pMnode, void *pIter) {
SSdb *pSdb = pMnode->pSdb;
sdbCancelFetch(pSdb, pIter);
}
int32_t mndValidateUserAuthInfo(SMnode *pMnode, SUserAuthVersion *pUsers, int32_t numOfUses, void **ppRsp, int32_t *pRspLen) {
SUserAuthBatchRsp batchRsp = {0};
batchRsp.pArray = taosArrayInit(numOfUses, sizeof(SGetUserAuthRsp));
if (batchRsp.pArray == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
int32_t code = 0;
for (int32_t i = 0; i < numOfUses; ++i) {
SUserObj *pUser = mndAcquireUser(pMnode, pUsers[i].user);
if (pUser == NULL) {
mError("user:%s, failed to auth user since %s", pUsers[i].user, terrstr());
continue;
}
if (pUser->authVersion <= pUsers[i].version) {
mndReleaseUser(pMnode, pUser);
continue;
}
SGetUserAuthRsp rsp = {0};
code = mndSetUserAuthRsp(pMnode, pUser, &rsp);
if (code) {
mndReleaseUser(pMnode, pUser);
tFreeSGetUserAuthRsp(&rsp);
goto _OVER;
}
taosArrayPush(batchRsp.pArray, &rsp);
mndReleaseUser(pMnode, pUser);
}
if (taosArrayGetSize(batchRsp.pArray) <= 0) {
*ppRsp = NULL;
*pRspLen = 0;
tFreeSUserAuthBatchRsp(&batchRsp);
return 0;
}
int32_t rspLen = tSerializeSUserAuthBatchRsp(NULL, 0, &batchRsp);
void *pRsp = taosMemoryMalloc(rspLen);
if (pRsp == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tFreeSUserAuthBatchRsp(&batchRsp);
return -1;
}
tSerializeSUserAuthBatchRsp(pRsp, rspLen, &batchRsp);
*ppRsp = pRsp;
*pRspLen = rspLen;
tFreeSUserAuthBatchRsp(&batchRsp);
return 0;
_OVER:
*ppRsp = NULL;
*pRspLen = 0;
tFreeSUserAuthBatchRsp(&batchRsp);
return code;
}
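Restating the gate in mndValidateUserAuthInfo() above as a tiny predicate (name illustrative): a user's auth data is rebuilt and sent back only when the mnode's authVersion has moved past what the client reported; the "<=" check in the loop means the client is still current and the entry is skipped.

static bool userAuthNeedsRefresh(int32_t mnodeAuthVersion, int32_t reportedVersion) {
  return mnodeAuthVersion > reportedVersion;
}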

View File

@ -24,6 +24,7 @@ class MndTestFunc : public ::testing::Test {
void SetCode(SCreateFuncReq* pReq, const char* pCode, int32_t size);
void SetComment(SCreateFuncReq* pReq, const char* pComment);
void SetBufSize(SCreateFuncReq* pReq, int32_t size);
};
Testbase MndTestFunc::test;
@ -40,6 +41,10 @@ void MndTestFunc::SetComment(SCreateFuncReq* pReq, const char* pComment) {
strcpy(pReq->pComment, pComment);
}
void MndTestFunc::SetBufSize(SCreateFuncReq* pReq, int32_t size) {
pReq->bufSize = size;
}
TEST_F(MndTestFunc, 01_Show_Func) {
test.SendShowReq(TSDB_MGMT_TABLE_FUNC, "user_functions", "");
EXPECT_EQ(test.GetShowRows(), 0);
@ -96,6 +101,7 @@ TEST_F(MndTestFunc, 02_Create_Func) {
strcpy(createReq.name, "f1");
SetCode(&createReq, "code1", 6);
SetComment(&createReq, "comment1");
SetBufSize(&createReq, -1);
int32_t contLen = tSerializeSCreateFuncReq(NULL, 0, &createReq);
void* pReq = rpcMallocCont(contLen);

View File

@ -339,7 +339,7 @@ static int vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pReq,
goto _exit;
}
rsp.pArray = taosArrayInit(sizeof(cRsp), req.nReqs);
rsp.pArray = taosArrayInit(req.nReqs, sizeof(cRsp));
if (rsp.pArray == NULL) {
rcode = -1;
terrno = TSDB_CODE_OUT_OF_MEMORY;

View File

@ -54,6 +54,7 @@ enum {
CTG_ACT_REMOVE_DB,
CTG_ACT_REMOVE_STB,
CTG_ACT_REMOVE_TBL,
CTG_ACT_UPDATE_USER,
CTG_ACT_MAX
};
@ -95,8 +96,18 @@ typedef struct SCtgRentMgmt {
SCtgRentSlot *slots;
} SCtgRentMgmt;
typedef struct SCtgUserAuth {
int32_t version;
SRWLatch lock;
bool superUser;
SHashObj *createdDbs;
SHashObj *readDbs;
SHashObj *writeDbs;
} SCtgUserAuth;
typedef struct SCatalog {
uint64_t clusterId;
SHashObj *userCache; //key:user, value:SCtgUserAuth
SHashObj *dbCache; //key:dbname, value:SCtgDBCache
SCtgRentMgmt dbRent;
SCtgRentMgmt stbRent;
@ -124,6 +135,8 @@ typedef struct SCtgCacheStat {
uint64_t vgMissNum;
uint64_t tblHitNum;
uint64_t tblMissNum;
uint64_t userHitNum;
uint64_t userMissNum;
} SCtgCacheStat;
typedef struct SCatalogStat {
@ -169,6 +182,11 @@ typedef struct SCtgRemoveTblMsg {
uint64_t dbId;
} SCtgRemoveTblMsg;
typedef struct SCtgUpdateUserMsg {
SCatalog* pCtg;
SGetUserAuthRsp userAuth;
} SCtgUpdateUserMsg;
typedef struct SCtgMetaAction {
int32_t act;
@ -234,6 +252,8 @@ typedef struct SCtgAction {
#define CTG_FLAG_SYS_DB 0x8
#define CTG_FLAG_FORCE_UPDATE 0x10
#define CTG_FLAG_SET(_flag, _v) ((_flag) |= (_v))
#define CTG_FLAG_IS_STB(_flag) ((_flag) & CTG_FLAG_STB)
#define CTG_FLAG_IS_NOT_STB(_flag) ((_flag) & CTG_FLAG_NOT_STB)
#define CTG_FLAG_IS_UNKNOWN_STB(_flag) ((_flag) & CTG_FLAG_UNKNOWN_STB)

View File

@ -24,6 +24,7 @@ int32_t ctgActUpdateTbl(SCtgMetaAction *action);
int32_t ctgActRemoveDB(SCtgMetaAction *action);
int32_t ctgActRemoveStb(SCtgMetaAction *action);
int32_t ctgActRemoveTbl(SCtgMetaAction *action);
int32_t ctgActUpdateUser(SCtgMetaAction *action);
extern SCtgDebug gCTGDebug;
SCatalogMgmt gCtgMgmt = {0};
@ -51,6 +52,11 @@ SCtgAction gCtgAction[CTG_ACT_MAX] = {{
CTG_ACT_REMOVE_TBL,
"remove tbMeta",
ctgActRemoveTbl
},
{
CTG_ACT_UPDATE_USER,
"update user",
ctgActUpdateUser
}
};
@ -357,6 +363,31 @@ _return:
CTG_RET(code);
}
int32_t ctgPushUpdateUserMsgInQueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq) {
int32_t code = 0;
SCtgMetaAction action= {.act = CTG_ACT_UPDATE_USER, .syncReq = syncReq};
SCtgUpdateUserMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateUserMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateUserMsg));
CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR);
}
msg->pCtg = pCtg;
msg->userAuth = *pAuth;
action.data = msg;
CTG_ERR_JRET(ctgPushAction(pCtg, &action));
return TSDB_CODE_SUCCESS;
_return:
tFreeSGetUserAuthRsp(pAuth);
taosMemoryFreeClear(msg);
CTG_RET(code);
}
int32_t ctgAcquireVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache, bool *inCache) {
CTG_LOCK(CTG_READ, &dbCache->vgLock);
@ -687,6 +718,43 @@ int32_t ctgGetUdfInfoFromMnode(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEp
return TSDB_CODE_SUCCESS;
}
int32_t ctgGetUserDbAuthFromMnode(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char *user, SGetUserAuthRsp *authRsp) {
char *msg = NULL;
int32_t msgLen = 0;
ctgDebug("try to get user auth from mnode, user:%s", user);
int32_t code = queryBuildMsg[TMSG_INDEX(TDMT_MND_GET_USER_AUTH)]((void *)user, &msg, 0, &msgLen);
if (code) {
ctgError("Build get user auth msg failed, code:%x, db:%s", code, user);
CTG_ERR_RET(code);
}
SRpcMsg rpcMsg = {
.msgType = TDMT_MND_GET_USER_AUTH,
.pCont = msg,
.contLen = msgLen,
};
SRpcMsg rpcRsp = {0};
rpcSendRecv(pRpc, (SEpSet*)pMgmtEps, &rpcMsg, &rpcRsp);
if (TSDB_CODE_SUCCESS != rpcRsp.code) {
ctgError("error rsp for get user auth, error:%s, user:%s", tstrerror(rpcRsp.code), user);
CTG_ERR_RET(rpcRsp.code);
}
code = queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_GET_USER_AUTH)](authRsp, rpcRsp.pCont, rpcRsp.contLen);
if (code) {
ctgError("Process get user auth rsp failed, code:%x, user:%s", code, user);
CTG_ERR_RET(code);
}
ctgDebug("Got user auth from mnode, user:%s", user);
return TSDB_CODE_SUCCESS;
}
int32_t ctgIsTableMetaExistInCache(SCatalog* pCtg, char *dbFName, char* tbName, int32_t *exist) {
if (NULL == pCtg->dbCache) {
@ -859,6 +927,55 @@ int32_t ctgGetTableTypeFromCache(SCatalog* pCtg, const char* dbFName, const char
return TSDB_CODE_SUCCESS;
}
int32_t ctgChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFName, AUTH_TYPE type, bool *inCache, bool *pass) {
if (NULL == pCtg->userCache) {
ctgDebug("empty user auth cache, user:%s", user);
goto _return;
}
SCtgUserAuth *pUser = (SCtgUserAuth *)taosHashGet(pCtg->userCache, user, strlen(user));
if (NULL == pUser) {
ctgDebug("user not in cache, user:%s", user);
goto _return;
}
*inCache = true;
ctgDebug("Got user from cache, user:%s", user);
CTG_CACHE_STAT_ADD(userHitNum, 1);
if (pUser->superUser) {
*pass = true;
return TSDB_CODE_SUCCESS;
}
CTG_LOCK(CTG_READ, &pUser->lock);
if (pUser->createdDbs && taosHashGet(pUser->createdDbs, dbFName, strlen(dbFName))) {
*pass = true;
CTG_UNLOCK(CTG_READ, &pUser->lock);
return TSDB_CODE_SUCCESS;
}
if (pUser->readDbs && taosHashGet(pUser->readDbs, dbFName, strlen(dbFName)) && type == AUTH_TYPE_READ) {
*pass = true;
}
if (pUser->writeDbs && taosHashGet(pUser->writeDbs, dbFName, strlen(dbFName)) && type == AUTH_TYPE_WRITE) {
*pass = true;
}
CTG_UNLOCK(CTG_READ, &pUser->lock);
return TSDB_CODE_SUCCESS;
_return:
*inCache = false;
CTG_CACHE_STAT_ADD(userMissNum, 1);
return TSDB_CODE_SUCCESS;
}
int32_t ctgGetTableMetaFromMnodeImpl(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, char *dbFName, char* tbName, STableMetaOutput* output) {
SBuildTableMetaInput bInput = {.vgId = 0, .dbFName = dbFName, .tbName = tbName};
char *msg = NULL;
@ -1952,6 +2069,45 @@ _return:
CTG_RET(code);
}
int32_t ctgChkAuth(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* user, const char* dbFName, AUTH_TYPE type, bool *pass) {
bool inCache = false;
int32_t code = 0;
*pass = false;
CTG_ERR_RET(ctgChkAuthFromCache(pCtg, user, dbFName, type, &inCache, pass));
if (inCache) {
return TSDB_CODE_SUCCESS;
}
SGetUserAuthRsp authRsp = {0};
CTG_ERR_RET(ctgGetUserDbAuthFromMnode(pCtg, pRpc, pMgmtEps, user, &authRsp));
if (authRsp.superAuth) {
*pass = true;
goto _return;
}
if (authRsp.createdDbs && taosHashGet(authRsp.createdDbs, dbFName, strlen(dbFName))) {
*pass = true;
goto _return;
}
if (authRsp.readDbs && taosHashGet(authRsp.readDbs, dbFName, strlen(dbFName)) && type == AUTH_TYPE_READ) {
*pass = true;
}
if (authRsp.writeDbs && taosHashGet(authRsp.writeDbs, dbFName, strlen(dbFName)) && type == AUTH_TYPE_WRITE) {
*pass = true;
}
_return:
ctgPushUpdateUserMsgInQueue(pCtg, &authRsp, false);
return TSDB_CODE_SUCCESS;
}
int32_t ctgActUpdateVg(SCtgMetaAction *action) {
@ -2121,6 +2277,67 @@ _return:
CTG_RET(code);
}
int32_t ctgActUpdateUser(SCtgMetaAction *action) {
int32_t code = 0;
SCtgUpdateUserMsg *msg = action->data;
SCatalog* pCtg = msg->pCtg;
if (NULL == pCtg->userCache) {
pCtg->userCache = taosHashInit(gCtgMgmt.cfg.maxUserCacheNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK);
if (NULL == pCtg->userCache) {
ctgError("taosHashInit %d user cache failed", gCtgMgmt.cfg.maxUserCacheNum);
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
}
SCtgUserAuth *pUser = (SCtgUserAuth *)taosHashGet(pCtg->userCache, msg->userAuth.user, strlen(msg->userAuth.user));
if (NULL == pUser) {
SCtgUserAuth userAuth = {0};
userAuth.version = msg->userAuth.version;
userAuth.superUser = msg->userAuth.superAuth;
userAuth.createdDbs = msg->userAuth.createdDbs;
userAuth.readDbs = msg->userAuth.readDbs;
userAuth.writeDbs = msg->userAuth.writeDbs;
if (taosHashPut(pCtg->userCache, msg->userAuth.user, strlen(msg->userAuth.user), &userAuth, sizeof(userAuth))) {
ctgError("taosHashPut user %s to cache failed", msg->userAuth.user);
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
return TSDB_CODE_SUCCESS;
}
pUser->version = msg->userAuth.version;
CTG_LOCK(CTG_WRITE, &pUser->lock);
taosHashCleanup(pUser->createdDbs);
pUser->createdDbs = msg->userAuth.createdDbs;
msg->userAuth.createdDbs = NULL;
taosHashCleanup(pUser->readDbs);
pUser->readDbs = msg->userAuth.readDbs;
msg->userAuth.readDbs = NULL;
taosHashCleanup(pUser->writeDbs);
pUser->writeDbs = msg->userAuth.writeDbs;
msg->userAuth.writeDbs = NULL;
CTG_UNLOCK(CTG_WRITE, &pUser->lock);
_return:
taosHashCleanup(msg->userAuth.createdDbs);
taosHashCleanup(msg->userAuth.readDbs);
taosHashCleanup(msg->userAuth.writeDbs);
taosMemoryFreeClear(msg);
CTG_RET(code);
}
void* ctgUpdateThreadFunc(void* param) {
setThreadName("catalog");
@ -2836,6 +3053,35 @@ int32_t catalogGetExpiredDBs(SCatalog* pCtg, SDbVgVersion **dbs, uint32_t *num)
CTG_API_LEAVE(ctgMetaRentGet(&pCtg->dbRent, (void **)dbs, num, sizeof(SDbVgVersion)));
}
int32_t catalogGetExpiredUsers(SCatalog* pCtg, SUserAuthVersion **users, uint32_t *num) {
CTG_API_ENTER();
if (NULL == pCtg || NULL == users || NULL == num) {
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
*num = taosHashGetSize(pCtg->userCache);
if (*num > 0) {
*users = taosMemoryCalloc(*num, sizeof(SUserAuthVersion));
if (NULL == *users) {
ctgError("calloc %d userAuthVersion failed", *num);
CTG_API_LEAVE(TSDB_CODE_OUT_OF_MEMORY);
}
}
uint32_t i = 0;
SCtgUserAuth *pAuth = taosHashIterate(pCtg->userCache, NULL);
while (pAuth != NULL) {
void *key = taosHashGetKey(pAuth, NULL);
strncpy((*users)[i].user, key, sizeof((*users)[i].user));
(*users)[i].version = pAuth->version;
pAuth = taosHashIterate(pCtg->userCache, pAuth);
}
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
}
int32_t catalogGetDBCfg(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* dbFName, SDbCfgInfo* pDbCfg) {
CTG_API_ENTER();
@ -2880,6 +3126,31 @@ _return:
CTG_API_LEAVE(code);
}
int32_t catalogChkAuth(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* user, const char* dbFName, AUTH_TYPE type, bool *pass) {
CTG_API_ENTER();
if (NULL == pCtg || NULL == pRpc || NULL == pMgmtEps || NULL == user || NULL == dbFName || NULL == pass) {
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
int32_t code = 0;
CTG_ERR_JRET(ctgChkAuth(pCtg, pRpc, pMgmtEps, user, dbFName, type, pass));
_return:
CTG_API_LEAVE(code);
}
int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth) {
CTG_API_ENTER();
if (NULL == pCtg || NULL == pAuth) {
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
CTG_API_LEAVE(ctgPushUpdateUserMsgInQueue(pCtg, pAuth, false));
}
void catalogDestroy(void) {
qInfo("start to destroy catalog");
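One detail of ctgActUpdateUser() earlier in this file worth isolating (helper name illustrative; it assumes taosHashCleanup() tolerates NULL, which the _return cleanup path already relies on): the cache adopts the decoded hash tables instead of copying them, then clears the source pointers so the message cleanup cannot double-free them.

static void ctgAdoptHash(SHashObj **dst, SHashObj **src) {
  taosHashCleanup(*dst);  /* drop whatever the cache held before */
  *dst = *src;            /* adopt the freshly decoded table */
  *src = NULL;            /* the later taosHashCleanup() on the message field is now a no-op */
}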

View File

@ -87,9 +87,7 @@ typedef struct SResultInfo { // TODO refactor
typedef struct STableQueryInfo {
TSKEY lastKey; // last check ts, todo remove it later
SResultRowPosition pos; // current active time window
// int32_t groupIndex; // group id in table list
// SVariant tag;
// SResultRowInfo resInfo; // result info
} STableQueryInfo;
typedef enum {
@ -363,11 +361,12 @@ typedef struct STableScanInfo {
} STableScanInfo;
typedef struct STagScanInfo {
SColumnInfo *pCols;
SSDataBlock *pRes;
int32_t totalTables;
int32_t curPos;
void *pReader;
SColumnInfo *pCols;
SSDataBlock *pRes;
SArray *pColMatchInfo;
int32_t curPos;
SReadHandle readHandle;
STableGroupInfo *pTableGroups;
} STagScanInfo;
typedef struct SStreamBlockScanInfo {
@ -579,9 +578,8 @@ typedef struct SSortOperatorInfo {
uint32_t sortBufSize; // max buffer size for in-memory sort
SArray* pSortInfo;
SSortHandle* pSortHandle;
SArray* inputSlotMap; // for index map from table scan output
SArray* pColMatchInfo; // for index map from table scan output
int32_t bufPageSize;
// int32_t numOfRowsInRes;
// TODO extact struct
int64_t startTs; // sort start time
@ -646,7 +644,7 @@ void cleanupAggSup(SAggSupporter* pAggSup);
void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity);
SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity, SArray* pColMatchInfo);
SSDataBlock* loadNextDataBlock(void* param);
void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowCellInfoOffset);
@ -704,7 +702,7 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SExprInfo*
SSDataBlock* pResultBlock, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createTagScanOperatorInfo(void* pReaderHandle, SExprInfo* pExpr, int32_t numOfOutput, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput, SSDataBlock* pResBlock, SArray* pColMatchInfo, STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo);
#if 0
SOperatorInfo* createTableSeqScanOperatorInfo(void* pTsdbReadHandle, STaskRuntimeEnv* pRuntimeEnv);
@ -717,7 +715,6 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order, bool createDummyCol);
void finalizeQueryResult(SqlFunctionCtx* pCtx, int32_t numOfOutput);
void copyTsColoum(SSDataBlock* pRes, SqlFunctionCtx* pCtx, int32_t numOfOutput);
STableQueryInfo* createTableQueryInfo(void* buf, STimeWindow win);

View File

@ -117,18 +117,25 @@ STupleHandle* tsortNextTuple(SSortHandle* pHandle);
/**
*
* @param pHandle
* @param colIndex
* @param colId
* @return
*/
bool tsortIsNullVal(STupleHandle* pVHandle, int32_t colIndex);
bool tsortIsNullVal(STupleHandle* pVHandle, int32_t colId);
/**
*
* @param pHandle
* @param colIndex
* @param colId
* @return
*/
void* tsortGetValue(STupleHandle* pVHandle, int32_t colIndex);
void* tsortGetValue(STupleHandle* pVHandle, int32_t colId);
/**
*
* @param pSortHandle
* @return
*/
SSDataBlock* tsortGetSortedDataBlock(const SSortHandle* pSortHandle);
#ifdef __cplusplus
}

View File

@ -3520,7 +3520,7 @@ static SSDataBlock* doSortedMerge(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SSortedMergeOperatorInfo* pInfo = pOperator->info;
if (pOperator->status == OP_RES_TO_RETURN) {
return getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity);
return getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, NULL);
}
int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
@ -4701,7 +4701,7 @@ static SArray* extractTableIdList(const STableGroupInfo* pTableGroupInfo);
static SArray* extractColumnInfo(SNodeList* pNodeList);
static SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols);
static SArray* createSortInfo(SNodeList* pNodeList, SNodeList* pNodeListTarget);
static SArray* createSortInfo(SNodeList* pNodeList);
static SArray* createIndexMap(SNodeList* pNodeList);
static SArray* extractPartitionColInfo(SNodeList* pNodeList);
static int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode);
@ -4739,7 +4739,6 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
SSDataBlock* pResBlock = createResDataBlock(pScanPhyNode->node.pOutputDataBlockDesc);
SQueryTableDataCond cond = {0};
int32_t code = initQueryTableDataCond(&cond, pTableScanNode);
if (code != TSDB_CODE_SUCCESS) {
return NULL;
@ -4783,6 +4782,25 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pHandle, pResBlock, &pScanNode->tableName, pScanNode->node.pConditions, pSysScanPhyNode->mgmtEpSet, colList,
pTaskInfo, pSysScanPhyNode->showRewrite, pSysScanPhyNode->accountId);
return pOperator;
} else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) {
STagScanPhysiNode* pScanPhyNode = (STagScanPhysiNode*) pPhyNode;
SSDataBlock* pResBlock = createResDataBlock(pScanPhyNode->node.pOutputDataBlockDesc);
int32_t code =
doCreateTableGroup(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableGroupInfo, queryId, taskId);
if (code != TSDB_CODE_SUCCESS) {
return NULL;
}
int32_t num = 0;
SExprInfo* pExprInfo = createExprInfo(pScanPhyNode->pScanPseudoCols, NULL, &num);
int32_t numOfOutputCols = 0;
SArray* colList =
extractColMatchInfo(pScanPhyNode->pScanPseudoCols, pScanPhyNode->node.pOutputDataBlockDesc, &numOfOutputCols);
SOperatorInfo* pOperator = createTagScanOperatorInfo(pHandle, pExprInfo, num, pResBlock, colList, pTableGroupInfo, pTaskInfo);
return pOperator;
} else {
ASSERT(0);
}
@ -4852,16 +4870,16 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
SSortPhysiNode* pSortPhyNode = (SSortPhysiNode*)pPhyNode;
SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
SArray* info = createSortInfo(pSortPhyNode->pSortKeys, pSortPhyNode->pTargets);
SArray* slotMap = createIndexMap(pSortPhyNode->pTargets);
SArray* info = createSortInfo(pSortPhyNode->pSortKeys);
int32_t numOfCols = 0;
SExprInfo* pExprInfo = NULL;
if (pSortPhyNode->pExprs != NULL) {
pExprInfo = createExprInfo(pSortPhyNode->pExprs, NULL, &numOfCols);
}
SExprInfo* pExprInfo = createExprInfo(pSortPhyNode->pExprs, NULL, &numOfCols);
pOptr = createSortOperatorInfo(ops[0], pResBlock, info, pExprInfo, numOfCols, slotMap, pTaskInfo);
int32_t numOfOutputCols = 0;
SArray* pColList =
extractColMatchInfo(pSortPhyNode->pTargets, pSortPhyNode->node.pOutputDataBlockDesc, &numOfOutputCols);
pOptr = createSortOperatorInfo(ops[0], pResBlock, info, pExprInfo, numOfCols, pColList, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW == type) {
SSessionWinodwPhysiNode* pSessionNode = (SSessionWinodwPhysiNode*)pPhyNode;
@ -5019,7 +5037,7 @@ SArray* extractPartitionColInfo(SNodeList* pNodeList) {
return pList;
}
SArray* createSortInfo(SNodeList* pNodeList, SNodeList* pNodeListTarget) {
SArray* createSortInfo(SNodeList* pNodeList) {
size_t numOfCols = LIST_LENGTH(pNodeList);
SArray* pList = taosArrayInit(numOfCols, sizeof(SBlockOrderInfo));
if (pList == NULL) {
@ -5034,22 +5052,7 @@ SArray* createSortInfo(SNodeList* pNodeList, SNodeList* pNodeListTarget) {
bi.nullFirst = (pSortKey->nullOrder == NULL_ORDER_FIRST);
SColumnNode* pColNode = (SColumnNode*)pSortKey->pExpr;
bool found = false;
for (int32_t j = 0; j < LIST_LENGTH(pNodeListTarget); ++j) {
STargetNode* pTarget = (STargetNode*)nodesListGetNode(pNodeListTarget, j);
SColumnNode* pColNodeT = (SColumnNode*)pTarget->pExpr;
if (pColNode->slotId == pColNodeT->slotId) { // to find slotId in PhysiSort OutputDataBlockDesc
bi.slotId = pTarget->slotId;
found = true;
break;
}
}
if (!found) {
qError("sort slot id does not found");
}
bi.slotId = pColNode->slotId;
taosArrayPush(pList, &bi);
}
@ -5088,7 +5091,7 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod
SColMatchInfo c = {0};
c.output = true;
c.colId = pColNode->colId;
c.colId = pColNode->colId;
c.targetSlotId = pNode->slotId;
taosArrayPush(pList, &c);
}
@ -5166,9 +5169,7 @@ tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle*
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
#if 0
return tsdbQueryTables(pHandle->reader, &cond, pTableGroupInfo, queryId, taskId);
#endif
return tsdbQueryTables(pHandle->vnode, &cond, pTableGroupInfo, queryId, taskId);
_error:

View File

@ -13,15 +13,16 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "ttime.h"
#include <libs/function/function.h>
#include "filter.h"
#include "function.h"
#include "functionMgt.h"
#include "os.h"
#include "querynodes.h"
#include "systable.h"
#include "tglobal.h"
#include "tname.h"
#include "systable.h"
#include "ttime.h"
#include "tdatablock.h"
#include "tmsg.h"
@ -1159,16 +1160,17 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSDataBlock* pRe
}
static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
#if 0
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
#if 0
int32_t maxNumOfTables = (int32_t)pResultInfo->capacity;
STagScanInfo *pInfo = pOperator->info;
SSDataBlock *pRes = pInfo->pRes;
*newgroup = false;
int32_t count = 0;
SArray* pa = GET_TABLEGROUP(pRuntimeEnv, 0);
@ -1237,55 +1239,54 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
pOperator->status = OP_EXEC_DONE;
//qDebug("QInfo:0x%"PRIx64" create count(tbname) query, res:%d rows:1", GET_TASKID(pRuntimeEnv), count);
} else { // return only the tags|table name etc.
SExprInfo* pExprInfo = &pOperator->pExpr[0]; // todo use the column list instead of exprinfo
#endif
count = 0;
while(pInfo->curPos < pInfo->totalTables && count < maxNumOfTables) {
int32_t i = pInfo->curPos++;
STagScanInfo* pInfo = pOperator->info;
SExprInfo* pExprInfo = &pOperator->pExpr[0];
SSDataBlock* pRes = pInfo->pRes;
STableQueryInfo* item = taosArrayGetP(pa, i);
SArray* pa = taosArrayGetP(pInfo->pTableGroups->pGroupList, 0);
char *data = NULL, *dst = NULL;
int16_t type = 0, bytes = 0;
for(int32_t j = 0; j < pOperator->numOfExprs; ++j) {
// not assign value in case of user defined constant output column
if (TSDB_COL_IS_UD_COL(pExprInfo[j].base.pColumns->flag)) {
continue;
}
char str[512] = {0};
int32_t count = 0;
SMetaReader mr = {0};
SColumnInfoData* pColInfo = taosArrayGet(pRes->pDataBlock, j);
type = pExprInfo[j].base.resSchema.type;
bytes = pExprInfo[j].base.resSchema.bytes;
while (pInfo->curPos < pInfo->pTableGroups->numOfTables && count < pOperator->resultInfo.capacity) {
STableKeyInfo* item = taosArrayGet(pa, pInfo->curPos);
if (pExprInfo[j].base.pColumns->info.colId == TSDB_TBNAME_COLUMN_INDEX) {
data = tsdbGetTableName(item->pTable);
} else {
data = tsdbGetTableTagVal(item->pTable, pExprInfo[j].base.pColumns->info.colId, type, bytes);
}
for (int32_t j = 0; j < pOperator->numOfExprs; ++j) {
SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId);
dst = pColInfo->pData + count * pExprInfo[j].base.resSchema.bytes;
doSetTagValueToResultBuf(dst, data, type, bytes);
// refactor later
if (fmIsScanPseudoColumnFunc(pExprInfo[j].pExpr->_function.functionId)) {
metaReaderInit(&mr, pInfo->readHandle.meta, 0);
metaGetTableEntryByUid(&mr, item->uid);
STR_TO_VARSTR(str, mr.me.name);
metaReaderClear(&mr);
colDataAppend(pDst, count, str, false);
// data = tsdbGetTableTagVal(item->pTable, pExprInfo[j].base.pColumns->info.colId, type, bytes);
// dst = pColInfo->pData + count * pExprInfo[j].base.resSchema.bytes;
// doSetTagValueToResultBuf(dst, data, type, bytes);
}
count += 1;
}
if (pInfo->curPos >= pInfo->totalTables) {
if (++pInfo->curPos >= pInfo->pTableGroups->numOfTables) {
pOperator->status = OP_EXEC_DONE;
}
//qDebug("QInfo:0x%"PRIx64" create tag values results completed, rows:%d", GET_TASKID(pRuntimeEnv), count);
}
// qDebug("QInfo:0x%"PRIx64" create tag values results completed, rows:%d", GET_TASKID(pRuntimeEnv), count);
if (pOperator->status == OP_EXEC_DONE) {
setTaskStatus(pOperator->pRuntimeEnv, TASK_COMPLETED);
setTaskStatus(pTaskInfo, TASK_COMPLETED);
}
pRes->info.rows = count;
return (pRes->info.rows == 0)? NULL:pInfo->pRes;
#endif
return TSDB_CODE_SUCCESS;
return (pRes->info.rows == 0) ? NULL : pInfo->pRes;
}
static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
@ -1293,14 +1294,18 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
pInfo->pRes = blockDataDestroy(pInfo->pRes);
}
SOperatorInfo* createTagScanOperatorInfo(void* readHandle, SExprInfo* pExpr, int32_t numOfOutput, SExecTaskInfo* pTaskInfo) {
SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, SExprInfo* pExpr, int32_t numOfOutput,
SSDataBlock* pResBlock, SArray* pColMatchInfo, STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo) {
STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
pInfo->pReader = readHandle;
pInfo->pTableGroups = pTableGroupInfo;
pInfo->pColMatchInfo = pColMatchInfo;
pInfo->pRes = pResBlock;
pInfo->readHandle = *pReadHandle;
pInfo->curPos = 0;
pOperator->name = "TagScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN;
@ -1308,9 +1313,12 @@ SOperatorInfo* createTagScanOperatorInfo(void* readHandle, SExprInfo* pExpr, int
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
pOperator->pExpr = pExpr;
pOperator->numOfExprs = numOfOutput;
pOperator->numOfExprs = numOfOutput;
pOperator->pTaskInfo = pTaskInfo;
initResultSizeInfo(pOperator, 4096);
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doTagScan, NULL, NULL, destroyTagScanOperatorInfo, NULL, NULL, NULL);

View File

@ -5,7 +5,7 @@ static SSDataBlock* doSort(SOperatorInfo* pOperator);
static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput);
SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pResBlock, SArray* pSortInfo, SExprInfo* pExprInfo, int32_t numOfCols,
SArray* pIndexMap, SExecTaskInfo* pTaskInfo) {
SArray* pColMatchColInfo, SExecTaskInfo* pTaskInfo) {
SSortOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SSortOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
int32_t rowSize = pResBlock->info.rowSize;
@ -20,17 +20,19 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pR
pInfo->binfo.pRes = pResBlock;
initResultSizeInfo(pOperator, 1024);
pInfo->bufPageSize = rowSize < 1024 ? 1024 * 2 : rowSize * 2; // there are headers, so pageSize = rowSize + header
pInfo->sortBufSize = pInfo->bufPageSize * 16; // TODO dynamic set the available sort buffer
pInfo->pSortInfo = pSortInfo;
pInfo->inputSlotMap = pIndexMap;
pInfo->pColMatchInfo= pColMatchColInfo;
pOperator->name = "SortOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_SORT;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
// lazy evaluation for the following parameter since the input datablock is not known till now.
// pInfo->bufPageSize = rowSize < 1024 ? 1024 * 2 : rowSize * 2; // there are headers, so pageSize = rowSize + header
// pInfo->sortBufSize = pInfo->bufPageSize * 16; // TODO dynamic set the available sort buffer
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doSort, NULL, NULL, destroyOrderOperatorInfo, NULL, NULL, NULL);
@ -45,14 +47,12 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pR
return NULL;
}
// TODO merge aggregate super table
void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle) {
for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, i);
bool isNull = tsortIsNullVal(pTupleHandle, i);
if (isNull) {
colDataAppend(pColInfo, pBlock->info.rows, NULL, true);
colDataAppendNULL(pColInfo, pBlock->info.rows);
} else {
char* pData = tsortGetValue(pTupleHandle, i);
colDataAppend(pColInfo, pBlock->info.rows, pData, false);
@ -62,11 +62,12 @@ void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle) {
pBlock->info.rows += 1;
}
SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity) {
SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity, SArray* pColMatchInfo) {
blockDataCleanup(pDataBlock);
blockDataEnsureCapacity(pDataBlock, capacity);
ASSERT(taosArrayGetSize(pColMatchInfo) == pDataBlock->info.numOfCols);
blockDataEnsureCapacity(pDataBlock, capacity);
SSDataBlock* p = tsortGetSortedDataBlock(pHandle);
blockDataEnsureCapacity(p, capacity);
while (1) {
STupleHandle* pTupleHandle = tsortNextTuple(pHandle);
@ -74,12 +75,32 @@ SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, i
break;
}
appendOneRowToDataBlock(pDataBlock, pTupleHandle);
if (pDataBlock->info.rows >= capacity) {
appendOneRowToDataBlock(p, pTupleHandle);
if (p->info.rows >= capacity) {
return pDataBlock;
}
}
if (p->info.rows > 0) {
int32_t numOfCols = taosArrayGetSize(pColMatchInfo);
for(int32_t i = 0; i < numOfCols; ++i) {
SColMatchInfo* pmInfo = taosArrayGet(pColMatchInfo, i);
for(int32_t j = 0; j < p->info.numOfCols; ++j) {
SColumnInfoData* pSrc = taosArrayGet(p->pDataBlock, j);
if (pSrc->info.colId == pmInfo->colId) {
SColumnInfoData* pDst = taosArrayGet(pDataBlock->pDataBlock, pmInfo->targetSlotId);
colDataAssign(pDst, pSrc, p->info.rows);
break;
}
}
}
pDataBlock->info.rows = p->info.rows;
pDataBlock->info.capacity = p->info.rows;
}
blockDataDestroy(p);
return (pDataBlock->info.rows > 0) ? pDataBlock : NULL;
}
@ -106,16 +127,16 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
SSortOperatorInfo* pInfo = pOperator->info;
if (pOperator->status == OP_RES_TO_RETURN) {
return getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity);
return getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, pInfo->pColMatchInfo);
}
int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, pInfo->inputSlotMap, SORT_SINGLESOURCE_SORT,
pInfo->bufPageSize, numOfBufPage, pInfo->binfo.pRes, pTaskInfo->id.str);
// pInfo->binfo.pRes is not equalled to the input datablock.
// int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, pInfo->pColMatchInfo, SORT_SINGLESOURCE_SORT,
-1, -1, NULL, pTaskInfo->id.str);
tsortSetFetchRawDataFp(pInfo->pSortHandle, loadNextDataBlock, applyScalarFunction, pOperator);
SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource));
ps->param = pOperator->pDownstream[0];
tsortAddSource(pInfo->pSortHandle, ps);
@ -127,7 +148,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
}
pOperator->status = OP_RES_TO_RETURN;
return getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity);
return getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, pInfo->pColMatchInfo);
}
void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) {
@ -135,5 +156,5 @@ void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) {
pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes);
taosArrayDestroy(pInfo->pSortInfo);
taosArrayDestroy(pInfo->inputSlotMap);
taosArrayDestroy(pInfo->pColMatchInfo);
}

View File

@ -64,25 +64,8 @@ struct SSortHandle {
static int32_t msortComparFn(const void *pLeft, const void *pRight, void *param);
static SSDataBlock* createDataBlock_rv(SSchema* pSchema, int32_t numOfCols) {
SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
pBlock->pDataBlock = taosArrayInit(numOfCols, sizeof(SColumnInfoData));
pBlock->info.numOfCols = numOfCols;
for(int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData colInfo = {0};
colInfo.info.type = pSchema[i].type;
colInfo.info.bytes = pSchema[i].bytes;
colInfo.info.colId = pSchema[i].colId;
taosArrayPush(pBlock->pDataBlock, &colInfo);
if (IS_VAR_DATA_TYPE(colInfo.info.type)) {
pBlock->info.hasVarCol = true;
}
}
return pBlock;
SSDataBlock* tsortGetSortedDataBlock(const SSortHandle* pSortHandle) {
return createOneDataBlock(pSortHandle->pDataBlock, false);
}
/**
@ -98,7 +81,10 @@ SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, SArray* pIndexMap, int32_t
pSortHandle->numOfPages = numOfPages;
pSortHandle->pSortInfo = pSortInfo;
pSortHandle->pIndexMap = pIndexMap;
pSortHandle->pDataBlock = createOneDataBlock(pBlock, false);
if (pBlock != NULL) {
pSortHandle->pDataBlock = createOneDataBlock(pBlock, false);
}
pSortHandle->pOrderedSource = taosArrayInit(4, POINTER_BYTES);
pSortHandle->cmpParam.orderInfo = pSortInfo;
@ -530,6 +516,17 @@ static int32_t createInitialSortedMultiSources(SSortHandle* pHandle) {
if (pHandle->pDataBlock == NULL) {
pHandle->pDataBlock = createOneDataBlock(pBlock, false);
// calculate the buffer pages according to the total available buffers.
int32_t rowSize = blockDataGetRowSize(pBlock);
if (rowSize * 4 > 4096) {
pHandle->pageSize = rowSize * 4;
} else {
pHandle->pageSize = 4096;
}
// todo!!
pHandle->numOfPages = 1024;
sortBufSize = pHandle->numOfPages * pHandle->pageSize;
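// Illustrative numbers (not from this diff): with rowSize = 2000 bytes, pageSize
// becomes 8000 and the sort buffer is 1024 * 8000, about 8 MB; with rowSize = 100
// bytes, pageSize stays at the 4096 floor and the buffer is 1024 * 4096 = 4 MB.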
}
// perform the scalar function calculation before apply the sort
@ -538,7 +535,6 @@ static int32_t createInitialSortedMultiSources(SSortHandle* pHandle) {
}
// todo relocate the columns
int32_t code = blockDataMerge(pHandle->pDataBlock, pBlock, pHandle->pIndexMap);
if (code != 0) {
return code;
@ -689,7 +685,7 @@ STupleHandle* tsortNextTuple(SSortHandle* pHandle) {
bool tsortIsNullVal(STupleHandle* pVHandle, int32_t colIndex) {
SColumnInfoData* pColInfoSrc = taosArrayGet(pVHandle->pBlock->pDataBlock, colIndex);
return colDataIsNull(pColInfoSrc, 0, pVHandle->rowIndex, NULL);
return colDataIsNull_s(pColInfoSrc, pVHandle->rowIndex);
}
void* tsortGetValue(STupleHandle* pVHandle, int32_t colIndex) {

View File

@ -73,6 +73,11 @@ bool spreadFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo)
int32_t spreadFunction(SqlFunctionCtx* pCtx);
int32_t spreadFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
bool getHistogramFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool histogramFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t histogramFunction(SqlFunctionCtx* pCtx);
int32_t histogramFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
#ifdef __cplusplus
}
#endif

View File

@ -225,6 +225,26 @@ static int32_t translateSpread(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
return TSDB_CODE_SUCCESS;
}
static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (4 != LIST_LENGTH(pFunc->pParameterList)) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_NUMERIC_TYPE(colType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY ||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
pFunc->node.resType = (SDataType) { .bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE };
return TSDB_CODE_SUCCESS;
}
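// Hedged usage sketch (not defined by this diff): given the checks above, a histogram
// call takes a numeric column, two BINARY parameters and a BIGINT flag, e.g. something
// shaped like
//   SELECT HISTOGRAM(c1, 'user_input', '[1, 3, 5, 7]', 0) FROM tb1;
// The bin-type string, the bin description and the 0/1 normalization flag are
// assumptions about the runtime semantics; this translator only enforces the types.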
static int32_t translateLastRow(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
// todo
return TSDB_CODE_SUCCESS;
@ -242,8 +262,7 @@ static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t l
"The parameters of first/last can only be columns");
}
uint8_t paraType = ((SExprNode*)pPara)->resType.type;
pFunc->node.resType = (SDataType){.bytes = tDataTypes[paraType].bytes, .type = paraType};
pFunc->node.resType = ((SExprNode*)pPara)->resType;
return TSDB_CODE_SUCCESS;
}
@ -600,6 +619,16 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.processFunc = diffFunction,
.finalizeFunc = functionFinalize
},
{
.name = "histogram",
.type = FUNCTION_TYPE_HISTOGRAM,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateHistogram,
.getEnvFunc = getHistogramFuncEnv,
.initFunc = histogramFunctionSetup,
.processFunc = histogramFunction,
.finalizeFunc = histogramFinalize
},
{
.name = "abs",
.type = FUNCTION_TYPE_ABS,
@ -917,7 +946,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.translateFunc = translateTbnameColumn,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = NULL,
.sprocessFunc = qTbnameFunction,
.finalizeFunc = NULL
},
{

View File

@ -20,6 +20,8 @@
#include "tdatablock.h"
#include "tpercentile.h"
#define HISTOGRAM_MAX_BINS_NUM 100
typedef struct SSumRes {
union {
int64_t isum;
@ -89,6 +91,22 @@ typedef struct SSpreadInfo {
double max;
} SSpreadInfo;
typedef struct SHistoFuncBin {
double lower;
double upper;
union {
int64_t count;
double percentage;
};
} SHistoFuncBin;
typedef struct SHistoFuncInfo {
int32_t numOfBins;
bool normalized;
SHistoFuncBin bins[];
} SHistoFuncInfo;
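// Note: bins[] is a C99 flexible array member, so the result buffer must be allocated
// as sizeof(SHistoFuncInfo) + numOfBins * sizeof(SHistoFuncBin); getHistogramFuncEnv
// below reserves the worst case using HISTOGRAM_MAX_BINS_NUM.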
#define SET_VAL(_info, numOfElem, res) \
do { \
if ((numOfElem) <= 0) { \
@ -1777,3 +1795,34 @@ int32_t spreadFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
}
return functionFinalize(pCtx, pBlock);
}
bool getHistogramFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
pEnv->calcMemSize = sizeof(SHistoFuncInfo) + HISTOGRAM_MAX_BINS_NUM * sizeof(SHistoFuncBin);
return true;
}
bool histogramFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo) {
if (!functionSetup(pCtx, pResultInfo)) {
return false;
}
SHistoFuncInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo);
char* binType = pCtx->param[1].param.pz;
char* binDesc = pCtx->param[2].param.pz;
int64_t normalized = pCtx->param[3].param.i;
return true;
}
int32_t histogramFunction(SqlFunctionCtx *pCtx) {
return TSDB_CODE_SUCCESS;
}
int32_t histogramFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SHistoFuncInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
//if (pInfo->hasResult == true) {
// SET_DOUBLE_VAL(&pInfo->result, pInfo->max - pInfo->min);
//}
return functionFinalize(pCtx, pBlock);
}

View File

@ -594,7 +594,9 @@ int32_t convertScalarParamToDataBlock(SScalarParam *input, int32_t numOfCols, SS
//TODO: free the array output->pDataBlock
output->pDataBlock = taosArrayInit(numOfCols, sizeof(SColumnInfoData));
taosArrayPush(output->pDataBlock, input->columnData);
for (int32_t i = 0; i < numOfCols; ++i) {
taosArrayPush(output->pDataBlock, (input + i)->columnData);
}
return 0;
}

View File

@ -81,6 +81,9 @@ typedef struct SUdf {
TUdfAggStartFunc aggStartFunc;
TUdfAggProcessFunc aggProcFunc;
TUdfAggFinishFunc aggFinishFunc;
TUdfInitFunc initFunc;
TUdfDestroyFunc destroyFunc;
} SUdf;
// TODO: low priority: change name onxxx to xxxCb, and udfc or udfd as prefix
@ -101,7 +104,19 @@ int32_t udfdLoadUdf(char *udfName, SUdf *udf) {
fnError("can not load library %s. error: %s", udf->path, uv_strerror(err));
return UDFC_CODE_LOAD_UDF_FAILURE;
}
//TODO: init and destroy function
char initFuncName[TSDB_FUNC_NAME_LEN+5] = {0};
char *initSuffix = "_init";
strcpy(initFuncName, udfName);
strncat(initFuncName, initSuffix, strlen(initSuffix));
uv_dlsym(&udf->lib, initFuncName, (void**)(&udf->initFunc));
char destroyFuncName[TSDB_FUNC_NAME_LEN + 10] = {0};  // "_destroy" is longer than "_init"; leave room for the suffix and terminator
char *destroySuffix = "_destroy";
strcpy(destroyFuncName, udfName);
strncat(destroyFuncName, destroySuffix, strlen(destroySuffix));
uv_dlsym(&udf->lib, destroyFuncName, (void**)(&udf->destroyFunc));
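// Sketch of the symbols a UDF library named "udf1" is expected to export under this
// naming scheme (the exact signatures come from TUdfInitFunc/TUdfDestroyFunc and are
// assumed here; only the zero-argument call sites below are visible in this diff):
//   int32_t udf1_init();       // resolved via "<udfName>_init"
//   int32_t udf1_destroy();    // resolved via "<udfName>_destroy"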
if (udf->funcType == TSDB_FUNC_TYPE_SCALAR) {
char processFuncName[TSDB_FUNC_NAME_LEN] = {0};
strcpy(processFuncName, udfName);
@ -159,6 +174,9 @@ void udfdProcessRequest(uv_work_t *req) {
if (udf->state == UDF_STATE_INIT) {
udf->state = UDF_STATE_LOADING;
udfdLoadUdf(setup->udfName, udf);
if (udf->initFunc) {
udf->initFunc();
}
udf->state = UDF_STATE_READY;
uv_cond_broadcast(&udf->condReady);
uv_mutex_unlock(&udf->lock);
@ -170,7 +188,6 @@ void udfdProcessRequest(uv_work_t *req) {
}
SUdfcFuncHandle *handle = taosMemoryMalloc(sizeof(SUdfcFuncHandle));
handle->udf = udf;
// TODO: allocate private structure and call init function and set it to handle
SUdfResponse rsp;
rsp.seqNum = request.seqNum;
rsp.type = request.type;
@ -275,10 +292,12 @@ void udfdProcessRequest(uv_work_t *req) {
if (unloadUdf) {
uv_cond_destroy(&udf->condReady);
uv_mutex_destroy(&udf->lock);
if (udf->destroyFunc) {
(udf->destroyFunc)();
}
uv_dlclose(&udf->lib);
taosMemoryFree(udf);
}
// TODO: call destroy and free udf private
taosMemoryFree(handle);
SUdfResponse response;

View File

@ -2174,17 +2174,6 @@ static int32_t checkTableSmaOption(STranslateContext* pCxt, SCreateTableStmt* pS
return TSDB_CODE_SUCCESS;
}
static int32_t checkTableTags(STranslateContext* pCxt, SCreateTableStmt* pStmt) {
SNode* pNode;
FOREACH(pNode, pStmt->pTags) {
SColumnDefNode* pCol = (SColumnDefNode*)pNode;
if (pCol->dataType.type == TSDB_DATA_TYPE_JSON && LIST_LENGTH(pStmt->pTags) > 1) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
}
}
return TSDB_CODE_SUCCESS;
}
static int32_t checkTableRollupOption(STranslateContext* pCxt, SNodeList* pFuncs) {
if (NULL == pFuncs) {
return TSDB_CODE_SUCCESS;
@ -2196,6 +2185,113 @@ static int32_t checkTableRollupOption(STranslateContext* pCxt, SNodeList* pFuncs
return TSDB_CODE_SUCCESS;
}
static int32_t checkTableTagsSchema(STranslateContext* pCxt, SHashObj* pHash, SNodeList* pTags) {
int32_t ntags = LIST_LENGTH(pTags);
if (0 == ntags) {
return TSDB_CODE_SUCCESS;
} else if (ntags > TSDB_MAX_TAGS) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAGS_NUM);
}
int32_t code = TSDB_CODE_SUCCESS;
int32_t tagsSize = 0;
SNode* pNode = NULL;
FOREACH(pNode, pTags) {
SColumnDefNode* pTag = (SColumnDefNode*)pNode;
int32_t len = strlen(pTag->colName);
if (NULL != taosHashGet(pHash, pTag->colName, len)) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_DUPLICATED_COLUMN);
}
if (TSDB_CODE_SUCCESS == code && pTag->dataType.type == TSDB_DATA_TYPE_JSON && ntags > 1) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
}
if (TSDB_CODE_SUCCESS == code) {
if ((TSDB_DATA_TYPE_VARCHAR == pTag->dataType.type && pTag->dataType.bytes > TSDB_MAX_BINARY_LEN) ||
(TSDB_DATA_TYPE_NCHAR == pTag->dataType.type && pTag->dataType.bytes > TSDB_MAX_NCHAR_LEN)) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN);
}
}
if (TSDB_CODE_SUCCESS == code) {
code = taosHashPut(pHash, pTag->colName, len, &pTag, POINTER_BYTES);
}
if (TSDB_CODE_SUCCESS == code) {
tagsSize += pTag->dataType.bytes;
} else {
break;
}
}
if (TSDB_CODE_SUCCESS == code && tagsSize > TSDB_MAX_TAGS_LEN) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAGS_LENGTH, TSDB_MAX_TAGS_LEN);
}
return code;
}
static int32_t checkTableColsSchema(STranslateContext* pCxt, SHashObj* pHash, SNodeList* pCols) {
int32_t ncols = LIST_LENGTH(pCols);
if (ncols < TSDB_MIN_COLUMNS) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM);
} else if (ncols > TSDB_MAX_COLUMNS) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TOO_MANY_COLUMNS);
}
int32_t code = TSDB_CODE_SUCCESS;
bool first = true;
int32_t rowSize = 0;
SNode* pNode = NULL;
FOREACH(pNode, pCols) {
SColumnDefNode* pCol = (SColumnDefNode*)pNode;
if (first) {
first = false;
if (TSDB_DATA_TYPE_TIMESTAMP != pCol->dataType.type) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_FIRST_COLUMN);
}
}
int32_t len = strlen(pCol->colName);
if (TSDB_CODE_SUCCESS == code && NULL != taosHashGet(pHash, pCol->colName, len)) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_DUPLICATED_COLUMN);
}
if (TSDB_CODE_SUCCESS == code) {
if ((TSDB_DATA_TYPE_VARCHAR == pCol->dataType.type && pCol->dataType.bytes > TSDB_MAX_BINARY_LEN) ||
(TSDB_DATA_TYPE_NCHAR == pCol->dataType.type && pCol->dataType.bytes > TSDB_MAX_NCHAR_LEN)) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN);
}
}
if (TSDB_CODE_SUCCESS == code) {
code = taosHashPut(pHash, pCol->colName, len, &pCol, POINTER_BYTES);
}
if (TSDB_CODE_SUCCESS == code) {
rowSize += pCol->dataType.bytes;
} else {
break;
}
}
if (TSDB_CODE_SUCCESS == code && rowSize > TSDB_MAX_BYTES_PER_ROW) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW);
}
return code;
}
static int32_t checkTableSchema(STranslateContext* pCxt, SCreateTableStmt* pStmt) {
SHashObj* pHash = taosHashInit(LIST_LENGTH(pStmt->pTags) + LIST_LENGTH(pStmt->pCols),
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
if (NULL == pHash) {
return TSDB_CODE_OUT_OF_MEMORY;
}
int32_t code = checkTableTagsSchema(pCxt, pHash, pStmt->pTags);
if (TSDB_CODE_SUCCESS == code) {
code = checkTableColsSchema(pCxt, pHash, pStmt->pCols);
}
taosHashCleanup(pHash);
return code;
}
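// Illustrative statements (assumptions, not part of this diff) that the schema checks
// above are meant to reject:
//   CREATE TABLE st1 (ts TIMESTAMP, c1 INT, c1 DOUBLE) TAGS (t1 INT);  -- duplicated column name
//   CREATE TABLE st2 (c1 INT, ts TIMESTAMP) TAGS (t1 INT);             -- first column must be timestamp
//   CREATE TABLE st3 (ts TIMESTAMP, c1 INT) TAGS (t1 JSON, t2 INT);    -- only one tag allowed when JSON is used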
static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) {
int32_t code = checkRangeOption(pCxt, "delay", pStmt->pOptions->delay, TSDB_MIN_ROLLUP_DELAY, TSDB_MAX_ROLLUP_DELAY);
if (TSDB_CODE_SUCCESS == code) {
@ -2211,7 +2307,7 @@ static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt
code = checkTableSmaOption(pCxt, pStmt);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkTableTags(pCxt, pStmt);
code = checkTableSchema(pCxt, pStmt);
}
return code;
}
@ -3838,6 +3934,10 @@ static int32_t buildDropTableVgroupHashmap(STranslateContext* pCxt, SDropTableCl
goto over;
}
if (TSDB_CODE_PAR_TABLE_NOT_EXIST == code && pClause->ignoreNotExists) {
code = TSDB_CODE_SUCCESS;
}
*pIsSuperTable = false;
SVgroupInfo info = {0};

View File

@ -129,7 +129,23 @@ static char* getSyntaxErrFormat(int32_t errCode) {
case TSDB_CODE_PAR_INVALID_DROP_STABLE:
return "Cannot drop super table in batch";
case TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE:
return "start(end) time of query range required or time range too large";
return "Start(end) time of query range required or time range too large";
case TSDB_CODE_PAR_DUPLICATED_COLUMN:
return "Duplicated column names";
case TSDB_CODE_PAR_INVALID_TAGS_LENGTH:
return "Tags length exceeds max length %d";
case TSDB_CODE_PAR_INVALID_ROW_LENGTH:
return "Row length exceeds max length %d";
case TSDB_CODE_PAR_INVALID_COLUMNS_NUM:
return "Illegal number of columns";
case TSDB_CODE_PAR_TOO_MANY_COLUMNS:
return "Too many columns";
case TSDB_CODE_PAR_INVALID_FIRST_COLUMN:
return "First column must be timestamp";
case TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN:
return "Invalid binary/nchar column length";
case TSDB_CODE_PAR_INVALID_TAGS_NUM:
return "Invalid number of tag columns";
case TSDB_CODE_OUT_OF_MEMORY:
return "Out of memory";
default:

View File

@ -277,6 +277,11 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
code = nodesCollectFuncs(pSelect, SQL_CLAUSE_FROM, fmIsScanPseudoColumnFunc, &pScan->pScanPseudoCols);
}
// rewrite the expression in subsequent clauses
if (TSDB_CODE_SUCCESS == code) {
code = rewriteExprForSelect(pScan->pScanPseudoCols, pSelect, SQL_CLAUSE_FROM);
}
pScan->scanType = getScanType(pCxt, pScan->pScanPseudoCols, pScan->pScanCols, pScan->pMeta);
if (TSDB_CODE_SUCCESS == code) {

View File

@ -123,14 +123,40 @@ static SNodeList* osdGetAllFuncs(SLogicNode* pNode) {
return NULL;
}
static bool needOptimizeDataRequire(const SFunctionNode* pFunc) {
if (!fmIsSpecialDataRequiredFunc(pFunc->funcId)) {
return false;
}
SNode* pPara = NULL;
FOREACH(pPara, pFunc->pParameterList) {
if (QUERY_NODE_COLUMN != nodeType(pPara) && QUERY_NODE_VALUE != nodeType(pPara)) {
return false;
}
}
return true;
}
static bool needOptimizeDynamicScan(const SFunctionNode* pFunc) {
if (!fmIsDynamicScanOptimizedFunc(pFunc->funcId)) {
return false;
}
SNode* pPara = NULL;
FOREACH(pPara, pFunc->pParameterList) {
if (QUERY_NODE_COLUMN != nodeType(pPara) && QUERY_NODE_VALUE != nodeType(pPara)) {
return false;
}
}
return true;
}
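// Example of the intent (assuming COUNT is among the special-data-required functions,
// as the new optimizeScanData planner test suggests): COUNT(c1) still qualifies for
// the scan optimization, while COUNT(CAST(c1 AS BIGINT)) no longer does, because its
// parameter is neither a column nor a value node.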
static int32_t osdGetRelatedFuncs(SScanLogicNode* pScan, SNodeList** pSdrFuncs, SNodeList** pDsoFuncs) {
SNodeList* pAllFuncs = osdGetAllFuncs(pScan->node.pParent);
SNode* pFunc = NULL;
FOREACH(pFunc, pAllFuncs) {
int32_t code = TSDB_CODE_SUCCESS;
if (fmIsSpecialDataRequiredFunc(((SFunctionNode*)pFunc)->funcId)) {
if (needOptimizeDataRequire((SFunctionNode*)pFunc)) {
code = nodesListMakeStrictAppend(pSdrFuncs, nodesCloneNode(pFunc));
} else if (fmIsDynamicScanOptimizedFunc(((SFunctionNode*)pFunc)->funcId)) {
} else if (needOptimizeDynamicScan((SFunctionNode*)pFunc)) {
code = nodesListMakeStrictAppend(pDsoFuncs, nodesCloneNode(pFunc));
}
if (TSDB_CODE_SUCCESS != code) {
@ -541,9 +567,14 @@ static bool cpdIsPrimaryKeyEqualCond(SJoinLogicNode* pJoin, SNode* pCond) {
if (QUERY_NODE_OPERATOR != nodeType(pCond)) {
return false;
}
SNodeList* pLeftCols = ((SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 0))->pTargets;
SNodeList* pRightCols = ((SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 1))->pTargets;
SOperatorNode* pOper = (SOperatorNode*)pJoin->pOnConditions;
if (OP_TYPE_EQUAL != pOper->opType) {
return false;
}
SNodeList* pLeftCols = ((SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 0))->pTargets;
SNodeList* pRightCols = ((SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 1))->pTargets;
if (cpdIsPrimaryKey(pOper->pLeft, pLeftCols)) {
return cpdIsPrimaryKey(pOper->pRight, pRightCols);
} else if (cpdIsPrimaryKey(pOper->pLeft, pRightCols)) {

View File

@ -20,11 +20,21 @@ using namespace std;
class PlanOptimizeTest : public PlannerTestBase {};
TEST_F(PlanOptimizeTest, optimizeScanData) {
useDb("root", "test");
run("SELECT COUNT(*) FROM t1");
run("SELECT COUNT(c1) FROM t1");
run("SELECT COUNT(CAST(c1 AS BIGINT)) FROM t1");
}
TEST_F(PlanOptimizeTest, orderByPrimaryKey) {
useDb("root", "test");
run("select * from t1 order by ts");
run("select * from t1 order by ts desc");
run("select c1 from t1 order by ts");
run("select c1 from t1 order by ts desc");
run("SELECT * FROM t1 ORDER BY ts");
run("SELECT * FROM t1 ORDER BY ts DESC");
run("SELECT c1 FROM t1 ORDER BY ts");
run("SELECT c1 FROM t1 ORDER BY ts DESC");
}

View File

@ -181,6 +181,25 @@ int32_t queryBuildRetrieveFuncMsg(void *input, char **msg, int32_t msgSize, int3
return TSDB_CODE_SUCCESS;
}
int32_t queryBuildGetUserAuthMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
SGetUserAuthReq req = {0};
strncpy(req.user, input, sizeof(req.user));
int32_t bufLen = tSerializeSGetUserAuthReq(NULL, 0, &req);
void *pBuf = rpcMallocCont(bufLen);
tSerializeSGetUserAuthReq(pBuf, bufLen, &req);
*msg = pBuf;
*msgLen = bufLen;
return TSDB_CODE_SUCCESS;
}
int32_t queryProcessUseDBRsp(void *output, char *msg, int32_t msgSize) {
SUseDbOutput *pOut = output;
SUseDbRsp usedbRsp = {0};
@ -419,6 +438,20 @@ int32_t queryProcessRetrieveFuncRsp(void *output, char *msg, int32_t msgSize) {
return TSDB_CODE_SUCCESS;
}
int32_t queryProcessGetUserAuthRsp(void *output, char *msg, int32_t msgSize) {
if (NULL == output || NULL == msg || msgSize <= 0) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
if (tDeserializeSGetUserAuthRsp(msg, msgSize, (SGetUserAuthRsp *)output) != 0) {
qError("tDeserializeSGetUserAuthRsp failed, msgSize:%d", msgSize);
return TSDB_CODE_INVALID_MSG;
}
return TSDB_CODE_SUCCESS;
}
void initQueryModuleMsgHandle() {
queryBuildMsg[TMSG_INDEX(TDMT_VND_TABLE_META)] = queryBuildTableMetaReqMsg;
queryBuildMsg[TMSG_INDEX(TDMT_MND_TABLE_META)] = queryBuildTableMetaReqMsg;
@ -427,6 +460,8 @@ void initQueryModuleMsgHandle() {
queryBuildMsg[TMSG_INDEX(TDMT_MND_GET_DB_CFG)] = queryBuildGetDBCfgMsg;
queryBuildMsg[TMSG_INDEX(TDMT_MND_GET_INDEX)] = queryBuildGetIndexMsg;
queryBuildMsg[TMSG_INDEX(TDMT_MND_RETRIEVE_FUNC)] = queryBuildRetrieveFuncMsg;
queryBuildMsg[TMSG_INDEX(TDMT_MND_GET_USER_AUTH)] = queryBuildGetUserAuthMsg;
queryProcessMsgRsp[TMSG_INDEX(TDMT_VND_TABLE_META)] = queryProcessTableMetaRsp;
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_TABLE_META)] = queryProcessTableMetaRsp;
@ -435,6 +470,7 @@ void initQueryModuleMsgHandle() {
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_GET_DB_CFG)] = queryProcessGetDbCfgRsp;
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_GET_INDEX)] = queryProcessGetIndexRsp;
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_RETRIEVE_FUNC)] = queryProcessRetrieveFuncRsp;
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_GET_USER_AUTH)] = queryProcessGetUserAuthRsp;
}
#pragma GCC diagnostic pop

View File

@ -1513,3 +1513,9 @@ int32_t winEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p
colDataAppendInt64(pOutput->columnData, pOutput->numOfRows, (int64_t*) colDataGetData(pInput->columnData, 4));
return TSDB_CODE_SUCCESS;
}
int32_t qTbnameFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
ASSERT(inputNum == 1);
colDataAppend(pOutput->columnData, pOutput->numOfRows, colDataGetData(pInput->columnData, 0), false);
return TSDB_CODE_SUCCESS;
}
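// Registered above as the scalar processor (.sprocessFunc) for the tbname pseudo
// column; e.g. a query like SELECT TBNAME, c1 FROM st1 would route through here.
// The query example is illustrative, not taken from this diff.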

View File

@ -1023,8 +1023,7 @@ static void vectorMathMultiplyHelper(SColumnInfoData* pLeftCol, SColumnInfoData*
colDataAppendNULL(pOutputCol, i);
continue; // TODO set null or ignore
}
*output = getVectorDoubleValueFnLeft(LEFT_COL, i)
* getVectorDoubleValueFnRight(RIGHT_COL, 0);
*output = getVectorDoubleValueFnLeft(LEFT_COL, i) * getVectorDoubleValueFnRight(RIGHT_COL, 0);
}
}
}
@ -1050,8 +1049,7 @@ void vectorMathMultiply(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam
colDataAppendNULL(pOutputCol, i);
continue; // TODO set null or ignore
}
*output = getVectorDoubleValueFnLeft(LEFT_COL, i)
* getVectorDoubleValueFnRight(RIGHT_COL, i);
*output = getVectorDoubleValueFnLeft(LEFT_COL, i) * getVectorDoubleValueFnRight(RIGHT_COL, i);
}
} else if (pLeft->numOfRows == 1) {
vectorMathMultiplyHelper(pRightCol, pLeftCol, pOutputCol, pRight->numOfRows, step, i);

View File

@ -22,7 +22,7 @@
#include "trpc.h"
SSchedulerMgmt schMgmt = {
.jobRef = -1,
.jobRef = -1,
};
FORCE_INLINE SSchJob *schAcquireJob(int64_t refId) { return (SSchJob *)taosAcquireRef(schMgmt.jobRef, refId); }
@ -72,7 +72,7 @@ int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *
int32_t schInitJob(SSchJob **pSchJob, SQueryPlan *pDag, void *transport, SArray *pNodeList, const char *sql,
int64_t startTs, bool syncSchedule) {
int32_t code = 0;
int64_t refId = -1;
int64_t refId = -1;
SSchJob *pJob = taosMemoryCalloc(1, sizeof(SSchJob));
if (NULL == pJob) {
qError("QID:%" PRIx64 " calloc %d failed", pDag->queryId, (int32_t)sizeof(SSchJob));
@ -124,7 +124,7 @@ int32_t schInitJob(SSchJob **pSchJob, SQueryPlan *pDag, void *transport, SArray
}
atomic_add_fetch_32(&schMgmt.jobNum, 1);
if (NULL == schAcquireJob(refId)) {
SCH_JOB_ELOG("schAcquireJob job failed, refId:%" PRIx64, refId);
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
@ -1085,19 +1085,22 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
case TDMT_VND_CREATE_TABLE_RSP: {
SVCreateTbBatchRsp batchRsp = {0};
if (msg) {
SCH_ERR_JRET(tDeserializeSVCreateTbBatchRsp(msg, msgSize, &batchRsp));
if (batchRsp.pArray) {
int32_t num = taosArrayGetSize(batchRsp.pArray);
for (int32_t i = 0; i < num; ++i) {
SVCreateTbRsp *rsp = taosArrayGet(batchRsp.pArray, i);
SCoder coder = {0};
tCoderInit(&coder, TD_LITTLE_ENDIAN, msg, msgSize, TD_DECODER);
code = tDecodeSVCreateTbBatchRsp(&coder, &batchRsp);
if (TSDB_CODE_SUCCESS == code && batchRsp.nRsps > 0) {
for (int32_t i = 0; i < batchRsp.nRsps; ++i) {
SVCreateTbRsp *rsp = batchRsp.pRsps + i;
if (NEED_CLIENT_HANDLE_ERROR(rsp->code)) {
taosArrayDestroy(batchRsp.pArray);
tCoderClear(&coder);
SCH_ERR_JRET(rsp->code);
} else if (TSDB_CODE_SUCCESS != rsp->code) {
code = rsp->code;
}
}
taosArrayDestroy(batchRsp.pArray);
}
tCoderClear(&coder);
SCH_ERR_JRET(code);
}
SCH_ERR_JRET(rspCode);
@ -1110,13 +1113,14 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCoder coder = {0};
tCoderInit(&coder, TD_LITTLE_ENDIAN, msg, msgSize, TD_DECODER);
code = tDecodeSVDropTbBatchRsp(&coder, &batchRsp);
if (TSDB_CODE_SUCCESS == code && batchRsp.pArray) {
int32_t num = taosArrayGetSize(batchRsp.pArray);
for (int32_t i = 0; i < num; ++i) {
SVDropTbRsp *rsp = taosArrayGet(batchRsp.pArray, i);
if (TSDB_CODE_SUCCESS == code && batchRsp.nRsps > 0) {
for (int32_t i = 0; i < batchRsp.nRsps; ++i) {
SVDropTbRsp *rsp = batchRsp.pRsps + i;
if (NEED_CLIENT_HANDLE_ERROR(rsp->code)) {
tCoderClear(&coder);
SCH_ERR_JRET(rsp->code);
} else if (TSDB_CODE_SUCCESS != rsp->code) {
code = rsp->code;
}
}
}
@ -2282,10 +2286,10 @@ int32_t schCancelJob(SSchJob *pJob) {
}
void schCloseJobRef(void) {
if (!atomic_load_8((int8_t*)&schMgmt.exit)) {
if (!atomic_load_8((int8_t *)&schMgmt.exit)) {
return;
}
SCH_LOCK(SCH_WRITE, &schMgmt.lock);
if (atomic_load_32(&schMgmt.jobNum) <= 0 && schMgmt.jobRef >= 0) {
taosCloseRef(schMgmt.jobRef);
@ -2791,8 +2795,8 @@ void schedulerFreeTaskList(SArray *taskList) {
}
void schedulerDestroy(void) {
atomic_store_8((int8_t*)&schMgmt.exit, 1);
atomic_store_8((int8_t *)&schMgmt.exit, 1);
if (schMgmt.jobRef >= 0) {
SSchJob *pJob = taosIterateRef(schMgmt.jobRef, 0);
int64_t refId = 0;

View File

@ -46,9 +46,21 @@ void* rpcOpen(const SRpcInit* pInit) {
pRpc->numOfThreads = pInit->numOfThreads > TSDB_MAX_RPC_THREADS ? TSDB_MAX_RPC_THREADS : pInit->numOfThreads;
}
uint32_t ip = 0;
if (pInit->connType == TAOS_CONN_SERVER) {
ip = taosGetIpv4FromFqdn(pInit->localFqdn);
if (ip == 0xFFFFFFFF) {
tError("invalid fqdn: %s", pInit->localFqdn);
terrno = TSDB_CODE_RPC_FQDN_ERROR;
taosMemoryFree(pRpc);
return NULL;
}
}
pRpc->connType = pInit->connType;
pRpc->idleTime = pInit->idleTime;
pRpc->tcphandle = (*taosInitHandle[pRpc->connType])(0, pInit->localPort, pRpc->label, pRpc->numOfThreads, NULL, pRpc);
pRpc->tcphandle =
(*taosInitHandle[pRpc->connType])(ip, pInit->localPort, pRpc->label, pRpc->numOfThreads, NULL, pRpc);
if (pRpc->tcphandle == NULL) {
taosMemoryFree(pRpc);
return NULL;

View File

@ -817,7 +817,6 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
srv->pipe[i] = (uv_pipe_t*)taosMemoryCalloc(2, sizeof(uv_pipe_t));
uv_os_sock_t fds[2];
if (uv_socketpair(SOCK_STREAM, 0, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE) != 0) {
goto End;
@ -841,6 +840,10 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
goto End;
}
}
if (false == taosValidIpAndPort(srv->ip, srv->port)) {
tError("failed to bind, reason: %s", terrstr());
goto End;
}
if (false == addHandleToAcceptloop(srv)) {
goto End;
}

View File

@ -638,6 +638,48 @@ int32_t taosKeepTcpAlive(TdSocketPtr pSocket) {
return 0;
}
bool taosValidIpAndPort(uint32_t ip, uint16_t port) {
struct sockaddr_in serverAdd;
SocketFd fd;
int32_t reuse;
// printf("open tcp server socket:0x%x:%hu", ip, port);
bzero((char *)&serverAdd, sizeof(serverAdd));
serverAdd.sin_family = AF_INET;
serverAdd.sin_addr.s_addr = ip;
serverAdd.sin_port = (uint16_t)htons(port);
if ((fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) <= 2) {
// printf("failed to open TCP socket: %d (%s)", errno, strerror(errno));
taosCloseSocketNoCheck1(fd);
return false;
}
TdSocketPtr pSocket = (TdSocketPtr)taosMemoryMalloc(sizeof(TdSocket));
if (pSocket == NULL) {
taosCloseSocketNoCheck1(fd);
return false;
}
pSocket->refId = 0;
pSocket->fd = fd;
/* set REUSEADDR option, so the portnumber can be re-used */
reuse = 1;
if (taosSetSockOpt(pSocket, SOL_SOCKET, SO_REUSEADDR, (void *)&reuse, sizeof(reuse)) < 0) {
// printf("setsockopt SO_REUSEADDR failed: %d (%s)", errno, strerror(errno));
taosCloseSocket(&pSocket);
return false;
}
/* bind socket to server address */
if (bind(pSocket->fd, (struct sockaddr *)&serverAdd, sizeof(serverAdd)) < 0) {
// printf("bind tcp server socket failed, 0x%x:%hu(%s)", ip, port, strerror(errno));
taosCloseSocket(&pSocket);
return false;
}
taosCloseSocket(&pSocket);
return true;
}
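// Minimal usage sketch (hypothetical caller; the real call site added in this commit
// is transInitServer): probe the bind address before starting the server loop.
//   uint32_t ip = taosGetIpv4FromFqdn("localhost");
//   if (ip == 0xFFFFFFFF || !taosValidIpAndPort(ip, 6030)) {
//     // refuse to start: the address/port cannot be bound
//   }
// The port 6030 is only an example value.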
TdSocketServerPtr taosOpenTcpServerSocket(uint32_t ip, uint16_t port) {
struct sockaddr_in serverAdd;
SocketFd fd;

View File

@ -443,6 +443,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SCH_STATUS_ERROR, "scheduler status erro
TAOS_DEFINE_ERROR(TSDB_CODE_SCH_INTERNAL_ERROR, "scheduler internal error")
TAOS_DEFINE_ERROR(TSDB_CODE_QW_MSG_ERROR, "Invalid msg order")
// parser
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_TABLE_NOT_EXIST, "Table does not exist")
//planner
TAOS_DEFINE_ERROR(TSDB_CODE_PLAN_INTERNAL_ERROR, "planner internal error")

5
tests/requirements.txt Normal file
View File

@ -0,0 +1,5 @@
taospy
numpy
fabric2
psutil
pandas

View File

@ -25,15 +25,15 @@ sql insert into tb3 values (now, 3, "Hash (cost=229.20..229.20 rows=101 width=2
sql create table tb4 using st1 tags(4);
sql insert into tb4 values (now, 4, "Bitmap Heap Scan on tenk1 t1 (cost=5.07..229.20 rows=101 width=244) (actual time=0.080..0.526 rows=100 loops=1)");
sql create table tb1 using st2 tags(1);
sql insert into tb1 values (now, 1, "Hash Join (cost=230.47..713.98 rows=101 width=488) (actual time=0.711..7.427 rows=100 loops=1)");
#sql create table tb1 using st2 tags(1);
#sql insert into tb1 values (now, 1, "Hash Join (cost=230.47..713.98 rows=101 width=488) (actual time=0.711..7.427 rows=100 loops=1)");
sql create table tb2 using st2 tags(2);
sql insert into tb2 values (now, 2, "Seq Scan on tenk2 t2 (cost=0.00..445.00 rows=10000 width=244) (actual time=0.007..2.583 rows=10000 loops=1)");
sql create table tb3 using st2 tags(3);
sql insert into tb3 values (now, 3, "Hash (cost=229.20..229.20 rows=101 width=244) (actual time=0.659..0.659 rows=100 loops=1)");
sql create table tb4 using st2 tags(4);
sql insert into tb4 values (now, 4, "Bitmap Heap Scan on tenk1 t1 (cost=5.07..229.20 rows=101 width=244) (actual time=0.080..0.526 rows=100 loops=1)");
#sql create table tb2 using st2 tags(2);
#sql insert into tb2 values (now, 2, "Seq Scan on tenk2 t2 (cost=0.00..445.00 rows=10000 width=244) (actual time=0.007..2.583 rows=10000 loops=1)");
#sql create table tb3 using st2 tags(3);
#sql insert into tb3 values (now, 3, "Hash (cost=229.20..229.20 rows=101 width=244) (actual time=0.659..0.659 rows=100 loops=1)");
#sql create table tb4 using st2 tags(4);
#sql insert into tb4 values (now, 4, "Bitmap Heap Scan on tenk1 t1 (cost=5.07..229.20 rows=101 width=244) (actual time=0.080..0.526 rows=100 loops=1)");
print ======== step2

View File

@ -65,6 +65,15 @@ sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.031', 5)
sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.036', 6)
sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.51', 7)
# vnode does not return the precision of the table
print ====> create database d1 precision 'us'
sql create database d1 precision 'us'
sql use d1
sql create table dev_001 (ts timestamp ,i timestamp ,j int)
sql insert into dev_001 values(1623046993681000,now,1)(1623046993681001,now+1s,2)(1623046993681002,now+2s,3)(1623046993681004,now+5s,4)
sql create table secondts(ts timestamp,t2 timestamp,i int)
sql insert into secondts values(1623046993681000,now,1)(1623046993681001,now+1s,2)(1623046993681002,now+2s,3)(1623046993681004,now+5s,4)
$loop_test = 0
loop_test_pos:
@ -288,15 +297,6 @@ sql_error sql select count(*) from dev_001 session(ts,0s)
sql_error select count(*) from dev_001 session(i,1y)
sql_error select count(*) from dev_001 session(ts,1d) where ts <'2020-05-20 0:0:0'
# vnode does not return the precision of the table
print ====> create database d1 precision 'us'
sql create database d1 precision 'us'
sql use d1
sql create table dev_001 (ts timestamp ,i timestamp ,j int)
sql insert into dev_001 values(1623046993681000,now,1)(1623046993681001,now+1s,2)(1623046993681002,now+2s,3)(1623046993681004,now+5s,4)
sql create table secondts(ts timestamp,t2 timestamp,i int)
sql insert into secondts values(1623046993681000,now,1)(1623046993681001,now+1s,2)(1623046993681002,now+2s,3)(1623046993681004,now+5s,4)
#print ====> select count(*) from dev_001 session(ts,1u)
#sql select _wstartts, count(*) from dev_001 session(ts,1u)
#print rows: $rows

View File

@ -43,6 +43,27 @@ if $data00 != 2.236067977 then
return -1
endi
sql create table t2 (ts timestamp, f1 int, f2 int);
sql insert into t2 values(now, 0, 0)(now+1s, 1, 1);
sql select udf1(f1, f2) from t2;
if $rows != 2 then
return -1
endi
if $data00 != 88 then
return -1
endi
if $data10 != 88 then
return -1
endi
sql select udf2(f1, f2) from t2;
if $rows != 1 then
return -1
endi
if $data00 != 1.414213562 then
return -1
endi
#sql drop function udf1;
#sql drop function udf2;
system sh/exec.sh -n dnode1 -s stop -x SIGKILL

View File

@ -91,9 +91,9 @@ print =============== create normal table
sql create database ndb
sql use ndb
sql create table nt0 (ts timestamp, i int)
sql create table if not exists nt0 (ts timestamp, i int)
# sql create table if not exists nt0 (ts timestamp, i int)
sql create table nt1 (ts timestamp, i int)
sql create table if not exists nt1 (ts timestamp, i int)
# sql create table if not exists nt1 (ts timestamp, i int)
sql create table if not exists nt3 (ts timestamp, i int)
sql show tables

View File

@ -5,6 +5,7 @@ from util.sql import *
from util.cases import *
from util.dnodes import *
PRIMARY_COL = "ts"
INT_COL = "c1"
BINT_COL = "c2"
@ -18,9 +19,10 @@ BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"
UN_CHAR_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, ]
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
TS_TYPE_COL = [TS_COL]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
@ -28,88 +30,171 @@ class TDTestCase:
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def __length_condition(self):
length_condition = []
def __query_condition(self,tbname):
query_condition = []
for char_col in CHAR_COL:
length_condition.extend(
query_condition.extend(
(
char_col,
f"upper( {char_col} )",
f"{tbname}.{char_col}",
f"upper( {tbname}.{char_col} )",
)
)
length_condition.extend( f"cast( {un_char_col} as binary(16) ) " for un_char_col in UN_CHAR_COL)
length_condition.extend( f"cast( {char_col} + {char_col_2} as binary(32) ) " for char_col_2 in CHAR_COL )
length_condition.extend( f"cast( {char_col} + {un_char_col} as binary(32) ) " for un_char_col in UN_CHAR_COL )
query_condition.extend( f"cast( {tbname}.{un_char_col} as binary(16) ) " for un_char_col in NUM_COL)
query_condition.extend( f"cast( {tbname}.{char_col} + {tbname}.{char_col_2} as binary(32) ) " for char_col_2 in CHAR_COL )
query_condition.extend( f"cast( {tbname}.{char_col} + {tbname}.{un_char_col} as binary(32) ) " for un_char_col in NUM_COL )
for num_col in NUM_COL:
query_condition.extend(
(
f"{tbname}.{num_col}",
f"sin( {tbname}.{num_col} )"
)
)
query_condition.extend( f"{tbname}.{num_col} + {tbname}.{num_col_1} " for num_col_1 in NUM_COL )
length_condition.append('''"test1234!@#$%^&*():'><?/.,][}{"''')
query_condition.append(''' "test1234!@#$%^&*():'><?/.,][}{" ''')
return length_condition
return query_condition
def __where_condition(self, col):
# return f" where count({col}) > 0 "
return ""
def __join_condition(self, tb_list, filter=PRIMARY_COL):
# sourcery skip: flip-comparison
if 1 == len(tb_list):
join_filter = f"{tb_list[0]}.{filter} = {tb_list[0]}.{filter} "
elif 2 == len(tb_list):
join_filter = f"{tb_list[0]}.{filter} = {tb_list[1]}.{filter} "
else:
join_filter = f"{tb_list[0]}.{filter} = {tb_list[1]}.{filter} "
for i in range(1, len(tb_list)-1 ):
join_filter += f"and {tb_list[i]}.{filter} = {tb_list[i+1]}.{filter}"
def __group_condition(self, col, having = ""):
return join_filter
def __where_condition(self, col, tbname):
if col in NUM_COL:
return f" abs( {tbname}.{col} ) >= 0"
elif col in CHAR_COL:
return f" lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
elif col in BOOLEAN_COL:
return f" {tbname}.{col} in (false, true) "
elif col in TS_TYPE_COL or col in PRIMARY_COL:
return f" cast( {tbname}.{col} as binary(16) ) is not null "
else:
return ""
def __group_condition(self, tbname, col, having = ""):
return f" group by {col} having {having}" if having else f" group by {col} "
def __length_current_check(self, tbname):
length_condition = self.__length_condition()
for condition in length_condition:
where_condition = self.__where_condition(condition)
group_having = self.__group_condition(condition, having=f"{condition} is not null " )
group_no_having= self.__group_condition(condition )
def __join_check(self, tblist, checkrows, join_flag=True):
query_conditions = self.__query_condition(tblist[0])
join_condition = self.__join_condition(tb_list=tblist) if join_flag else " "
for condition in query_conditions:
where_condition = self.__where_condition(col=condition, tbname=tblist[0])
group_having = self.__group_condition(tbname=tblist[0], col=condition, having=f"{condition} is not null " )
group_no_having= self.__group_condition(tbname=tblist[0], col=condition )
groups = ["", group_having, group_no_having]
for group_condition in groups:
tdSql.query(f"select {condition} from {tbname} {where_condition} {group_condition} ")
datas = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
length_data = [ len(str(data)) if data else None for data in datas ]
tdSql.query(f"select length( {condition} ) from {tbname} {where_condition} {group_condition}")
for i in range(len(length_data)):
tdSql.checkData(i, 0, length_data[i] ) if length_data[i] else tdSql.checkData(i, 0, None)
if where_condition:
sql = f" select {condition} from {tblist[0]},{tblist[1]} where {join_condition} and {where_condition} {group_condition} "
else:
sql = f" select {condition} from {tblist[0]},{tblist[1]} where {join_condition} {group_condition} "
def __length_err_check(self,tbname):
sqls = []
if not join_flag :
tdSql.error(sql=sql)
if len(tblist) == 2:
if "ct1" in tblist or "t1" in tblist:
self.__join_current(sql, checkrows)
elif where_condition or "not null" in group_condition:
self.__join_current(sql, checkrows + 2 )
elif group_condition:
self.__join_current(sql, checkrows + 3 )
else:
self.__join_current(sql, checkrows + 5 )
if len(tblist) > 2 or len(tblist) < 1:
tdSql.error(sql=sql)
for un_char_col in UN_CHAR_COL:
sqls.extend(
(
f"select length( {un_char_col} ) from {tbname} ",
f"select length(ceil( {un_char_col} )) from {tbname} ",
f"select {un_char_col} from {tbname} group by length( {un_char_col} ) ",
)
)
# def __join_err_check(self,tbname):
# sqls = []
sqls.extend( f"select length( {un_char_col} + {un_char_col_2} ) from {tbname} " for un_char_col_2 in UN_CHAR_COL )
sqls.extend( f"select length( {un_char_col} + {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
# for un_char_col in NUM_COL:
# sqls.extend(
# (
# f"select length( {un_char_col} ) from {tbname} ",
# f"select length(ceil( {un_char_col} )) from {tbname} ",
# f"select {un_char_col} from {tbname} group by length( {un_char_col} ) ",
# )
# )
sqls.extend( f"select {char_col} from {tbname} group by length( {char_col} ) " for char_col in CHAR_COL)
sqls.extend( f"select length( {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select length( {char_col} + {ts_col} ) from {tbname} " for char_col in UN_CHAR_COL for ts_col in TS_TYPE_COL)
sqls.extend( f"select length( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
sqls.extend( f"select upper({char_col}, 11) from {tbname} " for char_col in CHAR_COL )
sqls.extend( f"select upper({char_col}) from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
sqls.extend(
(
f"select length() from {tbname} ",
f"select length(*) from {tbname} ",
f"select length(ccccccc) from {tbname} ",
f"select length(111) from {tbname} ",
f"select length(c8, 11) from {tbname} ",
)
)
# sqls.extend( f"select length( {un_char_col} + {un_char_col_2} ) from {tbname} " for un_char_col_2 in NUM_COL )
# sqls.extend( f"select length( {un_char_col} + {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
# sqls.extend( f"select {char_col} from {tbname} group by length( {char_col} ) " for char_col in CHAR_COL)
# sqls.extend( f"select length( {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
# sqls.extend( f"select length( {char_col} + {ts_col} ) from {tbname} " for char_col in NUM_COL for ts_col in TS_TYPE_COL)
# sqls.extend( f"select length( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
# sqls.extend( f"select upper({char_col}, 11) from {tbname} " for char_col in CHAR_COL )
# sqls.extend( f"select upper({char_col}) from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
# sqls.extend(
# (
# f"select length() from {tbname} ",
# f"select length(*) from {tbname} ",
# f"select length(ccccccc) from {tbname} ",
# f"select length(111) from {tbname} ",
# f"select length(c8, 11) from {tbname} ",
# )
# )
# return sqls
def __join_current(self, sql, checkrows):
tdSql.query(sql=sql)
tdSql.checkRows(checkrows)
return sqls
def __test_current(self):
# sourcery skip: extract-duplicate-method, inline-immediately-returned-variable
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = ["ct1", "ct2", "ct4", "t1"]
for tb in tbname:
self.__length_current_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
tblist_1 = ["ct1", "ct2"]
self.__join_check(tblist_1, 1)
tdLog.printNoPrefix(f"==========current sql condition check in {tblist_1} over==========")
tblist_2 = ["ct2", "ct4"]
self.__join_check(tblist_2, self.rows)
tdLog.printNoPrefix(f"==========current sql condition check in {tblist_2} over==========")
tblist_3 = ["t1", "ct4"]
self.__join_check(tblist_3, 1)
tdLog.printNoPrefix(f"==========current sql condition check in {tblist_3} over==========")
tblist_4 = ["t1", "ct1"]
self.__join_check(tblist_4, 1)
tdLog.printNoPrefix(f"==========current sql condition check in {tblist_4} over==========")
def __test_error(self):
# sourcery skip: extract-duplicate-method, move-assign-in-block
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
err_list_1 = ["ct1","ct2", "ct4"]
err_list_2 = ["ct1","ct2", "t1"]
err_list_3 = ["ct1","ct4", "t1"]
err_list_4 = ["ct2","ct4", "t1"]
err_list_5 = ["ct1", "ct2","ct4", "t1"]
self.__join_check(err_list_1, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_1} over==========")
self.__join_check(err_list_2, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_2} over==========")
self.__join_check(err_list_3, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_3} over==========")
self.__join_check(err_list_4, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_4} over==========")
self.__join_check(err_list_5, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_5} over==========")
self.__join_check(["ct2", "ct4"], -1, join_flag=False)
tdLog.printNoPrefix("==========err sql condition check in has no join condition over==========")
tdSql.error( f"select c1, c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" )
tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{INT_COL}=ct4.{INT_COL}" )
tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{TS_COL}=ct4.{TS_COL}" )
tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{TS_COL}" )
tdSql.error( f"select ct2.c1, ct1.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" )
tdSql.error( f"select ct2.c1, ct4.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and c1 is not null " )
tdSql.error( f"select ct2.c1, ct4.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and ct1.c1 is not null " )
tbname = ["ct1", "ct2", "ct4", "t1"]
for tb in tbname:
@ -150,33 +235,33 @@ class TDTestCase:
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f'''insert into ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } )
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
f'''insert into ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000}
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000}
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
@ -184,15 +269,15 @@ class TDTestCase:
tdSql.execute(
f'''insert into ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
@ -200,7 +285,7 @@ class TDTestCase:
for i in range(rows):
insert_data = f'''insert into t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_{i}", { now_time - 1000 * i } )
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
@ -210,12 +295,12 @@ class TDTestCase:
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
"binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
"binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
@ -228,7 +313,8 @@ class TDTestCase:
self.__create_tb()
tdLog.printNoPrefix("==========step2:insert data")
self.__insert_data(10)
self.rows = 10
self.__insert_data(self.rows)
tdLog.printNoPrefix("==========step3:all check")
self.all_test()

View File

@ -0,0 +1,551 @@
import taos
import sys
import datetime
import inspect
import math
from util.log import *
from util.sql import *
from util.cases import *
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143}
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def prepare_datas(self):
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
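# Helper: run both queries, apply math.sqrt() to every element of the origin result
# (keeping None for NULL and negative inputs) and compare it element-wise against
# the server-side sqrt() result within a 1e-8 tolerance.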
def check_result_auto_sqrt(self ,origin_query , pow_query):
pow_result = tdSql.getResult(pow_query)
origin_result = tdSql.getResult(origin_query)
auto_result =[]
for row in origin_result:
row_check = []
for elem in row:
if elem is None:
elem = None
elif elem < 0:
elem = None
else:
elem = math.sqrt(elem)
row_check.append(elem)
auto_result.append(row_check)
check_status = True
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
if auto_result[row_index][col_index] is None and elem is not None:
check_status = False
elif auto_result[row_index][col_index] is not None and abs(auto_result[row_index][col_index] - elem) > 0.00000001:
check_status = False
else:
pass
if not check_status:
tdLog.notice("sqrt function value is not as expected, sql is \"%s\" " % pow_query)
sys.exit(1)
else:
tdLog.info("sqrt value check passed, it works as expected, sql is \"%s\" " % pow_query)
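# Malformed sqrt() statements; every one of these is expected to be rejected.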
def test_errors(self):
error_sql_lists = [
"select sqrt from t1",
# "select sqrt(-+--+c1 ) from t1",
# "select +-sqrt(c1) from t1",
# "select ++-sqrt(c1) from t1",
# "select ++--sqrt(c1) from t1",
# "select - -sqrt(c1)*0 from t1",
# "select sqrt(tbname+1) from t1 ",
"select sqrt(123--123)==1 from t1",
"select sqrt(c1) as 'd1' from t1",
"select sqrt(c1 ,c2) from t1",
"select sqrt(c1 ,NULL ) from t1",
"select sqrt(,) from t1;",
"select sqrt(sqrt(c1) ab from t1)",
"select sqrt(c1 ) as int from t1",
"select sqrt from stb1",
# "select sqrt(-+--+c1) from stb1",
# "select +-sqrt(c1) from stb1",
# "select ++-sqrt(c1) from stb1",
# "select ++--sqrt(c1) from stb1",
# "select - -sqrt(c1)*0 from stb1",
# "select sqrt(tbname+1) from stb1 ",
"select sqrt(123--123)==1 from stb1",
"select sqrt(c1) as 'd1' from stb1",
"select sqrt(c1 ,c2 ) from stb1",
"select sqrt(c1 ,NULL) from stb1",
"select sqrt(,) from stb1;",
"select sqrt(sqrt(c1) ab from stb1)",
"select sqrt(c1) as int from stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
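# Type checks: sqrt() should reject timestamp/bool/binary/nchar columns and
# unknown tables, and accept all numeric column types.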
def support_types(self):
type_error_sql_lists = [
"select sqrt(ts) from t1" ,
"select sqrt(c7) from t1",
"select sqrt(c8) from t1",
"select sqrt(c9) from t1",
"select sqrt(ts) from ct1" ,
"select sqrt(c7) from ct1",
"select sqrt(c8) from ct1",
"select sqrt(c9) from ct1",
"select sqrt(ts) from ct3" ,
"select sqrt(c7) from ct3",
"select sqrt(c8) from ct3",
"select sqrt(c9) from ct3",
"select sqrt(ts) from ct4" ,
"select sqrt(c7) from ct4",
"select sqrt(c8) from ct4",
"select sqrt(c9) from ct4",
"select sqrt(ts) from stb1" ,
"select sqrt(c7) from stb1",
"select sqrt(c8) from stb1",
"select sqrt(c9) from stb1" ,
"select sqrt(ts) from stbbb1" ,
"select sqrt(c7) from stbbb1",
"select sqrt(ts) from tbname",
"select sqrt(c9) from tbname"
]
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
type_sql_lists = [
"select sqrt(c1) from t1",
"select sqrt(c2) from t1",
"select sqrt(c3) from t1",
"select sqrt(c4) from t1",
"select sqrt(c5) from t1",
"select sqrt(c6) from t1",
"select sqrt(c1) from ct1",
"select sqrt(c2) from ct1",
"select sqrt(c3) from ct1",
"select sqrt(c4) from ct1",
"select sqrt(c5) from ct1",
"select sqrt(c6) from ct1",
"select sqrt(c1) from ct3",
"select sqrt(c2) from ct3",
"select sqrt(c3) from ct3",
"select sqrt(c4) from ct3",
"select sqrt(c5) from ct3",
"select sqrt(c6) from ct3",
"select sqrt(c1) from stb1",
"select sqrt(c2) from stb1",
"select sqrt(c3) from stb1",
"select sqrt(c4) from stb1",
"select sqrt(c5) from stb1",
"select sqrt(c6) from stb1",
"select sqrt(c6) as alisb from stb1",
"select sqrt(c6) alisb from stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
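# Basic queries: empty child table ct3, regular table t1, child tables ct1/ct4,
# the super table stb1, nested sqrt() calls and mixes with other scalar functions.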
def basic_sqrt_function(self):
# basic query
tdSql.query("select c1 from ct3")
tdSql.checkRows(0)
tdSql.query("select c1 from t1")
tdSql.checkRows(12)
tdSql.query("select c1 from stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
tdSql.query("select sqrt(c1) from ct3")
tdSql.checkRows(0)
tdSql.query("select sqrt(c2) from ct3")
tdSql.checkRows(0)
tdSql.query("select sqrt(c3) from ct3")
tdSql.checkRows(0)
tdSql.query("select sqrt(c4) from ct3")
tdSql.checkRows(0)
tdSql.query("select sqrt(c5) from ct3")
tdSql.checkRows(0)
tdSql.query("select sqrt(c6) from ct3")
tdSql.checkRows(0)
# # used for regular table
tdSql.query("select sqrt(c1) from t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1.000000000)
tdSql.checkData(3 , 0, 1.732050808)
tdSql.checkData(5 , 0, None)
tdSql.query("select c1, c2, c3 , c4, c5 from t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from t1")
# used for sub table
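# expected values below assume rows come back in ascending ts order; the oldest
# ct1 row holds c2 = 11111 * 8 = 88888 and sqrt(88888) is about 298.140906284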
tdSql.query("select c2 ,sqrt(c2) from ct1")
tdSql.checkData(0, 1, 298.140906284)
tdSql.checkData(1 , 1, 278.885281074)
tdSql.checkData(3 , 1, 235.701081881)
tdSql.checkData(4 , 1, 0.000000000)
tdSql.query("select c1, c5 ,sqrt(c5) from ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, 2.979932904)
tdSql.checkData(2 , 2, 2.787471970)
tdSql.checkData(3 , 2, 2.580697551)
tdSql.checkData(5 , 2, None)
self.check_result_auto_sqrt( "select c1, c2, c3 , c4, c5 from ct1", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c4), sqrt(c5) from ct1")
# nest query for sqrt functions
tdSql.query("select c4 , sqrt(c4) ,sqrt(sqrt(c4)) , sqrt(sqrt(sqrt(c4))) from ct1;")
tdSql.checkData(0 , 0 , 88)
tdSql.checkData(0 , 1 , 9.380831520)
tdSql.checkData(0 , 2 , 3.062814314)
tdSql.checkData(0 , 3 , 1.750089802)
tdSql.checkData(1 , 0 , 77)
tdSql.checkData(1 , 1 , 8.774964387)
tdSql.checkData(1 , 2 , 2.962256638)
tdSql.checkData(1 , 3 , 1.721120750)
tdSql.checkData(11 , 0 , -99)
tdSql.checkData(11 , 1 , None)
tdSql.checkData(11 , 2 , None)
tdSql.checkData(11 , 3 , None)
# used for stable table
tdSql.query("select sqrt(c1) from stb1")
tdSql.checkRows(25)
# used for not exists table
tdSql.error("select sqrt(c1) from stbbb1")
tdSql.error("select sqrt(c1) from tbname")
tdSql.error("select sqrt(c1) from ct5")
# mix with common col
tdSql.query("select c1, sqrt(c1) from ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,2.828427125)
tdSql.checkData(4 , 0 ,0)
tdSql.checkData(4 , 1 ,0.000000000)
tdSql.query("select c2, sqrt(c2) from ct4")
tdSql.checkData(0 , 0 , None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,55555)
tdSql.checkData(4 , 1 ,235.701081881)
tdSql.checkData(5 , 0 ,None)
tdSql.checkData(5 , 1 ,None)
# mix with common functions
tdSql.query("select c1, sqrt(c1),sqrt(c1), sqrt(sqrt(c1)) from ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 ,2.449489743)
tdSql.checkData(3 , 2 ,2.449489743)
tdSql.checkData(3 , 3 ,1.565084580)
tdSql.query("select c1, sqrt(c1),c5, floor(c5) from stb1 ")
# # mix with agg functions, not supported
tdSql.error("select c1, sqrt(c1),c5, count(c5) from stb1 ")
tdSql.error("select c1, sqrt(c1),c5, count(c5) from ct1 ")
tdSql.error("select sqrt(c1), count(c5) from stb1 ")
tdSql.error("select sqrt(c1), count(c5) from ct1 ")
tdSql.error("select c1, count(c5) from ct1 ")
tdSql.error("select c1, count(c5) from stb1 ")
# agg functions mix with agg functions
tdSql.query("select max(c5), count(c5) from stb1")
tdSql.query("select max(c5), count(c5) from ct1")
# bug fix for count
tdSql.query("select count(c1) from ct4 ")
tdSql.checkData(0,0,9)
tdSql.query("select count(*) from ct4 ")
tdSql.checkData(0,0,12)
tdSql.query("select count(c1) from stb1 ")
tdSql.checkData(0,0,22)
tdSql.query("select count(*) from stb1 ")
tdSql.checkData(0,0,25)
# # bug fix for compute
tdSql.query("select c1, sqrt(c1) -0 ,sqrt(c1-4)-0 from ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
tdSql.checkData(1, 0, 8)
tdSql.checkData(1, 1, 2.828427125)
tdSql.checkData(1, 2, 2.000000000)
tdSql.query(" select c1, sqrt(c1) -0 ,sqrt(c1-0.1)-0.1 from ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
tdSql.checkData(1, 0, 8)
tdSql.checkData(1, 1, 2.828427125)
tdSql.checkData(1, 2, 2.710693865)
tdSql.query("select c1, sqrt(c1), c2, sqrt(c2), c3, sqrt(c3) from ct1")
def test_big_number(self):
tdSql.query("select c1, sqrt(100000000) from ct1") # bigint to double data overflow
tdSql.checkData(4, 1, 10000.000000000)
tdSql.query("select c1, sqrt(10000000000000) from ct1") # bigint to double data overflow
tdSql.checkData(4, 1, 3162277.660168380)
tdSql.query("select c1, sqrt(c1) + sqrt(10000000000000000000000000) from ct1") # bigint to double data overflow
tdSql.query("select c1, sqrt(c1) + sqrt(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(1, 1, 3162277660171.025390625)
tdSql.query("select c1, sqrt(10000000000000000000000000000000000) from ct1") # bigint to double data overflow
tdSql.query("select c1, sqrt(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, 100000000000000000.000000000)
tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, 100000000000000000000.000000000)
tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
def pow_base_test(self):
# base is a regular number, int or double
tdSql.query("select c1, sqrt(c1) from ct1")
tdSql.checkData(0, 1,2.828427125)
tdSql.checkRows(13)
# # bug for compute in functions
# tdSql.query("select c1, abs(1/0) from ct1")
# tdSql.checkData(0, 0, 8)
# tdSql.checkData(0, 1, 1)
tdSql.query("select c1, sqrt(1) from ct1")
tdSql.checkData(0, 1, 1.000000000)
tdSql.checkRows(13)
# select extra columns alongside sqrt(c2)
tdSql.query("select c1,c2, sqrt(c2) from ct1")
tdSql.checkData(0, 2, 298.140906284)
tdSql.checkData(1, 2, 278.885281074)
tdSql.checkData(4, 2, 0.000000000)
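# Filter queries that combine sqrt() with abs/ceil/floor in the select list and
# use sqrt() inside the WHERE clause.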
def abs_func_filter(self):
tdSql.execute("use db")
tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
tdSql.checkData(0,2,8.000000000)
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,3.000000000)
tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
tdSql.checkData(0,2,5.000000000)
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
tdSql.checkData(0,2,5.000000000)
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=sqrt(c1) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,11111)
tdSql.checkData(0,2,1.000000000)
tdSql.checkData(0,3,1.000000000)
tdSql.checkData(0,4,0.900000000)
tdSql.checkData(0,5,1.000000000)
def pow_Arithmetic(self):
pass
def check_boundary_values(self):
tdSql.execute("drop database if exists bound_test")
tdSql.execute("create database if not exists bound_test")
time.sleep(3)
tdSql.execute("use bound_test")
tdSql.execute(
"create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
tdSql.execute(
f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from sub1_bound")
self.check_result_auto_sqrt( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c3), sqrt(c2) ,sqrt(c1) from sub1_bound")
self.check_result_auto_sqrt("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select sqrt(abs(c1)) from sub1_bound" )
# check basic elem for table per row
tdSql.query("select sqrt(abs(c1)) ,sqrt(abs(c2)) , sqrt(abs(c3)) , sqrt(abs(c4)), sqrt(abs(c5)), sqrt(abs(c6)) from sub1_bound ")
tdSql.checkData(0,0,math.sqrt(2147483647))
tdSql.checkData(0,1,math.sqrt(9223372036854775807))
tdSql.checkData(0,2,math.sqrt(32767))
tdSql.checkData(0,3,math.sqrt(127))
tdSql.checkData(0,4,math.sqrt(339999995214436424907732413799364296704.00000))
tdSql.checkData(1,0,math.sqrt(2147483647))
tdSql.checkData(1,1,math.sqrt(9223372036854775807))
tdSql.checkData(1,2,math.sqrt(32767))
tdSql.checkData(1,3,math.sqrt(127))
tdSql.checkData(1,4,math.sqrt(339999995214436424907732413799364296704.00000))
tdSql.checkData(3,0,math.sqrt(2147483646))
tdSql.checkData(3,1,math.sqrt(9223372036854775806))
tdSql.checkData(3,2,math.sqrt(32766))
tdSql.checkData(3,3,math.sqrt(126))
tdSql.checkData(3,4,math.sqrt(339999995214436424907732413799364296704.00000))
# check + - * / in functions
tdSql.query("select sqrt(abs(c1+1)) ,sqrt(abs(c2)) , sqrt(abs(c3*1)) , sqrt(abs(c4/2)), sqrt(abs(c5))/2, sqrt(abs(c6)) from sub1_bound ")
tdSql.checkData(0,0,math.sqrt(2147483648.000000000))
tdSql.checkData(0,1,math.sqrt(9223372036854775807))
tdSql.checkData(0,2,math.sqrt(32767.000000000))
tdSql.checkData(0,3,math.sqrt(63.500000000))
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
self.prepare_datas()
tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
tdLog.printNoPrefix("==========step4: sqrt basic query ============")
self.basic_sqrt_function()
tdLog.printNoPrefix("==========step5: big number sqrt query ============")
self.test_big_number()
tdLog.printNoPrefix("==========step6: base number for sqrt query ============")
self.pow_base_test()
tdLog.printNoPrefix("==========step7: sqrt boundary query ============")
self.check_boundary_values()
tdLog.printNoPrefix("==========step8: sqrt filter query ============")
self.abs_func_filter()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -24,3 +24,4 @@ python3 ./test.py -f 2-query/floor.py
python3 ./test.py -f 2-query/round.py
python3 ./test.py -f 2-query/log.py
python3 ./test.py -f 2-query/pow.py
python3 ./test.py -f 2-query/sqrt.py

View File

@ -21,7 +21,7 @@ static void shellWorkAsClient() {
SRpcInit rpcInit = {0};
SEpSet epSet = {.inUse = 0, .numOfEps = 1};
SRpcMsg rpcRsp = {0};
void *clientRpc = NULL;
void * clientRpc = NULL;
char pass[TSDB_PASSWORD_LEN + 1] = {0};
taosEncryptPass_c((uint8_t *)("_pwd"), strlen("_pwd"), pass);
@ -116,6 +116,7 @@ static void shellWorkAsServer() {
}
SRpcInit rpcInit = {0};
memcpy(rpcInit.localFqdn, tsLocalFqdn, strlen(tsLocalFqdn));
rpcInit.localPort = pArgs->port;
rpcInit.label = "CHK";
rpcInit.numOfThreads = tsNumOfRpcThreads;
@ -126,7 +127,7 @@ static void shellWorkAsServer() {
void *serverRpc = rpcOpen(&rpcInit);
if (serverRpc == NULL) {
printf("failed to init net test server since %s", terrstr());
printf("failed to init net test server since %s\n", terrstr());
} else {
printf("network test server is initialized, port:%u\n", pArgs->port);
taosSetSignal(SIGTERM, shellNettestHandler);

@ -1 +1 @@
Subproject commit 2f3dfddd4d9a869e706ba3cf98fb6d769404cd7c
Subproject commit 59e0ebaf4905e4cb6d95a01c58b3fa507abc5a20