Merge branch 'develop' into feature/query

This commit is contained in:
Haojun Liao 2021-06-25 03:30:57 -07:00
commit 4f5979bbbc
38 changed files with 1805 additions and 226 deletions

View File

@ -23,7 +23,7 @@ TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维
TDengine是一个高效的存储、查询、分析时序大数据的平台专为物联网、车联网、工业互联网、运维监测等优化而设计。您可以像使用关系型数据库MySQL一样来使用它但建议您在使用前仔细阅读一遍下面的文档特别是 [数据模型](https://www.taosdata.com/cn/documentation/architecture) 与 [数据建模](https://www.taosdata.com/cn/documentation/model)。除本文档之外,欢迎 [下载产品白皮书](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf)。
# 生成
# 构建
TDengine目前2.0版服务器仅能在Linux系统上安装和运行后续会支持Windows、macOS等系统。客户端可以在Windows或Linux上安装和运行。任何OS的应用也可以选择RESTful接口连接服务器taosd。CPU支持X64/ARM64/MIPS64/Alpha64后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。本快速指南仅适用于通过源码安装。
@ -107,7 +107,7 @@ Go 连接器和 Grafana 插件在其他独立仓库,如果安装它们的话
git submodule update --init --recursive
```
## 生成 TDengine
## 构建 TDengine
### Linux 系统
@ -116,6 +116,12 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
您可以选择使用 Jemalloc 作为内存分配器,替代默认的 glibc
```bash
apt install autoconf
cmake .. -DJEMALLOC_ENABLED=true
```
在X86-64、X86、arm64、arm32 和 mips64 平台上TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型,如 aarch64 或 aarch32 等。
aarch64

View File

@ -110,6 +110,12 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
You can use Jemalloc as memory allocator instead of glibc:
```
apt install autoconf
cmake .. -DJEMALLOC_ENABLED=true
```
TDengine build script can detect the host machine's architecture on X86-64, X86, arm64, arm32 and mips64 platform.
You can also specify CPUTYPE option like aarch64 or aarch32 too if the detection result is not correct:

View File

@ -39,7 +39,7 @@ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_C_FLAGS} ${DEBUG_FLAGS}
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_C_FLAGS} ${RELEASE_FLAGS}")
# Set c++ compiler options
SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11")
SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11 -Wno-unused-function")
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}")
SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}")

View File

@ -1496,7 +1496,9 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg = (char *)pSchema;
pAlterTableMsg->tagValLen = htonl(pAlterInfo->tagData.dataLen);
memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen);
if (pAlterInfo->tagData.dataLen > 0) {
memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen);
}
pMsg += pAlterInfo->tagData.dataLen;
msgLen = (int32_t)(pMsg - (char*)pAlterTableMsg);

View File

@ -512,6 +512,7 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
pSub->pSql = pSql;
pSql->pSubscription = pSub;
pSub->lastSyncTime = 0;
// no table list now, force to update it
tscDebug("begin table synchronization");

View File

@ -98,7 +98,7 @@ TEST(testCase, parse_time) {
taosParseTime(t41, &time, strlen(t41), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999);
// int64_t k = timezone;
// int64_t k = timezone;
char t42[] = "1997-1-1T0:0:0.999999999Z";
taosParseTime(t42, &time, strlen(t42), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999 - timezone * MILLISECOND_PER_SECOND);

View File

@ -289,6 +289,11 @@ static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) {
}
}
static FORCE_INLINE TSKEY dataColsKeyAtRow(SDataCols *pCols, int row) {
ASSERT(row < pCols->numOfRows);
return dataColsKeyAt(pCols, row);
}
static FORCE_INLINE TSKEY dataColsKeyFirst(SDataCols *pCols) {
if (pCols->numOfRows) {
return dataColsKeyAt(pCols, 0);

View File

@ -452,7 +452,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *
SDataCols *pTarget = NULL;
if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyFirst(source))) { // No overlap
if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyAtRow(source, *pOffset))) { // No overlap
ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
for (int i = 0; i < rowsToMerge; i++) {
for (int j = 0; j < source->numOfCols; j++) {

View File

@ -303,6 +303,8 @@ static int32_t dnodeInitStorage() {
dnodeCheckDataDirOpenned(tsDnodeDir);
taosGetDisk();
taosPrintDiskInfo();
dInfo("dnode storage is initialized at %s", tsDnodeDir);
return 0;
}

View File

@ -79,10 +79,9 @@ enum TEST_MODE {
#define MAX_SQL_SIZE 65536
#define BUFFER_SIZE (65536*2)
#define COND_BUF_LEN BUFFER_SIZE - 30
#define COND_BUF_LEN (BUFFER_SIZE - 30)
#define MAX_USERNAME_SIZE 64
#define MAX_PASSWORD_SIZE 64
#define MAX_DB_NAME_SIZE 64
#define MAX_HOSTNAME_SIZE 64
#define MAX_TB_NAME_SIZE 64
#define MAX_DATA_SIZE (16*1024)+20 // max record len: 16*1024, timestamp string and ,('') need extra space
@ -90,7 +89,7 @@ enum TEST_MODE {
#define OPT_ABORT 1 /* abort */
#define STRING_LEN 60000
#define MAX_PREPARED_RAND 1000000
#define MAX_FILE_NAME_LEN 256
#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255.
#define MAX_SAMPLES_ONCE_FROM_FILE 10000
#define MAX_NUM_DATATYPE 10
@ -195,13 +194,6 @@ enum _describe_table_index {
TSDB_MAX_DESCRIBE_METRIC
};
typedef struct {
char field[TSDB_COL_NAME_LEN + 1];
char type[16];
int length;
char note[128];
} SColDes;
/* Used by main to communicate with parse_opt. */
static char *g_dupstr = NULL;
@ -247,16 +239,16 @@ typedef struct SArguments_S {
} SArguments;
typedef struct SColumn_S {
char field[TSDB_COL_NAME_LEN + 1];
char dataType[MAX_TB_NAME_SIZE];
char field[TSDB_COL_NAME_LEN];
char dataType[16];
uint32_t dataLen;
char note[128];
} StrColumn;
typedef struct SSuperTable_S {
char sTblName[MAX_TB_NAME_SIZE+1];
char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample
char childTblPrefix[MAX_TB_NAME_SIZE];
char sTblName[TSDB_TABLE_NAME_LEN];
char dataSource[MAX_TB_NAME_SIZE]; // rand_gen or sample
char childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq
char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest
uint16_t childTblExists;
int64_t childTblCount;
@ -277,8 +269,8 @@ typedef struct SSuperTable_S {
int64_t timeStampStep;
char startTimestamp[MAX_TB_NAME_SIZE];
char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json
char sampleFile[MAX_FILE_NAME_LEN+1];
char tagsFile[MAX_FILE_NAME_LEN+1];
char sampleFile[MAX_FILE_NAME_LEN];
char tagsFile[MAX_FILE_NAME_LEN];
uint32_t columnCount;
StrColumn columns[MAX_COLUMN_COUNT];
@ -305,7 +297,7 @@ typedef struct SSuperTable_S {
} SSuperTable;
typedef struct {
char name[TSDB_DB_NAME_LEN + 1];
char name[TSDB_DB_NAME_LEN];
char create_time[32];
int64_t ntables;
int32_t vgroups;
@ -341,11 +333,11 @@ typedef struct SDbCfg_S {
int cache;
int blocks;
int quorum;
char precision[MAX_TB_NAME_SIZE];
char precision[8];
} SDbCfg;
typedef struct SDataBase_S {
char dbName[MAX_DB_NAME_SIZE];
char dbName[TSDB_DB_NAME_LEN];
bool drop; // 0: use exists, 1: if exists, drop then new create
SDbCfg dbCfg;
uint64_t superTblCount;
@ -353,14 +345,14 @@ typedef struct SDataBase_S {
} SDataBase;
typedef struct SDbs_S {
char cfgDir[MAX_FILE_NAME_LEN+1];
char cfgDir[MAX_FILE_NAME_LEN];
char host[MAX_HOSTNAME_SIZE];
struct sockaddr_in serv_addr;
uint16_t port;
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
char resultFile[MAX_FILE_NAME_LEN+1];
char resultFile[MAX_FILE_NAME_LEN];
bool use_metric;
bool insert_only;
bool do_aggreFunc;
@ -387,7 +379,7 @@ typedef struct SpecifiedQueryInfo_S {
bool subscribeRestart;
int subscribeKeepProgress;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
int resubAfterConsume[MAX_QUERY_SQL_COUNT];
int endAfterConsume[MAX_QUERY_SQL_COUNT];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
@ -398,7 +390,7 @@ typedef struct SpecifiedQueryInfo_S {
} SpecifiedQueryInfo;
typedef struct SuperQueryInfo_S {
char sTblName[MAX_TB_NAME_SIZE+1];
char sTblName[TSDB_TABLE_NAME_LEN];
uint64_t queryInterval; // 0: unlimit > 0 loop/s
uint32_t threadCnt;
uint32_t asyncMode; // 0: sync, 1: async
@ -407,10 +399,10 @@ typedef struct SuperQueryInfo_S {
int subscribeKeepProgress;
uint64_t queryTimes;
int64_t childTblCount;
char childTblPrefix[MAX_TB_NAME_SIZE];
char childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq
int sqlCount;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
int resubAfterConsume;
int endAfterConsume;
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
@ -420,13 +412,13 @@ typedef struct SuperQueryInfo_S {
} SuperQueryInfo;
typedef struct SQueryMetaInfo_S {
char cfgDir[MAX_FILE_NAME_LEN+1];
char cfgDir[MAX_FILE_NAME_LEN];
char host[MAX_HOSTNAME_SIZE];
uint16_t port;
struct sockaddr_in serv_addr;
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
char dbName[MAX_DB_NAME_SIZE+1];
char dbName[TSDB_DB_NAME_LEN];
char queryMode[MAX_TB_NAME_SIZE]; // taosc, rest
SpecifiedQueryInfo specifiedQueryInfo;
@ -438,11 +430,11 @@ typedef struct SThreadInfo_S {
TAOS * taos;
TAOS_STMT *stmt;
int threadID;
char db_name[MAX_DB_NAME_SIZE+1];
char db_name[TSDB_DB_NAME_LEN];
uint32_t time_precision;
char filePath[4096];
FILE *fp;
char tb_prefix[MAX_TB_NAME_SIZE];
char tb_prefix[TSDB_TABLE_NAME_LEN];
uint64_t start_table_from;
uint64_t end_table_to;
int64_t ntables;
@ -608,7 +600,7 @@ SArguments g_args = {
1, // query_times
0, // interlace_rows;
30000, // num_of_RPR
(1024*1024), // max_sql_len
(1024*1024), // max_sql_len
10000, // num_of_tables
10000, // num_of_DPT
0, // abort
@ -990,9 +982,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
arguments->len_of_binary = atoi(argv[++i]);
} else if (strcmp(argv[i], "-m") == 0) {
if ((argc == i+1) ||
(!isStringNumber(argv[i+1]))) {
(isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-m need a number following!\n");
errorPrint("%s", "\n\t-m need a letter-initial string following!\n");
exit(EXIT_FAILURE);
}
arguments->tb_prefix = argv[++i];
@ -2501,6 +2493,13 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* pTblName = childTblName;
while((row = taos_fetch_row(res)) != NULL) {
int32_t* len = taos_fetch_lengths(res);
if (0 == strlen((char *)row[0])) {
errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n",
__func__, __LINE__, count);
exit(-1);
}
tstrncpy(pTblName, (char *)row[0], len[0]+1);
//printf("==== sub table name: %s\n", pTblName);
count++;
@ -3035,7 +3034,7 @@ static int startMultiThreadCreateChildTable(
for (int64_t i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
pThreadInfo->threadID = i;
tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
pThreadInfo->superTblInfo = superTblInfo;
verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name);
pThreadInfo->taos = taos_connect(
@ -3326,7 +3325,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
goto PARSE_OVER;
}
//tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1);
cJSON* dataLen = cJSON_GetObjectItem(column, "len");
if (dataLen && dataLen->type == cJSON_Number) {
@ -3341,7 +3340,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
for (int n = 0; n < count; ++n) {
tstrncpy(superTbls->columns[index].dataType,
columnCase.dataType, MAX_TB_NAME_SIZE);
columnCase.dataType, strlen(columnCase.dataType) + 1);
superTbls->columns[index].dataLen = columnCase.dataLen;
index++;
}
@ -3397,7 +3396,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
__func__, __LINE__);
goto PARSE_OVER;
}
tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1);
cJSON* dataLen = cJSON_GetObjectItem(tag, "len");
if (dataLen && dataLen->type == cJSON_Number) {
@ -3412,7 +3411,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
for (int n = 0; n < count; ++n) {
tstrncpy(superTbls->tags[index].dataType, columnCase.dataType,
MAX_TB_NAME_SIZE);
strlen(columnCase.dataType) + 1);
superTbls->tags[index].dataLen = columnCase.dataLen;
index++;
}
@ -3635,7 +3634,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
printf("ERROR: failed to read json, db name not found\n");
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN);
cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop");
if (drop && drop->type == cJSON_String && drop->valuestring != NULL) {
@ -3656,10 +3655,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (precision && precision->type == cJSON_String
&& precision->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring,
MAX_DB_NAME_SIZE);
8);
} else if (!precision) {
//tstrncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE);
memset(g_Dbs.db[i].dbCfg.precision, 0, 8);
} else {
printf("ERROR: failed to read json, precision not found\n");
goto PARSE_OVER;
@ -3836,7 +3834,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring,
MAX_TB_NAME_SIZE);
TSDB_TABLE_NAME_LEN);
cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix");
if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) {
@ -3844,7 +3842,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring,
MAX_DB_NAME_SIZE);
TSDB_TABLE_NAME_LEN - 20);
cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table");
if (autoCreateTbl
@ -3912,9 +3910,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (dataSource && dataSource->type == cJSON_String
&& dataSource->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].dataSource,
dataSource->valuestring, MAX_DB_NAME_SIZE);
dataSource->valuestring, TSDB_DB_NAME_LEN);
} else if (!dataSource) {
tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", TSDB_DB_NAME_LEN);
} else {
errorPrint("%s() LN%d, failed to read json, data_source not found\n",
__func__, __LINE__);
@ -3972,10 +3970,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp");
if (ts && ts->type == cJSON_String && ts->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
ts->valuestring, MAX_DB_NAME_SIZE);
ts->valuestring, TSDB_DB_NAME_LEN);
} else if (!ts) {
tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
"now", MAX_DB_NAME_SIZE);
"now", TSDB_DB_NAME_LEN);
} else {
printf("ERROR: failed to read json, start_timestamp not found\n");
goto PARSE_OVER;
@ -3995,9 +3993,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (sampleFormat && sampleFormat->type
== cJSON_String && sampleFormat->valuestring != NULL) {
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat,
sampleFormat->valuestring, MAX_DB_NAME_SIZE);
sampleFormat->valuestring, TSDB_DB_NAME_LEN);
} else if (!sampleFormat) {
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", TSDB_DB_NAME_LEN);
} else {
printf("ERROR: failed to read json, sample_format not found\n");
goto PARSE_OVER;
@ -4242,7 +4240,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* dbs = cJSON_GetObjectItem(root, "databases");
if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) {
tstrncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE);
tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN);
} else if (!dbs) {
printf("ERROR: failed to read json, databases not found\n");
goto PARSE_OVER;
@ -4492,7 +4490,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (stblname && stblname->type == cJSON_String
&& stblname->valuestring != NULL) {
tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring,
MAX_TB_NAME_SIZE);
TSDB_TABLE_NAME_LEN);
} else {
errorPrint("%s() LN%d, failed to read json, super table name input error\n",
__func__, __LINE__);
@ -5103,7 +5101,7 @@ static int32_t generateStbDataTail(
} else {
retLen = getRowDataFromSample(
data,
remainderBufLen,
remainderBufLen < MAX_DATA_SIZE ? remainderBufLen : MAX_DATA_SIZE,
startTime + superTblInfo->timeStampStep * k,
superTblInfo,
pSamplePos);
@ -6302,16 +6300,6 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
}
// read sample data from file first
if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
"sample", strlen("sample")))) {
if (0 != prepareSampleDataForSTable(superTblInfo)) {
errorPrint("%s() LN%d, prepare sample data for stable failed!\n",
__func__, __LINE__);
exit(-1);
}
}
TAOS* taos0 = taos_connect(
g_Dbs.host, g_Dbs.user,
g_Dbs.password, db_name, g_Dbs.port);
@ -6417,7 +6405,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
for (int i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
pThreadInfo->threadID = i;
tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
pThreadInfo->time_precision = timePrec;
pThreadInfo->superTblInfo = superTblInfo;
@ -6861,7 +6849,7 @@ static void *specifiedTableQuery(void *sarg) {
}
}
char sqlStr[MAX_DB_NAME_SIZE + 5];
char sqlStr[TSDB_DB_NAME_LEN + 5];
sprintf(sqlStr, "use %s", g_queryInfo.dbName);
if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
taos_close(pThreadInfo->taos);
@ -7337,12 +7325,6 @@ static void *superSubscribe(void *sarg) {
performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st));
if (res) {
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
fetchResult(res, pThreadInfo);
}
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
@ -7449,10 +7431,10 @@ static void *specifiedSubscribe(void *sarg) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
fetchResult(
g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID],
pThreadInfo);
}
fetchResult(
g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID],
pThreadInfo);
g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++;
if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1)
@ -7689,9 +7671,9 @@ static void setParaFromArg(){
g_Dbs.dbCount = 1;
g_Dbs.db[0].drop = true;
tstrncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].dbName, g_args.database, TSDB_DB_NAME_LEN);
g_Dbs.db[0].dbCfg.replica = g_args.replica;
tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", 8);
tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN);
@ -7713,7 +7695,7 @@ static void setParaFromArg(){
if (g_args.use_metric) {
g_Dbs.db[0].superTblCount = 1;
tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", TSDB_TABLE_NAME_LEN);
g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
g_Dbs.threadCount = g_args.num_of_threads;
g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
@ -7724,7 +7706,7 @@ static void setParaFromArg(){
g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange;
g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio;
tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix,
g_args.tb_prefix, MAX_TB_NAME_SIZE);
g_args.tb_prefix, TSDB_TABLE_NAME_LEN - 20);
tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].iface = g_args.iface;
tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp,
@ -7741,7 +7723,7 @@ static void setParaFromArg(){
}
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
data_type[i], MAX_TB_NAME_SIZE);
data_type[i], strlen(data_type[i]) + 1);
g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary;
g_Dbs.db[0].superTbls[0].columnCount++;
}
@ -7752,18 +7734,18 @@ static void setParaFromArg(){
for (int i = g_Dbs.db[0].superTbls[0].columnCount;
i < g_args.num_of_CPR; i++) {
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
"INT", MAX_TB_NAME_SIZE);
"INT", strlen("INT") + 1);
g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
g_Dbs.db[0].superTbls[0].columnCount++;
}
}
tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType,
"INT", MAX_TB_NAME_SIZE);
"INT", strlen("INT") + 1);
g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0;
tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType,
"BINARY", MAX_TB_NAME_SIZE);
"BINARY", strlen("BINARY") + 1);
g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
g_Dbs.db[0].superTbls[0].tagCount = 2;
} else {
@ -7899,11 +7881,11 @@ static void queryResult() {
pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
tstrncpy(pThreadInfo->tb_prefix,
g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].childTblPrefix, TSDB_TABLE_NAME_LEN - 20);
} else {
pThreadInfo->ntables = g_args.num_of_tables;
pThreadInfo->end_table_to = g_args.num_of_tables -1;
tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN);
}
pThreadInfo->taos = taos_connect(

View File

@ -29,6 +29,9 @@
#define COMMAND_SIZE 65536
//#define DEFAULT_DUMP_FILE "taosdump.sql"
// for strncpy buffer overflow
#define min(a, b) (((a) < (b)) ? (a) : (b))
int converStringToReadable(char *str, int size, char *buf, int bufsize);
int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
void taosDumpCharset(FILE *fp);
@ -1119,12 +1122,11 @@ int taosGetTableDes(
TAOS_FIELD *fields = taos_fetch_fields(res);
tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
while ((row = taos_fetch_row(res)) != NULL) {
strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes);
strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes);
min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes));
tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes);
@ -1575,7 +1577,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX],
fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
min(TSDB_TABLE_NAME_LEN, fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes));
taosWrite(fd, &tableRecord, sizeof(STableRecord));

View File

@ -1068,7 +1068,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
pStable->info.tableId = strdup(pCreate->tableName);
pStable->info.type = TSDB_SUPER_TABLE;
pStable->createdTime = taosGetTimestampMs();
pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
int64_t x = (us&0x000000FFFFFFFFFF);
x = x<<24;
pStable->uid = x + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
pStable->sversion = 0;
pStable->tversion = 0;
pStable->numOfColumns = numOfColumns;

View File

@ -27,18 +27,20 @@ typedef struct {
} SysDiskSize;
int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize);
void taosGetSystemInfo();
bool taosGetProcIO(float *readKB, float *writeKB);
bool taosGetBandSpeed(float *bandSpeedKb);
void taosGetDisk();
bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage);
bool taosGetProcMemory(float *memoryUsedMB);
bool taosGetSysMemory(float *memoryUsedMB);
void taosPrintOsInfo();
int taosSystem(const char *cmd);
void taosKillSystem();
bool taosGetSystemUid(char *uid);
char * taosGetCmdlineByPID(int pid);
void taosGetSystemInfo();
bool taosGetProcIO(float *readKB, float *writeKB);
bool taosGetBandSpeed(float *bandSpeedKb);
void taosGetDisk();
bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) ;
bool taosGetProcMemory(float *memoryUsedMB) ;
bool taosGetSysMemory(float *memoryUsedMB);
void taosPrintOsInfo();
void taosPrintDiskInfo();
int taosSystem(const char * cmd) ;
void taosKillSystem();
bool taosGetSystemUid(char *uid);
char *taosGetCmdlineByPID(int pid);
void taosSetCoreDump();

View File

@ -136,9 +136,6 @@ void taosPrintOsInfo() {
// uInfo(" os openMax: %" PRId64, tsOpenMax);
// uInfo(" os streamMax: %" PRId64, tsStreamMax);
uInfo(" os numOfCores: %d", tsNumOfCores);
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
struct utsname buf;
@ -154,6 +151,14 @@ void taosPrintOsInfo() {
uInfo("==================================");
}
void taosPrintDiskInfo() {
uInfo("==================================");
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo("==================================");
}
void taosKillSystem() {
uError("function taosKillSystem, exit!");
exit(0);

View File

@ -506,9 +506,6 @@ void taosPrintOsInfo() {
uInfo(" os openMax: %" PRId64, tsOpenMax);
uInfo(" os streamMax: %" PRId64, tsStreamMax);
uInfo(" os numOfCores: %d", tsNumOfCores);
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
struct utsname buf;
@ -523,6 +520,14 @@ void taosPrintOsInfo() {
uInfo(" os machine: %s", buf.machine);
}
void taosPrintDiskInfo() {
uInfo("==================================");
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo("==================================");
}
void taosKillSystem() {
// SIGINT
uInfo("taosd will shut down soon");

View File

@ -205,10 +205,15 @@ void taosGetSystemInfo() {
void taosPrintOsInfo() {
uInfo(" os numOfCores: %d", tsNumOfCores);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
uInfo("==================================");
}
void taosPrintDiskInfo() {
uInfo("==================================");
uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB);
uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB);
uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB);
uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB);
uInfo("==================================");
}

View File

@ -7330,11 +7330,16 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo*
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
STSBuf *pTsBuf = NULL;
if (pTsBufInfo->tsLen > 0) { // open new file to save the result
char *tsBlock = start + pTsBufInfo->tsOffset;
pTsBuf = tsBufCreateFromCompBlocks(tsBlock, pTsBufInfo->tsNumOfBlocks, pTsBufInfo->tsLen, pTsBufInfo->tsOrder,
pQueryAttr->vgId);
if (pTsBufInfo->tsLen > 0) { // open new file to save the result
char* tsBlock = start + pTsBufInfo->tsOffset;
pTsBuf = tsBufCreateFromCompBlocks(tsBlock, pTsBufInfo->tsNumOfBlocks, pTsBufInfo->tsLen, pTsBufInfo->tsOrder,
pQueryAttr->vgId);
if (pTsBuf == NULL) {
code = TSDB_CODE_QRY_NO_DISKSPACE;
goto _error;
}
tsBufResetPos(pTsBuf);
bool ret = tsBufNextPos(pTsBuf);
UNUSED(ret);

View File

@ -2,6 +2,7 @@
#include "taoserror.h"
#include "tscompression.h"
#include "tutil.h"
#include "queryLog.h"
static int32_t getDataStartOffset();
static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo);
@ -633,10 +634,15 @@ int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader) {
int32_t r = fseek(pTSBuf->f, 0, SEEK_SET);
if (r != 0) {
qError("fseek failed, errno:%d", errno);
return -1;
}
size_t ws = fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f);
if (ws != 1) {
qError("ts update header fwrite failed, size:%d, expected size:%d", (int32_t)ws, (int32_t)sizeof(STSBufFileHeader));
return -1;
}
fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f);
return 0;
}
@ -853,9 +859,17 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_
TSBufUpdateGroupInfo(pTSBuf, pTSBuf->numOfGroups - 1, pBlockInfo);
int32_t ret = fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET);
UNUSED(ret);
if (ret == -1) {
qError("fseek failed, errno:%d", errno);
tsBufDestroy(pTSBuf);
return NULL;
}
size_t sz = fwrite((void*)pData, 1, len, pTSBuf->f);
UNUSED(sz);
if (sz != len) {
qError("ts data fwrite failed, write size:%d, expected size:%d", (int32_t)sz, len);
tsBufDestroy(pTSBuf);
return NULL;
}
pTSBuf->fileSize += len;
pTSBuf->tsOrder = order;
@ -863,9 +877,16 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_
STSBufFileHeader header = {
.magic = TS_COMP_FILE_MAGIC, .numOfGroup = pTSBuf->numOfGroups, .tsOrder = pTSBuf->tsOrder};
STSBufUpdateHeader(pTSBuf, &header);
if (STSBufUpdateHeader(pTSBuf, &header) < 0) {
tsBufDestroy(pTSBuf);
return NULL;
}
taosFsync(fileno(pTSBuf->f));
if (taosFsync(fileno(pTSBuf->f)) == -1) {
qError("fsync failed, errno:%d", errno);
tsBufDestroy(pTSBuf);
return NULL;
}
return pTSBuf;
}

View File

@ -10,6 +10,7 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wwrite-strings"
#pragma GCC diagnostic ignored "-Wunused-function"
typedef struct ResultObj {
int32_t numOfResult;

View File

@ -5,6 +5,10 @@
#include "taos.h"
#include "qHistogram.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
namespace {
void doHistogramAddTest() {
SHistogramInfo* pHisto = NULL;

View File

@ -6,6 +6,9 @@
#include "qAggMain.h"
#include "tcompare.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
TEST(testCase, patternMatchTest) {
SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER;

View File

@ -7,6 +7,9 @@
#include "qPercentile.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
namespace {
tMemBucket *createBigIntDataBucket(int32_t start, int32_t end) {
tMemBucket *pBucket = tMemBucketCreate(sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, start, end);

View File

@ -6,6 +6,9 @@
#include "taos.h"
#include "tsdb.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
namespace {
// simple test
void simpleTest() {

View File

@ -9,6 +9,10 @@
#include "ttoken.h"
#include "tutil.h"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
namespace {
/**
*

View File

@ -6,14 +6,17 @@
#include "taos.h"
#include "tsdb.h"
#pragma GCC diagnostic ignored "-Wwrite-strings"
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
#pragma GCC diagnostic ignored "-Wsign-compare"
#include "../../client/inc/tscUtil.h"
#include "tutil.h"
#include "tvariant.h"
#include "ttokendef.h"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wwrite-strings"
namespace {
int32_t testValidateName(char* name) {
SStrToken token = {0};

View File

@ -480,11 +480,13 @@ static int tfsFormatDir(char *idir, char *odir) {
return -1;
}
if (realpath(wep.we_wordv[0], odir) == NULL) {
char tmp[PATH_MAX] = {0};
if (realpath(wep.we_wordv[0], tmp) == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
wordfree(&wep);
return -1;
}
strcpy(odir, tmp);
wordfree(&wep);
return 0;

View File

@ -151,7 +151,7 @@ static bool taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) {
wordfree(&full_path);
char tmp[1025] = {0};
char tmp[PATH_MAX] = {0};
if (realpath(option, tmp) != NULL) {
strcpy(option, tmp);
}

View File

@ -235,6 +235,8 @@ python3 ./test.py -f query/queryTscomputWithNow.py
python3 ./test.py -f query/computeErrorinWhere.py
python3 ./test.py -f query/queryTsisNull.py
python3 ./test.py -f query/subqueryFilter.py
# python3 ./test.py -f query/nestedQuery/queryInterval.py
python3 ./test.py -f query/queryStateWindow.py
#stream
@ -325,6 +327,7 @@ python3 ./test.py -f query/queryGroupbySort.py
python3 ./test.py -f functions/queryTestCases.py
python3 ./test.py -f functions/function_stateWindow.py
python3 ./test.py -f functions/function_derivative.py
python3 ./test.py -f functions/function_irate.py
python3 ./test.py -f insert/unsignedInt.py
python3 ./test.py -f insert/unsignedBigint.py

View File

@ -0,0 +1,228 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
class TDTestCase:
    """Regression tests for the TDengine `irate` aggregate function.

    `irate` computes the instantaneous rate of change from the last two
    samples of a column.  The suite exercises it against:
      * all supported numeric column types (signed + unsigned ints, float,
        double) on a super-table child,
      * edge-case tables (single row, NULL-only, equal values, counter
        resets) in both millisecond- and microsecond-precision databases,
      * interval() windows in ascending and descending order,
      * unsupported argument types, which must raise errors.
    """

    def init(self, conn, logSql):
        # Standard TDengine test harness entry point: bind the SQL helper
        # to a cursor on the supplied connection.
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

        self.rowNum = 100            # rows inserted into the main test table
        self.ts = 1537146000000      # base timestamp, ms precision
        self.ts1 = 1537146000000000  # base timestamp, us precision

    def run(self):
        # ---------- default database, precision 'ms' ----------
        tdSql.prepare()
        tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
            col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20), tag1 int)''')
        tdSql.execute("create table test1 using test tags('beijing', 10)")
        # gtest1..gtest8 cover individual edge cases (see inserts below).
        tdSql.execute("create table gtest1 (ts timestamp, col1 float)")
        tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest4 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest5 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest6 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest7 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest8 (ts timestamp, col1 tinyint)")

        # One row per second, every column increasing by 1 per row, so the
        # expected irate of every numeric column is 1 (unit/second).
        for i in range(self.rowNum):
            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                          % (self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))

        # gtest1: float delta 1.2 over 1.1 s -> irate 1.2/1.1
        tdSql.execute("insert into gtest1 values(1537146000000,0);")
        tdSql.execute("insert into gtest1 values(1537146001100,1.2);")
        # gtest2: delta 1 over 0.1 s -> irate 10
        tdSql.execute("insert into gtest2 values(1537146001001,1);")
        tdSql.execute("insert into gtest2 values(1537146001101,2);")
        # gtest3: single row -> irate 0
        tdSql.execute("insert into gtest3 values(1537146001101,2);")
        # gtest4: NULL value only -> no result rows
        tdSql.execute("insert into gtest4(ts) values(1537146001101);")
        # gtest5: equal values -> irate 0
        tdSql.execute("insert into gtest5 values(1537146001002,4);")
        tdSql.execute("insert into gtest5 values(1537146002202,4);")
        # gtest6: decreasing value (counter reset): last value 2 over 1 s -> 2
        tdSql.execute("insert into gtest6 values(1537146000000,5);")
        tdSql.execute("insert into gtest6 values(1537146001000,2);")
        # gtest7: mixed rises/falls, also used for interval() window checks
        tdSql.execute("insert into gtest7 values(1537146001000,1);")
        tdSql.execute("insert into gtest7 values(1537146008000,2);")
        tdSql.execute("insert into gtest7 values(1537146009000,6);")
        tdSql.execute("insert into gtest7 values(1537146012000,3);")
        tdSql.execute("insert into gtest7 values(1537146015000,3);")
        tdSql.execute("insert into gtest7 values(1537146017000,1);")
        tdSql.execute("insert into gtest7 values(1537146019000,3);")
        tdSql.execute("insert into gtest8 values(1537146000002,4);")
        tdSql.execute("insert into gtest8 values(1537146002202,4);")

        # irate verification (ms precision)
        tdSql.query("select irate(col1) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col1) from test1 interval(10s);")
        tdSql.checkData(0, 1, 1)
        tdSql.query("select irate(col1) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col2) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col3) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col4) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col5) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col6) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col11) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col12) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col13) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col14) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col2) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col2) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col1) from gtest1;")
        tdSql.checkData(0, 0, 1.2/1.1)
        tdSql.query("select irate(col1) from gtest2;")
        tdSql.checkData(0, 0, 10)
        tdSql.query("select irate(col1) from gtest3;")
        tdSql.checkData(0, 0, 0)
        tdSql.query("select irate(col1) from gtest4;")
        tdSql.checkRows(0)
        tdSql.query("select irate(col1) from gtest5;")
        tdSql.checkData(0, 0, 0)
        tdSql.query("select irate(col1) from gtest6;")
        tdSql.checkData(0, 0, 2)
        tdSql.query("select irate(col1) from gtest7;")
        tdSql.checkData(0, 0, 1)
        # irate inside per-window aggregation, both sort orders must agree.
        tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts asc;")
        tdSql.checkData(1, 1, 4)
        tdSql.checkData(2, 1, 0)
        tdSql.checkData(3, 1, 1)
        tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts desc ;")
        tdSql.checkData(1, 1, 0)
        tdSql.checkData(2, 1, 4)
        tdSql.checkData(3, 1, 0)

        # error cases: super table without group by, timestamp, bool,
        # binary, nchar columns and tags are not valid irate arguments.
        tdSql.error("select irate(col1) from test")
        tdSql.error("select irate(ts) from test1")
        tdSql.error("select irate(col7) from test1")
        tdSql.error("select irate(col8) from test1")
        tdSql.error("select irate(col9) from test1")
        tdSql.error("select irate(loc) from test1")
        tdSql.error("select irate(tag1) from test1")

        # ---------- db1, precision 'us': repeat with microsecond stamps ----------
        tdSql.execute("create database db1 precision 'us' keep 3650 UPDATE 1")
        tdSql.execute("use db1 ")
        tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
            col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
        tdSql.execute("create table test1 using test tags('beijing')")
        tdSql.execute("create table gtest1 (ts timestamp, col1 float)")
        tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest4 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest5 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest6 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest7 (ts timestamp, col1 tinyint)")
        tdSql.execute("create table gtest8 (ts timestamp, col1 tinyint)")
        # gtest9 (us only): 1 tick = 1 microsecond apart -> very large rate
        tdSql.execute("create table gtest9 (ts timestamp, col1 tinyint)")

        for i in range(self.rowNum):
            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                          % (self.ts1 + i*1000000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
        tdSql.execute("insert into gtest1 values(1537146000000000,0);")
        tdSql.execute("insert into gtest1 values(1537146001100000,1.2);")
        tdSql.execute("insert into gtest2 values(1537146001001000,1);")
        tdSql.execute("insert into gtest2 values(1537146001101000,2);")
        tdSql.execute("insert into gtest3 values(1537146001101000,2);")
        tdSql.execute("insert into gtest4(ts) values(1537146001101000);")
        tdSql.execute("insert into gtest5 values(1537146001002000,4);")
        tdSql.execute("insert into gtest5 values(1537146002202000,4);")
        tdSql.execute("insert into gtest6 values(1537146000000000,5);")
        tdSql.execute("insert into gtest6 values(1537146001000000,2);")
        tdSql.execute("insert into gtest7 values(1537146001000000,1);")
        tdSql.execute("insert into gtest7 values(1537146008000000,2);")
        tdSql.execute("insert into gtest7 values(1537146009000000,6);")
        tdSql.execute("insert into gtest7 values(1537146012000000,3);")
        tdSql.execute("insert into gtest7 values(1537146015000000,3);")
        tdSql.execute("insert into gtest7 values(1537146017000000,1);")
        tdSql.execute("insert into gtest7 values(1537146019000000,3);")
        # gtest8 here: delta 1 over 1.001 s -> irate 1/1.001
        tdSql.execute("insert into gtest8 values(1537146000002000,3);")
        tdSql.execute("insert into gtest8 values(1537146001003000,4);")
        tdSql.execute("insert into gtest9 values(1537146000000000,4);")
        tdSql.execute("insert into gtest9 values(1537146000000001,5);")

        # irate verification (us precision)
        tdSql.query("select irate(col1) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col1) from test1 interval(10s);")
        tdSql.checkData(0, 1, 1)
        tdSql.query("select irate(col1) from test1;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col1) from gtest1;")
        tdSql.checkData(0, 0, 1.2/1.1)
        tdSql.query("select irate(col1) from gtest2;")
        tdSql.checkData(0, 0, 10)
        tdSql.query("select irate(col1) from gtest3;")
        tdSql.checkData(0, 0, 0)
        tdSql.query("select irate(col1) from gtest4;")
        tdSql.checkRows(0)
        tdSql.query("select irate(col1) from gtest5;")
        tdSql.checkData(0, 0, 0)
        tdSql.query("select irate(col1) from gtest6;")
        tdSql.checkData(0, 0, 2)
        tdSql.query("select irate(col1) from gtest7;")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts asc;")
        tdSql.checkData(1, 1, 4)
        tdSql.checkData(2, 1, 0)
        tdSql.checkData(3, 1, 1)
        tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts desc ;")
        tdSql.checkData(1, 1, 0)
        tdSql.checkData(2, 1, 4)
        tdSql.checkData(3, 1, 0)
        tdSql.query("select irate(col1) from gtest8;")
        tdSql.checkData(0, 0, 1/1.001)
        # delta 1 over 1 us -> rate of 1,000,000 per second
        tdSql.query("select irate(col1) from gtest9;")
        tdSql.checkData(0, 0, 1000000)

        # error cases (note: db1's `test` has no tag1 tag, so that query
        # must fail for a different reason than in the ms database).
        tdSql.error("select irate(col1) from test")
        tdSql.error("select irate(ts) from test1")
        tdSql.error("select irate(col7) from test1")
        tdSql.error("select irate(col8) from test1")
        tdSql.error("select irate(col9) from test1")
        tdSql.error("select irate(loc) from test1")
        tdSql.error("select irate(tag1) from test1")

    def stop(self):
        # Harness teardown: close the cursor and report success.
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


# Register the case with the framework for both platforms.
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -18,7 +18,6 @@ from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
@ -27,6 +26,7 @@ class TDTestCase:
def run(self):
tdSql.prepare()
# test case for https://jira.taosdata.com:18080/browse/TD-4568
# test case for https://jira.taosdata.com:18080/browse/TD-4824
tdLog.info("=============== step1,check bool and tinyint data type")
@ -137,8 +137,28 @@ class TDTestCase:
tdSql.checkData(0,1,'True')
tdSql.checkData(0,2,'0')
tdLog.info("=============== step1.3,multiple column and multiple tag check in function")
cmd1 = '''select * from in_stable_1
where in_bool in (true,false) and in_tinyint in (0,127,-127)
and tin_bool in (true,false) and tin_tinyint in (0,127,-127)
order by ts desc ;'''
tdLog.info(cmd1)
tdSql.query(cmd1)
tdSql.checkData(0,1,'True')
tdSql.checkData(0,2,'0')
tdSql.checkData(0,3,'False')
tdSql.checkData(0,4,'0')
tdSql.checkData(1,1,'False')
tdSql.checkData(1,2,'127')
tdSql.checkData(1,3,'False')
tdSql.checkData(1,4,'-127')
tdSql.checkData(2,1,'True')
tdSql.checkData(2,2,'-127')
tdSql.checkData(2,3,'True')
tdSql.checkData(2,4,'127')
tdLog.info("=============== step1.3,drop normal table && create table")
tdLog.info("=============== step1.4,drop normal table && create table")
cmd1 = 'drop table if exists normal_in_bool_tinyint_1 ;'
cmd2 = 'create table normal_in_bool_tinyint_1 (ts timestamp,in_bool bool,in_tinyint tinyint) ; '
tdLog.info(cmd1)
@ -147,7 +167,7 @@ class TDTestCase:
tdSql.execute(cmd2)
tdLog.info("=============== step1.4,insert normal table right data and check in function")
tdLog.info("=============== step1.5,insert normal table right data and check in function")
cmd1 = 'insert into normal_in_bool_tinyint_1 values(now,\'true\',\'-127\') ;'
tdLog.info(cmd1)
tdSql.execute(cmd1)
@ -175,6 +195,17 @@ class TDTestCase:
tdSql.checkData(0,1,'True')
tdSql.checkData(0,2,'0')
cmd4 = '''select * from normal_in_bool_tinyint_1
where in_bool in (true,false) and in_tinyint in (0,127,-127)
order by ts desc ;'''
tdLog.info(cmd4)
tdSql.query(cmd4)
tdSql.checkData(0,1,'True')
tdSql.checkData(0,2,'0')
tdSql.checkData(1,1,'False')
tdSql.checkData(1,2,'127')
tdSql.checkData(2,1,'True')
tdSql.checkData(2,2,'-127')
tdLog.info("=============== step2,check int、smallint and bigint data type")
@ -378,10 +409,39 @@ class TDTestCase:
tdSql.query('select * from in_int_smallint_bigint_3 where in_big in (-9223372036854775807) order by ts desc')
tdSql.checkData(0,1,'0')
tdSql.checkData(0,2,'32767')
tdSql.checkData(0,3,'-9223372036854775807')
tdSql.checkData(0,3,'-9223372036854775807')
tdLog.info("=============== step2.3,drop normal table && create table")
tdLog.info("=============== step2.3,multiple column and multiple tag check in function")
cmd1 = '''select * from in_stable_2
where in_int in (0,2147483647,-2147483647) and in_small in (0,32767,-32767)
and in_big in (0,9223372036854775807,-9223372036854775807)
and tin_int in (0,2147483647,-2147483647) and tin_small in (0,32767,-32767)
and tin_big in (0,9223372036854775807,-9223372036854775807)
order by ts desc ;'''
tdLog.info(cmd1)
tdSql.query(cmd1)
tdSql.checkData(0,1,'0')
tdSql.checkData(0,2,'32767')
tdSql.checkData(0,3,'-9223372036854775807')
tdSql.checkData(0,4,'0')
tdSql.checkData(0,5,'32767')
tdSql.checkData(0,6,'-9223372036854775807')
tdSql.checkData(1,1,'-2147483647')
tdSql.checkData(1,2,'0')
tdSql.checkData(1,3,'9223372036854775807')
tdSql.checkData(1,4,'-2147483647')
tdSql.checkData(1,5,'0')
tdSql.checkData(1,6,'9223372036854775807')
tdSql.checkData(2,1,'2147483647')
tdSql.checkData(2,2,'-32767')
tdSql.checkData(2,3,'0')
tdSql.checkData(2,4,'2147483647')
tdSql.checkData(2,5,'-32767')
tdSql.checkData(2,6,'0')
tdLog.info("=============== step2.4,drop normal table && create table")
cmd1 = 'drop table if exists normal_int_smallint_bigint_1 ;'
cmd2 = 'create table normal_int_smallint_bigint_1 (ts timestamp,in_int int,in_small smallint , in_big bigint) ; '
tdLog.info(cmd1)
@ -390,7 +450,7 @@ class TDTestCase:
tdSql.execute(cmd2)
tdLog.info("=============== step2.4,insert normal table right data and check in function")
tdLog.info("=============== step2.5,insert normal table right data and check in function")
cmd1 = 'insert into normal_int_smallint_bigint_1 values(now,\'2147483647\',\'-32767\',\'0\') ;'
tdLog.info(cmd1)
tdSql.execute(cmd1)
@ -437,7 +497,23 @@ class TDTestCase:
tdSql.query('select * from normal_int_smallint_bigint_1 where in_big in (-9223372036854775807) order by ts desc')
tdSql.checkData(0,1,'0')
tdSql.checkData(0,2,'32767')
tdSql.checkData(0,3,'-9223372036854775807')
tdSql.checkData(0,3,'-9223372036854775807')
cmd4 = '''select * from normal_int_smallint_bigint_1
where in_int in (0,2147483647,-2147483647) and in_small in (0,32767,-32767)
and in_big in (0,9223372036854775807,-9223372036854775807)
order by ts desc ;'''
tdLog.info(cmd4)
tdSql.query(cmd4)
tdSql.checkData(0,1,'0')
tdSql.checkData(0,2,'32767')
tdSql.checkData(0,3,'-9223372036854775807')
tdSql.checkData(1,1,'-2147483647')
tdSql.checkData(1,2,'0')
tdSql.checkData(1,3,'9223372036854775807')
tdSql.checkData(2,1,'2147483647')
tdSql.checkData(2,2,'-32767')
tdSql.checkData(2,3,'0')
tdLog.info("=============== step3,check binary and nchar data type")
@ -560,7 +636,30 @@ class TDTestCase:
tdSql.checkData(0,2,'北京涛思数据科技有限公司')
tdLog.info("=============== step3.3,drop normal table && create table")
tdLog.info("=============== step3.3,multiple column and multiple tag check in function")
cmd1 = '''select * from in_stable_3
where in_binary in (\'0\',\'TDengine\',\'TAOS\')
and in_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'涛思数据TAOSdata\')
and tin_binary in (\'0\',\'TDengine\',\'taosdataTDengine\')
and tin_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'北京涛思数据科技有限公司TDengine\')
order by ts desc ;'''
tdLog.info(cmd1)
tdSql.query(cmd1)
tdSql.checkData(0,1,'TDengine')
tdSql.checkData(0,2,'北京涛思数据科技有限公司')
tdSql.checkData(0,3,'taosdataTDengine')
tdSql.checkData(0,4,'北京涛思数据科技有限公司TDengine')
tdSql.checkData(1,1,'TAOS')
tdSql.checkData(1,2,'涛思数据TAOSdata')
tdSql.checkData(1,3,'TDengine')
tdSql.checkData(1,4,'北京涛思数据科技有限公司')
tdSql.checkData(2,1,'0')
tdSql.checkData(2,2,'0')
tdSql.checkData(2,3,'0')
tdSql.checkData(2,4,'0')
tdLog.info("=============== step3.4,drop normal table && create table")
cmd1 = 'drop table if exists normal_in_binary_nchar_1 ;'
cmd2 = 'create table normal_in_binary_nchar_1 (ts timestamp,in_binary binary(8),in_nchar nchar(12)) ; '
tdLog.info(cmd1)
@ -569,7 +668,7 @@ class TDTestCase:
tdSql.execute(cmd2)
tdLog.info("=============== step3.4,insert normal table right data and check in function")
tdLog.info("=============== step3.5,insert normal table right data and check in function")
cmd1 = 'insert into normal_in_binary_nchar_1 values(now,\'0\',\'0\') ;'
tdLog.info(cmd1)
tdSql.execute(cmd1)
@ -598,124 +697,413 @@ class TDTestCase:
tdSql.checkData(0,2,'北京涛思数据科技有限公司')
tdSql.query('select * from normal_in_binary_nchar_1 where in_nchar in (\'北京涛思数据科技有限公司\') order by ts desc')
tdSql.checkData(0,1,'TDengine')
tdSql.checkData(0,2,'北京涛思数据科技有限公司')
tdSql.checkData(0,2,'北京涛思数据科技有限公司')
tdLog.info("=============== step4,check float and double data type,not support")
cmd4 = '''select * from normal_in_binary_nchar_1
where in_binary in (\'0\',\'TDengine\',\'TAOS\')
and in_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'涛思数据TAOSdata\')
order by ts desc ;'''
tdLog.info(cmd4)
tdSql.query(cmd4)
tdSql.checkData(0,1,'TDengine')
tdSql.checkData(0,2,'北京涛思数据科技有限公司')
tdSql.checkData(1,1,'TAOS')
tdSql.checkData(1,2,'涛思数据TAOSdata')
tdSql.checkData(2,1,'0')
tdSql.checkData(2,2,'0')
tdLog.info("=============== step4,check float and double data type")
tdLog.info("=============== step4.1,drop table && create table")
cmd1 = 'drop table if exists in_float_double_1 ;'
cmd1 = 'drop table if exists in_ts_float_double_1 ;'
cmd2 = 'drop table if exists in_ts_float_double_2 ;'
cmd3 = 'drop table if exists in_ts_float_double_3 ;'
cmd10 = 'drop table if exists in_stable_4 ;'
cmd11 = 'create stable in_stable_4(ts timestamp,in_float float,in_double double) tags (tin_float float,tin_double double) ;'
cmd12 = 'create table in_float_double_1 using in_stable_4 tags(\'666\',\'88888\') ; '
cmd11 = 'create stable in_stable_4(ts timestamp,in_ts timestamp,in_float float,in_double double) tags (tin_ts timestamp,tin_float float,tin_double double) ;'
cmd12 = 'create table in_ts_float_double_1 using in_stable_4 tags(\'0\',\'0\',\'0\') ; '
cmd13 = 'create table in_ts_float_double_2 using in_stable_4 tags(\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ; '
cmd14 = 'create table in_ts_float_double_3 using in_stable_4 tags(\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ; '
tdLog.info(cmd1)
tdSql.execute(cmd1)
tdLog.info(cmd2)
tdSql.execute(cmd2)
tdLog.info(cmd3)
tdSql.execute(cmd3)
tdLog.info(cmd10)
tdSql.execute(cmd10)
tdLog.info(cmd11)
tdSql.execute(cmd11)
tdLog.info(cmd12)
tdSql.execute(cmd12)
tdLog.info(cmd13)
tdSql.execute(cmd13)
tdLog.info(cmd14)
tdSql.execute(cmd14)
tdLog.info("=============== step4.2,insert stable right data and check in function")
cmd1 = 'insert into in_float_double_1 values(now,\'888\',\'66666\') ;'
cmd1 = 'insert into in_ts_float_double_1 values(now,\'0\',\'0\',\'0\') ;'
tdLog.info(cmd1)
tdSql.execute(cmd1)
tdSql.execute(cmd1)
tdSql.query('select * from in_stable_4 where in_ts in (\'0\') order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
tdSql.checkData(0,5,0.00000)
tdSql.checkData(0,6,0.000000000)
tdSql.query('select * from in_stable_4 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
tdSql.checkData(0,5,0.00000)
tdSql.checkData(0,6,0.000000000)
tdSql.query('select * from in_stable_4 where in_float in (0.00000) order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
tdSql.checkData(0,5,0.00000)
tdSql.checkData(0,6,0.000000000)
tdSql.query('select * from in_stable_4 where in_double in (0.000000000) order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
tdSql.checkData(0,5,0.00000)
tdSql.checkData(0,6,0.000000000)
tdSql.query('select * from in_stable_4 where tin_ts in (\'0\') order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
tdSql.checkData(0,5,0.00000)
tdSql.checkData(0,6,0.000000000)
tdSql.query('select * from in_stable_4 where tin_ts in (\'1970-01-01 08:00:00.000\') order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
tdSql.checkData(0,5,0.00000)
tdSql.checkData(0,6,0.000000000)
tdSql.query('select * from in_stable_4 where tin_float in (0.00000) order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
tdSql.checkData(0,5,0.00000)
tdSql.checkData(0,6,0.000000000)
tdSql.query('select * from in_stable_4 where tin_double in (0.000000000) order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
tdSql.checkData(0,4,'1970-01-01 08:00:00.000')
tdSql.checkData(0,5,0.00000)
tdSql.checkData(0,6,0.000000000)
tdSql.query('select * from in_ts_float_double_1 where in_ts in (\'0\') order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
tdSql.query('select * from in_ts_float_double_1 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
tdSql.query('select * from in_ts_float_double_1 where in_float in (0.00000) order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
tdSql.query('select * from in_ts_float_double_1 where in_double in (0.000000000) order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
cmd2 = 'select * from in_stable_4 where in_float in (\'888\');'
cmd2 = 'insert into in_ts_float_double_2 values(now,\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ;'
tdLog.info(cmd2)
tdSql.error(cmd2)
try:
tdSql.execute(cmd2)
tdLog.exit("invalid operation: not supported filter condition")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("invalid operation: not supported filter condition")
tdSql.execute(cmd2)
tdSql.query('select * from in_stable_4 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
tdSql.checkData(0,5,666.00000)
tdSql.checkData(0,6,-88888.000000000)
tdSql.query('select * from in_stable_4 where in_ts in (\'1577836800001\') order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
tdSql.checkData(0,5,666.00000)
tdSql.checkData(0,6,-88888.000000000)
tdSql.query('select * from in_stable_4 where in_float in (666.00000) order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
tdSql.checkData(0,5,666.00000)
tdSql.checkData(0,6,-88888.000000000)
tdSql.query('select * from in_stable_4 where in_double in (-88888.000000000) order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
tdSql.checkData(0,5,666.00000)
tdSql.checkData(0,6,-88888.000000000)
tdSql.query('select * from in_stable_4 where tin_ts in (\'2020-01-01 08:00:00.001000\') order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
tdSql.checkData(0,5,666.00000)
tdSql.checkData(0,6,-88888.000000000)
tdSql.query('select * from in_stable_4 where tin_ts in (\'1577836800001\') order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
tdSql.checkData(0,5,666.00000)
tdSql.checkData(0,6,-88888.000000000)
tdSql.query('select * from in_stable_4 where tin_float in (666.00000) order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
tdSql.checkData(0,5,666.00000)
tdSql.checkData(0,6,-88888.000000000)
tdSql.query('select * from in_stable_4 where tin_double in (-88888.000000000) order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001000')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
tdSql.checkData(0,4,'2020-01-01 08:00:00.001')
tdSql.checkData(0,5,666.00000)
tdSql.checkData(0,6,-88888.000000000)
tdSql.query('select * from in_ts_float_double_2 where in_ts in (\'1577836800001\') order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
tdSql.query('select * from in_ts_float_double_2 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
tdSql.query('select * from in_ts_float_double_2 where in_float in (666.00000) order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
tdSql.query('select * from in_ts_float_double_2 where in_double in (-88888.000000000) order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
cmd3 = 'select * from in_stable_4 where in_double in (\'66666\');'
cmd3 = 'insert into in_ts_float_double_3 values(now,\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ;'
tdLog.info(cmd3)
tdSql.error(cmd3)
try:
tdSql.execute(cmd3)
tdLog.exit("invalid operation: not supported filter condition")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("invalid operation: not supported filter condition")
tdSql.execute(cmd3)
cmd4 = 'select * from in_stable_4 where tin_float in (\'666\');'
tdLog.info(cmd4)
tdSql.error(cmd4)
try:
tdSql.execute(cmd4)
tdLog.exit("invalid operation: not supported filter condition")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("invalid operation: not supported filter condition")
tdSql.query('select * from in_stable_4 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
tdSql.checkData(0,5,-888.00000)
tdSql.checkData(0,6,66666.000000000)
tdSql.query('select * from in_stable_4 where in_ts in (\'1609459200001\') order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
tdSql.checkData(0,5,-888.00000)
tdSql.checkData(0,6,66666.000000000)
tdSql.query('select * from in_stable_4 where in_float in (-888.00000) order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
tdSql.checkData(0,5,-888.00000)
tdSql.checkData(0,6,66666.000000000)
tdSql.query('select * from in_stable_4 where in_double in (66666.000000000) order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
tdSql.checkData(0,5,-888.00000)
tdSql.checkData(0,6,66666.000000000)
tdSql.query('select * from in_stable_4 where tin_ts in (\'2021-01-01 08:00:00.001000\') order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
tdSql.checkData(0,5,-888.00000)
tdSql.checkData(0,6,66666.000000000)
tdSql.query('select * from in_stable_4 where tin_ts in (\'1609459200001\') order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
tdSql.checkData(0,5,-888.00000)
tdSql.checkData(0,6,66666.000000000)
tdSql.query('select * from in_stable_4 where tin_float in (-888.00000) order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
tdSql.checkData(0,5,-888.00000)
tdSql.checkData(0,6,66666.000000000)
tdSql.query('select * from in_stable_4 where tin_double in (66666.000000000) order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
tdSql.checkData(0,5,-888.00000)
tdSql.checkData(0,6,66666.000000000)
tdSql.query('select * from in_ts_float_double_3 where in_ts in (\'1609459200001\') order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.query('select * from in_ts_float_double_3 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.query('select * from in_ts_float_double_3 where in_float in (-888.00000) order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.query('select * from in_ts_float_double_3 where in_double in (66666.000000000) order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdLog.info("=============== step4.3,multiple column and multiple tag check in function")
cmd1 = '''select * from in_stable_4
where in_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\')
and in_float in (0.00000,666.00000,-888.00000)
and in_double in (0.000000000,66666.000000000,-88888.000000000)
and tin_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\')
and tin_float in (0.00000,666.00000,-888.00000)
and tin_double in (0.000000000,66666.000000000,-88888.000000000)
order by ts desc ;'''
tdLog.info(cmd1)
tdSql.query(cmd1)
tdSql.checkData(0,1,'2021-01-01 08:00:00.001000')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.checkData(0,4,'2021-01-01 08:00:00.001')
tdSql.checkData(0,5,-888.00000)
tdSql.checkData(0,6,66666.000000000)
tdSql.checkData(1,1,'2020-01-01 08:00:00.001000')
tdSql.checkData(1,2,666.00000)
tdSql.checkData(1,3,-88888.000000000)
tdSql.checkData(1,4,'2020-01-01 08:00:00.001')
tdSql.checkData(1,5,666.00000)
tdSql.checkData(1,6,-88888.000000000)
tdSql.checkData(2,1,'1970-01-01 08:00:00.000')
tdSql.checkData(2,2,0.00000)
tdSql.checkData(2,3,0.000000000)
tdSql.checkData(2,4,'1970-01-01 08:00:00.000')
tdSql.checkData(2,5,0.00000)
tdSql.checkData(2,6,0.000000000)
cmd5 = 'select * from in_stable_4 where tin_double in (\'88888\');'
tdLog.info(cmd5)
tdSql.error(cmd5)
try:
tdSql.execute(cmd5)
tdLog.exit("invalid operation: not supported filter condition")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("invalid operation: not supported filter condition")
cmd6 = 'select * from in_float_double_1 where in_float in (\'888\');'
tdLog.info(cmd6)
tdSql.error(cmd6)
try:
tdSql.execute(cmd6)
tdLog.exit("invalid operation: not supported filter condition")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("invalid operation: not supported filter condition")
cmd7 = 'select * from in_float_double_1 where in_double in (\'66666\');'
tdLog.info(cmd7)
tdSql.error(cmd7)
try:
tdSql.execute(cmd7)
tdLog.exit("invalid operation: not supported filter condition")
except Exception as e:
tdLog.info(repr(e))
tdLog.info("invalid operation: not supported filter condition")
tdLog.info("=============== step4.3,drop normal table && create table")
cmd1 = 'drop table if exists normal_in_float_double_1 ;'
cmd2 = 'create table normal_in_float_double_1 (ts timestamp,in_float float,in_double double) ; '
tdLog.info("=============== step4.4,drop normal table && create table")
cmd1 = 'drop table if exists normal_in_ts_float_double_1 ;'
cmd2 = 'create table normal_in_ts_float_double_1 (ts timestamp,in_ts timestamp,in_float float,in_double double) ; '
tdLog.info(cmd1)
tdSql.execute(cmd1)
tdLog.info(cmd2)
tdSql.execute(cmd2)
tdLog.info("=============== step4.4,insert normal table right data and check in function")
cmd1 = 'insert into normal_in_float_double_1 values(now,\'888\',\'666666\') ;'
tdLog.info("=============== step4.5,insert normal table right data and check in function")
cmd1 = 'insert into normal_in_ts_float_double_1 values(now,\'0\',\'0\',\'0\') ;'
tdLog.info(cmd1)
tdSql.execute(cmd1)
tdSql.execute(cmd1)
cmd2 = 'select * from normal_in_float_double_1 where in_float in (\'888\');'
#tdLog.info(cmd2)
#tdSql.error(cmd2)
#try:
# tdSql.execute(cmd2)
# tdLog.exit("invalid operation: not supported filter condition")
#except Exception as e:
# tdLog.info(repr(e))
# tdLog.info("invalid operation: not supported filter condition")
#
#cmd3 = 'select * from normal_in_float_double_1 where in_double in (\'66666\');'
#tdLog.info(cmd3)
#tdSql.error(cmd3)
#try:
# tdSql.execute(cmd3)
# tdLog.exit("invalid operation: not supported filter condition")
#except Exception as e:
# tdLog.info(repr(e))
# tdLog.info("invalid operation: not supported filter condition")
tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'0\') order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (0.00000) order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (0.000000000) order by ts desc')
tdSql.checkData(0,1,'1970-01-01 08:00:00.000')
tdSql.checkData(0,2,0.00000)
tdSql.checkData(0,3,0.000000000)
cmd2 = 'insert into normal_in_ts_float_double_1 values(now,\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ;'
tdLog.info(cmd2)
tdSql.execute(cmd2)
tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1577836800001\') order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (666.00000) order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (-88888.000000000) order by ts desc')
tdSql.checkData(0,1,'2020-01-01 08:00:00.001')
tdSql.checkData(0,2,666.00000)
tdSql.checkData(0,3,-88888.000000000)
cmd3 = 'insert into normal_in_ts_float_double_1 values(now,\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ;'
tdLog.info(cmd3)
tdSql.execute(cmd3)
tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1609459200001\') order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (-888.00000) order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (66666.000000000) order by ts desc')
tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
cmd4 = '''select * from normal_in_ts_float_double_1
where in_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\')
and in_double in (0.000000000,66666.000000000,-88888.000000000)
and in_float in (0.00000,666.00000,-888.00000)
order by ts desc ;'''
tdLog.info(cmd4)
tdSql.query(cmd4)
tdSql.checkData(0,1,'2021-01-01 08:00:00.001')
tdSql.checkData(0,2,-888.00000)
tdSql.checkData(0,3,66666.000000000)
tdSql.checkData(1,1,'2020-01-01 08:00:00.001')
tdSql.checkData(1,2,666.00000)
tdSql.checkData(1,3,-88888.000000000)
tdSql.checkData(2,1,'1970-01-01 08:00:00.000')
tdSql.checkData(2,2,0.00000)
tdSql.checkData(2,3,0.000000000)
def stop(self):
    # Tear down: release the SQL connection used by this test case.
    tdSql.close()

View File

@ -0,0 +1,62 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file":"./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100000,
"childtable_limit": -1,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1,
"timestamp_step": 1000,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/sample.csv",
"tags_file": "",
"columns": [{"type": "INT", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BOOL"}],
"tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":1}]
}]
}]
}

View File

@ -0,0 +1,81 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
    """Runs taosdemo with query/nestedQuery/insertData.json and verifies
    the expected table and row counts for stb0/stb1."""

    def init(self, conn, logSql):
        # Framework entry point: bind the cursor and logging flag.
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        """Locate the TDengine build root containing the taosd binary.

        Returns:
            str: the build root path, or "" when taosd cannot be found.

        Bug fix: buildPath was previously only assigned inside the loop,
        so a missing taosd binary raised UnboundLocalError instead of
        letting run() report "taosd not found!".
        """
        buildPath = ""
        selfPath = os.path.dirname(os.path.realpath(__file__))
        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                # Ignore binaries staged under the packaging directory.
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # insert: create one or mutiple tables per sql and insert multiple rows per sql
        os.system("%staosdemo -f query/nestedQuery/insertData.json -y " % binPath)
        tdSql.execute("use db")
        # Verify child-table counts and per-table / per-stable row counts
        # produced by the insert run above.
        tdSql.query("select count (tbname) from stb0")
        tdSql.checkData(0, 0, 1000)
        tdSql.query("select count (tbname) from stb1")
        tdSql.checkData(0, 0, 1000)
        tdSql.query("select count(*) from stb00_0")
        tdSql.checkData(0, 0, 100)
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 100000)
        tdSql.query("select count(*) from stb01_1")
        tdSql.checkData(0, 0, 200)
        tdSql.query("select count(*) from stb1")
        tdSql.checkData(0, 0, 200000)

        # Clean up artifacts generated by this run.
        testcaseFilename = os.path.split(__file__)[-1]
        os.system("rm -rf ./insert_res.txt")
        os.system("rm -rf query/nestedQuery/%s.sql" % testcaseFilename)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
# Register this test case with the framework for both platforms.
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,106 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
import random
class TDTestCase:
    # Tests nested subqueries with interval/sliding/offset/group by,
    # and top/bottom over subquery results.

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        # Two base timestamps 100 s apart used for generated rows.
        self.ts1 = 1593548685000
        self.ts2 = 1593548785000

    def run(self):
        # tdSql.execute("drop database db ")
        tdSql.prepare()
        tdSql.execute("create table st (ts timestamp, num int, value int , t_instance int) tags (loc nchar(30))")
        node = 5
        number = 10
        # Populate one child table per "node", `number` rows each;
        # timestamps are spread so interval windows contain known values.
        for n in range(node):
            for m in range(number):
                dt= m*300000+n*60000  # collecting frequency is 10s
                args1=(n,n,self.ts1+dt,n,100+2*m+2*n,10+m+n)
                # args2=(n,self.ts2+dt,n,120+n,15+n)
                tdSql.execute("insert into t%d using st tags('beijing%d')  values(%d, %d, %d, %d)" % args1)
                # tdSql.execute("insert into t1 using st tags('shanghai') values(%d, %d, %d, %d)" % args2)

        # interval function
        tdSql.query("select avg(value) from st interval(10m)")
        # print(tdSql.queryResult)
        tdSql.checkRows(6)
        tdSql.checkData(0, 0, "2020-07-01 04:20:00")
        tdSql.checkData(1, 1, 107.4)

        # subquery with interval
        tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));")
        tdSql.checkData(0, 0, 109.0)
        # subquery with interval and select two Column in parent query
        # (selecting a raw column alongside an aggregate must be rejected)
        tdSql.error("select ts,avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));")
        # subquery with interval and sliding
        tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(30s) limit 1;")
        tdSql.checkData(0, 0, "2020-07-01 04:17:00")
        tdSql.checkData(0, 1, 100)
        tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(30s));")
        tdSql.checkData(0, 0, 111)

        # subquery with interval and offset
        tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m);")
        tdSql.checkData(0, 0, "2020-07-01 04:21:00")
        tdSql.checkData(0, 1, 100)
        tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m) group by loc);")
        tdSql.checkData(0, 0, 109)

        # subquery with interval,sliding and group by ; parent query with interval
        tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(1m) group by loc limit 1 offset 52 ;")
        tdSql.checkData(0, 0, "2020-07-01 05:09:00")
        tdSql.checkData(0, 1, 118)
        tdSql.query("select avg(avg_val) as ncst from(select avg(value) as avg_val from st where loc!='beijing0'  interval(8m) sliding(1m)  group by loc ) interval(5m);")
        tdSql.checkData(1, 1, 105)

        # # subquery and parent query with interval and sliding
        tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1'  interval(8m) sliding(5m)) interval(10m) sliding(2m);")
        tdSql.checkData(29, 0, "2020-07-01 05:10:00.000")

        # subquery and parent query with top and bottom
        tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num) order by avg_val desc;")
        tdSql.checkData(0, 1, 117)
        tdSql.query("select bottom(avg_val,3) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num) order by avg_val asc;")
        tdSql.checkData(0, 1, 111)

        #
        tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(3m));")
        tdSql.checkData(0, 1, 120)

        # clear env
        testcaseFilename = os.path.split(__file__)[-1]
        os.system("rm -rf ./insert_res.txt")
        os.system("rm -rf wal/%s.sql" % testcaseFilename )

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


# Register this test case with the framework for both platforms.
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,111 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
class TDTestCase:
    """Tests state_window() over every supported column type, with all
    aggregate functions, with a where clause, and the rejection of
    unsupported column types (timestamp, float, double, binary, nchar, tags)."""

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        # Consistency fix: pass logSql through like every sibling test case.
        tdSql.init(conn.cursor(), logSql)
        self.rowNum = 100000        # rows inserted into dev_002
        self.ts = 1537146000000     # base timestamp for generated rows

    def run(self):
        tdSql.prepare()
        print("==============step1")
        tdSql.execute(
            "create table if not exists st (ts timestamp, t1 int, t2 timestamp, t3 bigint, t4 float, t5 double, t6 binary(10), t7 smallint, t8 tinyint, t9 bool, t10 nchar(10), t11 int unsigned, t12 bigint unsigned, t13 smallint unsigned, t14 tinyint unsigned ,t15 int) tags(dev nchar(50), tag2 binary(16))")
        tdSql.execute(
            'CREATE TABLE if not exists dev_001 using st tags("dev_01", "tag_01")')
        tdSql.execute(
            'CREATE TABLE if not exists dev_002 using st tags("dev_02", "tag_02")')

        print("==============step2")
        tdSql.execute(
            "INSERT INTO dev_001 VALUES('2020-05-13 10:00:00.000', 1, '2020-05-13 10:00:00.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 254, 1)('2020-05-13 10:00:01.000', 1, '2020-05-13 10:00:01.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 253, 5)('2020-05-13 10:00:02.000', 10, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', 10, -127, false, '测试', 15, 10, 65534, 253, 10)('2020-05-13 10:00:03.000', 1, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', -10, -126, true, '测试', 14, 12, 65532, 254, 15)")

        for i in range(self.rowNum):
            # Bug fix: the original statement carried a trailing comma inside
            # the values list -- values(%d, %d,) -- which is invalid SQL.
            tdSql.execute("insert into dev_002 (ts,t1) values(%d, %d)" % (self.ts + i, i + 1))

        # state_window over each supported column type: row-count and
        # per-window counts must match the four rows inserted into dev_001.
        tdSql.query("select count(ts) from dev_001 state_window(t1)")
        tdSql.checkRows(3)
        tdSql.checkData(0, 0, 2)
        tdSql.query("select count(ts) from dev_001 state_window(t3)")
        tdSql.checkRows(2)
        tdSql.checkData(1, 0, 2)
        tdSql.query("select count(ts) from dev_001 state_window(t7)")
        tdSql.checkRows(3)
        tdSql.checkData(1, 0, 1)
        tdSql.query("select count(ts) from dev_001 state_window(t8)")
        tdSql.checkRows(3)
        tdSql.checkData(2, 0, 1)
        tdSql.query("select count(ts) from dev_001 state_window(t11)")
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, 3)
        tdSql.query("select count(ts) from dev_001 state_window(t12)")
        tdSql.checkRows(2)
        tdSql.checkData(1, 0, 1)
        tdSql.query("select count(ts) from dev_001 state_window(t13)")
        tdSql.checkRows(2)
        tdSql.checkData(1, 0, 1)
        tdSql.query("select count(ts) from dev_001 state_window(t14)")
        tdSql.checkRows(3)
        tdSql.checkData(1, 0, 2)
        tdSql.query("select count(ts) from dev_002 state_window(t1)")
        tdSql.checkRows(100000)

        # with all aggregate function
        tdSql.query("select count(*),sum(t1),avg(t1),twa(t1),stddev(t15),leastsquares(t15,1,1),first(t15),last(t15),spread(t15),percentile(t15,90),t9 from dev_001 state_window(t9);")
        tdSql.checkRows(3)
        tdSql.checkData(0, 0, 2)
        tdSql.checkData(1, 1, 10)
        tdSql.checkData(0, 2, 1)
        # tdSql.checkData(0, 3, 1)
        tdSql.checkData(0, 4, np.std([1, 5]))
        # tdSql.checkData(0, 5, 1)
        tdSql.checkData(0, 6, 1)
        tdSql.checkData(0, 7, 5)
        tdSql.checkData(0, 8, 4)
        tdSql.checkData(0, 9, 4.6)
        tdSql.checkData(0, 10, 'True')

        # with where
        tdSql.query("select avg(t15),t9 from dev_001 where t9='true' state_window(t9);")
        tdSql.checkData(0, 0, 7)
        tdSql.checkData(0, 1, 'True')

        # error: state_window is rejected on timestamp, float, double,
        # binary, nchar columns, super tables and tag columns.
        tdSql.error("select count(*) from dev_001 state_window(t2)")
        tdSql.error("select count(*) from st state_window(t3)")
        tdSql.error("select count(*) from dev_001 state_window(t4)")
        tdSql.error("select count(*) from dev_001 state_window(t5)")
        tdSql.error("select count(*) from dev_001 state_window(t6)")
        tdSql.error("select count(*) from dev_001 state_window(t10)")
        tdSql.error("select count(*) from dev_001 state_window(tag2)")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


# Register this test case with the framework for both platforms.
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -45,7 +45,9 @@ class TDTestCase:
for i in range(100):
sql += "(%d, %d, 'nchar%d')" % (currts + i, i % 100, i % 100)
tdSql.execute(sql)
os.system("rm /tmp/*.sql")
os.system("taosdump --databases db -o /tmp")
tdSql.execute("drop database db")

View File

@ -0,0 +1,74 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
    # Round-trip test for taosdump: dump a database with -B/-L limits,
    # drop it, restore from the dump, and verify schema and row count.

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.ts = 1601481600000          # base timestamp for generated rows
        self.numberOfTables = 1          # NOTE(review): appears unused here — verify
        self.numberOfRecords = 15000

    def run(self):
        tdSql.prepare()
        tdSql.execute("create table st(ts timestamp, c1 timestamp, c2 int, c3 bigint, c4 float, c5 double, c6 binary(8), c7 smallint, c8 tinyint, c9 bool, c10 nchar(8)) tags(t1 int)")
        tdSql.execute("create table t1 using st tags(0)")
        currts = self.ts
        finish = 0
        # Insert in batches, capping each statement well below the
        # 1 MB SQL length limit (stop when < 16 KB of headroom remains).
        while(finish < self.numberOfRecords):
            sql = "insert into t1 values"
            for i in range(finish, self.numberOfRecords):
                sql += "(%d, 1019774612, 29931, 1442173978, 165092.468750, 1128.643179, 'MOCq1pTu', 18405, 82, 0, 'g0A6S0Fu')" % (currts + i)
                finish = i + 1
                if (1048576 - len(sql)) < 16384:
                    break
            tdSql.execute(sql)

        # Dump with explicit batch (-B) and SQL length (-L) limits.
        os.system("rm /tmp/*.sql")
        os.system("taosdump --databases db -o /tmp -B 32766 -L 1048576")

        # Drop everything, then restore from the dump and verify.
        tdSql.execute("drop database db")
        tdSql.query("show databases")
        tdSql.checkRows(0)

        os.system("taosdump -i /tmp")

        tdSql.query("show databases")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 'db')

        tdSql.execute("use db")
        tdSql.query("show stables")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 'st')

        tdSql.query("select count(*) from t1")
        tdSql.checkData(0, 0, self.numberOfRecords)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


# Register this test case with the framework for both platforms.
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,450 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import time
import datetime
import inspect
import psutil
import shutil
import json
from util.log import *
from multiprocessing import cpu_count
# TODO: fully test the function. Handle exceptions.
# Handle json format not accepted by taosdemo
class TDTaosdemoCfg:
def __init__(self):
self.insert_cfg = {
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": cpu_count(),
"thread_count_create_tbl": cpu_count(),
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"num_of_records_per_req": 32766,
"max_sql_len": 32766,
"databases": None
}
self.db = {
"name": 'db',
"drop": 'yes',
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 6,
"precision": "ms",
"keep": 3650,
"minRows": 100,
"maxRows": 4096,
"comp": 2,
"walLevel": 1,
"cachelast": 0,
"quorum": 1,
"fsync": 3000,
"update": 0
}
self.query_cfg = {
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "db",
"query_times": 2,
"query_mode": "taosc",
"specified_table_query": None,
"super_table_query": None
}
self.table_query = {
"query_interval": 1,
"concurrent": 3,
"sqls": None
}
self.stable_query = {
"stblname": "stb",
"query_interval": 1,
"threads": 3,
"sqls": None
}
self.sub_cfg = {
"filetype": "subscribe",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"databases": "db",
"confirm_parameter_prompt": "no",
"specified_table_query": None,
"super_table_query": None
}
self.table_sub = {
"concurrent": 1,
"mode": "sync",
"interval": 10000,
"restart": "yes",
"keepProgress": "yes",
"sqls": None
}
self.stable_sub = {
"stblname": "stb",
"threads": 1,
"mode": "sync",
"interval": 10000,
"restart": "yes",
"keepProgress": "yes",
"sqls": None
}
self.stbs = []
self.stb_template = {
"name": "stb",
"child_table_exists": "no",
"childtable_count": 100,
"childtable_prefix": "stb_",
"auto_create_table": "no",
"batch_create_tbl_num": 5,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_limit": 10,
"childtable_offset": 0,
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 32766,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT", "count": 1}],
"tags": [{"type": "BIGINT", "count": 1}]
}
self.tb_query_sql = []
self.tb_query_sql_template = {
"sql": "select last_row(*) from stb_0 ",
"result": "temp/query_res0.txt"
}
self.stb_query_sql = []
self.stb_query_sql_template = {
"sql": "select last_row(ts) from xxxx",
"result": "temp/query_res2.txt"
}
self.tb_sub_sql = []
self.tb_sub_sql_template = {
"sql": "select * from stb_0 ;",
"result": "temp/subscribe_res0.txt"
}
self.stb_sub_sql = []
self.stb_sub_sql_template = {
"sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;",
"result": "temp/subscribe_res1.txt"
}
# The following functions are import functions for different dicts and lists
# except import_sql, all other import functions will a dict and overwrite the origional dict
# dict_in: the dict used to overwrite the target
def import_insert_cfg(self, dict_in):
self.insert_cfg = dict_in
def import_db(self, dict_in):
self.db = dict_in
def import_stbs(self, dict_in):
self.stbs = dict_in
def import_query_cfg(self, dict_in):
self.query_cfg = dict_in
def import_table_query(self, dict_in):
self.table_query = dict_in
def import_stable_query(self, dict_in):
self.stable_query = dict_in
def import_sub_cfg(self, dict_in):
self.sub_cfg = dict_in
def import_table_sub(self, dict_in):
self.table_sub = dict_in
def import_stable_sub(self, dict_in):
self.stable_sub = dict_in
def import_sql(self, Sql_in, mode):
"""used for importing the sql later used
Args:
Sql_in (dict): the imported sql dict
mode (str): the sql storing location within TDTaosdemoCfg
format: 'fileType_tableType'
fileType: query, sub
tableType: table, stable
"""
if mode == 'query_table':
self.tb_query_sql = Sql_in
elif mode == 'query_stable':
self.stb_query_sql = Sql_in
elif mode == 'sub_table':
self.tb_sub_sql = Sql_in
elif mode == 'sub_stable':
self.stb_sub_sql = Sql_in
# import functions end
# The following functions are alter functions for different dicts
# Args:
# key: the key that is going to be modified
# value: the value of the key that is going to be modified
# if key = 'databases' | "specified_table_query" | "super_table_query"|"sqls"
# value will not be used
def alter_insert_cfg(self, key, value):
if key == 'databases':
self.insert_cfg[key] = [
{
'dbinfo': self.db,
'super_tables': self.stbs
}
]
else:
self.insert_cfg[key] = value
def alter_db(self, key, value):
self.db[key] = value
def alter_query_tb(self, key, value):
if key == "sqls":
self.table_query[key] = self.tb_query_sql
else:
self.table_query[key] = value
def alter_query_stb(self, key, value):
if key == "sqls":
self.stable_query[key] = self.stb_query_sql
else:
self.stable_query[key] = value
def alter_query_cfg(self, key, value):
if key == "specified_table_query":
self.query_cfg["specified_table_query"] = self.table_query
elif key == "super_table_query":
self.query_cfg["super_table_query"] = self.stable_query
else:
self.table_query[key] = value
def alter_sub_cfg(self, key, value):
if key == "specified_table_query":
self.sub_cfg["specified_table_query"] = self.table_sub
elif key == "super_table_query":
self.sub_cfg["super_table_query"] = self.stable_sub
else:
self.table_query[key] = value
def alter_sub_stb(self, key, value):
if key == "sqls":
self.stable_sub[key] = self.stb_sub_sql
else:
self.stable_sub[key] = value
def alter_sub_tb(self, key, value):
if key == "sqls":
self.table_sub[key] = self.tb_sub_sql
else:
self.table_sub[key] = value
# alter function ends
# the following functions are for handling the sql lists
def append_sql_stb(self, target, value):
"""for appending sql dict into specific sql list
Args:
target (str): the target append list
format: 'fileType_tableType'
fileType: query, sub
tableType: table, stable
unique: 'insert_stbs'
value (dict): the sql dict going to be appended
"""
if target == 'insert_stbs':
self.stbs.append(value)
elif target == 'query_table':
self.tb_query_sql.append(value)
elif target == 'query_stable':
self.stb_query_sql.append(value)
elif target == 'sub_table':
self.tb_sub_sql.append(value)
elif target == 'sub_stable':
self.stb_sub_sql.append(value)
def pop_sql_stb(self, target, index):
"""for poping a sql dict from specific sql list
Args:
target (str): the target append list
format: 'fileType_tableType'
fileType: query, sub
tableType: table, stable
unique: 'insert_stbs'
index (int): the sql dict that is going to be popped
"""
if target == 'insert_stbs':
self.stbs.pop(index)
elif target == 'query_table':
self.tb_query_sql.pop(index)
elif target == 'query_stable':
self.stb_query_sql.pop(index)
elif target == 'sub_table':
self.tb_sub_sql.pop(index)
elif target == 'sub_stable':
self.stb_sub_sql.pop(index)
# sql list modification function end
# The following functions are get functions for different dicts
def get_db(self):
return self.db
def get_stb(self):
return self.stbs
def get_insert_cfg(self):
return self.insert_cfg
def get_query_cfg(self):
return self.query_cfg
def get_tb_query(self):
return self.table_query
def get_stb_query(self):
return self.stable_query
def get_sub_cfg(self):
return self.sub_cfg
def get_tb_sub(self):
return self.table_sub
def get_stb_sub(self):
return self.stable_sub
def get_sql(self, target):
"""general get function for all sql lists
Args:
target (str): the sql list want to get
format: 'fileType_tableType'
fileType: query, sub
tableType: table, stable
unique: 'insert_stbs'
"""
if target == 'query_table':
return self.tb_query_sql
elif target == 'query_stable':
return self.stb_query_sql
elif target == 'sub_table':
return self.tb_sub_sql
elif target == 'sub_stable':
return self.stb_sub_sql
def get_template(self, target):
"""general get function for the default sql template
Args:
target (str): the sql list want to get
format: 'fileType_tableType'
fileType: query, sub
tableType: table, stable
unique: 'insert_stbs'
"""
if target == 'insert_stbs':
return self.stb_template
elif target == 'query_table':
return self.tb_query_sql_template
elif target == 'query_stable':
return self.stb_query_sql_template
elif target == 'sub_table':
return self.tb_sub_sql_template
elif target == 'sub_stable':
return self.stb_sub_sql_template
else:
print(f'did not find {target}')
# the folloing are the file generation functions
"""defalut document:
generator functio for generating taosdemo json file
will assemble the dicts and dump the final json
Args:
pathName (str): the directory wanting the json file to be
fileName (str): the name suffix of the json file
Returns:
str: [pathName]/[filetype]_[filName].json
"""
def generate_insert_cfg(self, pathName, fileName):
cfgFileName = f'{pathName}/insert_{fileName}.json'
self.alter_insert_cfg('databases', None)
with open(cfgFileName, 'w') as file:
json.dump(self.insert_cfg, file)
return cfgFileName
def generate_query_cfg(self, pathName, fileName):
cfgFileName = f'{pathName}/query_{fileName}.json'
self.alter_query_tb('sqls', None)
self.alter_query_stb('sqls', None)
self.alter_query_cfg('specified_table_query', None)
self.alter_query_cfg('super_table_query', None)
with open(cfgFileName, 'w') as file:
json.dump(self.query_cfg, file)
return cfgFileName
def generate_subscribe_cfg(self, pathName, fileName):
cfgFileName = f'{pathName}/subscribe_{fileName}.json'
self.alter_sub_tb('sqls', None)
self.alter_sub_stb('sqls', None)
self.alter_sub_cfg('specified_table_query', None)
self.alter_sub_cfg('super_table_query', None)
with open(cfgFileName, 'w') as file:
json.dump(self.sub_cfg, file)
return cfgFileName
# file generation functions ends
def drop_cfg_file(self, fileName):
os.remove(f'{fileName}')
taosdemoCfg = TDTaosdemoCfg()

View File

@ -35,7 +35,7 @@ int32_t main(int32_t argc, char *argv[]) {
for (int32_t i = 1; i < argc; ++i) {
if (strcmp(argv[i], "-c") == 0 && i < argc - 1) {
tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
tstrncpy(configDir, argv[++i], 128);
} else if (strcmp(argv[i], "-f") == 0 && i < argc - 1) {
strcpy(scriptFile, argv[++i]);
} else if (strcmp(argv[i], "-a") == 0) {
@ -75,4 +75,4 @@ int32_t main(int32_t argc, char *argv[]) {
simInfo("execute result %d", ret);
return ret;
}
}