Merge remote-tracking branch 'origin/develop' into feature/mac
commit e60b11fe8e
@@ -852,7 +852,7 @@ npm install td2.0-connector

### Linux

- `python` (`v2.7` is recommended; `v3.x.x` is not supported yet)
- `node` must be v10.x; other versions have package-compatibility problems.
- `node` 2.0.6 supports v12.x and v10.x; 2.0.5 and earlier support v10.x only; other versions may have package-compatibility problems.
- `make`
- A C compiler, such as <a href="https://gcc.gnu.org">GCC</a>
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:

setuptools.setup(
    name="taos",
    version="2.0.4",
    version="2.0.5",
    author="Taosdata Inc.",
    author_email="support@taosdata.com",
    description="TDengine python client package",
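A small, hedged sketch of how the bumped connector version can be confirmed from an installed environment; `importlib.metadata` is standard-library Python 3.8+, and the expected string simply mirrors the `version=` field above rather than anything else in this diff.

```python
# Confirm which version of the installed "taos" package is present
# (expected to report 2.0.5 once this change is released).
from importlib.metadata import version

print(version("taos"))
```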
@@ -1,7 +1,6 @@
from .cinterface import CTaosInterface
from .error import *
from .constants import FieldType
import threading


class TDengineCursor(object):

@@ -36,7 +35,6 @@ class TDengineCursor(object):
        self._block_iter = 0
        self._affected_rows = 0
        self._logfile = ""
        self._threadId = threading.get_ident()

        if connection is not None:
            self._connection = connection
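Because the hunks above touch `TDengineCursor` internals, here is a hedged sketch of the cursor's public, DB-API-style surface that the change leaves intact; the host and credentials are illustrative placeholders, not values taken from this diff.

```python
# Hedged usage sketch of the public cursor API; connection parameters are placeholders.
import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata")
cur = conn.cursor()
cur.execute("show databases")
for row in cur.fetchall():
    print(row)
cur.close()
conn.close()
```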
@@ -93,6 +93,8 @@ extern char configDir[];
#define MAX_QUERY_SQL_COUNT 10
#define MAX_QUERY_SQL_LENGTH 256

#define MAX_DATABASE_COUNT 256

typedef enum CREATE_SUB_TALBE_MOD_EN {
  PRE_CREATE_SUBTBL,
  AUTO_CREATE_SUBTBL,

@@ -116,7 +118,41 @@ enum QUERY_TYPE {
  INSERT_TYPE,
  QUERY_TYPE_BUT
} ;

enum _show_db_index {
  TSDB_SHOW_DB_NAME_INDEX,
  TSDB_SHOW_DB_CREATED_TIME_INDEX,
  TSDB_SHOW_DB_NTABLES_INDEX,
  TSDB_SHOW_DB_VGROUPS_INDEX,
  TSDB_SHOW_DB_REPLICA_INDEX,
  TSDB_SHOW_DB_QUORUM_INDEX,
  TSDB_SHOW_DB_DAYS_INDEX,
  TSDB_SHOW_DB_KEEP_INDEX,
  TSDB_SHOW_DB_CACHE_INDEX,
  TSDB_SHOW_DB_BLOCKS_INDEX,
  TSDB_SHOW_DB_MINROWS_INDEX,
  TSDB_SHOW_DB_MAXROWS_INDEX,
  TSDB_SHOW_DB_WALLEVEL_INDEX,
  TSDB_SHOW_DB_FSYNC_INDEX,
  TSDB_SHOW_DB_COMP_INDEX,
  TSDB_SHOW_DB_CACHELAST_INDEX,
  TSDB_SHOW_DB_PRECISION_INDEX,
  TSDB_SHOW_DB_UPDATE_INDEX,
  TSDB_SHOW_DB_STATUS_INDEX,
  TSDB_MAX_SHOW_DB
};

// -----------------------------------------SHOW TABLES CONFIGURE -------------------------------------
enum _show_stables_index {
  TSDB_SHOW_STABLES_NAME_INDEX,
  TSDB_SHOW_STABLES_CREATED_TIME_INDEX,
  TSDB_SHOW_STABLES_COLUMNS_INDEX,
  TSDB_SHOW_STABLES_METRIC_INDEX,
  TSDB_SHOW_STABLES_UID_INDEX,
  TSDB_SHOW_STABLES_TID_INDEX,
  TSDB_SHOW_STABLES_VGID_INDEX,
  TSDB_MAX_SHOW_STABLES
};
enum _describe_table_index {
  TSDB_DESCRIBE_METRIC_FIELD_INDEX,
  TSDB_DESCRIBE_METRIC_TYPE_INDEX,

@@ -219,6 +255,28 @@ typedef struct SSuperTable_S {
  int64_t totalAffectedRows;
} SSuperTable;

typedef struct {
  char name[TSDB_DB_NAME_LEN + 1];
  char create_time[32];
  int32_t ntables;
  int32_t vgroups;
  int16_t replica;
  int16_t quorum;
  int16_t days;
  char keeplist[32];
  int32_t cache; //MB
  int32_t blocks;
  int32_t minrows;
  int32_t maxrows;
  int8_t wallevel;
  int32_t fsync;
  int8_t comp;
  int8_t cachelast;
  char precision[8]; // time resolution
  int8_t update;
  char status[16];
} SDbInfo;

typedef struct SDbCfg_S {
  // int maxtablesPerVnode;
  int minRows;
@@ -1126,6 +1184,272 @@ static void printfQueryMeta() {
  printf("\033[1m\033[40;32m================ query.json parse result ================\033[0m\n");
}


static char* xFormatTimestamp(char* buf, int64_t val, int precision) {
  time_t tt;
  if (precision == TSDB_TIME_PRECISION_MICRO) {
    tt = (time_t)(val / 1000000);
  } else {
    tt = (time_t)(val / 1000);
  }

  /* comment out as it make testcases like select_with_tags.sim fail.
    but in windows, this may cause the call to localtime crash if tt < 0,
    need to find a better solution.
    if (tt < 0) {
      tt = 0;
    }
  */

#ifdef WINDOWS
  if (tt < 0) tt = 0;
#endif

  struct tm* ptm = localtime(&tt);
  size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);

  if (precision == TSDB_TIME_PRECISION_MICRO) {
    sprintf(buf + pos, ".%06d", (int)(val % 1000000));
  } else {
    sprintf(buf + pos, ".%03d", (int)(val % 1000));
  }

  return buf;
}
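For readers following along in Python, a rough equivalent of the formatting rule above (seconds rendered via strftime, plus a 6-digit fraction for microsecond precision or a 3-digit one for milliseconds); this is an illustrative sketch, not part of the patch.

```python
# Illustrative Python sketch of the same split used by xFormatTimestamp above.
from datetime import datetime

def format_timestamp(val: int, micro: bool) -> str:
    divisor = 1_000_000 if micro else 1_000
    seconds, frac = divmod(val, divisor)
    base = datetime.fromtimestamp(seconds).strftime("%Y-%m-%d %H:%M:%S")
    return f"{base}.{frac:06d}" if micro else f"{base}.{frac:03d}"

# Prints the local-time rendering with a 3-digit millisecond fraction.
print(format_timestamp(1537146000123, micro=False))
```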
static void xDumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_t length, int precision) {
  if (val == NULL) {
    fprintf(fp, "%s", TSDB_DATA_NULL_STR);
    return;
  }

  char buf[TSDB_MAX_BYTES_PER_ROW];
  switch (field->type) {
    case TSDB_DATA_TYPE_BOOL:
      fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0));
      break;
    case TSDB_DATA_TYPE_TINYINT:
      fprintf(fp, "%d", *((int8_t *)val));
      break;
    case TSDB_DATA_TYPE_SMALLINT:
      fprintf(fp, "%d", *((int16_t *)val));
      break;
    case TSDB_DATA_TYPE_INT:
      fprintf(fp, "%d", *((int32_t *)val));
      break;
    case TSDB_DATA_TYPE_BIGINT:
      fprintf(fp, "%" PRId64, *((int64_t *)val));
      break;
    case TSDB_DATA_TYPE_FLOAT:
      fprintf(fp, "%.5f", GET_FLOAT_VAL(val));
      break;
    case TSDB_DATA_TYPE_DOUBLE:
      fprintf(fp, "%.9f", GET_DOUBLE_VAL(val));
      break;
    case TSDB_DATA_TYPE_BINARY:
    case TSDB_DATA_TYPE_NCHAR:
      memcpy(buf, val, length);
      buf[length] = 0;
      fprintf(fp, "\'%s\'", buf);
      break;
    case TSDB_DATA_TYPE_TIMESTAMP:
      xFormatTimestamp(buf, *(int64_t*)val, precision);
      fprintf(fp, "'%s'", buf);
      break;
    default:
      break;
  }
}

static int xDumpResultToFile(const char* fname, TAOS_RES* tres) {
  TAOS_ROW row = taos_fetch_row(tres);
  if (row == NULL) {
    return 0;
  }

  FILE* fp = fopen(fname, "at");
  if (fp == NULL) {
    fprintf(stderr, "ERROR: failed to open file: %s\n", fname);
    return -1;
  }

  int num_fields = taos_num_fields(tres);
  TAOS_FIELD *fields = taos_fetch_fields(tres);
  int precision = taos_result_precision(tres);

  for (int col = 0; col < num_fields; col++) {
    if (col > 0) {
      fprintf(fp, ",");
    }
    fprintf(fp, "%s", fields[col].name);
  }
  fputc('\n', fp);

  int numOfRows = 0;
  do {
    int32_t* length = taos_fetch_lengths(tres);
    for (int i = 0; i < num_fields; i++) {
      if (i > 0) {
        fputc(',', fp);
      }
      xDumpFieldToFile(fp, (const char*)row[i], fields +i, length[i], precision);
    }
    fputc('\n', fp);

    numOfRows++;
    row = taos_fetch_row(tres);
  } while( row != NULL);

  fclose(fp);

  return numOfRows;
}

static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
  TAOS_RES * res;
  TAOS_ROW row = NULL;
  int count = 0;

  res = taos_query(taos, "show databases;");
  int32_t code = taos_errno(res);

  if (code != 0) {
    fprintf(stderr, "failed to run <show databases>, reason: %s\n", taos_errstr(res));
    return -1;
  }

  TAOS_FIELD *fields = taos_fetch_fields(res);

  while ((row = taos_fetch_row(res)) != NULL) {
    // sys database name : 'log'
    if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) continue;

    dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
    if (dbInfos[count] == NULL) {
      fprintf(stderr, "failed to allocate memory for some dbInfo[%d]\n", count);
      return -1;
    }

    strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
    xFormatTimestamp(dbInfos[count]->create_time, *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], TSDB_TIME_PRECISION_MILLI);
    dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
    dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
    dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
    dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
    dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);

    strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
    dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
    dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
    dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
    dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
    dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
    dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
    dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
    dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));

    strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
    dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
    strncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], fields[TSDB_SHOW_DB_STATUS_INDEX].bytes);

    count++;
    if (count > MAX_DATABASE_COUNT) {
      fprintf(stderr, "The database count overflow than %d\n", MAX_DATABASE_COUNT);
      break;
    }
  }

  return count;
}

static void printfDbInfoForQueryToFile(char* filename, SDbInfo* dbInfos, int index) {
  FILE *fp = NULL;
  if (filename[0] != 0) {
    fp = fopen(filename, "at");
    if (fp == NULL) {
      fprintf(stderr, "failed to open file: %s\n", filename);
      return;
    }
  }

  fprintf(fp, "================ database[%d] ================\n", index);
  fprintf(fp, "name: %s\n", dbInfos->name);
  fprintf(fp, "created_time: %s\n", dbInfos->create_time);
  fprintf(fp, "ntables: %d\n", dbInfos->ntables);
  fprintf(fp, "vgroups: %d\n", dbInfos->vgroups);
  fprintf(fp, "replica: %d\n", dbInfos->replica);
  fprintf(fp, "quorum: %d\n", dbInfos->quorum);
  fprintf(fp, "days: %d\n", dbInfos->days);
  fprintf(fp, "keep1,keep2,keep(D): %s\n", dbInfos->keeplist);
  fprintf(fp, "cache(MB): %d\n", dbInfos->cache);
  fprintf(fp, "blocks: %d\n", dbInfos->blocks);
  fprintf(fp, "minrows: %d\n", dbInfos->minrows);
  fprintf(fp, "maxrows: %d\n", dbInfos->maxrows);
  fprintf(fp, "wallevel: %d\n", dbInfos->wallevel);
  fprintf(fp, "fsync: %d\n", dbInfos->fsync);
  fprintf(fp, "comp: %d\n", dbInfos->comp);
  fprintf(fp, "cachelast: %d\n", dbInfos->cachelast);
  fprintf(fp, "precision: %s\n", dbInfos->precision);
  fprintf(fp, "update: %d\n", dbInfos->update);
  fprintf(fp, "status: %s\n", dbInfos->status);
  fprintf(fp, "\n");

  fclose(fp);
}

static void printfQuerySystemInfo(TAOS * taos) {
  char filename[MAX_QUERY_SQL_LENGTH+1] = {0};
  char buffer[MAX_QUERY_SQL_LENGTH+1] = {0};
  TAOS_RES* res;

  time_t t;
  struct tm* lt;
  time(&t);
  lt = localtime(&t);
  snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec);

  // show variables
  res = taos_query(taos, "show variables;");
  //getResult(res, filename);
  xDumpResultToFile(filename, res);

  // show dnodes
  res = taos_query(taos, "show dnodes;");
  xDumpResultToFile(filename, res);
  //getResult(res, filename);

  // show databases
  res = taos_query(taos, "show databases;");
  SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *));
  if (dbInfos == NULL) {
    fprintf(stderr, "failed to allocate memory\n");
    return;
  }
  int dbCount = getDbFromServer(taos, dbInfos);
  if (dbCount <= 0) return;

  for (int i = 0; i < dbCount; i++) {
    // printf database info
    printfDbInfoForQueryToFile(filename, dbInfos[i], i);

    // show db.vgroups
    snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name);
    res = taos_query(taos, buffer);
    xDumpResultToFile(filename, res);

    // show db.stables
    snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name);
    res = taos_query(taos, buffer);
    xDumpResultToFile(filename, res);

    free(dbInfos[i]);
  }

  free(dbInfos);

}


#ifdef TD_LOWA_CURL
static size_t responseCallback(void *contents, size_t size, size_t nmemb, void *userp)
{
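As a cross-check of what the new C helpers above produce, here is a hedged Python sketch of the same "dump system info" flow through the Python client; the connection parameters and output path are illustrative placeholders, and this is not part of the patch.

```python
# Hedged Python sketch of the printfQuerySystemInfo flow above: dump
# "show variables", "show dnodes", then per-database vgroups/stables to a file.
import taos

def dump(cursor, sql, fp):
    cursor.execute(sql)
    fp.write("== %s ==\n" % sql)
    for row in cursor.fetchall():
        fp.write(",".join(str(v) for v in row) + "\n")

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata")
cur = conn.cursor()
with open("querySystemInfo.txt", "at") as fp:
    dump(cur, "show variables", fp)
    dump(cur, "show dnodes", fp)
    cur.execute("show databases")
    dbs = [row[0] for row in cur.fetchall() if row[0] != "log"]  # skip the sys db, as the C code does
    for db in dbs:
        dump(cur, "show %s.vgroups" % db, fp)
        dump(cur, "show %s.stables" % db, fp)
cur.close()
conn.close()
```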
@@ -4134,7 +4458,7 @@ void *subQueryProcess(void *sarg) {

int queryTestProcess() {
  TAOS * taos = NULL;
  taos_init();
  taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, g_queryInfo.dbName, g_queryInfo.port);
  taos = taos_connect(g_queryInfo.host, g_queryInfo.user, g_queryInfo.password, NULL, g_queryInfo.port);
  if (taos == NULL) {
    fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
    exit(-1);

@@ -4147,6 +4471,8 @@ int queryTestProcess() {
  printfQueryMeta();
  printf("Press enter key to continue\n\n");
  (void)getchar();

  printfQuerySystemInfo(taos);

  pthread_t *pids = NULL;
  threadInfo *infos = NULL;
@@ -1081,20 +1081,13 @@ static int32_t mnodeDropSuperTableCb(SMnodeMsg *pMsg, int32_t code) {
  SSTableObj *pTable = (SSTableObj *)pMsg->pTable;
  if (code != TSDB_CODE_SUCCESS) {
    mError("msg:%p, app:%p stable:%s, failed to drop, sdb error", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId);
  } else {
    mLInfo("msg:%p, app:%p stable:%s, is dropped from sdb", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId);

    return code;
  }

  return code;
}

static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) {
  if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR;
  mLInfo("msg:%p, app:%p stable:%s, is dropped from sdb", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId);

  SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
  mInfo("msg:%p, app:%p stable:%s will be dropped, hash:%p sizeOfVgList:%d", pMsg, pMsg->rpcMsg.ahandle,
      pStable->info.tableId, pStable->vgHash, taosHashGetSize(pStable->vgHash));

  if (pStable->vgHash != NULL /*pStable->numOfTables != 0*/) {
    int32_t *pVgId = taosHashIterate(pStable->vgHash, NULL);
    while (pVgId) {

@@ -1122,6 +1115,16 @@ static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) {
    mnodeDropAllChildTablesInStable(pStable);
  }

  return TSDB_CODE_SUCCESS;
}

static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) {
  if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR;

  SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
  mInfo("msg:%p, app:%p stable:%s will be dropped, hash:%p sizeOfVgList:%d", pMsg, pMsg->rpcMsg.ahandle,
      pStable->info.tableId, pStable->vgHash, taosHashGetSize(pStable->vgHash));

  SSdbRow row = {
    .type = SDB_OPER_GLOBAL,
    .pTable = tsSuperTableSdb,
@@ -188,11 +188,7 @@ static void removeTimer(uintptr_t id) {
}

static int64_t getMonotonicMs(void) {
#ifdef WINDOWS
  return (int64_t) getMonotonicUs() / 1000;
#else
  return taosGetTimestampMs();
#endif
}

static void addToWheel(tmr_obj_t* timer, uint32_t delay) {
@@ -0,0 +1,91 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

        self.rowNum = 100
        self.ts = 1537146000000
        self.clist1 = []
        self.clist2 = []
        self.clist3 = []
        self.clist4 = []
        self.clist5 = []
        self.clist6 = []

    def getData(self):
        for i in range(tdSql.queryRows):
            for j in range(6):
                exec('self.clist{}.append(tdSql.queryResult[i][j+1])'.format(j+1))

    def run(self):
        tdSql.prepare()

        tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
                    col7 bool, col8 binary(20), col9 nchar(20)) tags(cid int,gbid binary(20),loc nchar(20))''')
        tdSql.execute("create table test1 using test tags(1,'beijing','北京')")
        tdSql.execute("create table test2 using test tags(2,'shanghai','深圳')")
        tdSql.execute("create table test3 using test tags(2,'shenzhen','深圳')")
        tdSql.execute("create table test4 using test tags(1,'shanghai','上海')")
        for j in range(4):
            for i in range(self.rowNum):
                tdSql.execute("insert into test%d values(now-%dh, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
                              % (j+1, i, i + 1, i + 1, i + 1, i + 1, i + i * 0.1, i * 1.5, i % 2, i + 1, i + 1))

        # stddev verification
        tdSql.error("select stddev(ts) from test")
        tdSql.error("select stddev(col7) from test")
        tdSql.error("select stddev(col8) from test")
        tdSql.error("select stddev(col9) from test")

        con_list = [
            ' where cid = 1 and ts >=now - 1d and ts <now',
            " where gbid = 'beijing' and ts >=now - 1d and ts <now",
            ' '
        ]
        for condition in con_list:
            tdSql.query("select * from test %s" % (condition))
            self.getData()
            for i in range(6):
                exec('tdSql.query("select stddev(col{}) from test {}")'.format(i+1, condition))
                exec('tdSql.checkData(0, 0, np.std(self.clist{}))'.format(i+1))
                exec('self.clist{}.clear()'.format(i+1))
        print('step 2')
        con_group_list = {
            ' cid = 2 and ts >=now - 1d and ts <now group by tbname': 2,
            " loc = '深圳' and ts >=now - 1d and ts <now group by tbname ": 2,
            " gbid = 'shanghai' and ts >=now - 1d and ts <now group by cid ": 2
        }
        result = [6.922186552, 6.922186552, 6.922186552, 6.922186552, 7.614405212, 10.383279829]
        for key, value in con_group_list.items():
            for i in range(6):
                exec('tdSql.query("select stddev(col{}) from test where {}")'.format(i+1, key))
                for j in range(value):
                    tdSql.checkData(j, 0, result[i])

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())