Merge pull request #3592 from taosdata/develop
Merge from develop into release/s102
Commit 6e50c72d83
@@ -1019,5 +1019,5 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER
- The maximum table name length is 193; the maximum length of a single data row is 16k characters
- The maximum column name length is 65; at most 1024 columns are allowed, at least 2 columns are required, and the first column must be a timestamp
- At most 128 tags are allowed (0 is also valid); the total tag length must not exceed 16k characters
- The maximum SQL statement length is 65480 characters, but it can be changed via the system configuration parameter maxSQLLength, up to 8M at most
- The maximum SQL statement length is 65480 characters, but it can be changed via the system configuration parameter maxSQLLength, up to 1M at most
- The number of databases, super tables, and tables is not limited by the system; it is constrained only by system resources
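Since maxSQLLength bounds the full statement text, a client batching rows should flush before a statement crosses that limit. Below is a minimal Python sketch; the taos connector and its connect() signature are taken from the test scripts later in this PR, while the 1 MB constant, the helper name, and the database/table names are assumptions for illustration only.

```python
import taos  # TDengine Python connector, also used by the test scripts in this PR

MAX_SQL_LENGTH = 1 * 1024 * 1024  # assumed to mirror maxSQLLength in taos.cfg (1M here)

def batched_insert(cursor, table, rows):
    """Build multi-row INSERT statements that stay under MAX_SQL_LENGTH."""
    prefix = "insert into %s values" % table
    sql = prefix
    for ts, val in rows:
        piece = " (%d, %d)" % (ts, val)
        if len(sql) + len(piece) > MAX_SQL_LENGTH:
            cursor.execute(sql)   # flush the statement built so far
            sql = prefix
        sql += piece
    if sql != prefix:
        cursor.execute(sql)       # flush the remainder

# usage sketch: host/user/password/config mirror the values used in the test scripts
conn = taos.connect("127.0.0.1", "root", "taosdata", "/etc/taos")
cur = conn.cursor()
cur.execute("use db")  # assumes a database named db already exists
batched_insert(cur, "t0", [(1538548685000 + i, i) for i in range(100000)])
```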
@@ -67,7 +67,7 @@ The logical structure of the TDengine distributed architecture is shown below:
<center> Figure 1: TDengine architecture diagram </center>
A complete TDengine system runs on one or more physical nodes. Logically, it consists of data nodes (dnode), the TDengine client (taosc), and applications (app). A system contains one or more data nodes, and these data nodes form a cluster. Applications interact with the TDengine cluster through the taosc API. Each logical unit is briefly introduced below.

**Physical node (pnode):** A pnode is an independently running computer with its own compute, storage, and network capabilities. It can be a physical machine, a virtual machine, or a container with an OS installed. A physical node is identified by its configured FQDN (Fully Qualified Domain Name).
**Physical node (pnode):** A pnode is an independently running computer with its own compute, storage, and network capabilities. It can be a physical machine, a virtual machine, or a container with an OS installed. A physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDNs for network communication; if you are not familiar with FQDNs, see the blog post [一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html).

**Data node (dnode):** A dnode is a running instance of taosd, the TDengine server-side program, on a physical node; a working system must have at least one data node. A dnode contains zero or more logical virtual nodes (vnode) and zero or at most one logical management node (mnode). A dnode is uniquely identified in the system by its End Point (EP), which is the combination of the FQDN (Fully Qualified Domain Name) of the physical node it runs on and the network port number configured for the system. By configuring different ports, a single physical node (a physical machine, virtual machine, or container) can run multiple instances, i.e., host multiple data nodes.

@@ -2,13 +2,13 @@

Multiple TDengine servers, that is, multiple running instances of taosd, can form a cluster to ensure highly reliable operation of TDengine and to provide horizontal scalability. To understand TDengine 2.0 cluster management you need to be familiar with the basic cluster concepts; please read the chapter on the overall TDengine 2.0 architecture. Also, before setting up a cluster, first install and try out single-node operation by following the [Getting Started](https://www.taosdata.com/cn/getting-started20/) chapter.

Each data node in the cluster is uniquely identified by its End Point, which is the FQDN (Fully Qualified Domain Name) plus the port, for example h1.taosdata.com:6030. In general the FQDN is the server's hostname, which can be obtained with the Linux command `hostname -f`; for FQDN configuration see: [一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html). The port is the port number this data node uses to provide service; the default is 6030, but it can be changed via the serverPort parameter in taos.cfg. A physical node may have multiple hostnames configured; TDengine automatically takes the first one, but it can also be specified via the fqdn parameter in taos.cfg. If you prefer to access nodes directly by IP address, you can set the fqdn parameter to the IP address of the node.
Each data node in the cluster is uniquely identified by its End Point, which is the FQDN (Fully Qualified Domain Name) plus the port, for example h1.taosdata.com:6030. In general the FQDN is the server's hostname, which can be obtained with the Linux command `hostname -f` (for how to configure the FQDN, see: [一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)). The port is the port number this data node uses to provide service; the default is 6030, but it can be changed via the serverPort parameter in taos.cfg. A physical node may have multiple hostnames configured; TDengine automatically takes the first one, but it can also be specified via the fqdn parameter in taos.cfg. If you prefer to access nodes directly by IP address, you can set the fqdn parameter to the IP address of the node.
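Since an End Point is simply `FQDN:port`, with the FQDN normally coming from `hostname -f` and the port from serverPort (default 6030), the derivation can be sketched in a few lines of Python. This is an illustrative sketch only; the function name and defaults are assumptions, not part of TDengine.

```python
import subprocess

def local_end_point(fqdn=None, server_port=6030):
    """Compose an End Point as FQDN:port, falling back to `hostname -f` for the FQDN."""
    if fqdn is None:
        # same command the documentation recommends for finding the FQDN
        fqdn = subprocess.check_output(["hostname", "-f"], text=True).strip()
    return "%s:%d" % (fqdn, server_port)

print(local_end_point())                         # e.g. h1.taosdata.com:6030
print(local_end_point("h1.taosdata.com", 6030))  # explicit values from taos.cfg
```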
TDengine cluster management is extremely simple: apart from adding and removing nodes, which require manual intervention, everything else is done automatically, minimizing the operations workload. This chapter describes cluster management operations in detail.

## Preparation

**Step 0**: Plan the FQDNs of all physical nodes in the cluster and add each planned FQDN to /etc/hostname on its physical node; edit /etc/hosts on every physical node and add the IP-to-FQDN mappings of all cluster physical nodes [if DNS is deployed, ask your network administrator to configure it on the DNS server];
**Step 0**: If no DNS service is deployed, plan the FQDNs of all physical nodes in the cluster, then follow the steps in [一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html) to add the IP-to-FQDN mappings of all cluster physical nodes.

**Step 1**: If the physical nodes used to build the cluster contain earlier test data, had version 1.X installed, or had another version of TDengine installed, delete it first and wipe all data; for detailed steps see the blog post [《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html )
**Note 1:** Because the FQDN information is written to files, if the FQDN was not configured or was changed while TDengine had already been started, make sure the data is no longer needed or has been backed up, and then clean up the previous data (rm -rf /var/lib/taos/);
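The mapping Step 0 asks for is just one `IP FQDN` line per cluster node appended to /etc/hosts on every physical node. A small hedged sketch of generating those lines from a planned node list follows; the host names and IP addresses are made-up examples.

```python
# planned cluster nodes: FQDN -> IP (example values only)
planned_nodes = {
    "h1.taosdata.com": "192.168.0.1",
    "h2.taosdata.com": "192.168.0.2",
    "h3.taosdata.com": "192.168.0.3",
}

def hosts_entries(nodes):
    """Render the lines to append to /etc/hosts on every physical node."""
    return "\n".join("%s %s" % (ip, fqdn) for fqdn, ip in nodes.items())

print(hosts_entries(planned_nodes))
# append the output to /etc/hosts on each node (or hand it to the DNS administrator instead)
```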
@@ -22,7 +22,8 @@ TDengine cluster management is extremely simple; apart from adding and removing nodes, which require manual intervention

1. Run the command `hostname -f` on every physical node and confirm that all nodes have distinct hostnames (this check is not needed on nodes that only run the application driver);
2. On every physical node run `ping host`, where host is the hostname of another physical node, to check that the other physical nodes can be reached; if not, check the network settings, the /etc/hosts file (on Windows the default path is C:\Windows\system32\drivers\etc\hosts), or the DNS configuration. If the nodes cannot ping each other, the cluster cannot be formed;
3. The End Point of each data node is the hostname from that output plus the port number, for example h1.taosdata.com:6030
3. From the physical node where the application runs, ping the data nodes running taosd; if they cannot be pinged, the application will not be able to connect to taosd; in that case check the DNS settings or the hosts file of the physical node where the application runs;
4. The End Point of each data node is the hostname from that output plus the port number, for example h1.taosdata.com:6030

**Step 5**: Modify the TDengine configuration file (the file /etc/taos/taos.cfg on every node must be modified). Assume the first data node to be started has End Point h1.taosdata.com:6030; its cluster-related configuration parameters are as follows:
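The hostname and reachability checks above are easy to script. The following is a hedged Python sketch that verifies hostname uniqueness and mutual reachability before the cluster is configured; the node list is illustrative and the helper name is an assumption.

```python
import subprocess

nodes = ["h1.taosdata.com", "h2.taosdata.com", "h3.taosdata.com"]  # example FQDNs

def check_cluster_prereqs(hostnames):
    # all hostnames must be distinct, otherwise End Points would collide
    assert len(set(hostnames)) == len(hostnames), "duplicate hostnames found"
    for host in hostnames:
        # one ping per peer; a non-zero exit code means the node is unreachable
        result = subprocess.run(["ping", "-c", "1", host],
                                stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        if result.returncode != 0:
            print("cannot ping %s: check /etc/hosts or DNS" % host)
        else:
            print("%s is reachable" % host)

check_cluster_prereqs(nodes)
```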
@@ -30,8 +31,8 @@ TDengine cluster management is extremely simple; apart from adding and removing nodes, which require manual intervention
// firstEp is the first data node that each data node connects to after its initial startup
firstEp h1.taosdata.com:6030

// the FQDN of this data node; if the host has only one hostname, this need not be configured
fqdn h1.taosdata.com
// must be configured to the FQDN of this data node; if the host has only one hostname, this line can be commented out
fqdn h1.taosdata.com

// the port number this data node uses to provide service; the default is 6030
serverPort 6030
@@ -40,7 +41,7 @@ serverPort 6030
arbitrator ha.taosdata.com:6042
```

The parameters that must be modified are firstEp and fqdn; the other parameters need not be changed unless you know exactly why you are changing them.
The parameters that must be modified are firstEp and fqdn. On every data node, firstEp must be configured to the same value, **but fqdn must be set to the FQDN of the data node it belongs to**. The other parameters need not be changed unless you know exactly why you are changing them.

**A data node (dnode) joining the cluster must have exactly the same values for the 11 cluster-related parameters listed in the table below; otherwise it cannot join the cluster.**

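Because firstEp must be identical on every node while fqdn must name the local node, a consistency check over collected taos.cfg files catches the most common misconfiguration. The sketch below is hedged: the parser only handles the simple `name value` lines shown in the snippet above, the file paths are examples, and it checks only firstEp and fqdn rather than the full table of 11 parameters (which is not shown in this hunk).

```python
def parse_taos_cfg(path):
    """Very small parser for the `name value` lines shown in the snippet above."""
    params = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("//") or line.startswith("#"):
                continue
            name, _, value = line.partition(" ")
            params[name] = value.strip()
    return params

# example: taos.cfg copies gathered from each node (paths are illustrative)
cfgs = {fqdn: parse_taos_cfg("cfg/%s/taos.cfg" % fqdn)
        for fqdn in ["h1.taosdata.com", "h2.taosdata.com"]}

first_eps = {c.get("firstEp") for c in cfgs.values()}
assert len(first_eps) == 1, "firstEp differs between nodes: %s" % first_eps

for fqdn, cfg in cfgs.items():
    # fqdn must point at the node the file belongs to (or be omitted on single-hostname hosts)
    assert cfg.get("fqdn", fqdn) == fqdn, "%s has fqdn=%s" % (fqdn, cfg.get("fqdn"))
```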
@@ -114,6 +115,8 @@ taos>

## Data Node Management

The previous sections describe how to build a cluster from scratch. Once the cluster is running, you can add new data nodes at any time to scale out, remove data nodes, and check the current status of the cluster.

### Adding a Data Node

Start the CLI program taos, log in to the system with the root account, and execute:
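The statement to run in the taos CLI is cut off by this hunk. Based on TDengine 2.0's cluster-management SQL (not shown in this diff, so treat it as a hedged sketch rather than the documented command), registering and inspecting data nodes looks roughly like the following, here issued through the Python connector instead of the interactive CLI; the End Point value is an example.

```python
import taos

# connect as root, the account the documentation says to log in with
conn = taos.connect("h1.taosdata.com", "root", "taosdata", "/etc/taos")
cur = conn.cursor()

# register a new data node by its End Point (FQDN:port)
cur.execute("create dnode 'h2.taosdata.com:6030'")

# list the data nodes and their status to confirm the new one joined
cur.execute("show dnodes")
for row in cur.fetchall():
    print(row)
```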
@@ -30,10 +30,12 @@
#include "tlog.h"
#include "twal.h"

#define cError(...) { if (cqDebugFlag & DEBUG_ERROR) { taosPrintLog("ERROR CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cWarn(...) { if (cqDebugFlag & DEBUG_WARN) { taosPrintLog("WARN CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cFatal(...) { if (cqDebugFlag & DEBUG_FATAL) { taosPrintLog("CQ FATAL ", 255, __VA_ARGS__); }}
#define cError(...) { if (cqDebugFlag & DEBUG_ERROR) { taosPrintLog("CQ ERROR ", 255, __VA_ARGS__); }}
#define cWarn(...) { if (cqDebugFlag & DEBUG_WARN) { taosPrintLog("CQ WARN ", 255, __VA_ARGS__); }}
#define cInfo(...) { if (cqDebugFlag & DEBUG_INFO) { taosPrintLog("CQ ", 255, __VA_ARGS__); }}
#define cDebug(...) { if (cqDebugFlag & DEBUG_DEBUG) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cTrace(...) { if (cqDebugFlag & DEBUG_TRACE) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cPrint(...) { taosPrintLog("CQ ", 255, __VA_ARGS__); }

typedef struct {
  int vgId;
@@ -94,7 +96,7 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) {

  pthread_mutex_init(&pContext->mutex, NULL);

  cTrace("vgId:%d, CQ is opened", pContext->vgId);
  cInfo("vgId:%d, CQ is opened", pContext->vgId);

  return pContext;
}
@@ -125,7 +127,7 @@ void cqClose(void *handle) {
  taosTmrCleanUp(pContext->tmrCtrl);
  pContext->tmrCtrl = NULL;

  cTrace("vgId:%d, CQ is closed", pContext->vgId);
  cInfo("vgId:%d, CQ is closed", pContext->vgId);
  free(pContext);
}

@@ -133,7 +135,7 @@ void cqStart(void *handle) {
  SCqContext *pContext = handle;
  if (pContext->dbConn || pContext->master) return;

  cTrace("vgId:%d, start all CQs", pContext->vgId);
  cInfo("vgId:%d, start all CQs", pContext->vgId);
  pthread_mutex_lock(&pContext->mutex);

  pContext->master = 1;
@@ -149,7 +151,7 @@ void cqStart(void *handle) {

void cqStop(void *handle) {
  SCqContext *pContext = handle;
  cTrace("vgId:%d, stop all CQs", pContext->vgId);
  cInfo("vgId:%d, stop all CQs", pContext->vgId);
  if (pContext->dbConn == NULL || pContext->master == 0) return;

  pthread_mutex_lock(&pContext->mutex);
@@ -160,7 +162,7 @@ void cqStop(void *handle) {
    if (pObj->pStream) {
      taos_close_stream(pObj->pStream);
      pObj->pStream = NULL;
      cTrace("vgId:%d, id:%d CQ:%s is closed", pContext->vgId, pObj->tid, pObj->sqlStr);
      cInfo("vgId:%d, id:%d CQ:%s is closed", pContext->vgId, pObj->tid, pObj->sqlStr);
    } else {
      taosTmrStop(pObj->tmrId);
      pObj->tmrId = 0;
@@ -188,7 +190,7 @@ void *cqCreate(void *handle, uint64_t uid, int tid, char *sqlStr, STSchema *pSch
  pObj->pSchema = tdDupSchema(pSchema);
  pObj->rowSize = schemaTLen(pSchema);

  cTrace("vgId:%d, id:%d CQ:%s is created", pContext->vgId, pObj->tid, pObj->sqlStr);
  cInfo("vgId:%d, id:%d CQ:%s is created", pContext->vgId, pObj->tid, pObj->sqlStr);

  pthread_mutex_lock(&pContext->mutex);

@@ -228,7 +230,7 @@ void cqDrop(void *handle) {
    pObj->tmrId = 0;
  }

  cTrace("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr);
  cInfo("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr);
  tdFreeSchema(pObj->pSchema);
  free(pObj->sqlStr);
  free(pObj);
@@ -262,7 +264,7 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
  pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, 0, pObj, NULL);
  if (pObj->pStream) {
    pContext->num++;
    cTrace("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr);
    cInfo("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr);
  } else {
    cError("vgId:%d, id:%d CQ:%s, failed to open", pContext->vgId, pObj->tid, pObj->sqlStr);
  }
@@ -278,7 +280,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
  STSchema *pSchema = pObj->pSchema;
  if (pObj->pStream == NULL) return;

  cTrace("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr);
  cDebug("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr);

  int size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_DATA_ROW_HEAD_SIZE + pObj->rowSize;
  char *buffer = calloc(size, 1);
@@ -257,20 +257,20 @@ void httpProcessSingleSqlCallBackImp(void *param, TAOS_RES *result, int unUsedCo
  HttpEncodeMethod *encode = pContext->encodeMethod;

  if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
    httpError("context:%p, fd:%d, user:%s, query error, taos:%p, code:%s:inprogress, sqlObj:%p", pContext, pContext->fd,
              pContext->user, pContext->session->taos, tstrerror(code), (SSqlObj *)result);
    httpError("context:%p, fd:%d, user:%s, query error, code:%s:inprogress, sqlObj:%p", pContext, pContext->fd,
              pContext->user, tstrerror(code), (SSqlObj *)result);
    return;
  }

  if (code < 0) {
    SSqlObj *pObj = (SSqlObj *)result;
    if (code == TSDB_CODE_TSC_INVALID_SQL) {
      httpError("context:%p, fd:%d, user:%s, query error, taos:%p, code:%s, sqlObj:%p, error:%s", pContext,
                pContext->fd, pContext->user, pContext->session->taos, tstrerror(code), pObj, pObj->cmd.payload);
      httpError("context:%p, fd:%d, user:%s, query error, code:%s, sqlObj:%p, error:%s", pContext,
                pContext->fd, pContext->user, tstrerror(code), pObj, pObj->cmd.payload);
      httpSendTaosdInvalidSqlErrorResp(pContext, pObj->cmd.payload);
    } else {
      httpError("context:%p, fd:%d, user:%s, query error, taos:%p, code:%s, sqlObj:%p", pContext, pContext->fd,
                pContext->user, pContext->session->taos, tstrerror(code), pObj);
      httpError("context:%p, fd:%d, user:%s, query error, code:%s, sqlObj:%p", pContext, pContext->fd,
                pContext->user, tstrerror(code), pObj);
      httpSendErrorResp(pContext, code);
    }
    taos_free_result(result);
@@ -511,9 +511,9 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) {
  sInfo("%s, it is configured", pPeer->id);
  int ret = strcmp(pPeer->fqdn, tsNodeFqdn);
  if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) {
    sDebug("%s, start to check peer connection", pPeer->id);
    int32_t checkMs = 100 + (pNode->vgId * 10) % 100;
    if (pNode->vgId) checkMs = tsStatusInterval * 2000 + 100;
    if (pNode->vgId > 1) checkMs = tsStatusInterval * 2000 + checkMs;
    sDebug("%s, start to check peer connection after %d ms", pPeer->id, checkMs);
    taosTmrReset(syncCheckPeerConnection, checkMs, pPeer, syncTmrCtrl, &pPeer->timer);
  }

@@ -2085,6 +2085,8 @@ STimeWindow changeTableGroupByLastrow(STableGroupInfo *groupList) {
      totalNumOfTable++;
      taosArrayPush(pGroup, &keyInfo);
    } else {
      taosArrayDestroy(pGroup);

      taosArrayRemove(groupList->pGroupList, j);
      numOfGroups -= 1;
      j -= 1;
@@ -0,0 +1,96 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import os
import sys
sys.path.insert(0, os.getcwd())
from util.log import *
from util.sql import *
from util.dnodes import *
import taos


class TwoClients:
    def initConnection(self):
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/home/xp/git/TDengine/sim/dnode1/cfg"

    def run(self):
        tdDnodes.init("")
        tdDnodes.setTestCluster(False)
        tdDnodes.setValgrind(False)

        tdDnodes.stopAll()
        tdDnodes.deploy(1)
        tdDnodes.start(1)

        # the first client creates a super table (STable) and inserts data
        conn1 = taos.connect(self.host, self.user, self.password, self.config)
        cursor1 = conn1.cursor()
        cursor1.execute("drop database if exists db")
        cursor1.execute("create database db")
        cursor1.execute("use db")
        cursor1.execute("create table tb (ts timestamp, id int) tags(loc nchar(30))")
        cursor1.execute("insert into t0 using tb tags('beijing') values(now, 1)")

        # the second client alters the table created by the first client
        conn2 = taos.connect(self.host, self.user, self.password, self.config)
        cursor2 = conn2.cursor()
        cursor2.execute("use db")
        cursor2.execute("alter table tb add column name nchar(30)")

        # the first client should not be able to use the original metadata
        tdSql.init(cursor1, True)
        tdSql.error("insert into t0 values(now, 2)")

        # the first client should be able to insert data with the updated metadata
        tdSql.execute("insert into t0 values(now, 2, 'test')")
        tdSql.query("select * from tb")
        tdSql.checkRows(2)

        # the second client drops the table
        cursor2.execute("drop table t0")
        cursor2.execute("create table t0 using tb tags('beijing')")

        tdSql.execute("insert into t0 values(now, 2, 'test')")
        tdSql.query("select * from tb")
        tdSql.checkRows(1)

        # an error is expected when two clients drop the same column
        cursor2.execute("alter table tb drop column name")
        tdSql.error("alter table tb drop column name")

        cursor2.execute("alter table tb add column speed int")
        tdSql.error("alter table tb add column speed int")


        tdSql.execute("alter table tb add column size int")
        tdSql.query("describe tb")
        tdSql.checkRows(5)
        tdSql.checkData(0, 0, "ts")
        tdSql.checkData(1, 0, "id")
        tdSql.checkData(2, 0, "speed")
        tdSql.checkData(3, 0, "size")
        tdSql.checkData(4, 0, "loc")


        cursor1.close()
        cursor2.close()
        conn1.close()
        conn2.close()

clients = TwoClients()
clients.initConnection()
clients.run()
@@ -187,6 +187,8 @@ python3 ./test.py -f functions/function_top.py
#python3 ./test.py -f functions/function_twa.py
python3 queryCount.py
python3 ./test.py -f query/queryGroupbyWithInterval.py
python3 client/twoClients.py
python3 test.py -f query/queryInterval.py

# tools
python3 test.py -f tools/taosdemo.py
@@ -0,0 +1,62 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

        self.ts = 1538548685000

    def run(self):
        tdSql.prepare()

        tdSql.execute("create table st (ts timestamp, voltage int) tags (loc nchar(30))")
        tdSql.execute("insert into t0 using st tags('beijing') values(now, 220) (now - 15d, 221) (now - 30d, 225) (now - 35d, 228) (now - 45d, 222)")
        tdSql.execute("insert into t1 using st tags('shanghai') values(now, 220) (now - 60d, 221) (now - 50d, 225) (now - 40d, 228) (now - 20d, 222)")

        tdSql.query("select avg(voltage) from st interval(1n)")
        tdSql.checkRows(3)
        tdSql.checkData(0, 1, 223.0)
        tdSql.checkData(1, 1, 225.0)
        tdSql.checkData(2, 1, 220.333333)

        tdSql.query("select avg(voltage) from st interval(1n, 15d)")
        tdSql.checkRows(3)
        tdSql.checkData(0, 1, 224.8)
        tdSql.checkData(1, 1, 222.666666)
        tdSql.checkData(2, 1, 220.0)

        tdSql.query("select avg(voltage) from st interval(1n, 15d) group by loc")
        tdSql.checkRows(6)
        tdSql.checkData(0, 1, 225.0)
        tdSql.checkData(1, 1, 223.0)
        tdSql.checkData(2, 1, 220.0)
        tdSql.checkData(3, 1, 224.666666)
        tdSql.checkData(4, 1, 222.0)
        tdSql.checkData(5, 1, 220.0)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -123,8 +123,8 @@ class TDSql:

    def checkData(self, row, col, data):
        self.checkRowCol(row, col)
        if self.queryResult[row][col] != data:
            if str(self.queryResult[row][col]) != str(data):
        if self.queryResult[row][col] != data:
            if str(self.queryResult[row][col]) == str(data):
                tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                           (self.sql, row, col, self.queryResult[row][col], data))
                return
@@ -3,7 +3,7 @@ sleep 3000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c maxSQLLength -v 7340032
system sh/cfg.sh -n dnode1 -c maxSQLLength -v 340032
system sh/exec.sh -n dnode1 -s start

sleep 3000