Merge remote-tracking branch 'origin/develop' into feature/http
commit 8aec38f659
@@ -56,4 +56,4 @@ INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 21
 ## Multi-Column Model vs. Single-Column Model
 TDengine supports a multi-column model: as long as the physical metrics are collected by the same data collection point at the same time (i.e. they share one timestamp), they can be stored as different columns of a single super table. The opposite extreme is the single-column model, in which every collected metric gets its own table, so every metric type gets its own super table; for current, voltage and phase, for example, three super tables would be created.
 
-TDengine recommends the multi-column model wherever possible, because insert and storage efficiency are higher. In some scenarios, however, the kinds of metrics collected at a point change frequently; the multi-column model then forces frequent changes to the super table schema and complicates the application, and in that case the single-column model will appear simpler.
+TDengine recommends the multi-column model wherever possible, because insert and storage efficiency are higher. In some scenarios, however, the kinds of metrics collected at a point change frequently; the multi-column model then forces frequent changes to the super table schema and complicates the application, and in that case the single-column model is simpler.
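To make the two modelling choices above concrete, here is a minimal sketch (not part of this commit) that creates one multi-column super table versus three single-column super tables for the current/voltage/phase example, using the same JDBC driver that appears later in this commit. The host, database, table, and tag names are illustrative assumptions.

```
import com.taosdata.jdbc.TSDBDriver;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import java.util.Properties;

public class ModelSketch {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        Properties props = new Properties();
        // "localhost" is a placeholder; point it at a TDengine server you control.
        props.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
        try (Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:0/", props);
             Statement stmt = conn.createStatement()) {
            stmt.execute("create database if not exists power");
            stmt.execute("use power");
            // Multi-column model: metrics sampled with one timestamp share one super table.
            stmt.execute("create table if not exists meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int)");
            // Single-column model: one super table per metric type.
            stmt.execute("create table if not exists current_st (ts timestamp, current float) tags (location binary(64), groupId int)");
            stmt.execute("create table if not exists voltage_st (ts timestamp, voltage int) tags (location binary(64), groupId int)");
            stmt.execute("create table if not exists phase_st (ts timestamp, phase float) tags (location binary(64), groupId int)");
        }
    }
}
```

With the multi-column `meters` table, adding a new metric to the collection point requires a schema change, which is exactly the trade-off the paragraph above describes.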
@@ -9,8 +9,8 @@ TDengine cluster management is extremely simple: apart from adding and removing nodes, which require manual intervention
 ## Preparation
 
 **Step 1**: If any node that will join the cluster holds earlier test data, once had a 1.x release installed, or had any other version of TDengine installed, remove it first and wipe all of its data. For the detailed steps see the blog post [《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html).
-**Note 1:** Because the FQDN is written into the data files, if TDengine was started before the FQDN was configured, or the FQDN was changed afterwards, make sure the existing data is either useless or backed up, then clean it out (rm -rf /var/lib/taos/)
-**Note 2:** The client side must also be configured: add the FQDN of every server in the cluster to the client (on Windows, C:\Windows\System32\drivers\etc\hosts).
+**Note 1:** Because the FQDN is written into the data files, if TDengine was started before the FQDN was configured, or the FQDN was changed afterwards, make sure the existing data is either useless or backed up, then clean it out (rm -rf /var/lib/taos/);
+**Note 2:** The client side must also be configured so that it can correctly resolve the FQDN of every node, whether through a DNS service or through the hosts file.
 
 **Step 2**: It is recommended to disable the firewall, or at the very least to keep TCP and UDP ports 6030-6042 open. It is **strongly recommended** to disable the firewall first and configure the ports only after the cluster has been set up;
 
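Note 2 above only asks that the client be able to resolve every node's FQDN, so a quick resolution check is often enough before going further. The sketch below is illustrative and not part of this commit; h1.taosdata.com and h2.taosdata.com are hypothetical node names.

```
import java.net.InetAddress;
import java.net.UnknownHostException;

public class FqdnCheck {
    public static void main(String[] args) {
        // Hypothetical FQDNs; replace them with the End Points configured in taos.cfg.
        String[] fqdns = {"h1.taosdata.com", "h2.taosdata.com"};
        for (String fqdn : fqdns) {
            try {
                InetAddress addr = InetAddress.getByName(fqdn);
                System.out.println("[ OK ] " + fqdn + " -> " + addr.getHostAddress());
            } catch (UnknownHostException e) {
                System.out.println("[ ERROR ] cannot resolve " + fqdn + "; check DNS or the hosts file");
            }
        }
    }
}
```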
@@ -25,7 +25,7 @@ TDengine cluster management is extremely simple: apart from adding and removing nodes, which require manual intervention
 **Step 5**: Edit the TDengine configuration file (/etc/taos/taos.cfg must be modified on every node). Assuming the End Point of the first node to be started is h1.taosdata.com:6030, the following parameters are cluster-related:
 
 ```
-// firstEp is the first node this node connects to after it starts
+// firstEp is configured identically on every node in the cluster; after the first access to it, the information of the whole cluster is obtained
 firstEp h1.taosdata.com:6030
 
 // the FQDN of this node; if the host has only one hostname, it does not need to be configured
@@ -34,7 +34,7 @@ fqdn h1.taosdata.com
 // the port of this node; the default is 6030
 serverPort 6030
 
-// required when the number of replicas is even; see the section on using an Arbitrator
+// required when the number of server nodes is even; see the section on using an Arbitrator
 arbitrator ha.taosdata.com:6042
 ```
 
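To illustrate how a client picks up the firstEp setting shown above, here is a hedged sketch (not part of this commit) that connects to the hypothetical first node h1.taosdata.com:6030 through the JDBC driver; per the comment in the example configuration, the information of the whole cluster is obtained after that first access. The host name and port are assumptions taken from the example configuration.

```
import com.taosdata.jdbc.TSDBDriver;

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class ClusterConnectSketch {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        Properties props = new Properties();
        // h1.taosdata.com:6030 matches the firstEp in the example taos.cfg above.
        props.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "h1.taosdata.com");
        props.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        try (Connection conn = DriverManager.getConnection("jdbc:TAOS://h1.taosdata.com:6030/", props)) {
            System.out.println("[ OK ] connected through firstEp");
        }
    }
}
```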
@@ -10,8 +10,6 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Properties;
 
-import static org.junit.Assert.assertTrue;
 
 public class SubscribeTest extends BaseTest {
     Connection connection = null;
     Statement statement = null;
@@ -1348,7 +1348,7 @@ static int tsdbLoadBlockDataImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDa
   int dcol = 0;  // loop iter for SDataCols object
   while (dcol < pDataCols->numOfCols) {
     SDataCol *pDataCol = &(pDataCols->cols[dcol]);
-    if (ccol >= pCompData->numOfCols) {
+    if (dcol != 0 && ccol >= pCompData->numOfCols) {
       // Set current column as NULL and forward
       dataColSetNEleNull(pDataCol, pCompBlock->numOfRows, pDataCols->maxPoints);
       dcol++;
@@ -9,21 +9,20 @@
     <version>1.0-SNAPSHOT</version>
     <packaging>jar</packaging>
     <build>
-        <pluginManagement>
         <plugins>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-plugins</artifactId>
                 <version>30</version>
             </plugin>
 
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-assembly-plugin</artifactId>
                 <version>3.0.0</version>
             </plugin>
-        </plugins>
-        </pluginManagement>
-        <plugins>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-assembly-plugin</artifactId>
@@ -48,6 +47,7 @@
                 </execution>
             </executions>
         </plugin>
 
         <plugin>
             <groupId>org.apache.maven.plugins</groupId>
             <artifactId>maven-compiler-plugin</artifactId>
@@ -0,0 +1,174 @@
+package com.taosdata.example;
+
+import com.taosdata.jdbc.TSDBDriver;
+
+import java.sql.*;
+import java.util.Properties;
+
+public class JDBCConnectorChecker {
+    private static String host;
+    private static String dbName = "test";
+    private static String tbName = "weather";
+    private Connection connection;
+
+    /**
+     * get connection
+     **/
+    private void init() {
+        try {
+            Class.forName("com.taosdata.jdbc.TSDBDriver");
+            Properties properties = new Properties();
+            properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
+            properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+            properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+            properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+            System.out.println("get connection starting...");
+            connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
+            if (connection != null)
+                System.out.println("[ OK ] Connection established.");
+        } catch (ClassNotFoundException | SQLException e) {
+            throw new RuntimeException("connection failed: " + host);
+        }
+    }
+
+    /**
+     * create database
+     */
+    private void createDatabase() {
+        String sql = "create database if not exists " + dbName;
+        exuete(sql);
+    }
+
+    /**
+     * use database
+     */
+    private void useDatabase() {
+        String sql = "use " + dbName;
+        exuete(sql);
+    }
+
+    /**
+     * select
+     */
+    private void checkSelect() {
+        final String sql = "select * from test.weather";
+        executeQuery(sql);
+    }
+
+    private void executeQuery(String sql) {
+        try (Statement statement = connection.createStatement()) {
+            long start = System.currentTimeMillis();
+            ResultSet resultSet = statement.executeQuery(sql);
+            long end = System.currentTimeMillis();
+            printSql(sql, true, (end - start));
+            printResult(resultSet);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    private void printResult(ResultSet resultSet) throws SQLException {
+        ResultSetMetaData metaData = resultSet.getMetaData();
+        while (resultSet.next()) {
+            for (int i = 1; i <= metaData.getColumnCount(); i++) {
+                String columnLabel = metaData.getColumnLabel(i);
+                String value = resultSet.getString(i);
+                System.out.printf("%s: %s\t", columnLabel, value);
+            }
+            System.out.println();
+        }
+    }
+
+    private String formatString(String str) {
+        StringBuilder sb = new StringBuilder();
+        int blankCnt = (26 - str.length()) / 2;
+        for (int j = 0; j < blankCnt; j++)
+            sb.append(" ");
+        sb.append(str);
+        for (int j = 0; j < blankCnt; j++)
+            sb.append(" ");
+        sb.append("|");
+        return sb.toString();
+    }
+
+    /**
+     * insert
+     */
+    private void checkInsert() {
+        final String sql = "insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)";
+        exuete(sql);
+    }
+
+    /**
+     * create table
+     */
+    private void createTable() {
+        final String sql = "create table if not exists " + dbName + "." + tbName + " (ts timestamp, temperature float, humidity int)";
+        exuete(sql);
+    }
+
+    private final void printSql(String sql, boolean succeed, long cost) {
+        System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql);
+    }
+
+    private final void exuete(String sql) {
+        try (Statement statement = connection.createStatement()) {
+            long start = System.currentTimeMillis();
+            boolean execute = statement.execute(sql);
+            long end = System.currentTimeMillis();
+            printSql(sql, execute, (end - start));
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    private void close() {
+        try {
+            if (connection != null) {
+                this.connection.close();
+                System.out.println("connection closed.");
+            }
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    private void checkDropTable() {
+        final String sql = "drop table if exists " + dbName + "." + tbName + "";
+        exuete(sql);
+    }
+
+    public static void main(String[] args) {
+        for (int i = 0; i < args.length; i++) {
+            if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1) {
+                host = args[++i];
+            }
+            if ("-db".equalsIgnoreCase(args[i]) && i < args.length - 1) {
+                dbName = args[++i];
+            }
+            if ("-t".equalsIgnoreCase(args[i]) && i < args.length - 1) {
+                tbName = args[++i];
+            }
+        }
+
+        if (host == null) {
+            System.out.println("Usage: java -jar JDBCConnectorChecker.jar -host <hostname>");
+            return;
+        }
+
+        JDBCConnectorChecker checker = new JDBCConnectorChecker();
+        checker.init();
+        checker.createDatabase();
+        checker.useDatabase();
+        checker.checkDropTable();
+        checker.createTable();
+        checker.checkInsert();
+        checker.checkSelect();
+        checker.checkDropTable();
+        checker.close();
+    }
+}
@@ -1763,9 +1763,9 @@ class TaskCreateDb(StateTransitionTask):
         return state.canCreateDb()
 
     def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
+        # self.execWtSql(wt, "create database db replica {}".format(Dice.throw(3)+1))
         self.execWtSql(wt, "create database db")
 
 
 class TaskDropDb(StateTransitionTask):
     @classmethod
     def getEndState(cls):
@@ -1832,7 +1832,7 @@ class TdSuperTable:
         return dbc.query("SELECT * FROM db.{}".format(self._stName)) > 0
 
     def ensureTable(self, dbc: DbConn, regTableName: str):
-        sql = "select tbname from {} where tbname in ('{}')".format(self._stName, regTableName)
+        sql = "select tbname from db.{} where tbname in ('{}')".format(self._stName, regTableName)
         if dbc.query(sql) >= 1 : # reg table exists already
             return
         sql = "CREATE TABLE {} USING {} tags ({})".format(
@@ -1916,9 +1916,9 @@ class TaskReadData(StateTransitionTask):
             'max(speed)',
             'first(speed)',
             'last(speed)',
-            # 'top(speed)', # TODO: not supported?
-            # 'bottom(speed)', # TODO: not supported?
-            # 'percentile(speed, 10)', # TODO: TD-1316
+            'top(speed, 50)', # TODO: not supported?
+            'bottom(speed, 50)', # TODO: not supported?
+            'apercentile(speed, 10)', # TODO: TD-1316
             'last_row(speed)',
             # Transformation Functions
             # 'diff(speed)', # TODO: no supported?!
@@ -1928,7 +1928,9 @@ class TaskReadData(StateTransitionTask):
             None
         ])
         try:
+            # Run the query against the regular table first
             dbc.execute("select {} from db.{}".format(aggExpr, rTbName))
+            # Then run it against the super table
             if aggExpr not in ['stddev(speed)']: #TODO: STDDEV not valid for super tables?!
                 dbc.execute("select {} from db.{}".format(aggExpr, sTable.getName()))
         except taos.error.ProgrammingError as err:
@@ -2022,7 +2024,7 @@ class TaskRestartService(StateTransitionTask):
             return state.canDropFixedSuperTable() # Basicallly when we have the super table
         return False # don't run this otherwise
 
-    CHANCE_TO_RESTART_SERVICE = 100
+    CHANCE_TO_RESTART_SERVICE = 200
     def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
         if not gConfig.auto_start_service: # only execute when we are in -a mode
             print("_a", end="", flush=True)
@@ -185,6 +185,7 @@ python3 ./test.py -f functions/function_stddev.py
 python3 ./test.py -f functions/function_sum.py
 python3 ./test.py -f functions/function_top.py
 #python3 ./test.py -f functions/function_twa.py
+python3 queryCount.py
 
 # tools
 python3 test.py -f tools/taosdemo.py
@@ -42,6 +42,8 @@ class TDTestCase:
         # join 3 tables -- bug exists
         tdSql.error("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
 
+        tdSql.error("select * from stb1 whern c1 > 'test' limit 100")
+
         # query show stable
         tdSql.query("show stables")
         tdSql.checkRows(1)
@@ -42,6 +42,9 @@ class TDTestCase:
         tdSql.prepare()
 
         for i in range(len(self.types)):
+            tdSql.execute("drop table if exists t0")
+            tdSql.execute("drop table if exists t1")
+
             print("======== checking type %s ==========" % self.types[i])
             tdSql.execute("create table t0 (ts timestamp, col %s)" % self.types[i])
             tdSql.execute("insert into t0 values (%d, NULL)" % (self.ts))
@@ -0,0 +1,91 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+import threading
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class QueryCountMultiThread:
+    def initConnection(self):
+        self.records = 10000000
+        self.numOfTherads = 50
+        self.ts = 1537146000000
+        self.host = "127.0.0.1"
+        self.user = "root"
+        self.password = "taosdata"
+        self.config = "/home/xp/git/TDengine/sim/dnode1/cfg"
+        self.conn = taos.connect(
+            self.host,
+            self.user,
+            self.password,
+            self.config)
+
+    def insertData(self, threadID):
+        cursor = self.conn.cursor()
+        print("Thread %d: starting" % threadID)
+        base = 200000 * threadID
+        for i in range(200):
+            query = "insert into tb values"
+            for j in range(1000):
+                query += "(%d, %d, 'test')" % (self.ts + base + i * 1000 + j, base + i * 1000 + j)
+            cursor.execute(query)
+        cursor.close()
+        print("Thread %d: finishing" % threadID)
+
+    def run(self):
+        tdDnodes.init("")
+        tdDnodes.setTestCluster(False)
+        tdDnodes.setValgrind(False)
+
+        tdDnodes.stopAll()
+        tdDnodes.deploy(1)
+        tdDnodes.start(1)
+
+        cursor = self.conn.cursor()
+        cursor.execute("drop database if exists db")
+        cursor.execute("create database db")
+        cursor.execute("use db")
+        cursor.execute("create table tb (ts timestamp, id int, name nchar(30))")
+        cursor.close()
+
+        threads = []
+        for i in range(50):
+            thread = threading.Thread(target=self.insertData, args=(i,))
+            threads.append(thread)
+            thread.start()
+
+        for i in range(50):
+            threads[i].join()
+
+        cursor = self.conn.cursor()
+        cursor.execute("use db")
+        sql = "select count(*) from tb"
+        cursor.execute(sql)
+        data = cursor.fetchall()
+
+        if(data[0][0] == 10000000):
+            tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%d" % (sql, 0, 0, data[0][0], 10000000))
+        else:
+            tdLog.exit("queryCount.py failed: sql:%s failed, row:%d col:%d data:%d != expect:%d" % (sql, 0, 0, data[0][0], 10000000))
+
+        cursor.close()
+        self.conn.close()
+
+q = QueryCountMultiThread()
+q.initConnection()
+q.run()